[PATCH 1/1] rcu: fix implicit conversion in bit shift
Andre Muezerie
andremue at linux.microsoft.com
Mon Dec 2 18:46:01 CET 2024
[ upstream commit ffe827f38e6e0be8a307d7ef9c0e1347874f0af7 ]
../lib/rcu/rte_rcu_qsbr.c(101): warning C4334: '<<': result of 32-bit
shift implicitly converted to 64 bits (was 64-bit shift intended?)
../lib/rcu/rte_rcu_qsbr.c(107): warning C4334: '<<': result of 32-bit
shift implicitly converted to 64 bits (was 64-bit shift intended?)
../lib/rcu/rte_rcu_qsbr.c(145): warning C4334: '<<': result of 32-bit
shift implicitly converted to 64 bits (was 64-bit shift intended?)
These warnings are issued by the MSVC compiler. Since the result is
stored in a variable of type uint64_t, it makes sense to shift a 64-bit
number instead of shifting a 32-bit number and then having the compiler
convert the result implicitly to 64 bits.
The fix uses RTE_BIT64, which expands to a UINT64_C constant. UINT64_C is
the portable way to define a 64-bit constant (the ULL suffix is
architecture dependent).
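As a minimal sketch of the pattern MSVC warns about (not code from this
patch; the function names are illustrative only), the shift below is
evaluated in the width of the left operand and only then widened to 64 bits:

    #include <stdint.h>

    uint64_t
    bit_for(unsigned int id)
    {
        /* On LLP64 targets (e.g. Windows) unsigned long is 32 bits, so the
         * shift is done in 32 bits and the result is implicitly converted
         * to 64 bits -> MSVC C4334, and a wrong bit for id >= 32.
         */
        return 1UL << id;
    }

    uint64_t
    bit_for_fixed(unsigned int id)
    {
        /* Shift a 64-bit constant instead; RTE_BIT64(id) from rte_bitops.h
         * expands to essentially this expression.
         */
        return UINT64_C(1) << id;
    }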
From reading the code this is also a bugfix:
(1UL << id), where id = thread_id & 0x3f, produced the wrong bit whenever
id > 0x1f on platforms where unsigned long is 32 bits.
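A small standalone example of the failure mode (assuming an LLP64 target
such as Windows, where unsigned long is 32 bits; the values are made up for
illustration):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        unsigned int thread_id = 37;            /* > 0x1f */
        unsigned int id = thread_id & 0x3f;     /* 37 */

        /* 32-bit shift by 37: undefined behaviour, typically a wrong bit */
        uint64_t bad  = 1UL << id;
        /* 64-bit shift, sets bit 37 as intended */
        uint64_t good = UINT64_C(1) << id;

        printf("bad=%#llx good=%#llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }

On LP64 targets (64-bit Linux) unsigned long is already 64 bits, so the two
expressions agree there and the problem only shows up where it is 32 bits.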
Signed-off-by: Andre Muezerie <andremue at linux.microsoft.com>
---
lib/rcu/rte_rcu_qsbr.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/lib/rcu/rte_rcu_qsbr.c b/lib/rcu/rte_rcu_qsbr.c
index 7510db2f81..3f05ba0949 100644
--- a/lib/rcu/rte_rcu_qsbr.c
+++ b/lib/rcu/rte_rcu_qsbr.c
@@ -9,6 +9,7 @@
#include <inttypes.h>
#include <errno.h>
+#include <rte_bitops.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
@@ -108,11 +109,11 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
/* Check if the thread is already registered */
old_bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
__ATOMIC_RELAXED);
- if (old_bmap & 1UL << id)
+ if (old_bmap & RTE_BIT64(id))
return 0;
do {
- new_bmap = old_bmap | (1UL << id);
+ new_bmap = old_bmap | RTE_BIT64(id);
success = __atomic_compare_exchange(
__RTE_QSBR_THRID_ARRAY_ELM(v, i),
&old_bmap, &new_bmap, 0,
@@ -121,7 +122,7 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
if (success)
__atomic_fetch_add(&v->num_threads,
1, __ATOMIC_RELAXED);
- else if (old_bmap & (1UL << id))
+ else if (old_bmap & RTE_BIT64(id))
/* Someone else registered this thread.
* Counter should not be incremented.
*/
@@ -160,11 +161,11 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
/* Check if the thread is already unregistered */
old_bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
__ATOMIC_RELAXED);
- if (!(old_bmap & (1UL << id)))
+ if (!(old_bmap & RTE_BIT64(id)))
return 0;
do {
- new_bmap = old_bmap & ~(1UL << id);
+ new_bmap = old_bmap & ~RTE_BIT64(id);
/* Make sure any loads of the shared data structure are
* completed before removal of the thread from the list of
* reporting threads.
@@ -177,7 +178,7 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
if (success)
__atomic_fetch_sub(&v->num_threads,
1, __ATOMIC_RELAXED);
- else if (!(old_bmap & (1UL << id)))
+ else if (!(old_bmap & RTE_BIT64(id)))
/* Someone else unregistered this thread.
* Counter should not be incremented.
*/
@@ -238,7 +239,7 @@ rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v)
t = __builtin_ctzl(bmap);
fprintf(f, "%u ", id + t);
- bmap &= ~(1UL << t);
+ bmap &= ~RTE_BIT64(t);
}
}
@@ -265,7 +266,7 @@ rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v)
__atomic_load_n(
&v->qsbr_cnt[id + t].lock_cnt,
__ATOMIC_RELAXED));
- bmap &= ~(1UL << t);
+ bmap &= ~RTE_BIT64(t);
}
}
--
2.47.0.vfs.0.3