[PATCH 23.11] rcu: fix implicit conversion in bit shift

Andre Muezerie andremue at linux.microsoft.com
Mon Dec 9 16:56:38 CET 2024


[ upstream commit ffe827f38e6e0be8a307d7ef9c0e1347874f0af7 ]

../lib/rcu/rte_rcu_qsbr.c(101): warning C4334: '<<': result of 32-bit
 shift implicitly converted to 64 bits (was 64-bit shift intended?)
../lib/rcu/rte_rcu_qsbr.c(107): warning C4334: '<<': result of 32-bit
 shift implicitly converted to 64 bits (was 64-bit shift intended?)
../lib/rcu/rte_rcu_qsbr.c(145): warning C4334: '<<': result of 32-bit
 shift implicitly converted to 64 bits (was 64-bit shift intended?)

These warnings are issued by the MSVC compiler. Since the result is
stored in a variable of type uint64_t, it makes sense to shift a
64-bit number instead of shifting a 32-bit number and having the
compiler convert the result implicitly to 64 bits. The fix uses
RTE_BIT64, which shifts the 64-bit constant UINT64_C(1); UINT64_C is
the portable way to define a 64-bit constant (the ULL suffix is
architecture dependent).

From reading the code this is also a bugfix:
(1UL << id), where id = thread_id & 0x3f, was wrong whenever
id > 0x1f on platforms where unsigned long is 32 bits, since the
shift count then exceeds the width of the 32-bit constant.
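A short self-contained sketch of why the id > 0x1f range matters
(values are hypothetical; RTE_BIT64 assumed defined as above):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RTE_BIT64(nr) (UINT64_C(1) << (nr))

    int main(void)
    {
            unsigned int thread_id = 0x25;      /* any id in 0x20..0x3f */
            unsigned int id = thread_id & 0x3f; /* bit within one 64-bit word */

            /* With a 32-bit unsigned long, 1UL << 0x25 is undefined;
             * on x86 the shift count is typically masked to 5 bits,
             * so bit 5 would be set or tested instead of bit 37.
             */
            printf("0x%016" PRIx64 "\n", RTE_BIT64(id));
            return 0;
    }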

Signed-off-by: Andre Muezerie <andremue at linux.microsoft.com>
---
 lib/rcu/rte_rcu_qsbr.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/lib/rcu/rte_rcu_qsbr.c b/lib/rcu/rte_rcu_qsbr.c
index 41a44be4b9..e46ce7958e 100644
--- a/lib/rcu/rte_rcu_qsbr.c
+++ b/lib/rcu/rte_rcu_qsbr.c
@@ -104,11 +104,11 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
 	/* Check if the thread is already registered */
 	old_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
 					rte_memory_order_relaxed);
-	if (old_bmap & 1UL << id)
+	if (old_bmap & RTE_BIT64(id))
 		return 0;
 
 	do {
-		new_bmap = old_bmap | (1UL << id);
+		new_bmap = old_bmap | RTE_BIT64(id);
 		success = rte_atomic_compare_exchange_strong_explicit(
 					__RTE_QSBR_THRID_ARRAY_ELM(v, i),
 					&old_bmap, new_bmap,
@@ -117,7 +117,7 @@ rte_rcu_qsbr_thread_register(struct rte_rcu_qsbr *v, unsigned int thread_id)
 		if (success)
 			rte_atomic_fetch_add_explicit(&v->num_threads,
 						1, rte_memory_order_relaxed);
-		else if (old_bmap & (1UL << id))
+		else if (old_bmap & RTE_BIT64(id))
 			/* Someone else registered this thread.
 			 * Counter should not be incremented.
 			 */
@@ -156,11 +156,11 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
 	/* Check if the thread is already unregistered */
 	old_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i),
 					rte_memory_order_relaxed);
-	if (!(old_bmap & (1UL << id)))
+	if (!(old_bmap & RTE_BIT64(id)))
 		return 0;
 
 	do {
-		new_bmap = old_bmap & ~(1UL << id);
+		new_bmap = old_bmap & ~RTE_BIT64(id);
 		/* Make sure any loads of the shared data structure are
 		 * completed before removal of the thread from the list of
 		 * reporting threads.
@@ -173,7 +173,7 @@ rte_rcu_qsbr_thread_unregister(struct rte_rcu_qsbr *v, unsigned int thread_id)
 		if (success)
 			rte_atomic_fetch_sub_explicit(&v->num_threads,
 						1, rte_memory_order_relaxed);
-		else if (!(old_bmap & (1UL << id)))
+		else if (!(old_bmap & RTE_BIT64(id)))
 			/* Someone else unregistered this thread.
 			 * Counter should not be incremented.
 			 */
@@ -234,7 +234,7 @@ rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v)
 			t = rte_ctz64(bmap);
 			fprintf(f, "%u ", id + t);
 
-			bmap &= ~(1UL << t);
+			bmap &= ~RTE_BIT64(t);
 		}
 	}
 
@@ -261,7 +261,7 @@ rte_rcu_qsbr_dump(FILE *f, struct rte_rcu_qsbr *v)
 				rte_atomic_load_explicit(
 					&v->qsbr_cnt[id + t].lock_cnt,
 					rte_memory_order_relaxed));
-			bmap &= ~(1UL << t);
+			bmap &= ~RTE_BIT64(t);
 		}
 	}
 
-- 
2.47.0.vfs.0.3
