[dpdk-dev] [PATCH v2 05/15] test/hash: rectify slaveid to point to valid cores

David Marchand david.marchand at redhat.com
Sat Jun 15 08:42:20 CEST 2019


From: Dharmik Thakkar <dharmik.thakkar at arm.com>

This patch rectifies slave_id so that it refers to valid enabled lcore ids
(taken from enabled_core_ids[]) rather than core ranks in the read-write
lock-free concurrency test.
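
For context, here is a minimal sketch (not part of the patch) of the failure
mode, assuming EAL is started with a non-contiguous core list such as
-l 0,2,4: the enabled lcores are then 0, 2 and 4, so waiting on the loop rank
itself (1, 2, ...) can target a core that was never launched, whereas indexing
through enabled_core_ids[] resolves each rank to a real enabled lcore id
(wait_for_readers is an illustrative helper, not a function from the test):

#include <stdint.h>

#include <rte_launch.h>
#include <rte_lcore.h>

/* Filled at test setup with the ids of the enabled lcores;
 * reader threads are launched on indexes 1 and up. */
static uint16_t enabled_core_ids[RTE_MAX_LCORE];

static int
wait_for_readers(uint32_t reader_cnt)
{
	uint32_t i;

	for (i = 1; i <= reader_cnt; i++)
		/* Before the fix this was rte_eal_wait_lcore(i):
		 * 'i' is only a rank, not necessarily an enabled
		 * lcore id. */
		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
			return -1;
	return 0;
}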

It also replaces a 'for' loop with the RTE_LCORE_FOREACH API, as sketched
below.
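
Continuing the same sketch, the helper that fills enabled_core_ids[] no longer
needs to scan 0..RTE_MAX_LCORE with rte_lcore_is_enabled();
RTE_LCORE_FOREACH already iterates only over the enabled lcores, in increasing
id order (again, the helper name is illustrative, not copied from the test):

static int
get_enabled_cores(void)
{
	uint32_t i = 0;
	uint16_t core_id;
	uint32_t max_cores = rte_lcore_count();

	/* Visits each enabled lcore exactly once. */
	RTE_LCORE_FOREACH(core_id) {
		enabled_core_ids[i] = core_id;
		i++;
	}

	/* Sanity check against the EAL's own count of enabled lcores. */
	if (i != max_cores)
		return -1;
	return 0;
}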

Fixes: c7eb0972e74b ("test/hash: add lock-free r/w concurrency")
Cc: stable at dpdk.org

Signed-off-by: Dharmik Thakkar <dharmik.thakkar at arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang at arm.com>
Signed-off-by: David Marchand <david.marchand at redhat.com>
Acked-by: Yipeng Wang <yipeng1.wang at intel.com>
---
 app/test/test_hash_readwrite_lf.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/app/test/test_hash_readwrite_lf.c b/app/test/test_hash_readwrite_lf.c
index f9f233a..5644361 100644
--- a/app/test/test_hash_readwrite_lf.c
+++ b/app/test/test_hash_readwrite_lf.c
@@ -126,11 +126,9 @@ struct {
 	uint32_t i = 0;
 	uint16_t core_id;
 	uint32_t max_cores = rte_lcore_count();
-	for (core_id = 0; core_id < RTE_MAX_LCORE && i < max_cores; core_id++) {
-		if (rte_lcore_is_enabled(core_id)) {
-			enabled_core_ids[i] = core_id;
-			i++;
-		}
+	RTE_LCORE_FOREACH(core_id) {
+		enabled_core_ids[i] = core_id;
+		i++;
 	}
 
 	if (i != max_cores) {
@@ -738,7 +736,7 @@ struct {
 							enabled_core_ids[i]);
 
 			for (i = 1; i <= rwc_core_cnt[n]; i++)
-				if (rte_eal_wait_lcore(i) < 0)
+				if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
 					goto err;
 
 			unsigned long long cycles_per_lookup =
@@ -810,7 +808,7 @@ struct {
 			if (ret < 0)
 				goto err;
 			for (i = 1; i <= rwc_core_cnt[n]; i++)
-				if (rte_eal_wait_lcore(i) < 0)
+				if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
 					goto err;
 
 			unsigned long long cycles_per_lookup =
@@ -886,7 +884,7 @@ struct {
 			if (ret < 0)
 				goto err;
 			for (i = 1; i <= rwc_core_cnt[n]; i++)
-				if (rte_eal_wait_lcore(i) < 0)
+				if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
 					goto err;
 
 			unsigned long long cycles_per_lookup =
@@ -962,7 +960,7 @@ struct {
 			if (ret < 0)
 				goto err;
 			for (i = 1; i <= rwc_core_cnt[n]; i++)
-				if (rte_eal_wait_lcore(i) < 0)
+				if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
 					goto err;
 
 			unsigned long long cycles_per_lookup =
@@ -1037,7 +1035,7 @@ struct {
 			if (ret < 0)
 				goto err;
 			for (i = 1; i <= rwc_core_cnt[n]; i++)
-				if (rte_eal_wait_lcore(i) < 0)
+				if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
 					goto err;
 
 			unsigned long long cycles_per_lookup =
@@ -1132,12 +1130,12 @@ struct {
 				for (i = rwc_core_cnt[n] + 1;
 				     i <= rwc_core_cnt[m] + rwc_core_cnt[n];
 				     i++)
-					rte_eal_wait_lcore(i);
+					rte_eal_wait_lcore(enabled_core_ids[i]);
 
 				writer_done = 1;
 
 				for (i = 1; i <= rwc_core_cnt[n]; i++)
-					if (rte_eal_wait_lcore(i) < 0)
+					if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
 						goto err;
 
 				unsigned long long cycles_per_lookup =
@@ -1221,7 +1219,7 @@ struct {
 			writer_done = 1;
 
 			for (i = 1; i <= rwc_core_cnt[n]; i++)
-				if (rte_eal_wait_lcore(i) < 0)
+				if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
 					goto err;
 
 			unsigned long long cycles_per_lookup =
-- 
1.8.3.1


