[dpdk-dev] [PATCH v5 08/18] app/test: replace uses of master/slave

Stephen Hemminger stephen at networkplumber.org
Tue Oct 13 17:26:00 CEST 2020


Replace "master lcore" with "main lcore" and "slave lcore" with "worker
lcore" throughout the unit tests. The replacement was mostly automatic;
quoted literary text in test data is left untouched.

Acked-by: Anatoly Burakov <anatoly.burakov at intel.com>
Signed-off-by: Stephen Hemminger <stephen at networkplumber.org>
---
 app/test/autotest_test_funcs.py         |   2 +-
 app/test/meson.build                    |   2 +-
 app/test/test.c                         |   2 +-
 app/test/test_atomic.c                  |  26 +++---
 app/test/test_barrier.c                 |   2 +-
 app/test/test_cryptodev.c               |  16 ++--
 app/test/test_distributor.c             |   8 +-
 app/test/test_distributor_perf.c        |  10 +--
 app/test/test_eal_flags.c               |  32 +++----
 app/test/test_efd.c                     |   2 +-
 app/test/test_efd_perf.c                |   2 +-
 app/test/test_func_reentrancy.c         |  20 ++---
 app/test/test_hash_multiwriter.c        |   4 +-
 app/test/test_hash_readwrite.c          |  39 +++++----
 app/test/test_kni.c                     |  16 ++--
 app/test/test_lpm_perf.c                |   4 +-
 app/test/test_malloc.c                  |  12 +--
 app/test/test_mbuf.c                    |  41 +++++----
 app/test/test_mcslock.c                 |  28 +++---
 app/test/test_mempool_perf.c            |  10 +--
 app/test/test_mp_secondary.c            |   2 +-
 app/test/test_pdump.c                   |   2 +-
 app/test/test_per_lcore.c               |  14 +--
 app/test/test_pmd_perf.c                |  20 ++---
 app/test/test_rcu_qsbr.c                |   2 +-
 app/test/test_rcu_qsbr_perf.c           |   2 +-
 app/test/test_ring_perf.c               |  14 +--
 app/test/test_ring_stress_impl.h        |  10 +--
 app/test/test_rwlock.c                  |  28 +++---
 app/test/test_service_cores.c           |  14 +--
 app/test/test_spinlock.c                |  34 ++++----
 app/test/test_stack.c                   |   2 +-
 app/test/test_stack_perf.c              |   6 +-
 app/test/test_ticketlock.c              |  36 ++++----
 app/test/test_timer.c                   | 109 ++++++++++++------------
 app/test/test_timer_racecond.c          |  28 +++---
 app/test/test_timer_secondary.c         |   8 +-
 app/test/test_trace_perf.c              |   4 +-
 38 files changed, 306 insertions(+), 307 deletions(-)
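
For reference, the renamed launch API that every test below exercises
works as follows; a minimal sketch (the function name worker_fn is
illustrative):

    #include <stdio.h>
    #include <rte_eal.h>
    #include <rte_launch.h>
    #include <rte_lcore.h>

    static int
    worker_fn(void *arg)    /* lcore_function_t, runs once per worker lcore */
    {
        (void)arg;
        printf("hello from lcore %u\n", rte_lcore_id());
        return 0;
    }

    int
    main(int argc, char **argv)
    {
        if (rte_eal_init(argc, argv) < 0)
            return 1;
        /* SKIP_MAIN (was SKIP_MASTER) runs worker_fn on workers only;
         * CALL_MAIN (was CALL_MASTER) also runs it on the main lcore. */
        rte_eal_mp_remote_launch(worker_fn, NULL, SKIP_MAIN);
        rte_eal_mp_wait_lcore();    /* join all worker lcores */
        return 0;
    }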

diff --git a/app/test/autotest_test_funcs.py b/app/test/autotest_test_funcs.py
index 775dfd1dc5d9..0811066cb054 100644
--- a/app/test/autotest_test_funcs.py
+++ b/app/test/autotest_test_funcs.py
@@ -103,7 +103,7 @@ def rwlock_autotest(child, test_name):
         index = child.expect(["Test OK",
                               "Test Failed",
                               "Hello from core ([0-9]*) !",
-                              "Global write lock taken on master "
+                              "Global write lock taken on main "
                               "core ([0-9]*)",
                               pexpect.TIMEOUT], timeout=10)
         # ok
diff --git a/app/test/meson.build b/app/test/meson.build
index dedf29dd7f26..6e07302d8fdc 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -189,7 +189,7 @@ fast_tests = [
         ['cycles_autotest', true],
         ['debug_autotest', true],
         ['eal_flags_c_opt_autotest', false],
-        ['eal_flags_master_opt_autotest', false],
+        ['eal_flags_main_opt_autotest', false],
         ['eal_flags_n_opt_autotest', false],
         ['eal_flags_hpet_autotest', false],
         ['eal_flags_no_huge_autotest', false],
diff --git a/app/test/test.c b/app/test/test.c
index 94d26ab1f67c..3b3d172c91fa 100644
--- a/app/test/test.c
+++ b/app/test/test.c
@@ -58,7 +58,7 @@ do_recursive_call(void)
 #endif
 #endif
 			{ "test_missing_c_flag", no_action },
-			{ "test_master_lcore_flag", no_action },
+			{ "test_main_lcore_flag", no_action },
 			{ "test_invalid_n_flag", no_action },
 			{ "test_no_hpet_flag", no_action },
 			{ "test_whitelist_flag", no_action },
diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c
index 214452e54399..f10f555af8b4 100644
--- a/app/test/test_atomic.c
+++ b/app/test/test_atomic.c
@@ -456,7 +456,7 @@ test_atomic(void)
 
 	printf("usual inc/dec/add/sub functions\n");
 
-	rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MASTER);
+	rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MAIN);
 	rte_atomic32_set(&synchro, 1);
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_set(&synchro, 0);
@@ -482,7 +482,7 @@ test_atomic(void)
 	rte_atomic32_set(&a32, 0);
 	rte_atomic16_set(&a16, 0);
 	rte_atomic64_set(&count, 0);
-	rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER);
+	rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MAIN);
 	rte_atomic32_set(&synchro, 1);
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_set(&synchro, 0);
@@ -499,7 +499,7 @@ test_atomic(void)
 	rte_atomic16_set(&a16, 0);
 	rte_atomic64_set(&count, 0);
 	rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL,
-				 SKIP_MASTER);
+				 SKIP_MAIN);
 	rte_atomic32_set(&synchro, 1);
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_set(&synchro, 0);
@@ -510,8 +510,8 @@ test_atomic(void)
 	}
 
 	/*
-	 * Set a64, a32 and a16 with the same value of minus "number of slave
-	 * lcores", launch all slave lcores to atomically increase by one and
+	 * Set a64, a32 and a16 with the same value of minus "number of worker
+	 * lcores", launch all worker lcores to atomically increase by one and
 	 * test them respectively.
 	 * Each lcore should have only one chance to increase a64 by one and
 	 * then check if it is equal to 0, but there should be only one lcore
@@ -519,7 +519,7 @@ test_atomic(void)
 	 * Then a variable of "count", initialized to zero, is increased by
 	 * one if a64, a32 or a16 is 0 after being increased and tested
 	 * atomically.
-	 * We can check if "count" is finally equal to 3 to see if all slave
+	 * We can check if "count" is finally equal to 3 to see if all worker
 	 * lcores performed "atomic inc and test" right.
 	 */
 	printf("inc and test\n");
@@ -533,7 +533,7 @@ test_atomic(void)
 	rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count()));
 	rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count()));
 	rte_atomic16_set(&a16, (int16_t)(1 - (int16_t)rte_lcore_count()));
-	rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MASTER);
+	rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MAIN);
 	rte_atomic32_set(&synchro, 1);
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_clear(&synchro);
@@ -544,7 +544,7 @@ test_atomic(void)
 	}
 
 	/*
-	 * Same as above, but this time we set the values to "number of slave
+	 * Same as above, but this time we set the values to "number of worker
 	 * lcores", and decrement instead of increment.
 	 */
 	printf("dec and test\n");
@@ -555,7 +555,7 @@ test_atomic(void)
 	rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1));
 	rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1));
 	rte_atomic16_set(&a16, (int16_t)(rte_lcore_count() - 1));
-	rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MASTER);
+	rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MAIN);
 	rte_atomic32_set(&synchro, 1);
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_clear(&synchro);
@@ -569,10 +569,10 @@ test_atomic(void)
 	/*
 	 * This case tests the functionality of rte_atomic128_cmp_exchange
 	 * API. It calls rte_atomic128_cmp_exchange with four kinds of memory
-	 * models successively on each slave core. Once each 128-bit atomic
+	 * models successively on each worker core. Once each 128-bit atomic
 	 * compare and swap operation is successful, it updates the global
 	 * 128-bit counter by 2 for the first 64-bit and 1 for the second
-	 * 64-bit. Each slave core iterates this test N times.
+	 * 64-bit. Each worker core iterates this test N times.
 	 * At the end of test, verify whether the first 64-bits of the 128-bit
 	 * counter and the second 64bits is differ by the total iterations. If
 	 * it is, the test passes.
@@ -585,7 +585,7 @@ test_atomic(void)
 	count128.val[1] = 0;
 
 	rte_eal_mp_remote_launch(test_atomic128_cmp_exchange, NULL,
-				 SKIP_MASTER);
+				 SKIP_MAIN);
 	rte_atomic32_set(&synchro, 1);
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_clear(&synchro);
@@ -619,7 +619,7 @@ test_atomic(void)
 	token64 = ((uint64_t)get_crc8(&t.u8[0], sizeof(token64) - 1) << 56)
 		| (t.u64 & 0x00ffffffffffffff);
 
-	rte_eal_mp_remote_launch(test_atomic_exchange, NULL, SKIP_MASTER);
+	rte_eal_mp_remote_launch(test_atomic_exchange, NULL, SKIP_MAIN);
 	rte_atomic32_set(&synchro, 1);
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_clear(&synchro);
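
The synchro flag used above is the release gate for all of these cases:
workers are launched first, then spin until the main lcore flips the
flag, so every lcore enters the measured section at the same time. A
minimal sketch of the handshake (names follow the test):

    #include <rte_atomic.h>
    #include <rte_launch.h>
    #include <rte_pause.h>

    static rte_atomic32_t synchro;

    static int
    test_fn(void *arg)              /* launched on each worker lcore */
    {
        (void)arg;
        while (rte_atomic32_read(&synchro) == 0)
            rte_pause();            /* parked until main releases us */
        /* ... the contended atomic operations under test ... */
        return 0;
    }

    static void
    run_one_case(void)              /* called on the main lcore */
    {
        rte_atomic32_set(&synchro, 0);
        rte_eal_mp_remote_launch(test_fn, NULL, SKIP_MAIN);
        rte_atomic32_set(&synchro, 1);  /* release all workers at once */
        rte_eal_mp_wait_lcore();
        rte_atomic32_clear(&synchro);
    }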
diff --git a/app/test/test_barrier.c b/app/test/test_barrier.c
index 43b5f6232c6d..c27f8a0742f2 100644
--- a/app/test/test_barrier.c
+++ b/app/test/test_barrier.c
@@ -236,7 +236,7 @@ plock_test(uint64_t iter, enum plock_use_type utype)
 
 	/* test phase - start and wait for completion on each active lcore */
 
-	rte_eal_mp_remote_launch(plock_test1_lcore, lpt, CALL_MASTER);
+	rte_eal_mp_remote_launch(plock_test1_lcore, lpt, CALL_MAIN);
 	rte_eal_mp_wait_lcore();
 
 	/* validation phase - make sure that shared and local data match */
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 153d00119491..61de6a783ac5 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -479,29 +479,29 @@ testsuite_setup(void)
 	char vdev_args[VDEV_ARGS_SIZE] = {""};
 	char temp_str[VDEV_ARGS_SIZE] = {"mode=multi-core,"
 		"ordering=enable,name=cryptodev_test_scheduler,corelist="};
-	uint16_t slave_core_count = 0;
+	uint16_t worker_core_count = 0;
 	uint16_t socket_id = 0;
 
 	if (gbl_driver_id == rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
 
-		/* Identify the Slave Cores
-		 * Use 2 slave cores for the device args
+		/* Identify the Worker Cores
+		 * Use 2 worker cores for the device args
 		 */
-		RTE_LCORE_FOREACH_SLAVE(i) {
-			if (slave_core_count > 1)
+		RTE_LCORE_FOREACH_WORKER(i) {
+			if (worker_core_count > 1)
 				break;
 			snprintf(vdev_args, sizeof(vdev_args),
 					"%s%d", temp_str, i);
 			strcpy(temp_str, vdev_args);
 			strlcat(temp_str, ";", sizeof(temp_str));
-			slave_core_count++;
+			worker_core_count++;
 			socket_id = rte_lcore_to_socket_id(i);
 		}
-		if (slave_core_count != 2) {
+		if (worker_core_count != 2) {
 			RTE_LOG(ERR, USER1,
 				"Cryptodev scheduler test require at least "
-				"two slave cores to run. "
+				"two worker cores to run. "
 				"Please use the correct coremask.\n");
 			return TEST_FAILED;
 		}
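
What the loop above builds is a corelist of the first two worker lcores
for the scheduler vdev; a simplified sketch of the same construction
(buffer handling condensed, separator and PMD name as in the test):

    #include <stdio.h>
    #include <rte_lcore.h>

    /* write "mode=...,corelist=<w1>;<w2>" into buf;
     * returns 0 on success, -1 if fewer than two worker lcores exist */
    static int
    build_sched_vdev_args(char *buf, size_t len)
    {
        unsigned int i, workers = 0;
        int n = snprintf(buf, len, "mode=multi-core,ordering=enable,"
                         "name=cryptodev_test_scheduler,corelist=");

        RTE_LCORE_FOREACH_WORKER(i) {   /* was RTE_LCORE_FOREACH_SLAVE */
            if (workers == 2)
                break;
            n += snprintf(buf + n, len - n, "%s%u",
                          workers ? ";" : "", i);
            workers++;
        }
        return workers == 2 ? 0 : -1;
    }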
diff --git a/app/test/test_distributor.c b/app/test/test_distributor.c
index ba1f81cf8d19..19aa9f90e501 100644
--- a/app/test/test_distributor.c
+++ b/app/test/test_distributor.c
@@ -654,13 +654,13 @@ test_distributor(void)
 					sizeof(worker_params.name));
 
 		rte_eal_mp_remote_launch(handle_work,
-				&worker_params, SKIP_MASTER);
+				&worker_params, SKIP_MAIN);
 		if (sanity_test(&worker_params, p) < 0)
 			goto err;
 		quit_workers(&worker_params, p);
 
 		rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
-				&worker_params, SKIP_MASTER);
+				&worker_params, SKIP_MAIN);
 		if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
 			goto err;
 		quit_workers(&worker_params, p);
@@ -668,7 +668,7 @@ test_distributor(void)
 		if (rte_lcore_count() > 2) {
 			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
 					&worker_params,
-					SKIP_MASTER);
+					SKIP_MAIN);
 			if (sanity_test_with_worker_shutdown(&worker_params,
 					p) < 0)
 				goto err;
@@ -676,7 +676,7 @@ test_distributor(void)
 
 			rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
 					&worker_params,
-					SKIP_MASTER);
+					SKIP_MAIN);
 			if (test_flush_with_worker_shutdown(&worker_params,
 					p) < 0)
 				goto err;
diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c
index f153bcf9bd87..b25f79a3486c 100644
--- a/app/test/test_distributor_perf.c
+++ b/app/test/test_distributor_perf.c
@@ -54,10 +54,10 @@ time_cache_line_switch(void)
 	/* allocate a full cache line for data, we use only first byte of it */
 	uint64_t data[RTE_CACHE_LINE_SIZE*3 / sizeof(uint64_t)];
 
-	unsigned i, slaveid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
+	unsigned int i, workerid = rte_get_next_lcore(rte_lcore_id(), 0, 0);
 	volatile uint64_t *pdata = &data[0];
 	*pdata = 1;
-	rte_eal_remote_launch((lcore_function_t *)flip_bit, &data[0], slaveid);
+	rte_eal_remote_launch((lcore_function_t *)flip_bit, &data[0], workerid);
 	while (*pdata)
 		rte_pause();
 
@@ -72,7 +72,7 @@ time_cache_line_switch(void)
 	while (*pdata)
 		rte_pause();
 	*pdata = 2;
-	rte_eal_wait_lcore(slaveid);
+	rte_eal_wait_lcore(workerid);
 	printf("==== Cache line switch test ===\n");
 	printf("Time for %u iterations = %"PRIu64" ticks\n", (1<<ITER_POWER_CL),
 			end_time-start_time);
@@ -251,13 +251,13 @@ test_distributor_perf(void)
 	}
 
 	printf("=== Performance test of distributor (single mode) ===\n");
-	rte_eal_mp_remote_launch(handle_work, ds, SKIP_MASTER);
+	rte_eal_mp_remote_launch(handle_work, ds, SKIP_MAIN);
 	if (perf_test(ds, p) < 0)
 		return -1;
 	quit_workers(ds, p);
 
 	printf("=== Performance test of distributor (burst mode) ===\n");
-	rte_eal_mp_remote_launch(handle_work, db, SKIP_MASTER);
+	rte_eal_mp_remote_launch(handle_work, db, SKIP_MAIN);
 	if (perf_test(db, p) < 0)
 		return -1;
 	quit_workers(db, p);
diff --git a/app/test/test_eal_flags.c b/app/test/test_eal_flags.c
index b019656b29c3..a12836d46d61 100644
--- a/app/test/test_eal_flags.c
+++ b/app/test/test_eal_flags.c
@@ -599,10 +599,10 @@ test_missing_c_flag(void)
 }
 
 /*
- * Test --master-lcore option with matching coremask
+ * Test --main-lcore option with matching coremask
  */
 static int
-test_master_lcore_flag(void)
+test_main_lcore_flag(void)
 {
 #ifdef RTE_EXEC_ENV_FREEBSD
 	/* BSD target doesn't support prefixes at this point */
@@ -619,34 +619,34 @@ test_master_lcore_flag(void)
 	if (!rte_lcore_is_enabled(0) || !rte_lcore_is_enabled(1))
 		return TEST_SKIPPED;
 
-	/* --master-lcore flag but no value */
+	/* --main-lcore flag but no value */
 	const char *argv1[] = { prgname, prefix, mp_flag,
-				"-c", "3", "--master-lcore"};
-	/* --master-lcore flag with invalid value */
+				"-c", "3", "--main-lcore"};
+	/* --main-lcore flag with invalid value */
 	const char *argv2[] = { prgname, prefix, mp_flag,
-				"-c", "3", "--master-lcore", "-1"};
+				"-c", "3", "--main-lcore", "-1"};
 	const char *argv3[] = { prgname, prefix, mp_flag,
-				"-c", "3", "--master-lcore", "X"};
-	/* master lcore not in coremask */
+				"-c", "3", "--main-lcore", "X"};
+	/* main lcore not in coremask */
 	const char *argv4[] = { prgname, prefix, mp_flag,
-				"-c", "3", "--master-lcore", "2"};
+				"-c", "3", "--main-lcore", "2"};
 	/* valid value */
 	const char *argv5[] = { prgname, prefix, mp_flag,
-				"-c", "3", "--master-lcore", "1"};
+				"-c", "3", "--main-lcore", "1"};
 	/* valid value set before coremask */
 	const char *argv6[] = { prgname, prefix, mp_flag,
-				"--master-lcore", "1", "-c", "3"};
+				"--main-lcore", "1", "-c", "3"};
 
 	if (launch_proc(argv1) == 0
 			|| launch_proc(argv2) == 0
 			|| launch_proc(argv3) == 0
 			|| launch_proc(argv4) == 0) {
-		printf("Error - process ran without error with wrong --master-lcore\n");
+		printf("Error - process ran without error with wrong --main-lcore\n");
 		return -1;
 	}
 	if (launch_proc(argv5) != 0
 			|| launch_proc(argv6) != 0) {
-		printf("Error - process did not run ok with valid --master-lcore\n");
+		printf("Error - process did not run ok with valid --main-lcore\n");
 		return -1;
 	}
 	return 0;
@@ -1468,9 +1468,9 @@ test_eal_flags(void)
 		return ret;
 	}
 
-	ret = test_master_lcore_flag();
+	ret = test_main_lcore_flag();
 	if (ret < 0) {
-		printf("Error in test_master_lcore_flag()\n");
+		printf("Error in test_main_lcore_flag()\n");
 		return ret;
 	}
 
@@ -1542,7 +1542,7 @@ REGISTER_TEST_COMMAND(eal_flags_autotest, test_eal_flags);
 
 /* subtests used in meson for CI */
 REGISTER_TEST_COMMAND(eal_flags_c_opt_autotest, test_missing_c_flag);
-REGISTER_TEST_COMMAND(eal_flags_master_opt_autotest, test_master_lcore_flag);
+REGISTER_TEST_COMMAND(eal_flags_main_opt_autotest, test_main_lcore_flag);
 REGISTER_TEST_COMMAND(eal_flags_n_opt_autotest, test_invalid_n_flag);
 REGISTER_TEST_COMMAND(eal_flags_hpet_autotest, test_no_hpet_flag);
 REGISTER_TEST_COMMAND(eal_flags_no_huge_autotest, test_no_huge_flag);
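
From the command line the renamed option behaves exactly as the argv
cases above; for example (binary name illustrative):

    ./dpdk-test -c 3 --main-lcore 1     # ok: lcore 1 is in coremask 0x3
    ./dpdk-test --main-lcore 1 -c 3     # ok: option order does not matter
    ./dpdk-test -c 3 --main-lcore 2     # rejected: lcore 2 not in 0x3
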
diff --git a/app/test/test_efd.c b/app/test/test_efd.c
index e9df46967d61..180dc4748ed0 100644
--- a/app/test/test_efd.c
+++ b/app/test/test_efd.c
@@ -95,7 +95,7 @@ static inline uint8_t efd_get_all_sockets_bitmask(void)
 {
 	uint8_t all_cpu_sockets_bitmask = 0;
 	unsigned int i;
-	unsigned int next_lcore = rte_get_master_lcore();
+	unsigned int next_lcore = rte_get_main_lcore();
 	const int val_true = 1, val_false = 0;
 	for (i = 0; i < rte_lcore_count(); i++) {
 		all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
diff --git a/app/test/test_efd_perf.c b/app/test/test_efd_perf.c
index d47622d5cafd..1c4770447526 100644
--- a/app/test/test_efd_perf.c
+++ b/app/test/test_efd_perf.c
@@ -33,7 +33,7 @@ static inline uint8_t efd_get_all_sockets_bitmask(void)
 {
 	uint8_t all_cpu_sockets_bitmask = 0;
 	unsigned int i;
-	unsigned int next_lcore = rte_get_master_lcore();
+	unsigned int next_lcore = rte_get_main_lcore();
 	const int val_true = 1, val_false = 0;
 	for (i = 0; i < rte_lcore_count(); i++) {
 		all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index f5ddd03d7670..f407d82d5de7 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -57,8 +57,8 @@ typedef void (*case_clean_t)(unsigned lcore_id);
 static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
 static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
 
-#define WAIT_SYNCHRO_FOR_SLAVES()   do{ \
-	if (lcore_self != rte_get_master_lcore())                  \
+#define WAIT_SYNCHRO_FOR_WORKERS()   do { \
+	if (lcore_self != rte_get_main_lcore())                  \
 		while (rte_atomic32_read(&synchro) == 0);        \
 } while(0)
 
@@ -70,7 +70,7 @@ test_eal_init_once(__rte_unused void *arg)
 {
 	unsigned lcore_self =  rte_lcore_id();
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
 	if (rte_eal_init(0, NULL) != -1)
@@ -106,7 +106,7 @@ ring_create_lookup(__rte_unused void *arg)
 	char ring_name[MAX_STRING_SIZE];
 	int i;
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	/* create the same ring simultaneously on all threads */
 	for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -166,7 +166,7 @@ mempool_create_lookup(__rte_unused void *arg)
 	char mempool_name[MAX_STRING_SIZE];
 	int i;
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	/* create the same mempool simultaneously on all threads */
 	for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -232,7 +232,7 @@ hash_create_free(__rte_unused void *arg)
 		.socket_id = 0,
 	};
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	/* create the same hash simultaneously on all threads */
 	hash_params.name = "fr_test_once";
@@ -296,7 +296,7 @@ fbk_create_free(__rte_unused void *arg)
 		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
 	};
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	/* create the same fbk hash table simultaneously on all threads */
 	fbk_params.name = "fr_test_once";
@@ -359,7 +359,7 @@ lpm_create_free(__rte_unused void *arg)
 	char lpm_name[MAX_STRING_SIZE];
 	int i;
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	/* create the same lpm simultaneously on all threads */
 	for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -430,7 +430,7 @@ launch_test(struct test_case *pt_case)
 	rte_atomic32_set(&obj_count, 0);
 	rte_atomic32_set(&synchro, 0);
 
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (cores == 1)
 			break;
 		cores--;
@@ -443,7 +443,7 @@ launch_test(struct test_case *pt_case)
 		ret = -1;
 
 	cores = cores_save;
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (cores == 1)
 			break;
 		cores--;
diff --git a/app/test/test_hash_multiwriter.c b/app/test/test_hash_multiwriter.c
index 46ab45f82a99..afa3c7b93d85 100644
--- a/app/test/test_hash_multiwriter.c
+++ b/app/test/test_hash_multiwriter.c
@@ -64,7 +64,7 @@ test_hash_multiwriter_worker(void *arg)
 
 	/*
 	 * Calculate offset for entries based on the position of the
-	 * logical core, from the master core (not counting not enabled cores)
+	 * logical core, from the main core (not counting not enabled cores)
 	 */
 	offset = pos_core * tbl_multiwriter_test_params.nb_tsx_insertion;
 
@@ -194,7 +194,7 @@ test_hash_multiwriter(void)
 
 	/* Fire all threads. */
 	rte_eal_mp_remote_launch(test_hash_multiwriter_worker,
-				 enabled_core_ids, CALL_MASTER);
+				 enabled_core_ids, CALL_MAIN);
 	rte_eal_mp_wait_lcore();
 
 	count = rte_hash_count(handle);
diff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c
index 73333dff1392..4860768a6491 100644
--- a/app/test/test_hash_readwrite.c
+++ b/app/test/test_hash_readwrite.c
@@ -25,7 +25,7 @@
 #define NUM_TEST 3
 unsigned int core_cnt[NUM_TEST] = {2, 4, 8};
 
-unsigned int slave_core_ids[RTE_MAX_LCORE];
+unsigned int worker_core_ids[RTE_MAX_LCORE];
 struct perf {
 	uint32_t single_read;
 	uint32_t single_write;
@@ -65,7 +65,7 @@ test_hash_readwrite_worker(__rte_unused void *arg)
 	ret = rte_malloc(NULL, sizeof(int) *
 				tbl_rw_test_param.num_insert, 0);
 	for (i = 0; i < rte_lcore_count(); i++) {
-		if (slave_core_ids[i] == lcore_id)
+		if (worker_core_ids[i] == lcore_id)
 			break;
 	}
 	offset = tbl_rw_test_param.num_insert * i;
@@ -206,7 +206,7 @@ test_hash_readwrite_functional(int use_htm, int use_rw_lf, int use_ext)
 	uint32_t duplicated_keys = 0;
 	uint32_t lost_keys = 0;
 	int use_jhash = 1;
-	int slave_cnt = rte_lcore_count() - 1;
+	int worker_cnt = rte_lcore_count() - 1;
 	uint32_t tot_insert = 0;
 
 	rte_atomic64_init(&gcycles);
@@ -224,11 +224,10 @@ test_hash_readwrite_functional(int use_htm, int use_rw_lf, int use_ext)
 		tot_insert = TOTAL_INSERT;
 
 	tbl_rw_test_param.num_insert =
-		tot_insert / slave_cnt;
+		tot_insert / worker_cnt;
 
 	tbl_rw_test_param.rounded_tot_insert =
-		tbl_rw_test_param.num_insert
-		* slave_cnt;
+		tbl_rw_test_param.num_insert * worker_cnt;
 
 	printf("\nHTM = %d, RW-LF = %d, EXT-Table = %d\n",
 		use_htm, use_rw_lf, use_ext);
@@ -236,7 +235,7 @@ test_hash_readwrite_functional(int use_htm, int use_rw_lf, int use_ext)
 
 	/* Fire all threads. */
 	rte_eal_mp_remote_launch(test_hash_readwrite_worker,
-				 NULL, SKIP_MASTER);
+				 NULL, SKIP_MAIN);
 	rte_eal_mp_wait_lcore();
 
 	while (rte_hash_iterate(tbl_rw_test_param.h, &next_key,
@@ -330,7 +329,7 @@ test_rw_writer(void *arg)
 	uint64_t offset;
 
 	for (i = 0; i < rte_lcore_count(); i++) {
-		if (slave_core_ids[i] == lcore_id)
+		if (worker_core_ids[i] == lcore_id)
 			break;
 	}
 
@@ -433,8 +432,8 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 	perf_results->single_read = end / i;
 
 	for (n = 0; n < NUM_TEST; n++) {
-		unsigned int tot_slave_lcore = rte_lcore_count() - 1;
-		if (tot_slave_lcore < core_cnt[n] * 2)
+		unsigned int tot_worker_lcore = rte_lcore_count() - 1;
+		if (tot_worker_lcore < core_cnt[n] * 2)
 			goto finish;
 
 		rte_atomic64_clear(&greads);
@@ -467,7 +466,7 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 		for (i = 0; i < core_cnt[n]; i++)
 			rte_eal_remote_launch(test_rw_reader,
 					(void *)(uintptr_t)read_cnt,
-					slave_core_ids[i]);
+					worker_core_ids[i]);
 
 		rte_eal_mp_wait_lcore();
 
@@ -476,7 +475,7 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 		for (; i < core_cnt[n] * 2; i++)
 			rte_eal_remote_launch(test_rw_writer,
 					(void *)((uintptr_t)start_coreid),
-					slave_core_ids[i]);
+					worker_core_ids[i]);
 
 		rte_eal_mp_wait_lcore();
 
@@ -521,20 +520,20 @@ test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
 			for (i = core_cnt[n]; i < core_cnt[n] * 2; i++)
 				rte_eal_remote_launch(test_rw_writer,
 					(void *)((uintptr_t)start_coreid),
-					slave_core_ids[i]);
+					worker_core_ids[i]);
 			for (i = 0; i < core_cnt[n]; i++)
 				rte_eal_remote_launch(test_rw_reader,
 					(void *)(uintptr_t)read_cnt,
-					slave_core_ids[i]);
+					worker_core_ids[i]);
 		} else {
 			for (i = 0; i < core_cnt[n]; i++)
 				rte_eal_remote_launch(test_rw_reader,
 					(void *)(uintptr_t)read_cnt,
-					slave_core_ids[i]);
+					worker_core_ids[i]);
 			for (; i < core_cnt[n] * 2; i++)
 				rte_eal_remote_launch(test_rw_writer,
 					(void *)((uintptr_t)start_coreid),
-					slave_core_ids[i]);
+					worker_core_ids[i]);
 		}
 
 		rte_eal_mp_wait_lcore();
@@ -626,8 +625,8 @@ test_hash_rw_perf_main(void)
 		return TEST_SKIPPED;
 	}
 
-	RTE_LCORE_FOREACH_SLAVE(core_id) {
-		slave_core_ids[i] = core_id;
+	RTE_LCORE_FOREACH_WORKER(core_id) {
+		worker_core_ids[i] = core_id;
 		i++;
 	}
 
@@ -710,8 +709,8 @@ test_hash_rw_func_main(void)
 		return TEST_SKIPPED;
 	}
 
-	RTE_LCORE_FOREACH_SLAVE(core_id) {
-		slave_core_ids[i] = core_id;
+	RTE_LCORE_FOREACH_WORKER(core_id) {
+		worker_core_ids[i] = core_id;
 		i++;
 	}
 
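
The worker_core_ids[] table filled above lets each worker claim a
disjoint slice of keys: a worker finds its own position in the table and
scales by the per-worker insert count. Sketched (assuming
worker_core_ids[] is populated as above):

    /* returns the first key index owned by the calling worker lcore */
    static uint32_t
    worker_key_offset(uint32_t keys_per_worker)
    {
        unsigned int i, lcore_id = rte_lcore_id();

        for (i = 0; i < rte_lcore_count(); i++)
            if (worker_core_ids[i] == lcore_id)
                break;
        return i * keys_per_worker;
    }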
diff --git a/app/test/test_kni.c b/app/test/test_kni.c
index e47ab36e0231..d4e31e4ae13d 100644
--- a/app/test/test_kni.c
+++ b/app/test/test_kni.c
@@ -85,7 +85,7 @@ static struct rte_kni_ops kni_ops = {
 	.config_promiscusity = NULL,
 };
 
-static unsigned lcore_master, lcore_ingress, lcore_egress;
+static unsigned int lcore_main, lcore_ingress, lcore_egress;
 static struct rte_kni *test_kni_ctx;
 static struct test_kni_stats stats;
 
@@ -202,7 +202,7 @@ test_kni_link_change(void)
  * supported by KNI kernel module. The ingress lcore will allocate mbufs and
  * transmit them to kernel space; while the egress lcore will receive the mbufs
  * from kernel space and free them.
- * On the master lcore, several commands will be run to check handling the
+ * On the main lcore, several commands will be run to check handling the
  * kernel requests. And it will finally set the flag to exit the KNI
  * transmitting/receiving to/from the kernel space.
  *
@@ -217,7 +217,7 @@ test_kni_loop(__rte_unused void *arg)
 	const unsigned lcore_id = rte_lcore_id();
 	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
 
-	if (lcore_id == lcore_master) {
+	if (lcore_id == lcore_main) {
 		rte_delay_ms(KNI_TIMEOUT_MS);
 		/* tests of handling kernel request */
 		if (system(IFCONFIG TEST_KNI_PORT" up") == -1)
@@ -276,12 +276,12 @@ test_kni_allocate_lcores(void)
 {
 	unsigned i, count = 0;
 
-	lcore_master = rte_get_master_lcore();
-	printf("master lcore: %u\n", lcore_master);
+	lcore_main = rte_get_main_lcore();
+	printf("main lcore: %u\n", lcore_main);
 	for (i = 0; i < RTE_MAX_LCORE; i++) {
 		if (count >=2 )
 			break;
-		if (rte_lcore_is_enabled(i) && i != lcore_master) {
+		if (rte_lcore_is_enabled(i) && i != lcore_main) {
 			count ++;
 			if (count == 1)
 				lcore_ingress = i;
@@ -487,8 +487,8 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
 	if (ret != 0)
 		goto fail_kni;
 
-	rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MASTER);
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MAIN);
+	RTE_LCORE_FOREACH_WORKER(i) {
 		if (rte_eal_wait_lcore(i) < 0) {
 			ret = -1;
 			goto fail_kni;
diff --git a/app/test/test_lpm_perf.c b/app/test/test_lpm_perf.c
index 58076795ec3a..c5a238b9d1e8 100644
--- a/app/test/test_lpm_perf.c
+++ b/app/test/test_lpm_perf.c
@@ -498,7 +498,7 @@ test_lpm_rcu_perf_multi_writer(void)
 	}
 
 	num_cores = 0;
-	RTE_LCORE_FOREACH_SLAVE(core_id) {
+	RTE_LCORE_FOREACH_WORKER(core_id) {
 		enabled_core_ids[num_cores] = core_id;
 		num_cores++;
 	}
@@ -651,7 +651,7 @@ test_lpm_rcu_perf(void)
 	}
 
 	num_cores = 0;
-	RTE_LCORE_FOREACH_SLAVE(core_id) {
+	RTE_LCORE_FOREACH_WORKER(core_id) {
 		enabled_core_ids[num_cores] = core_id;
 		num_cores++;
 	}
diff --git a/app/test/test_malloc.c b/app/test/test_malloc.c
index 71b3cfdde5cf..758e6194a852 100644
--- a/app/test/test_malloc.c
+++ b/app/test/test_malloc.c
@@ -1007,11 +1007,11 @@ test_malloc(void)
 	else printf("test_realloc() passed\n");
 
 	/*----------------------------*/
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		rte_eal_remote_launch(test_align_overlap_per_lcore, NULL, lcore_id);
 	}
 
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (rte_eal_wait_lcore(lcore_id) < 0)
 			ret = -1;
 	}
@@ -1022,11 +1022,11 @@ test_malloc(void)
 	else printf("test_align_overlap_per_lcore() passed\n");
 
 	/*----------------------------*/
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		rte_eal_remote_launch(test_reordered_free_per_lcore, NULL, lcore_id);
 	}
 
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (rte_eal_wait_lcore(lcore_id) < 0)
 			ret = -1;
 	}
@@ -1037,11 +1037,11 @@ test_malloc(void)
 	else printf("test_reordered_free_per_lcore() passed\n");
 
 	/*----------------------------*/
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		rte_eal_remote_launch(test_random_alloc_free, NULL, lcore_id);
 	}
 
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (rte_eal_wait_lcore(lcore_id) < 0)
 			ret = -1;
 	}
diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
index 06e44f0a79f8..80d1850da9d1 100644
--- a/app/test/test_mbuf.c
+++ b/app/test/test_mbuf.c
@@ -72,7 +72,7 @@
 
 #ifdef RTE_MBUF_REFCNT_ATOMIC
 
-static volatile uint32_t refcnt_stop_slaves;
+static volatile uint32_t refcnt_stop_workers;
 static unsigned refcnt_lcore[RTE_MAX_LCORE];
 
 #endif
@@ -1000,7 +1000,7 @@ test_pktmbuf_free_segment(struct rte_mempool *pktmbuf_pool)
 #ifdef RTE_MBUF_REFCNT_ATOMIC
 
 static int
-test_refcnt_slave(void *arg)
+test_refcnt_worker(void *arg)
 {
 	unsigned lcore, free;
 	void *mp = 0;
@@ -1010,7 +1010,7 @@ test_refcnt_slave(void *arg)
 	printf("%s started at lcore %u\n", __func__, lcore);
 
 	free = 0;
-	while (refcnt_stop_slaves == 0) {
+	while (refcnt_stop_workers == 0) {
 		if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
 			free++;
 			rte_pktmbuf_free(mp);
@@ -1038,7 +1038,7 @@ test_refcnt_iter(unsigned int lcore, unsigned int iter,
 	/* For each mbuf in the pool:
 	 * - allocate mbuf,
 	 * - increment it's reference up to N+1,
-	 * - enqueue it N times into the ring for slave cores to free.
+	 * - enqueue it N times into the ring for worker cores to free.
 	 */
 	for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
 	    i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
@@ -1062,7 +1062,7 @@ test_refcnt_iter(unsigned int lcore, unsigned int iter,
 		rte_panic("(lcore=%u, iter=%u): was able to allocate only "
 		          "%u from %u mbufs\n", lcore, iter, i, n);
 
-	/* wait till slave lcores  will consume all mbufs */
+	/* wait until worker lcores have consumed all mbufs */
 	while (!rte_ring_empty(refcnt_mbuf_ring))
 		;
 
@@ -1083,7 +1083,7 @@ test_refcnt_iter(unsigned int lcore, unsigned int iter,
 }
 
 static int
-test_refcnt_master(struct rte_mempool *refcnt_pool,
+test_refcnt_main(struct rte_mempool *refcnt_pool,
 		   struct rte_ring *refcnt_mbuf_ring)
 {
 	unsigned i, lcore;
@@ -1094,7 +1094,7 @@ test_refcnt_master(struct rte_mempool *refcnt_pool,
 	for (i = 0; i != REFCNT_MAX_ITER; i++)
 		test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
 
-	refcnt_stop_slaves = 1;
+	refcnt_stop_workers = 1;
 	rte_wmb();
 
 	printf("%s finished at lcore %u\n", __func__, lcore);
@@ -1107,7 +1107,7 @@ static int
 test_refcnt_mbuf(void)
 {
 #ifdef RTE_MBUF_REFCNT_ATOMIC
-	unsigned int master, slave, tref;
+	unsigned int main_lcore, worker, tref;
 	int ret = -1;
 	struct rte_mempool *refcnt_pool = NULL;
 	struct rte_ring *refcnt_mbuf_ring = NULL;
@@ -1126,39 +1126,38 @@ test_refcnt_mbuf(void)
 					      SOCKET_ID_ANY);
 	if (refcnt_pool == NULL) {
 		printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
-		    __func__);
+		       __func__);
 		return -1;
 	}
 
 	refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
-			rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
-					RING_F_SP_ENQ);
+					   rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
+					   RING_F_SP_ENQ);
 	if (refcnt_mbuf_ring == NULL) {
 		printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
-		    "\n", __func__);
+		       "\n", __func__);
 		goto err;
 	}
 
-	refcnt_stop_slaves = 0;
+	refcnt_stop_workers = 0;
 	memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
 
-	rte_eal_mp_remote_launch(test_refcnt_slave, refcnt_mbuf_ring,
-				 SKIP_MASTER);
+	rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN);
 
-	test_refcnt_master(refcnt_pool, refcnt_mbuf_ring);
+	test_refcnt_main(refcnt_pool, refcnt_mbuf_ring);
 
 	rte_eal_mp_wait_lcore();
 
 	/* check that we processed all references */
 	tref = 0;
-	master = rte_get_master_lcore();
+	main_lcore = rte_get_main_lcore();
 
-	RTE_LCORE_FOREACH_SLAVE(slave)
-		tref += refcnt_lcore[slave];
+	RTE_LCORE_FOREACH_WORKER(worker)
+		tref += refcnt_lcore[worker];
 
-	if (tref != refcnt_lcore[master])
+	if (tref != refcnt_lcore[main_lcore])
 		rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
-		          tref, refcnt_lcore[master]);
+			  tref, refcnt_lcore[main_lcore]);
 
 	rte_mempool_dump(stdout, refcnt_pool);
 	rte_ring_dump(stdout, refcnt_mbuf_ring);
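
The refcnt test is a main/worker producer-consumer handoff: the main
lcore takes extra references on each mbuf and enqueues them, while
workers dequeue and free until the stop flag is raised. A condensed
sketch (pool/ring creation omitted, names follow the test):

    #include <rte_atomic.h>
    #include <rte_launch.h>
    #include <rte_mbuf.h>
    #include <rte_ring.h>

    static volatile uint32_t refcnt_stop_workers;

    static int
    test_refcnt_worker(void *arg)       /* was test_refcnt_slave */
    {
        struct rte_ring *ring = arg;
        void *m;

        while (refcnt_stop_workers == 0)
            if (rte_ring_dequeue(ring, &m) == 0)
                rte_pktmbuf_free(m);    /* drop one reference */
        return 0;
    }

    static void
    run_refcnt(struct rte_ring *refcnt_mbuf_ring)   /* on the main lcore */
    {
        refcnt_stop_workers = 0;
        rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring,
                                 SKIP_MAIN);
        /* ... allocate mbufs, bump refcnt, enqueue, wait for drain ... */
        refcnt_stop_workers = 1;
        rte_wmb();                      /* publish the stop flag */
        rte_eal_mp_wait_lcore();
    }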
diff --git a/app/test/test_mcslock.c b/app/test/test_mcslock.c
index ddccaafa9242..fbca78707d2e 100644
--- a/app/test/test_mcslock.c
+++ b/app/test/test_mcslock.c
@@ -28,7 +28,7 @@
  * These tests are derived from spin lock test cases.
  *
  * - The functional test takes all of these locks and launches the
- *   ''test_mcslock_per_core()'' function on each core (except the master).
+ *   ''test_mcslock_per_core()'' function on each core (except the main).
  *
  *   - The function takes the global lock, display something, then releases
  *     the global lock on each core.
@@ -123,9 +123,9 @@ test_mcslock_perf(void)
 	printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));
 
 	rte_atomic32_set(&synchro, 0);
-	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
 
-	/* start synchro and launch test on master */
+	/* start synchro and launch test on main */
 	rte_atomic32_set(&synchro, 1);
 	load_loop_fn(&lock);
 
@@ -154,8 +154,8 @@ test_mcslock_try(__rte_unused void *arg)
 	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
 	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
 
-	/* Locked ml_try in the master lcore, so it should fail
-	 * when trying to lock it in the slave lcore.
+	/* Locked ml_try in the main lcore, so it should fail
+	 * when trying to lock it in the worker lcore.
 	 */
 	if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0) {
 		rte_mcslock_lock(&p_ml, &ml_me);
@@ -185,20 +185,20 @@ test_mcslock(void)
 	 * Test mcs lock & unlock on each core
 	 */
 
-	/* slave cores should be waiting: print it */
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	/* worker cores should be waiting: print it */
+	RTE_LCORE_FOREACH_WORKER(i) {
 		printf("lcore %d state: %d\n", i,
 				(int) rte_eal_get_lcore_state(i));
 	}
 
 	rte_mcslock_lock(&p_ml, &ml_me);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_eal_remote_launch(test_mcslock_per_core, NULL, i);
 	}
 
-	/* slave cores should be busy: print it */
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	/* worker cores should be busy: print it */
+	RTE_LCORE_FOREACH_WORKER(i) {
 		printf("lcore %d state: %d\n", i,
 				(int) rte_eal_get_lcore_state(i));
 	}
@@ -210,19 +210,19 @@ test_mcslock(void)
 	/*
 	 * Test if it could return immediately from try-locking a locked object.
 	 * Here it will lock the mcs lock object first, then launch all the
-	 * slave lcores to trylock the same mcs lock object.
-	 * All the slave lcores should give up try-locking a locked object and
+	 * worker lcores to trylock the same mcs lock object.
+	 * All the worker lcores should give up try-locking a locked object and
 	 * return immediately, and then increase the "count" initialized with
 	 * zero by one per times.
 	 * We can check if the "count" is finally equal to the number of all
-	 * slave lcores to see if the behavior of try-locking a locked
+	 * worker lcores to see if the behavior of try-locking a locked
 	 * mcslock object is correct.
 	 */
 	if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0)
 		return -1;
 
 	count = 0;
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_eal_remote_launch(test_mcslock_try, NULL, i);
 	}
 	rte_eal_mp_wait_lcore();
diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index 60bda8aadbe8..d7d0aaa3340c 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -143,8 +143,8 @@ per_lcore_mempool_test(void *arg)
 
 	stats[lcore_id].enq_count = 0;
 
-	/* wait synchro for slaves */
-	if (lcore_id != rte_get_master_lcore())
+	/* wait synchro for workers */
+	if (lcore_id != rte_get_main_lcore())
 		while (rte_atomic32_read(&synchro) == 0);
 
 	start_cycles = rte_get_timer_cycles();
@@ -214,7 +214,7 @@ launch_cores(struct rte_mempool *mp, unsigned int cores)
 		return -1;
 	}
 
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (cores == 1)
 			break;
 		cores--;
@@ -222,13 +222,13 @@ launch_cores(struct rte_mempool *mp, unsigned int cores)
 				      mp, lcore_id);
 	}
 
-	/* start synchro and launch test on master */
+	/* start synchro and launch test on main */
 	rte_atomic32_set(&synchro, 1);
 
 	ret = per_lcore_mempool_test(mp);
 
 	cores = cores_save;
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (cores == 1)
 			break;
 		cores--;
diff --git a/app/test/test_mp_secondary.c b/app/test/test_mp_secondary.c
index 3a655c340041..4fa0dfe9c0b8 100644
--- a/app/test/test_mp_secondary.c
+++ b/app/test/test_mp_secondary.c
@@ -94,7 +94,7 @@ run_secondary_instances(void)
 #endif
 
 	snprintf(coremask, sizeof(coremask), "%x", \
-			(1 << rte_get_master_lcore()));
+			(1 << rte_get_main_lcore()));
 
 	ret |= launch_proc(argv1);
 	printf("### Testing rte_mp_disable() reject:\n");
diff --git a/app/test/test_pdump.c b/app/test/test_pdump.c
index 6a1180bcb78e..21fcc1bc4cba 100644
--- a/app/test/test_pdump.c
+++ b/app/test/test_pdump.c
@@ -184,7 +184,7 @@ run_pdump_server_tests(void)
 	};
 
 	snprintf(coremask, sizeof(coremask), "%x",
-		 (1 << rte_get_master_lcore()));
+		 (1 << rte_get_main_lcore()));
 
 	ret = test_pdump_init();
 	ret |= launch_p(argv1);
diff --git a/app/test/test_per_lcore.c b/app/test/test_per_lcore.c
index fcd00212f1eb..129578d1a30f 100644
--- a/app/test/test_per_lcore.c
+++ b/app/test/test_per_lcore.c
@@ -73,31 +73,31 @@ test_per_lcore(void)
 	unsigned lcore_id;
 	int ret;
 
-	rte_eal_mp_remote_launch(assign_vars, NULL, SKIP_MASTER);
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	rte_eal_mp_remote_launch(assign_vars, NULL, SKIP_MAIN);
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (rte_eal_wait_lcore(lcore_id) < 0)
 			return -1;
 	}
 
-	rte_eal_mp_remote_launch(display_vars, NULL, SKIP_MASTER);
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	rte_eal_mp_remote_launch(display_vars, NULL, SKIP_MAIN);
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (rte_eal_wait_lcore(lcore_id) < 0)
 			return -1;
 	}
 
 	/* test if it could do remote launch twice at the same time or not */
-	ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MASTER);
+	ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MAIN);
 	if (ret < 0) {
 		printf("It fails to do remote launch but it should be able to\n");
 		return -1;
 	}
 	/* it should not be able to launch a lcore which is running */
-	ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MASTER);
+	ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MAIN);
 	if (ret == 0) {
 		printf("It does remote launch successfully but it should not at this time\n");
 		return -1;
 	}
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (rte_eal_wait_lcore(lcore_id) < 0)
 			return -1;
 	}
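
The double-launch check above works because rte_eal_mp_remote_launch()
refuses to start anything unless every worker lcore is idle: the first
call succeeds, the second (issued while the workers still run) must
fail. Sketched (test_per_lcore_delay as in the test):

    static int
    check_double_launch(void)
    {
        unsigned int lcore_id;

        /* workers are in WAIT state: launch succeeds */
        if (rte_eal_mp_remote_launch(test_per_lcore_delay,
                                     NULL, SKIP_MAIN) < 0)
            return -1;
        /* workers are still busy: second launch is rejected (-EBUSY) */
        if (rte_eal_mp_remote_launch(test_per_lcore_delay,
                                     NULL, SKIP_MAIN) == 0)
            return -1;
        RTE_LCORE_FOREACH_WORKER(lcore_id)
            if (rte_eal_wait_lcore(lcore_id) < 0)
                return -1;
        return 0;
    }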
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index d1240b76f955..890c2d7a3055 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -275,7 +275,7 @@ alloc_lcore(uint16_t socketid)
 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
 		if (LCORE_AVAIL != lcore_conf[lcore_id].status ||
 		    lcore_conf[lcore_id].socketid != socketid ||
-		    lcore_id == rte_get_master_lcore())
+		    lcore_id == rte_get_main_lcore())
 			continue;
 		lcore_conf[lcore_id].status = LCORE_USED;
 		lcore_conf[lcore_id].nb_ports = 0;
@@ -661,7 +661,7 @@ exec_burst(uint32_t flags, int lcore)
 static int
 test_pmd_perf(void)
 {
-	uint16_t nb_ports, num, nb_lcores, slave_id = (uint16_t)-1;
+	uint16_t nb_ports, num, nb_lcores, worker_id = (uint16_t)-1;
 	uint16_t nb_rxd = MAX_TRAFFIC_BURST;
 	uint16_t nb_txd = MAX_TRAFFIC_BURST;
 	uint16_t portid;
@@ -699,13 +699,13 @@ test_pmd_perf(void)
 	RTE_ETH_FOREACH_DEV(portid) {
 		if (socketid == -1) {
 			socketid = rte_eth_dev_socket_id(portid);
-			slave_id = alloc_lcore(socketid);
-			if (slave_id == (uint16_t)-1) {
+			worker_id = alloc_lcore(socketid);
+			if (worker_id == (uint16_t)-1) {
 				printf("No avail lcore to run test\n");
 				return -1;
 			}
 			printf("Performance test runs on lcore %u socket %u\n",
-			       slave_id, socketid);
+			       worker_id, socketid);
 		}
 
 		if (socketid != rte_eth_dev_socket_id(portid)) {
@@ -762,8 +762,8 @@ test_pmd_perf(void)
 				 "rte_eth_promiscuous_enable: err=%s, port=%d\n",
 				 rte_strerror(-ret), portid);
 
-		lcore_conf[slave_id].portlist[num++] = portid;
-		lcore_conf[slave_id].nb_ports++;
+		lcore_conf[worker_id].portlist[num++] = portid;
+		lcore_conf[worker_id].nb_ports++;
 	}
 	check_all_ports_link_status(nb_ports, RTE_PORT_ALL);
 
@@ -788,13 +788,13 @@ test_pmd_perf(void)
 		if (NULL == do_measure)
 			do_measure = measure_rxtx;
 
-		rte_eal_remote_launch(main_loop, NULL, slave_id);
+		rte_eal_remote_launch(main_loop, NULL, worker_id);
 
-		if (rte_eal_wait_lcore(slave_id) < 0)
+		if (rte_eal_wait_lcore(worker_id) < 0)
 			return -1;
 	} else if (sc_flag == SC_BURST_POLL_FIRST ||
 		   sc_flag == SC_BURST_XMIT_FIRST)
-		if (exec_burst(sc_flag, slave_id) < 0)
+		if (exec_burst(sc_flag, worker_id) < 0)
 			return -1;
 
 	/* port tear down */
diff --git a/app/test/test_rcu_qsbr.c b/app/test/test_rcu_qsbr.c
index 0a9e5ecd1a44..7ae66e4dfb76 100644
--- a/app/test/test_rcu_qsbr.c
+++ b/app/test/test_rcu_qsbr.c
@@ -1327,7 +1327,7 @@ test_rcu_qsbr_main(void)
 	}
 
 	num_cores = 0;
-	RTE_LCORE_FOREACH_SLAVE(core_id) {
+	RTE_LCORE_FOREACH_WORKER(core_id) {
 		enabled_core_ids[num_cores] = core_id;
 		num_cores++;
 	}
diff --git a/app/test/test_rcu_qsbr_perf.c b/app/test/test_rcu_qsbr_perf.c
index d35a6d089784..3017e71120ad 100644
--- a/app/test/test_rcu_qsbr_perf.c
+++ b/app/test/test_rcu_qsbr_perf.c
@@ -625,7 +625,7 @@ test_rcu_qsbr_main(void)
 	rte_atomic64_init(&check_cycles);
 
 	num_cores = 0;
-	RTE_LCORE_FOREACH_SLAVE(core_id) {
+	RTE_LCORE_FOREACH_WORKER(core_id) {
 		enabled_core_ids[num_cores] = core_id;
 		num_cores++;
 	}
diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index ac9bf5608daa..e63e25a867f0 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -297,7 +297,7 @@ run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r, const int esize)
 		lcore_count = 0;
 		param1.size = param2.size = bulk_sizes[i];
 		param1.r = param2.r = r;
-		if (cores->c1 == rte_get_master_lcore()) {
+		if (cores->c1 == rte_get_main_lcore()) {
 			rte_eal_remote_launch(f2, &param2, cores->c2);
 			f1(&param1);
 			rte_eal_wait_lcore(cores->c2);
@@ -340,8 +340,8 @@ load_loop_fn_helper(struct thread_params *p, const int esize)
 	if (burst == NULL)
 		return -1;
 
-	/* wait synchro for slaves */
-	if (lcore != rte_get_master_lcore())
+	/* wait synchro for workers */
+	if (lcore != rte_get_main_lcore())
 		while (rte_atomic32_read(&synchro) == 0)
 			rte_pause();
 
@@ -397,12 +397,12 @@ run_on_all_cores(struct rte_ring *r, const int esize)
 		param.size = bulk_sizes[i];
 		param.r = r;
 
-		/* clear synchro and start slaves */
+		/* clear synchro and start workers */
 		rte_atomic32_set(&synchro, 0);
-		if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MASTER) < 0)
+		if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MAIN) < 0)
 			return -1;
 
-		/* start synchro and launch test on master */
+		/* start synchro and launch test on main */
 		rte_atomic32_set(&synchro, 1);
 		lcore_f(&param);
 
@@ -553,7 +553,7 @@ test_ring_perf_esize(const int esize)
 			goto test_fail;
 	}
 
-	printf("\n### Testing using all slave nodes ###\n");
+	printf("\n### Testing using all worker nodes ###\n");
 	if (run_on_all_cores(r, esize) < 0)
 		goto test_fail;
 
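
run_on_core_pair() above has to special-case the pair that contains the
main lcore, since rte_eal_remote_launch() can only target worker lcores;
the main lcore runs its half of the pair inline. A sketch of the branch
(types as in the test; the two-worker branch is inferred from the
unshown half of the function):

    if (cores->c1 == rte_get_main_lcore()) {
        /* c1 is the main lcore: run f2 remotely, f1 in place */
        rte_eal_remote_launch(f2, &param2, cores->c2);
        f1(&param1);
        rte_eal_wait_lcore(cores->c2);
    } else {
        /* both lcores are workers: launch both remotely, then join */
        rte_eal_remote_launch(f1, &param1, cores->c1);
        rte_eal_remote_launch(f2, &param2, cores->c2);
        rte_eal_wait_lcore(cores->c1);
        rte_eal_wait_lcore(cores->c2);
    }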
diff --git a/app/test/test_ring_stress_impl.h b/app/test/test_ring_stress_impl.h
index 222d62bc4f4d..3b9a480eb9cc 100644
--- a/app/test/test_ring_stress_impl.h
+++ b/app/test/test_ring_stress_impl.h
@@ -6,7 +6,7 @@
 
 /**
  * Stress test for ring enqueue/dequeue operations.
- * Performs the following pattern on each slave worker:
+ * Performs the following pattern on each worker:
  * dequeue/read-write data from the dequeued objects/enqueue.
  * Serves as both functional and performance test of ring
  * enqueue/dequeue operations under high contention
@@ -348,8 +348,8 @@ test_mt1(int (*test)(void *))
 
 	memset(arg, 0, sizeof(arg));
 
-	/* launch on all slaves */
-	RTE_LCORE_FOREACH_SLAVE(lc) {
+	/* launch on all workers */
+	RTE_LCORE_FOREACH_WORKER(lc) {
 		arg[lc].rng = r;
 		arg[lc].stats = init_stat;
 		rte_eal_remote_launch(test, &arg[lc], lc);
@@ -365,12 +365,12 @@ test_mt1(int (*test)(void *))
 	wrk_cmd = WRK_CMD_STOP;
 	rte_smp_wmb();
 
-	/* wait for slaves and collect stats. */
+	/* wait for workers and collect stats. */
 	mc = rte_lcore_id();
 	arg[mc].stats = init_stat;
 
 	rc = 0;
-	RTE_LCORE_FOREACH_SLAVE(lc) {
+	RTE_LCORE_FOREACH_WORKER(lc) {
 		rc |= rte_eal_wait_lcore(lc);
 		lcore_stat_aggr(&arg[mc].stats, &arg[lc].stats);
 		if (verbose != 0)
diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
index 61bee7d7c296..701187f39884 100644
--- a/app/test/test_rwlock.c
+++ b/app/test/test_rwlock.c
@@ -99,8 +99,8 @@ load_loop_fn(__rte_unused void *arg)
 	uint64_t lcount = 0;
 	const unsigned int lcore = rte_lcore_id();
 
-	/* wait synchro for slaves */
-	if (lcore != rte_get_master_lcore())
+	/* wait synchro for workers */
+	if (lcore != rte_get_main_lcore())
 		while (rte_atomic32_read(&synchro) == 0)
 			;
 
@@ -134,12 +134,12 @@ test_rwlock_perf(void)
 
 	printf("\nRwlock Perf Test on %u cores...\n", rte_lcore_count());
 
-	/* clear synchro and start slaves */
+	/* clear synchro and start workers */
 	rte_atomic32_set(&synchro, 0);
-	if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MASTER) < 0)
+	if (rte_eal_mp_remote_launch(load_loop_fn, NULL, SKIP_MAIN) < 0)
 		return -1;
 
-	/* start synchro and launch test on master */
+	/* start synchro and launch test on main */
 	rte_atomic32_set(&synchro, 1);
 	load_loop_fn(NULL);
 
@@ -161,7 +161,7 @@ test_rwlock_perf(void)
  * - There is a global rwlock and a table of rwlocks (one per lcore).
  *
  * - The test function takes all of these locks and launches the
- *   ``test_rwlock_per_core()`` function on each core (except the master).
+ *   ``test_rwlock_per_core()`` function on each core (except the main).
  *
  *   - The function takes the global write lock, display something,
  *     then releases the global lock.
@@ -187,21 +187,21 @@ rwlock_test1(void)
 
 	rte_rwlock_write_lock(&sl);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_rwlock_write_lock(&sl_tab[i]);
 		rte_eal_remote_launch(test_rwlock_per_core, NULL, i);
 	}
 
 	rte_rwlock_write_unlock(&sl);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_rwlock_write_unlock(&sl_tab[i]);
 		rte_delay_ms(100);
 	}
 
 	rte_rwlock_write_lock(&sl);
 	/* this message should be the last message of test */
-	printf("Global write lock taken on master core %u\n", rte_lcore_id());
+	printf("Global write lock taken on main core %u\n", rte_lcore_id());
 	rte_rwlock_write_unlock(&sl);
 
 	rte_eal_mp_wait_lcore();
@@ -462,26 +462,26 @@ try_rwlock_test_rda(void)
 	try_test_reset();
 
 	/* start read test on all available lcores */
-	rte_eal_mp_remote_launch(try_read_lcore, NULL, CALL_MASTER);
+	rte_eal_mp_remote_launch(try_read_lcore, NULL, CALL_MAIN);
 	rte_eal_mp_wait_lcore();
 
 	return process_try_lcore_stats();
 }
 
-/* all slave lcores grab RDLOCK, master one grabs WRLOCK */
+/* all worker lcores grab RDLOCK, main one grabs WRLOCK */
 static int
 try_rwlock_test_rds_wrm(void)
 {
 	try_test_reset();
 
-	rte_eal_mp_remote_launch(try_read_lcore, NULL, SKIP_MASTER);
+	rte_eal_mp_remote_launch(try_read_lcore, NULL, SKIP_MAIN);
 	try_write_lcore(NULL);
 	rte_eal_mp_wait_lcore();
 
 	return process_try_lcore_stats();
 }
 
-/* master and even slave lcores grab RDLOCK, odd lcores grab WRLOCK */
+/* main and even worker lcores grab RDLOCK, odd lcores grab WRLOCK */
 static int
 try_rwlock_test_rde_wro(void)
 {
@@ -489,7 +489,7 @@ try_rwlock_test_rde_wro(void)
 
 	try_test_reset();
 
-	mlc = rte_get_master_lcore();
+	mlc = rte_get_main_lcore();
 
 	RTE_LCORE_FOREACH(lc) {
 		if (lc != mlc) {
diff --git a/app/test/test_service_cores.c b/app/test/test_service_cores.c
index 5d92bea8af79..b387b468b75a 100644
--- a/app/test/test_service_cores.c
+++ b/app/test/test_service_cores.c
@@ -30,7 +30,7 @@ static int
 testsuite_setup(void)
 {
 	slcore_id = rte_get_next_lcore(/* start core */ -1,
-				       /* skip master */ 1,
+				       /* skip main */ 1,
 				       /* wrap */ 0);
 
 	return TEST_SUCCESS;
@@ -552,12 +552,12 @@ service_lcore_add_del(void)
 	TEST_ASSERT_EQUAL(1, rte_service_lcore_count(),
 			"Service core count not equal to one");
 	uint32_t slcore_1 = rte_get_next_lcore(/* start core */ -1,
-					       /* skip master */ 1,
+					       /* skip main */ 1,
 					       /* wrap */ 0);
 	TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_1),
 			"Service core add did not return zero");
 	uint32_t slcore_2 = rte_get_next_lcore(/* start core */ slcore_1,
-					       /* skip master */ 1,
+					       /* skip main */ 1,
 					       /* wrap */ 0);
 	TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_2),
 			"Service core add did not return zero");
@@ -603,12 +603,12 @@ service_threaded_test(int mt_safe)
 
 	/* add next 2 cores */
 	uint32_t slcore_1 = rte_get_next_lcore(/* start core */ -1,
-					       /* skip master */ 1,
+					       /* skip main */ 1,
 					       /* wrap */ 0);
 	TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_1),
 			"mt safe lcore add fail");
 	uint32_t slcore_2 = rte_get_next_lcore(/* start core */ slcore_1,
-					       /* skip master */ 1,
+					       /* skip main */ 1,
 					       /* wrap */ 0);
 	TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_2),
 			"mt safe lcore add fail");
@@ -942,10 +942,10 @@ service_active_two_cores(void)
 	int i;
 
 	uint32_t lcore = rte_get_next_lcore(/* start core */ -1,
-					    /* skip master */ 1,
+					    /* skip main */ 1,
 					    /* wrap */ 0);
 	uint32_t slcore = rte_get_next_lcore(/* start core */ lcore,
-					     /* skip master */ 1,
+					     /* skip main */ 1,
 					     /* wrap */ 0);
 
 	/* start the service on the second available lcore */
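
The second argument of rte_get_next_lcore() (the "skip main" flag
renamed in these comments) controls whether the main lcore may be
returned; RTE_MAX_LCORE is returned once the enabled lcores are
exhausted. Picking two service cores, sketched:

    #include <rte_lcore.h>
    #include <rte_service.h>

    /* first two enabled lcores after the start point, never the main one */
    uint32_t slcore_1 = rte_get_next_lcore(-1, /* skip main */ 1,
                                           /* wrap */ 0);
    uint32_t slcore_2 = rte_get_next_lcore(slcore_1, 1, 0);

    if (slcore_1 >= RTE_MAX_LCORE || slcore_2 >= RTE_MAX_LCORE)
        return -1;                      /* not enough lcores */

    rte_service_lcore_add(slcore_1);
    rte_service_lcore_add(slcore_2);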
diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 842990ed3b30..054fb43a9fe5 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -28,7 +28,7 @@
  * - There is a global spinlock and a table of spinlocks (one per lcore).
  *
  * - The test function takes all of these locks and launches the
- *   ``test_spinlock_per_core()`` function on each core (except the master).
+ *   ``test_spinlock_per_core()`` function on each core (except the main).
  *
  *   - The function takes the global lock, display something, then releases
  *     the global lock.
@@ -109,8 +109,8 @@ load_loop_fn(void *func_param)
 	const int use_lock = *(int*)func_param;
 	const unsigned lcore = rte_lcore_id();
 
-	/* wait synchro for slaves */
-	if (lcore != rte_get_master_lcore())
+	/* wait synchro for workers */
+	if (lcore != rte_get_main_lcore())
 		while (rte_atomic32_read(&synchro) == 0);
 
 	begin = rte_get_timer_cycles();
@@ -149,11 +149,11 @@ test_spinlock_perf(void)
 
 	printf("\nTest with lock on %u cores...\n", rte_lcore_count());
 
-	/* Clear synchro and start slaves */
+	/* Clear synchro and start workers */
 	rte_atomic32_set(&synchro, 0);
-	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
 
-	/* start synchro and launch test on master */
+	/* start synchro and launch test on main */
 	rte_atomic32_set(&synchro, 1);
 	load_loop_fn(&lock);
 
@@ -200,8 +200,8 @@ test_spinlock(void)
 	int ret = 0;
 	int i;
 
-	/* slave cores should be waiting: print it */
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	/* worker cores should be waiting: print it */
+	RTE_LCORE_FOREACH_WORKER(i) {
 		printf("lcore %d state: %d\n", i,
 		       (int) rte_eal_get_lcore_state(i));
 	}
@@ -214,19 +214,19 @@ test_spinlock(void)
 
 	rte_spinlock_lock(&sl);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_spinlock_lock(&sl_tab[i]);
 		rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
 	}
 
-	/* slave cores should be busy: print it */
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	/* worker cores should be busy: print it */
+	RTE_LCORE_FOREACH_WORKER(i) {
 		printf("lcore %d state: %d\n", i,
 		       (int) rte_eal_get_lcore_state(i));
 	}
 	rte_spinlock_unlock(&sl);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_spinlock_unlock(&sl_tab[i]);
 		rte_delay_ms(10);
 	}
@@ -245,7 +245,7 @@ test_spinlock(void)
 	} else
 		rte_spinlock_recursive_unlock(&slr);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
 	}
 	rte_spinlock_recursive_unlock(&slr);
@@ -253,12 +253,12 @@ test_spinlock(void)
 
 	/*
 	 * Test if it could return immediately from try-locking a locked object.
-	 * Here it will lock the spinlock object first, then launch all the slave
+	 * Here it will lock the spinlock object first, then launch all the worker
 	 * lcores to trylock the same spinlock object.
-	 * All the slave lcores should give up try-locking a locked object and
+	 * All the worker lcores should give up try-locking a locked object and
 	 * return immediately, and then increase the "count" initialized with zero
 	 * by one per times.
-	 * We can check if the "count" is finally equal to the number of all slave
+	 * We can check if the "count" is finally equal to the number of all worker
 	 * lcores to see if the behavior of try-locking a locked spinlock object
 	 * is correct.
 	 */
@@ -266,7 +266,7 @@ test_spinlock(void)
 		return -1;
 	}
 	count = 0;
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_eal_remote_launch(test_spinlock_try, NULL, i);
 	}
 	rte_eal_mp_wait_lcore();
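
The trylock case above counts one expected failure per worker:
rte_spinlock_trylock() returns 1 only when the lock is actually
acquired, so every worker that correctly fails bumps a shared counter,
which must end up equal to the worker count. A minimal sketch (using an
atomic counter; the test itself uses a plain variable):

    #include <rte_atomic.h>
    #include <rte_launch.h>
    #include <rte_lcore.h>
    #include <rte_spinlock.h>

    static rte_spinlock_t sl_try = RTE_SPINLOCK_INITIALIZER;
    static rte_atomic32_t count;

    static int
    try_fn(void *arg)       /* runs on each worker lcore */
    {
        (void)arg;
        /* the main lcore already holds sl_try, so this must fail */
        if (rte_spinlock_trylock(&sl_try) == 0)
            rte_atomic32_inc(&count);
        return 0;
    }

    static int
    check_trylock(void)     /* called on the main lcore */
    {
        unsigned int i;

        rte_spinlock_lock(&sl_try);
        RTE_LCORE_FOREACH_WORKER(i)
            rte_eal_remote_launch(try_fn, NULL, i);
        rte_eal_mp_wait_lcore();
        rte_spinlock_unlock(&sl_try);
        return rte_atomic32_read(&count) ==
               (int)(rte_lcore_count() - 1) ? 0 : -1;
    }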
diff --git a/app/test/test_stack.c b/app/test/test_stack.c
index 463460ccc65f..02422a32d64f 100644
--- a/app/test/test_stack.c
+++ b/app/test/test_stack.c
@@ -328,7 +328,7 @@ test_stack_multithreaded(uint32_t flags)
 
 	thread_test_args.s = s;
 
-	if (rte_eal_mp_remote_launch(stack_thread_push_pop, NULL, CALL_MASTER))
+	if (rte_eal_mp_remote_launch(stack_thread_push_pop, NULL, CALL_MAIN))
 		rte_panic("Failed to launch tests\n");
 
 	RTE_LCORE_FOREACH(lcore_id) {
diff --git a/app/test/test_stack_perf.c b/app/test/test_stack_perf.c
index 3ab7267b1b72..3590625c49e7 100644
--- a/app/test/test_stack_perf.c
+++ b/app/test/test_stack_perf.c
@@ -180,7 +180,7 @@ run_on_core_pair(struct lcore_pair *cores, struct rte_stack *s,
 		args[0].sz = args[1].sz = bulk_sizes[i];
 		args[0].s = args[1].s = s;
 
-		if (cores->c1 == rte_get_master_lcore()) {
+		if (cores->c1 == rte_get_main_lcore()) {
 			rte_eal_remote_launch(fn, &args[1], cores->c2);
 			fn(&args[0]);
 			rte_eal_wait_lcore(cores->c2);
@@ -210,7 +210,7 @@ run_on_n_cores(struct rte_stack *s, lcore_function_t fn, int n)
 
 		rte_atomic32_set(&lcore_barrier, n);
 
-		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+		RTE_LCORE_FOREACH_WORKER(lcore_id) {
 			if (++cnt >= n)
 				break;
 
@@ -235,7 +235,7 @@ run_on_n_cores(struct rte_stack *s, lcore_function_t fn, int n)
 		avg = args[rte_lcore_id()].avg;
 
 		cnt = 0;
-		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+		RTE_LCORE_FOREACH_WORKER(lcore_id) {
 			if (++cnt >= n)
 				break;
 			avg += args[lcore_id].avg;
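
A note on the run_on_core_pair() hunk just above: rte_eal_remote_launch()
can only target worker lcores, so when one core of the measured pair is
the main lcore the function has to run there inline. A hedged sketch of
that dispatch shape (demo_* names are illustrative, not from the patch):

    #include <rte_launch.h>
    #include <rte_lcore.h>

    /* Run fn on both lcores of a pair; work for the main lcore, if it
     * is part of the pair, executes inline rather than being launched. */
    static void
    demo_run_pair(unsigned int c1, unsigned int c2,
            lcore_function_t *fn, void *arg1, void *arg2)
    {
        if (c1 == rte_get_main_lcore()) {
            rte_eal_remote_launch(fn, arg2, c2);  /* c2 is a worker */
            (void)fn(arg1);                       /* inline on main */
            rte_eal_wait_lcore(c2);
        } else {
            rte_eal_remote_launch(fn, arg1, c1);
            rte_eal_remote_launch(fn, arg2, c2);
            rte_eal_wait_lcore(c1);
            rte_eal_wait_lcore(c2);
        }
    }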
diff --git a/app/test/test_ticketlock.c b/app/test/test_ticketlock.c
index 66ab3d1a0248..7aab8665bc77 100644
--- a/app/test/test_ticketlock.c
+++ b/app/test/test_ticketlock.c
@@ -28,7 +28,7 @@
  * - There is a global ticketlock and a table of ticketlocks (one per lcore).
  *
  * - The test function takes all of these locks and launches the
- *   ``test_ticketlock_per_core()`` function on each core (except the master).
+ *   ``test_ticketlock_per_core()`` function on each core (except the main lcore).
  *
  *   - The function takes the global lock, display something, then releases
  *     the global lock.
@@ -110,8 +110,8 @@ load_loop_fn(void *func_param)
 	const int use_lock = *(int *)func_param;
 	const unsigned int lcore = rte_lcore_id();
 
-	/* wait synchro for slaves */
-	if (lcore != rte_get_master_lcore())
+	/* wait synchro for workers */
+	if (lcore != rte_get_main_lcore())
 		while (rte_atomic32_read(&synchro) == 0)
 			;
 
@@ -154,11 +154,11 @@ test_ticketlock_perf(void)
 	lcount = 0;
 	printf("\nTest with lock on %u cores...\n", rte_lcore_count());
 
-	/* Clear synchro and start slaves */
+	/* Clear synchro and start workers */
 	rte_atomic32_set(&synchro, 0);
-	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
 
-	/* start synchro and launch test on master */
+	/* start synchro and launch test on main */
 	rte_atomic32_set(&synchro, 1);
 	load_loop_fn(&lock);
 
@@ -208,8 +208,8 @@ test_ticketlock(void)
 	int ret = 0;
 	int i;
 
-	/* slave cores should be waiting: print it */
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	/* worker cores should be waiting: print it */
+	RTE_LCORE_FOREACH_WORKER(i) {
 		printf("lcore %d state: %d\n", i,
 		       (int) rte_eal_get_lcore_state(i));
 	}
@@ -217,25 +217,25 @@ test_ticketlock(void)
 	rte_ticketlock_init(&tl);
 	rte_ticketlock_init(&tl_try);
 	rte_ticketlock_recursive_init(&tlr);
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_ticketlock_init(&tl_tab[i]);
 	}
 
 	rte_ticketlock_lock(&tl);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_ticketlock_lock(&tl_tab[i]);
 		rte_eal_remote_launch(test_ticketlock_per_core, NULL, i);
 	}
 
-	/* slave cores should be busy: print it */
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	/* worker cores should be busy: print it */
+	RTE_LCORE_FOREACH_WORKER(i) {
 		printf("lcore %d state: %d\n", i,
 		       (int) rte_eal_get_lcore_state(i));
 	}
 	rte_ticketlock_unlock(&tl);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_ticketlock_unlock(&tl_tab[i]);
 		rte_delay_ms(10);
 	}
@@ -254,7 +254,7 @@ test_ticketlock(void)
 	} else
 		rte_ticketlock_recursive_unlock(&tlr);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_eal_remote_launch(test_ticketlock_recursive_per_core,
 					NULL, i);
 	}
@@ -264,19 +264,19 @@ test_ticketlock(void)
 	/*
 	 * Test if it could return immediately from try-locking a locked object.
 	 * Here it will lock the ticketlock object first, then launch all the
-	 * slave lcores to trylock the same ticketlock object.
-	 * All the slave lcores should give up try-locking a locked object and
+	 * worker lcores to trylock the same ticketlock object.
+	 * All the worker lcores should give up try-locking a locked object and
 	 * return immediately, and then increase the "count" initialized with
 	 * zero by one per times.
 	 * We can check if the "count" is finally equal to the number of all
-	 * slave lcores to see if the behavior of try-locking a locked
+	 * worker lcores to see if the behavior of try-locking a locked
 	 * ticketlock object is correct.
 	 */
 	if (rte_ticketlock_trylock(&tl_try) == 0)
 		return -1;
 
 	count = 0;
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_eal_remote_launch(test_ticketlock_try, NULL, i);
 	}
 	rte_eal_mp_wait_lcore();
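
Both lock perf tests above share the same start-barrier idiom after the
rename: workers are launched with SKIP_MAIN and spin on "synchro" until
the main lcore flips it, so every core enters the measured loop at the
same time. A condensed sketch of just that idiom (illustrative names;
assumes the 20.11+ SKIP_MAIN constant):

    #include <rte_atomic.h>
    #include <rte_common.h>
    #include <rte_launch.h>
    #include <rte_lcore.h>
    #include <rte_pause.h>

    static rte_atomic32_t demo_synchro;

    static int
    demo_load_fn(void *arg __rte_unused)
    {
        /* workers wait for the start signal from the main lcore */
        if (rte_lcore_id() != rte_get_main_lcore())
            while (rte_atomic32_read(&demo_synchro) == 0)
                rte_pause();
        /* ... the timed lock/unlock loop would run here ... */
        return 0;
    }

    static void
    demo_perf(void)
    {
        rte_atomic32_set(&demo_synchro, 0);  /* hold workers back */
        rte_eal_mp_remote_launch(demo_load_fn, NULL, SKIP_MAIN);
        rte_atomic32_set(&demo_synchro, 1);  /* release them */
        demo_load_fn(NULL);                  /* main participates too */
        rte_eal_mp_wait_lcore();
    }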
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
index 5933f56ed544..a10b2fe9daf4 100644
--- a/app/test/test_timer.c
+++ b/app/test/test_timer.c
@@ -37,7 +37,7 @@
  *    - All cores then simultaneously are set to schedule all the timers at
  *      the same time, so conflicts should occur.
  *    - Then there is a delay while we wait for the timers to expire
- *    - Then the master lcore calls timer_manage() and we check that all
+ *    - Then the main lcore calls timer_manage() and we check that all
  *      timers have had their callbacks called exactly once - no more no less.
  *    - Then we repeat the process, except after setting up the timers, we have
  *      all cores randomly reschedule them.
@@ -58,7 +58,7 @@
  *
  *    - timer0
  *
- *      - At initialization, timer0 is loaded by the master core, on master core
+ *      - At initialization, timer0 is loaded by the main core, on main core
  *        in "single" mode (time = 1 second).
  *      - In the first 19 callbacks, timer0 is reloaded on the same core,
  *        then, it is explicitly stopped at the 20th call.
@@ -66,21 +66,21 @@
  *
  *    - timer1
  *
- *      - At initialization, timer1 is loaded by the master core, on the
- *        master core in "single" mode (time = 2 seconds).
+ *      - At initialization, timer1 is loaded by the main core, on the
+ *        main core in "single" mode (time = 2 seconds).
  *      - In the first 9 callbacks, timer1 is reloaded on another
  *        core. After the 10th callback, timer1 is not reloaded anymore.
  *
  *    - timer2
  *
- *      - At initialization, timer2 is loaded by the master core, on the
- *        master core in "periodical" mode (time = 1 second).
+ *      - At initialization, timer2 is loaded by the main core, on the
+ *        main core in "periodical" mode (time = 1 second).
  *      - In the callback, when t=25s, it stops timer3 and reloads timer0
  *        on the current core.
  *
  *    - timer3
  *
- *      - At initialization, timer3 is loaded by the master core, on
+ *      - At initialization, timer3 is loaded by the main core, on
  *        another core in "periodical" mode (time = 1 second).
  *      - It is stopped at t=25s by timer2.
  */
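
As a reference for the timer0..timer3 description above, a minimal
sketch of loading a timer on the main lcore (illustrative demo_* names;
assumes rte_timer_subsystem_init() has already been called):

    #include <rte_common.h>
    #include <rte_cycles.h>
    #include <rte_lcore.h>
    #include <rte_timer.h>

    static struct rte_timer demo_tim;

    static void
    demo_cb(struct rte_timer *tim __rte_unused, void *arg __rte_unused)
    {
        /* runs from rte_timer_manage() on the lcore passed to reset() */
    }

    static void
    demo_load_on_main(void)
    {
        uint64_t hz = rte_get_timer_hz();  /* ticks per second */

        rte_timer_init(&demo_tim);
        /* SINGLE fires once; PERIODICAL would rearm itself, as the
         * "periodical" timers in the description above do */
        rte_timer_reset(&demo_tim, hz, SINGLE,
                rte_get_main_lcore(), demo_cb, NULL);
    }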
@@ -201,68 +201,69 @@ timer_stress_main_loop(__rte_unused void *arg)
 	return 0;
 }
 
-/* Need to synchronize slave lcores through multiple steps. */
-enum { SLAVE_WAITING = 1, SLAVE_RUN_SIGNAL, SLAVE_RUNNING, SLAVE_FINISHED };
-static rte_atomic16_t slave_state[RTE_MAX_LCORE];
+/* Need to synchronize worker lcores through multiple steps. */
+enum { WORKER_WAITING = 1, WORKER_RUN_SIGNAL, WORKER_RUNNING, WORKER_FINISHED };
+static rte_atomic16_t lcore_state[RTE_MAX_LCORE];
 
 static void
-master_init_slaves(void)
+main_init_workers(void)
 {
 	unsigned i;
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
-		rte_atomic16_set(&slave_state[i], SLAVE_WAITING);
+	RTE_LCORE_FOREACH_WORKER(i) {
+		rte_atomic16_set(&lcore_state[i], WORKER_WAITING);
 	}
 }
 
 static void
-master_start_slaves(void)
+main_start_workers(void)
 {
 	unsigned i;
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
-		rte_atomic16_set(&slave_state[i], SLAVE_RUN_SIGNAL);
+	RTE_LCORE_FOREACH_WORKER(i) {
+		rte_atomic16_set(&lcore_state[i], WORKER_RUN_SIGNAL);
 	}
-	RTE_LCORE_FOREACH_SLAVE(i) {
-		while (rte_atomic16_read(&slave_state[i]) != SLAVE_RUNNING)
+	RTE_LCORE_FOREACH_WORKER(i) {
+		while (rte_atomic16_read(&lcore_state[i]) != WORKER_RUNNING)
 			rte_pause();
 	}
 }
 
 static void
-master_wait_for_slaves(void)
+main_wait_for_workers(void)
 {
 	unsigned i;
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
-		while (rte_atomic16_read(&slave_state[i]) != SLAVE_FINISHED)
+	RTE_LCORE_FOREACH_WORKER(i) {
+		while (rte_atomic16_read(&lcore_state[i]) != WORKER_FINISHED)
 			rte_pause();
 	}
 }
 
 static void
-slave_wait_to_start(void)
+worker_wait_to_start(void)
 {
 	unsigned lcore_id = rte_lcore_id();
 
-	while (rte_atomic16_read(&slave_state[lcore_id]) != SLAVE_RUN_SIGNAL)
+	while (rte_atomic16_read(&lcore_state[lcore_id]) != WORKER_RUN_SIGNAL)
 		rte_pause();
-	rte_atomic16_set(&slave_state[lcore_id], SLAVE_RUNNING);
+	rte_atomic16_set(&lcore_state[lcore_id], WORKER_RUNNING);
 }
 
 static void
-slave_finish(void)
+worker_finish(void)
 {
 	unsigned lcore_id = rte_lcore_id();
 
-	rte_atomic16_set(&slave_state[lcore_id], SLAVE_FINISHED);
+	rte_atomic16_set(&lcore_state[lcore_id], WORKER_FINISHED);
 }
 
 
 static volatile int cb_count = 0;
 
 /* callback for second stress test. will only be called
- * on master lcore */
+ * on main lcore
+ */
 static void
 timer_stress2_cb(struct rte_timer *tim __rte_unused, void *arg __rte_unused)
 {
@@ -277,36 +278,36 @@ timer_stress2_main_loop(__rte_unused void *arg)
 	static struct rte_timer *timers;
 	int i, ret;
 	uint64_t delay = rte_get_timer_hz() / 20;
-	unsigned lcore_id = rte_lcore_id();
-	unsigned master = rte_get_master_lcore();
+	unsigned int lcore_id = rte_lcore_id();
+	unsigned int main_lcore = rte_get_main_lcore();
 	int32_t my_collisions = 0;
 	static rte_atomic32_t collisions;
 
-	if (lcore_id == master) {
+	if (lcore_id == main_lcore) {
 		cb_count = 0;
 		test_failed = 0;
 		rte_atomic32_set(&collisions, 0);
-		master_init_slaves();
+		main_init_workers();
 		timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
 		if (timers == NULL) {
 			printf("Test Failed\n");
 			printf("- Cannot allocate memory for timers\n" );
 			test_failed = 1;
-			master_start_slaves();
+			main_start_workers();
 			goto cleanup;
 		}
 		for (i = 0; i < NB_STRESS2_TIMERS; i++)
 			rte_timer_init(&timers[i]);
-		master_start_slaves();
+		main_start_workers();
 	} else {
-		slave_wait_to_start();
+		worker_wait_to_start();
 		if (test_failed)
 			goto cleanup;
 	}
 
-	/* have all cores schedule all timers on master lcore */
+	/* have all cores schedule all timers on main lcore */
 	for (i = 0; i < NB_STRESS2_TIMERS; i++) {
-		ret = rte_timer_reset(&timers[i], delay, SINGLE, master,
+		ret = rte_timer_reset(&timers[i], delay, SINGLE, main_lcore,
 				timer_stress2_cb, NULL);
 		/* there will be collisions when multiple cores simultaneously
 		 * configure the same timers */
@@ -320,14 +321,14 @@ timer_stress2_main_loop(__rte_unused void *arg)
 	rte_delay_ms(100);
 
 	/* all cores rendezvous */
-	if (lcore_id == master) {
-		master_wait_for_slaves();
+	if (lcore_id == main_lcore) {
+		main_wait_for_workers();
 	} else {
-		slave_finish();
+		worker_finish();
 	}
 
 	/* now check that we get the right number of callbacks */
-	if (lcore_id == master) {
+	if (lcore_id == main_lcore) {
 		my_collisions = rte_atomic32_read(&collisions);
 		if (my_collisions != 0)
 			printf("- %d timer reset collisions (OK)\n", my_collisions);
@@ -338,23 +339,23 @@ timer_stress2_main_loop(__rte_unused void *arg)
 			printf("- Expected %d callbacks, got %d\n", NB_STRESS2_TIMERS,
 					cb_count);
 			test_failed = 1;
-			master_start_slaves();
+			main_start_workers();
 			goto cleanup;
 		}
 		cb_count = 0;
 
 		/* proceed */
-		master_start_slaves();
+		main_start_workers();
 	} else {
 		/* proceed */
-		slave_wait_to_start();
+		worker_wait_to_start();
 		if (test_failed)
 			goto cleanup;
 	}
 
 	/* now test again, just stop and restart timers at random after init*/
 	for (i = 0; i < NB_STRESS2_TIMERS; i++)
-		rte_timer_reset(&timers[i], delay, SINGLE, master,
+		rte_timer_reset(&timers[i], delay, SINGLE, main_lcore,
 				timer_stress2_cb, NULL);
 
 	/* pick random timer to reset, stopping them first half the time */
@@ -362,7 +363,7 @@ timer_stress2_main_loop(__rte_unused void *arg)
 		int r = rand() % NB_STRESS2_TIMERS;
 		if (i % 2)
 			rte_timer_stop(&timers[r]);
-		rte_timer_reset(&timers[r], delay, SINGLE, master,
+		rte_timer_reset(&timers[r], delay, SINGLE, main_lcore,
 				timer_stress2_cb, NULL);
 	}
 
@@ -370,8 +371,8 @@ timer_stress2_main_loop(__rte_unused void *arg)
 	rte_delay_ms(100);
 
 	/* now check that we get the right number of callbacks */
-	if (lcore_id == master) {
-		master_wait_for_slaves();
+	if (lcore_id == main_lcore) {
+		main_wait_for_workers();
 
 		rte_timer_manage();
 		if (cb_count != NB_STRESS2_TIMERS) {
@@ -386,14 +387,14 @@ timer_stress2_main_loop(__rte_unused void *arg)
 	}
 
 cleanup:
-	if (lcore_id == master) {
-		master_wait_for_slaves();
+	if (lcore_id == main_lcore) {
+		main_wait_for_workers();
 		if (timers != NULL) {
 			rte_free(timers);
 			timers = NULL;
 		}
 	} else {
-		slave_finish();
+		worker_finish();
 	}
 
 	return 0;
@@ -465,7 +466,7 @@ timer_basic_main_loop(__rte_unused void *arg)
 	int64_t diff = 0;
 
 	/* launch all timers on core 0 */
-	if (lcore_id == rte_get_master_lcore()) {
+	if (lcore_id == rte_get_main_lcore()) {
 		mytimer_reset(&mytiminfo[0], hz/4, SINGLE, lcore_id,
 			      timer_basic_cb);
 		mytimer_reset(&mytiminfo[1], hz/2, SINGLE, lcore_id,
@@ -563,7 +564,7 @@ test_timer(void)
 
 	/* start other cores */
 	printf("Start timer stress tests\n");
-	rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MASTER);
+	rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MAIN);
 	rte_eal_mp_wait_lcore();
 
 	/* stop timer 0 used for stress test */
@@ -572,7 +573,7 @@ test_timer(void)
 	/* run a second, slightly different set of stress tests */
 	printf("\nStart timer stress tests 2\n");
 	test_failed = 0;
-	rte_eal_mp_remote_launch(timer_stress2_main_loop, NULL, CALL_MASTER);
+	rte_eal_mp_remote_launch(timer_stress2_main_loop, NULL, CALL_MAIN);
 	rte_eal_mp_wait_lcore();
 	if (test_failed)
 		return TEST_FAILED;
@@ -584,7 +585,7 @@ test_timer(void)
 
 	/* start other cores */
 	printf("\nStart timer basic tests\n");
-	rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MASTER);
+	rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MAIN);
 	rte_eal_mp_wait_lcore();
 
 	/* stop all timers */
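
One point worth noting about the CALL_MAIN conversions above: the
stress loops are launched on every lcore, main included, and each
instance branches on rte_get_main_lcore() to pick its role. A compact
sketch of that shape (illustrative names only):

    #include <rte_common.h>
    #include <rte_launch.h>
    #include <rte_lcore.h>

    static int
    demo_loop(void *arg __rte_unused)
    {
        if (rte_lcore_id() == rte_get_main_lcore()) {
            /* set up shared state, release the workers, collect
             * and verify results once they are done */
        } else {
            /* wait for the main lcore's signal, then run the
             * stress body and report back */
        }
        return 0;
    }

    static void
    demo_run(void)
    {
        /* CALL_MAIN: demo_loop also runs on the main lcore itself */
        rte_eal_mp_remote_launch(demo_loop, NULL, CALL_MAIN);
        rte_eal_mp_wait_lcore();
    }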
diff --git a/app/test/test_timer_racecond.c b/app/test/test_timer_racecond.c
index 28af9ca765ea..133d3324ee66 100644
--- a/app/test/test_timer_racecond.c
+++ b/app/test/test_timer_racecond.c
@@ -54,10 +54,10 @@
 #define N_TIMERS    50
 
 static struct rte_timer timer[N_TIMERS];
-static unsigned timer_lcore_id[N_TIMERS];
+static unsigned int timer_lcore_id[N_TIMERS];
 
-static unsigned master;
-static volatile unsigned stop_slaves;
+static unsigned int main_lcore;
+static volatile unsigned int stop_workers;
 
 static int reload_timer(struct rte_timer *tim);
 
@@ -90,7 +90,7 @@ reload_timer(struct rte_timer *tim)
 	    (tim - timer);
 	int ret;
 
-	ret = rte_timer_reset(tim, ticks, PERIODICAL, master, timer_cb, NULL);
+	ret = rte_timer_reset(tim, ticks, PERIODICAL, main_lcore, timer_cb, NULL);
 	if (ret != 0) {
 		rte_log(RTE_LOG_DEBUG, timer_logtype_test,
 			"- core %u failed to reset timer %" PRIuPTR " (OK)\n",
@@ -101,7 +101,7 @@ reload_timer(struct rte_timer *tim)
 }
 
 static int
-slave_main_loop(__rte_unused void *arg)
+worker_main_loop(__rte_unused void *arg)
 {
 	unsigned lcore_id = rte_lcore_id();
 	unsigned i;
@@ -110,7 +110,7 @@ slave_main_loop(__rte_unused void *arg)
 
 	printf("Starting main loop on core %u\n", lcore_id);
 
-	while (!stop_slaves) {
+	while (!stop_workers) {
 		/* Wait until the timer manager is running.
 		 * We know it's running when we see timer[0] NOT pending.
 		 */
@@ -147,7 +147,7 @@ test_timer_racecond(void)
 	unsigned lcore_id;
 	unsigned i;
 
-	master = lcore_id = rte_lcore_id();
+	main_lcore = lcore_id = rte_lcore_id();
 	hz = rte_get_timer_hz();
 
 	/* init and start timers */
@@ -156,8 +156,8 @@ test_timer_racecond(void)
 		ret = reload_timer(&timer[i]);
 		TEST_ASSERT(ret == 0, "reload_timer failed");
 
-		/* Distribute timers to slaves.
-		 * Note that we assign timer[0] to the master.
+		/* Distribute timers to workers.
+		 * Note that we assign timer[0] to the main lcore.
 		 */
 		timer_lcore_id[i] = lcore_id;
 		lcore_id = rte_get_next_lcore(lcore_id, 1, 1);
@@ -167,11 +167,11 @@ test_timer_racecond(void)
 	cur_time = rte_get_timer_cycles();
 	end_time = cur_time + (hz * TEST_DURATION_S);
 
-	/* start slave cores */
-	stop_slaves = 0;
+	/* start worker cores */
+	stop_workers = 0;
 	printf("Start timer manage race condition test (%u seconds)\n",
 			TEST_DURATION_S);
-	rte_eal_mp_remote_launch(slave_main_loop, NULL, SKIP_MASTER);
+	rte_eal_mp_remote_launch(worker_main_loop, NULL, SKIP_MAIN);
 
 	while (diff >= 0) {
 		/* run the timers */
@@ -184,9 +184,9 @@ test_timer_racecond(void)
 		diff = end_time - cur_time;
 	}
 
-	/* stop slave cores */
+	/* stop worker cores */
 	printf("Stopping timer manage race condition test\n");
-	stop_slaves = 1;
+	stop_workers = 1;
 	rte_eal_mp_wait_lcore();
 
 	/* stop timers */
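
For context on the hunks above: while the workers hammer reload_timer(),
the main lcore spends the test window calling rte_timer_manage() so the
callbacks fire there. A small sketch of that manage loop (illustrative
name, same assumptions as above):

    #include <rte_cycles.h>
    #include <rte_timer.h>

    /* Run the timer manager on the calling (main) lcore for d seconds. */
    static void
    demo_manage_for(unsigned int d)
    {
        uint64_t end = rte_get_timer_cycles() + d * rte_get_timer_hz();

        while (rte_get_timer_cycles() < end)
            rte_timer_manage();  /* fire expired timers here */
    }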
diff --git a/app/test/test_timer_secondary.c b/app/test/test_timer_secondary.c
index 7a3bc873b359..1e8f1d45499a 100644
--- a/app/test/test_timer_secondary.c
+++ b/app/test/test_timer_secondary.c
@@ -26,7 +26,7 @@
 #define launch_proc(ARGV) process_dup(ARGV, RTE_DIM(ARGV), __func__)
 
 struct test_info {
-	unsigned int mstr_lcore;
+	unsigned int main_lcore;
 	unsigned int mgr_lcore;
 	unsigned int sec_lcore;
 	uint32_t timer_data_id;
@@ -137,12 +137,12 @@ test_timer_secondary(void)
 		TEST_ASSERT_SUCCESS(ret, "Failed to allocate timer data "
 				    "instance");
 
-		unsigned int *mstr_lcorep = &test_info->mstr_lcore;
+		unsigned int *main_lcorep = &test_info->main_lcore;
 		unsigned int *mgr_lcorep = &test_info->mgr_lcore;
 		unsigned int *sec_lcorep = &test_info->sec_lcore;
 
-		*mstr_lcorep = rte_get_master_lcore();
-		*mgr_lcorep = rte_get_next_lcore(*mstr_lcorep, 1, 1);
+		*main_lcorep = rte_get_main_lcore();
+		*mgr_lcorep = rte_get_next_lcore(*main_lcorep, 1, 1);
 		*sec_lcorep = rte_get_next_lcore(*mgr_lcorep, 1, 1);
 
 		ret = rte_eal_remote_launch(timer_manage_loop,
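
The lcore selection above chains rte_get_next_lcore() to pick distinct
cores after the main lcore. For clarity, a tiny sketch of that chaining
(illustrative variable names; the second argument skips the main lcore
and the third wraps around the lcore set):

    #include <rte_lcore.h>

    static void
    demo_pick_lcores(unsigned int *mgr, unsigned int *sec)
    {
        unsigned int main_lc = rte_get_main_lcore();

        *mgr = rte_get_next_lcore(main_lc, 1, 1);  /* next worker */
        *sec = rte_get_next_lcore(*mgr, 1, 1);     /* one after that */
    }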
diff --git a/app/test/test_trace_perf.c b/app/test/test_trace_perf.c
index 50c7381b77e7..e1ad8e6f555c 100644
--- a/app/test/test_trace_perf.c
+++ b/app/test/test_trace_perf.c
@@ -132,7 +132,7 @@ run_test(const char *str, lcore_function_t f, struct test_data *data, size_t sz)
 
 	memset(data, 0, sz);
 	data->nb_workers = rte_lcore_count() - 1;
-	RTE_LCORE_FOREACH_SLAVE(id)
+	RTE_LCORE_FOREACH_WORKER(id)
 		rte_eal_remote_launch(f, &data->ldata[worker++], id);
 
 	wait_till_workers_are_ready(data);
@@ -140,7 +140,7 @@ run_test(const char *str, lcore_function_t f, struct test_data *data, size_t sz)
 	measure_perf(str, data);
 	signal_workers_to_finish(data);
 
-	RTE_LCORE_FOREACH_SLAVE(id)
+	RTE_LCORE_FOREACH_WORKER(id)
 		rte_eal_wait_lcore(id);
 }
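
Finally, the trace perf hunks show the canonical launch/collect pattern
under the new names: start one function per worker lcore, then wait on
each. A last compact sketch (illustrative; assumes one argument slot per
worker, as in run_test() above):

    #include <rte_launch.h>
    #include <rte_lcore.h>

    static void
    demo_launch_and_wait(lcore_function_t *fn, void *args[])
    {
        unsigned int id, w = 0;

        RTE_LCORE_FOREACH_WORKER(id)
            rte_eal_remote_launch(fn, args[w++], id);

        RTE_LCORE_FOREACH_WORKER(id)
            rte_eal_wait_lcore(id);
    }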
 
-- 
2.27.0