[dpdk-dev] [RFC PATCH 4/7] eal: add simple API for multi-pthread

Cunming Liang cunming.liang at intel.com
Thu Dec 11 03:04:47 CET 2014
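
Add a small set of EAL helpers for user-created pthreads:
rte_pthread_create() spawns a pthread running work(arg) and binds it to
the caller's lcore; rte_pthread_assign_lcore() and
rte_pthread_assign_cpuset() re-pin an existing pthread to one or several
enabled lcores; rte_pthread_prepare() and rte_pthread_cleanup() acquire
and release the linear TID used to index per-thread EAL resources, whose
pool size is bounded by the new CONFIG_RTE_MAX_THREAD option.

A minimal usage sketch (the worker body, the launch() wrapper and the
lcore ids below are illustrative only, not part of this patch):

#include <rte_eal.h>

static void *
worker(__attribute__((unused)) void *arg)
{
	/* acquire a linear TID before using per-thread EAL resources */
	if (rte_pthread_prepare() < 0)
		return NULL;

	/* ... per-thread work ... */

	/* release the linear TID on exit */
	rte_pthread_cleanup();
	return NULL;
}

int
launch(void)
{
	pthread_t tid;
	unsigned lcores[2] = {2, 3};	/* illustrative enabled lcores */

	/* the new pthread starts bound to the caller's lcore */
	if (rte_pthread_create(&tid, worker, NULL) < 0)
		return -1;

	/* optionally spread it across a set of enabled lcores */
	return rte_pthread_assign_cpuset(tid, lcores, 2);
}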


Signed-off-by: Cunming Liang <cunming.liang at intel.com>
---
 config/common_linuxapp                   |   1 +
 lib/librte_eal/common/include/rte_eal.h  |  15 ++++
 lib/librte_eal/linuxapp/eal/eal_thread.c | 106 ++++++++++++++++++++++++++++++-
 3 files changed, 121 insertions(+), 1 deletion(-)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 2f9643b..4800b31 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -89,6 +89,7 @@ CONFIG_RTE_LIBNAME="intel_dpdk"
 #
 CONFIG_RTE_LIBRTE_EAL=y
 CONFIG_RTE_MAX_LCORE=128
+CONFIG_RTE_MAX_THREAD=256
 CONFIG_RTE_MAX_NUMA_NODES=8
 CONFIG_RTE_MAX_MEMSEG=256
 CONFIG_RTE_MAX_MEMZONE=2560
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index 2640167..0bce5f3 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -41,6 +41,7 @@
  */
 
 #include <stdint.h>
+#include <pthread.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -266,6 +267,20 @@ int rte_eal_has_hugepages(void);
 #define RTE_MAX_THREAD                RTE_MAX_LCORE
 #endif
 
+/** Create a pthread running *work*(*arg*), bound to the caller's lcore. */
+int rte_pthread_create(pthread_t *tid, void *(*work)(void *), void *arg);
+
+/** Bind *thread* to the single enabled lcore *lcore*. */
+int rte_pthread_assign_lcore(pthread_t thread, unsigned lcore);
+
+/** Bind *thread* to the set of *num* enabled lcores in *lcore*[]. */
+int rte_pthread_assign_cpuset(pthread_t thread, unsigned lcore[], unsigned num);
+
+/** Acquire a linear TID for the calling pthread. Return 0 on success, -1 on error. */
+int rte_pthread_prepare(void);
+
+/** Release the linear TID acquired by rte_pthread_prepare(). */
+void rte_pthread_cleanup(void);
 
 #ifdef __cplusplus
 }
diff --git a/lib/librte_eal/linuxapp/eal/eal_thread.c b/lib/librte_eal/linuxapp/eal/eal_thread.c
index 52478d6..a584e3b 100644
--- a/lib/librte_eal/linuxapp/eal/eal_thread.c
+++ b/lib/librte_eal/linuxapp/eal/eal_thread.c
@@ -241,7 +241,7 @@ __get_linear_tid(uint64_t *tid)
 	return 0;
 }
 
-static void __rte_unused
+static void
 __put_linear_tid(uint64_t tid)
 {
 	const struct rte_memzone *mz;
@@ -334,3 +334,107 @@ eal_thread_loop(__attribute__((unused)) void *arg)
 	/* pthread_exit(NULL); */
 	/* return NULL; */
 }
+
+int
+rte_pthread_assign_lcore(pthread_t thread, unsigned lcore)
+{
+	if (!rte_lcore_is_enabled(lcore))
+		return -1;
+
+	if (__eal_thread_set_affinity(thread, lcore) < 0)
+		return -1;
+
+	return 0;
+}
+
+int
+rte_pthread_assign_cpuset(pthread_t thread, unsigned lcore[], unsigned num)
+{
+	int s;
+	unsigned i;
+
+#if defined(CPU_ALLOC)
+	size_t size;
+	cpu_set_t *cpusetp;
+
+	cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
+	if (cpusetp == NULL) {
+		RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
+		return -1;
+	}
+
+	size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
+	CPU_ZERO_S(size, cpusetp);
+
+	for (i = 0; i < num; i++) {
+		if (!rte_lcore_is_enabled(lcore[i])) {
+			RTE_LOG(ERR, EAL, "lcore %u not enabled\n", lcore[i]);
+			CPU_FREE(cpusetp);
+			return -1;
+		}
+
+		CPU_SET_S(lcore[i], size, cpusetp);
+	}
+	s = pthread_setaffinity_np(thread, size, cpusetp);
+	if (s != 0) {
+		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+		CPU_FREE(cpusetp);
+		return -1;
+	}
+
+	CPU_FREE(cpusetp);
+#else /* CPU_ALLOC */
+	cpu_set_t cpuset;
+	CPU_ZERO(&cpuset);
+
+	for (i = 0; i < num; i++) {
+		if (!rte_lcore_is_enabled(lcore[i])) {
+			RTE_LOG(ERR, EAL, "lcore %u not enabled\n", lcore[i]);
+			return -1;
+		}
+		CPU_SET(lcore[i], &cpuset);
+	}
+
+	s = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);
+	if (s != 0) {
+		RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+		return -1;
+	}
+#endif
+
+	return 0;
+}
+
+int
+rte_pthread_prepare(void)
+{
+	uint64_t ltid;
+	if (__get_linear_tid(&ltid) < 0)
+		return -1;
+	RTE_PER_LCORE(_thread_id) = ltid;
+	return 0;
+}
+
+void
+rte_pthread_cleanup(void)
+{
+	__put_linear_tid(RTE_PER_LCORE(_thread_id));
+}
+
+int
+rte_pthread_create(pthread_t *tid, void *(*work)(void *), void *arg)
+{
+	int ret;
+
+	if (tid == NULL || work == NULL)
+		return -1;
+
+	ret = pthread_create(tid, NULL, work, arg);
+	if (ret != 0)
+		return -1;
+
+	if (__eal_thread_set_affinity(*tid, rte_lcore_id()) < 0)
+		rte_panic("cannot set affinity\n");
+
+	return 0;
+}
-- 
1.8.1.4


