[dpdk-dev] [PATCH] eal: fix possible UB on creation of ctrl thread

Luc Pelletier <lucp.at.work@gmail.com>
Wed Mar 24 14:04:22 CET 2021


The creation of control threads synchronizes the new thread and its
creator using a pthread barrier. This patch fixes a race condition
where the barrier could be destroyed, and the memory containing it
freed, while the other thread has not yet returned from
pthread_barrier_wait(), which could result in undefined behaviour.

Fixes: 3a0d465d4c53 ("eal: fix use-after-free on control thread creation")
Cc: jianfeng.tan@intel.com
Cc: stable@dpdk.org

Signed-off-by: Luc Pelletier <lucp.at.work@gmail.com>
---
 lib/librte_eal/common/eal_common_thread.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)
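
All three hunks below apply the same teardown pattern: the thread that
gets PTHREAD_BARRIER_SERIAL_THREAD back from pthread_barrier_wait()
spins on an atomic flag until its peer has also returned from the wait,
and only then destroys the barrier and frees the structure containing
it. What follows is a minimal standalone sketch of that pattern, not
part of the patch; the names (struct ctrl_sync, wait_and_teardown,
worker) are made up for illustration, error handling is omitted, and it
assumes the GCC __atomic builtins already used by the patch.

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdlib.h>

struct ctrl_sync {
	pthread_barrier_t configured;
	bool barrier_in_use;
};

static void
wait_and_teardown(struct ctrl_sync *s)
{
	if (pthread_barrier_wait(&s->configured) ==
			PTHREAD_BARRIER_SERIAL_THREAD) {
		/*
		 * Serial thread: wait until the peer has left
		 * pthread_barrier_wait() before destroying the barrier
		 * and freeing the memory that contains it.
		 */
		while (__atomic_load_n(&s->barrier_in_use, __ATOMIC_ACQUIRE))
			sched_yield();
		pthread_barrier_destroy(&s->configured);
		free(s);
	} else {
		/*
		 * Non-serial thread: signal that pthread_barrier_wait()
		 * has returned. 's' may be freed by the peer right after
		 * this store, so it must not be touched again.
		 */
		__atomic_store_n(&s->barrier_in_use, false, __ATOMIC_RELEASE);
	}
}

static void *
worker(void *arg)
{
	wait_and_teardown(arg);
	return NULL;
}

int
main(void)
{
	pthread_t t;
	struct ctrl_sync *s = malloc(sizeof(*s));

	s->barrier_in_use = true;
	pthread_barrier_init(&s->configured, NULL, 2);
	pthread_create(&t, NULL, worker, s);
	wait_and_teardown(s);
	pthread_join(t, NULL);
	return 0;
}

The release store is only performed after pthread_barrier_wait() has
returned in the non-serial thread, and the acquire load in the spin
loop pairs with it, so the destroy and free can no longer race with
the peer's use of the barrier.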

diff --git a/lib/librte_eal/common/eal_common_thread.c b/lib/librte_eal/common/eal_common_thread.c
index 73a055902..bd9a9fdbc 100644
--- a/lib/librte_eal/common/eal_common_thread.c
+++ b/lib/librte_eal/common/eal_common_thread.c
@@ -170,6 +170,7 @@ struct rte_thread_ctrl_params {
 	void *(*start_routine)(void *);
 	void *arg;
 	pthread_barrier_t configured;
+	bool barrier_in_use;
 };
 
 static void *ctrl_thread_init(void *arg)
@@ -186,9 +187,13 @@ static void *ctrl_thread_init(void *arg)
 
 	ret = pthread_barrier_wait(&params->configured);
 	if (ret == PTHREAD_BARRIER_SERIAL_THREAD) {
+		while (__atomic_load_n(&params->barrier_in_use,
+				       __ATOMIC_ACQUIRE))
+			sched_yield();
 		pthread_barrier_destroy(&params->configured);
 		free(params);
-	}
+	} else
+		__atomic_store_n(&params->barrier_in_use, 0, __ATOMIC_RELEASE);
 
 	return start_routine(routine_arg);
 }
@@ -210,6 +215,7 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name,
 
 	params->start_routine = start_routine;
 	params->arg = arg;
+	params->barrier_in_use = 1;
 
 	pthread_barrier_init(&params->configured, NULL, 2);
 
@@ -232,18 +238,26 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name,
 
 	ret = pthread_barrier_wait(&params->configured);
 	if (ret == PTHREAD_BARRIER_SERIAL_THREAD) {
+		while (__atomic_load_n(&params->barrier_in_use,
+				       __ATOMIC_ACQUIRE))
+			sched_yield();
 		pthread_barrier_destroy(&params->configured);
 		free(params);
-	}
+	} else
+		__atomic_store_n(&params->barrier_in_use, 0, __ATOMIC_RELEASE);
 
 	return 0;
 
 fail:
 	if (PTHREAD_BARRIER_SERIAL_THREAD ==
 	    pthread_barrier_wait(&params->configured)) {
+		while (__atomic_load_n(&params->barrier_in_use,
+				       __ATOMIC_ACQUIRE))
+			sched_yield();
 		pthread_barrier_destroy(&params->configured);
 		free(params);
-	}
+	} else
+		__atomic_store_n(&params->barrier_in_use, 0, __ATOMIC_RELEASE);
 	pthread_cancel(*thread);
 	pthread_join(*thread, NULL);
 	return -ret;
-- 
2.25.1
