[dpdk-dev] [PATCH 2/2] eal: use c11 atomics for interrupt status

Phil Yang phil.yang at arm.com
Thu Jun 11 12:24:25 CEST 2020


The event status is defined as a volatile variable and shared
between threads. Use C11 atomics with explicit memory ordering
instead of the rte_atomic ops, which enforce unnecessary full
barriers on aarch64.

Signed-off-by: Phil Yang <phil.yang at arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang at arm.com>
---
 lib/librte_eal/include/rte_eal_interrupts.h |  2 +-
 lib/librte_eal/linux/eal_interrupts.c       | 47 ++++++++++++++++++++---------
 2 files changed, 34 insertions(+), 15 deletions(-)
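
Not part of the patch: a minimal standalone sketch of the claim/release
protocol this change switches to, for reviewers less familiar with the
C11 built-ins. The struct, the ev_* functions and the EV_* states below
are simplified, hypothetical stand-ins for rte_epoll_event and the
RTE_EPOLL_* states; the memory orderings are the ones used in the diff.
On aarch64 an acquire/release pair typically compiles down to
load-acquire/store-release instructions rather than the full barriers
implied by the old rte_atomic32_cmpset().

#include <stdint.h>
#include <stdbool.h>

enum { EV_INVALID, EV_VALID, EV_EXEC };

struct ev {
	uint32_t status;	/* guards the payload below */
	int fd;			/* payload */
};

/* Claim the event for execution. ACQUIRE on success pairs with the
 * RELEASE in ev_release() so the payload written there is visible
 * here; the failure ordering can be RELAXED because a losing CAS
 * never touches the payload.
 */
static bool
ev_claim(struct ev *e)
{
	uint32_t expected = EV_VALID;

	return __atomic_compare_exchange_n(&e->status, &expected,
			EV_EXEC, 0 /* strong CAS */,
			__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

/* Publish the payload and hand the event back. The RELEASE store
 * keeps the payload update ordered before the status store, so the
 * next ev_claim() observes a consistent event.
 */
static void
ev_release(struct ev *e, int fd)
{
	e->fd = fd;
	__atomic_store_n(&e->status, EV_VALID, __ATOMIC_RELEASE);
}

/* Retire the event, waiting out a concurrent executor. The inner
 * loop spins on RELAXED loads and only retries the CAS once the
 * status is back to EV_VALID, keeping barriers off the hot path.
 */
static void
ev_retire(struct ev *e)
{
	uint32_t expected = EV_VALID;

	while (!__atomic_compare_exchange_n(&e->status, &expected,
			EV_INVALID, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		while (__atomic_load_n(&e->status, __ATOMIC_RELAXED)
				!= EV_VALID)
			;	/* rte_pause() in the real code */
		expected = EV_VALID;
	}
}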

diff --git a/lib/librte_eal/include/rte_eal_interrupts.h b/lib/librte_eal/include/rte_eal_interrupts.h
index 773a34a..b1e8a29 100644
--- a/lib/librte_eal/include/rte_eal_interrupts.h
+++ b/lib/librte_eal/include/rte_eal_interrupts.h
@@ -59,7 +59,7 @@ enum {
 
 /** interrupt epoll event obj, taken by epoll_event.ptr */
 struct rte_epoll_event {
-	volatile uint32_t status;  /**< OUT: event status */
+	uint32_t status;           /**< OUT: event status */
 	int fd;                    /**< OUT: event fd */
 	int epfd;       /**< OUT: epoll instance the ev associated with */
 	struct rte_epoll_data epdata;
diff --git a/lib/librte_eal/linux/eal_interrupts.c b/lib/librte_eal/linux/eal_interrupts.c
index 2f369dc..1486acf 100644
--- a/lib/librte_eal/linux/eal_interrupts.c
+++ b/lib/librte_eal/linux/eal_interrupts.c
@@ -26,7 +26,6 @@
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
-#include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_debug.h>
 #include <rte_log.h>
@@ -1221,11 +1220,18 @@ eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
 {
 	unsigned int i, count = 0;
 	struct rte_epoll_event *rev;
+	uint32_t valid_status;
 
 	for (i = 0; i < n; i++) {
 		rev = evs[i].data.ptr;
-		if (!rev || !rte_atomic32_cmpset(&rev->status, RTE_EPOLL_VALID,
-						 RTE_EPOLL_EXEC))
+		valid_status = RTE_EPOLL_VALID;
+		/* ACQUIRE memory ordering here pairs with the RELEASE
+		 * ordering below, acting as a lock that synchronizes
+		 * updates to the event data.
+		 */
+		if (!rev || !__atomic_compare_exchange_n(&rev->status,
+				    &valid_status, RTE_EPOLL_EXEC, 0,
+				    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 			continue;
 
 		events[count].status        = RTE_EPOLL_VALID;
@@ -1237,8 +1243,11 @@ eal_epoll_process_event(struct epoll_event *evs, unsigned int n,
 			rev->epdata.cb_fun(rev->fd,
 					   rev->epdata.cb_arg);
 
-		rte_compiler_barrier();
-		rev->status = RTE_EPOLL_VALID;
+		/* The RELEASE store ensures the status update is
+		 * observed only after the changes to the other fields.
+		 */
+		__atomic_store_n(&rev->status, RTE_EPOLL_VALID,
+				__ATOMIC_RELEASE);
 		count++;
 	}
 	return count;
@@ -1308,10 +1317,14 @@ rte_epoll_wait(int epfd, struct rte_epoll_event *events,
 static inline void
 eal_epoll_data_safe_free(struct rte_epoll_event *ev)
 {
-	while (!rte_atomic32_cmpset(&ev->status, RTE_EPOLL_VALID,
-				    RTE_EPOLL_INVALID))
-		while (ev->status != RTE_EPOLL_VALID)
+	uint32_t valid_status = RTE_EPOLL_VALID;
+	while (!__atomic_compare_exchange_n(&ev->status, &valid_status,
+		    RTE_EPOLL_INVALID, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+		while (__atomic_load_n(&ev->status,
+				__ATOMIC_RELAXED) != RTE_EPOLL_VALID)
 			rte_pause();
+		valid_status = RTE_EPOLL_VALID;
+	}
 	memset(&ev->epdata, 0, sizeof(ev->epdata));
 	ev->fd = -1;
 	ev->epfd = -1;
@@ -1333,7 +1346,8 @@ rte_epoll_ctl(int epfd, int op, int fd,
 		epfd = rte_intr_tls_epfd();
 
 	if (op == EPOLL_CTL_ADD) {
-		event->status = RTE_EPOLL_VALID;
+		__atomic_store_n(&event->status, RTE_EPOLL_VALID,
+				__ATOMIC_RELAXED);
 		event->fd = fd;  /* ignore fd in event */
 		event->epfd = epfd;
 		ev.data.ptr = (void *)event;
@@ -1345,11 +1359,13 @@ rte_epoll_ctl(int epfd, int op, int fd,
 			op, fd, strerror(errno));
 		if (op == EPOLL_CTL_ADD)
 			/* rollback status when CTL_ADD fail */
-			event->status = RTE_EPOLL_INVALID;
+			__atomic_store_n(&event->status, RTE_EPOLL_INVALID,
+					__ATOMIC_RELAXED);
 		return -1;
 	}
 
-	if (op == EPOLL_CTL_DEL && event->status != RTE_EPOLL_INVALID)
+	if (op == EPOLL_CTL_DEL && __atomic_load_n(&event->status,
+			__ATOMIC_RELAXED) != RTE_EPOLL_INVALID)
 		eal_epoll_data_safe_free(event);
 
 	return 0;
@@ -1378,7 +1394,8 @@ rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
 	case RTE_INTR_EVENT_ADD:
 		epfd_op = EPOLL_CTL_ADD;
 		rev = &intr_handle->elist[efd_idx];
-		if (rev->status != RTE_EPOLL_INVALID) {
+		if (__atomic_load_n(&rev->status,
+				__ATOMIC_RELAXED) != RTE_EPOLL_INVALID) {
 			RTE_LOG(INFO, EAL, "Event already been added.\n");
 			return -EEXIST;
 		}
@@ -1401,7 +1418,8 @@ rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
 	case RTE_INTR_EVENT_DEL:
 		epfd_op = EPOLL_CTL_DEL;
 		rev = &intr_handle->elist[efd_idx];
-		if (rev->status == RTE_EPOLL_INVALID) {
+		if (__atomic_load_n(&rev->status,
+				__ATOMIC_RELAXED) == RTE_EPOLL_INVALID) {
 			RTE_LOG(INFO, EAL, "Event does not exist.\n");
 			return -EPERM;
 		}
@@ -1426,7 +1444,8 @@ rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
 
 	for (i = 0; i < intr_handle->nb_efd; i++) {
 		rev = &intr_handle->elist[i];
-		if (rev->status == RTE_EPOLL_INVALID)
+		if (__atomic_load_n(&rev->status,
+				__ATOMIC_RELAXED) == RTE_EPOLL_INVALID)
 			continue;
 		if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
 			/* force free if the entry valid */
-- 
2.7.4


