[PATCH] test: sync soft expiry check in inline IPsec

Aarnav JP ajp at marvell.com
Tue May 5 12:43:50 CEST 2026


Soft expiry events are delivered asynchronously via
the err-ring polling thread. The test checked notify_event
immediately after the RX loop, before the polling thread could
process the err-ring entry — causing intermittent failures
with fast algorithms, where TX/RX complete faster than the
polling thread's usleep() wake-up.

Add a bounded poll loop after RX for soft expiry cases, using
rte_io_rmb() to ensure cross-core visibility. Fix store ordering
in the callback with rte_io_wmb() so that the event subtype is
visible before notify_event is set.

Fixes: 34e8a9d9b4f2 ("test/security: add inline IPsec SA soft expiry cases")
Cc: stable at dpdk.org

Signed-off-by: Aarnav JP <ajp at marvell.com>
---
 app/test/test_security_inline_proto.c | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/app/test/test_security_inline_proto.c b/app/test/test_security_inline_proto.c
index b0cce5ebd9..5db8718a34 100644
--- a/app/test/test_security_inline_proto.c
+++ b/app/test/test_security_inline_proto.c
@@ -38,6 +38,7 @@ test_inline_ipsec_sg(void)
 
 #else
 
+#include <rte_cycles.h>
 #include <rte_eventdev.h>
 #include <rte_event_eth_rx_adapter.h>
 #include <rte_event_eth_tx_adapter.h>
@@ -1306,7 +1307,6 @@ test_ipsec_inline_sa_exp_event_callback(uint16_t port_id,
 		printf("Event descriptor not set\n");
 		return -1;
 	}
-	vector->notify_event = true;
 	if (event_desc->metadata != (uint64_t)vector->sa_data) {
 		printf("Mismatch in event specific metadata\n");
 		return -1;
@@ -1329,6 +1329,10 @@ test_ipsec_inline_sa_exp_event_callback(uint16_t port_id,
 		return -1;
 	}
 
+	/* Ensure event subtype is visible before signaling notify_event. */
+	rte_io_wmb();
+	vector->notify_event = true;
+
 	return 0;
 }
 
@@ -1483,6 +1487,16 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 				break;
 		} while (j++ < 5 || nb_rx == 0);
 
+	/* Wait for soft expiry event from the err-ring poll thread. */
+	if ((flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft) &&
+	    td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		j = 0;
+		while (!vector.notify_event && j++ < 100) {
+			rte_delay_us(1000);
+			rte_io_rmb();
+		}
+	}
+
 	if (!flags->sa_expiry_pkts_hard &&
 			!flags->sa_expiry_bytes_hard &&
 			(nb_rx != nb_sent)) {
@@ -1546,10 +1560,13 @@ test_ipsec_inline_proto_process(struct ipsec_test_data *td,
 		destroy_default_flow(port_id);
 	if (flags->sa_expiry_pkts_soft || flags->sa_expiry_bytes_soft ||
 		flags->sa_expiry_pkts_hard || flags->sa_expiry_bytes_hard) {
-		if (vector.notify_event && (vector.event == event))
-			ret = TEST_SUCCESS;
-		else
+		if (vector.notify_event) {
+			rte_io_rmb();
+			ret = (vector.event == event) ?
+				TEST_SUCCESS : TEST_FAILED;
+		} else {
 			ret = TEST_FAILED;
+		}
 
 		rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_IPSEC,
 			test_ipsec_inline_sa_exp_event_callback, &vector);
-- 
2.43.0



More information about the dev mailing list