[PATCH v2 3/4] net/null: optimize Rx

Stephen Hemminger stephen at networkplumber.org
Wed Apr 2 01:47:28 CEST 2025


No other rx_burst function checks its arguments; remove the check.

Since rx_burst on a given queue can only safely be called by
a single thread at a time, there is no need for atomic operations
on the statistics. Also add byte count statistics.

Signed-off-by: Stephen Hemminger <stephen at networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 38 ++++++++++++++++-----------------
 1 file changed, 18 insertions(+), 20 deletions(-)
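
Reviewer note, not part of the change: dropping the atomics relies on
the usual ethdev threading rules, where a given RX queue is polled by
at most one lcore at a time, so the per-queue counters are only ever
written from a single thread. A minimal run-to-completion sketch of
that model (port/queue ids and burst size are made up):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

/* This lcore is the only caller of rx_burst on (port_id, queue_id),
 * per the rte_eth_rx_burst() threading rules, so the driver's
 * rx_pkts/rx_bytes counters for this queue never race. */
static void
poll_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *bufs[BURST_SIZE];

	for (;;) {
		uint16_t nb = rte_eth_rx_burst(port_id, queue_id,
					       bufs, BURST_SIZE);

		/* A real application would process the packets here. */
		rte_pktmbuf_free_bulk(bufs, nb);
	}
}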

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index b7cc90300d..40ce5c6ed2 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -37,7 +37,9 @@ struct null_queue {
 	struct rte_mempool *mb_pool;
 	void *dummy_packet;
 
-	RTE_ATOMIC(uint64_t) rx_pkts;
+	uint64_t rx_pkts;
+	uint64_t rx_bytes;
+
 	RTE_ATOMIC(uint64_t) tx_pkts;
 	RTE_ATOMIC(uint64_t) tx_bytes;
 };
@@ -85,12 +87,10 @@ RTE_LOG_REGISTER_DEFAULT(eth_null_logtype, NOTICE);
 static uint16_t
 eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
-	int i;
+	unsigned int i;
 	struct null_queue *h = q;
 	unsigned int packet_size;
-
-	if ((q == NULL) || (bufs == NULL))
-		return 0;
+	uint64_t bytes = 0;
 
 	packet_size = h->internals->packet_size;
 	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
@@ -99,24 +99,22 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	for (i = 0; i < nb_bufs; i++) {
 		bufs[i]->data_len = (uint16_t)packet_size;
 		bufs[i]->pkt_len = packet_size;
+		bytes += packet_size;
 		bufs[i]->port = h->internals->port_id;
 	}
 
-	/* NOTE: review for potential ordering optimization */
-	rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
-
-	return i;
+	h->rx_pkts += nb_bufs;
+	h->rx_bytes += bytes;
+	return nb_bufs;
 }
 
 static uint16_t
 eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
-	int i;
+	unsigned int i;
 	struct null_queue *h = q;
 	unsigned int packet_size;
-
-	if ((q == NULL) || (bufs == NULL))
-		return 0;
+	uint64_t bytes = 0;
 
 	packet_size = h->internals->packet_size;
 	if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
@@ -127,13 +125,13 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 					packet_size);
 		bufs[i]->data_len = (uint16_t)packet_size;
 		bufs[i]->pkt_len = packet_size;
+		bytes += packet_size;
 		bufs[i]->port = h->internals->port_id;
 	}
 
-	/* NOTE: review for potential ordering optimization */
-	rte_atomic_fetch_add_explicit(&h->rx_pkts, i, rte_memory_order_seq_cst);
-
-	return i;
+	h->rx_pkts += nb_bufs;
+	h->rx_bytes += bytes;
+	return nb_bufs;
 }
 
 static uint16_t
@@ -335,7 +333,6 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
 			RTE_MIN(dev->data->nb_rx_queues,
 				RTE_DIM(internal->rx_null_queues)));
 	for (i = 0; i < num_stats; i++) {
-		/* NOTE: review for atomic access */
 		igb_stats->q_ipackets[i] =
 			internal->rx_null_queues[i].rx_pkts;
 		rx_total += igb_stats->q_ipackets[i];
@@ -368,9 +365,10 @@ eth_stats_reset(struct rte_eth_dev *dev)
 		return -EINVAL;
 
 	internal = dev->data->dev_private;
-	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
-		/* NOTE: review for atomic access */
+	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++) {
 		internal->rx_null_queues[i].rx_pkts = 0;
+		internal->rx_null_queues[i].rx_bytes = 0;
+	}
 
 	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
 		struct null_queue *q = &internal->tx_null_queues[i];
-- 
2.47.2
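
P.S. Illustrative only: with the byte counter in place, an application
reads it through the normal stats API. q_ipackets is filled from the
per-queue rx_pkts in eth_stats_get() above; q_ibytes here assumes
rx_bytes gets exported the same way.

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
show_rx_stats(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	/* Per-queue counters; queue_id must be below
	 * RTE_ETHDEV_QUEUE_STAT_CNTRS. */
	printf("rxq %u: %" PRIu64 " pkts, %" PRIu64 " bytes\n",
	       queue_id, stats.q_ipackets[queue_id],
	       stats.q_ibytes[queue_id]);
}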