[PATCH v5 2/4] net/af_packet: RX/TX bulk free, unlikely hint
scott.k.mitch1 at gmail.com
Tue Feb 3 08:07:38 CET 2026
From: Scott Mitchell <scott.k.mitch1 at gmail.com>
- Use rte_pktmbuf_free_bulk() in the TX path instead of individual
rte_pktmbuf_free() calls for better batch efficiency (a standalone
sketch of the pattern follows the diffstat below)
- Add unlikely() hints for error paths (oversized packets, VLAN
insertion failures, sendto errors) to optimize branch prediction
- Remove the unnecessary early nb_pkts == 0 check, since the loop
already handles that case and applications may never call with 0 frames.
Signed-off-by: Scott Mitchell <scott.k.mitch1 at gmail.com>
---
drivers/net/af_packet/rte_eth_af_packet.c | 41 ++++++++---------------
1 file changed, 14 insertions(+), 27 deletions(-)
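
For reference, a minimal standalone sketch of the TX-side pattern applied
here, assuming only the public DPDK mbuf API: tx_burst_sketch() and
max_frame_len are placeholder names (the driver's equivalents are
eth_af_packet_tx() and pkt_q->frame_data_size), and the AF_PACKET ring
copy and the sendto() kick are elided.

#include <stdint.h>

#include <rte_branch_prediction.h>
#include <rte_mbuf.h>

/* Placeholder for eth_af_packet_tx(): same drop/count/bulk-free shape,
 * without the AF_PACKET ring handling and the sendto() kick. */
static uint16_t
tx_burst_sketch(struct rte_mbuf **bufs, uint16_t nb_pkts,
		uint32_t max_frame_len)
{
	uint16_t i, num_tx = 0;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = bufs[i];

		/* Error paths are cold, so hint the branch predictor;
		 * the patch applies the same unlikely() hint to the
		 * VLAN-insert and sendto() failure branches. */
		if (unlikely(mbuf->pkt_len > max_frame_len))
			continue; /* dropped, still freed in bulk below */

		/* ... copy packet data into the TX ring frame here ... */
		num_tx++;
	}

	/* One bulk call frees every mbuf consumed by the loop, replacing
	 * a per-packet rte_pktmbuf_free(). */
	rte_pktmbuf_free_bulk(bufs, i);

	return num_tx;
}
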
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index d0cc2c419a..51ac95ff5e 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -161,9 +161,6 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint32_t tp_status;
unsigned int framecount, framenum;
- if (unlikely(nb_pkts == 0))
- return 0;
-
/*
* Reads the given number of packets from the AF_PACKET socket one by
* one and copies the packet data into a newly allocated mbuf.
@@ -261,9 +258,6 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
uint32_t num_tx_bytes = 0;
uint16_t i;
- if (unlikely(nb_pkts == 0))
- return 0;
-
memset(&pfd, 0, sizeof(pfd));
pfd.fd = pkt_q->sockfd;
pfd.events = POLLOUT;
@@ -271,24 +265,17 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
framecount = pkt_q->framecount;
framenum = pkt_q->framenum;
- ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
for (i = 0; i < nb_pkts; i++) {
- mbuf = *bufs++;
+ mbuf = bufs[i];
- /* drop oversized packets */
- if (mbuf->pkt_len > pkt_q->frame_data_size) {
- rte_pktmbuf_free(mbuf);
+ /* Drop oversized packets. Insert VLAN if necessary */
+ if (unlikely(mbuf->pkt_len > pkt_q->frame_data_size ||
+ ((mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) != 0 &&
+ rte_vlan_insert(&mbuf) != 0))) {
continue;
}
- /* insert vlan info if necessary */
- if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
- if (rte_vlan_insert(&mbuf)) {
- rte_pktmbuf_free(mbuf);
- continue;
- }
- }
-
+ ppd = (struct tpacket2_hdr *)pkt_q->rd[framenum].iov_base;
/*
* poll() will almost always return POLLOUT, even if there
* are no extra buffers available
@@ -312,6 +299,9 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
pbuf = (uint8_t *)ppd + ETH_AF_PACKET_FRAME_OVERHEAD;
+ ppd->tp_len = mbuf->pkt_len;
+ ppd->tp_snaplen = mbuf->pkt_len;
+
struct rte_mbuf *tmp_mbuf = mbuf;
do {
uint16_t data_len = rte_pktmbuf_data_len(tmp_mbuf);
@@ -320,23 +310,20 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
tmp_mbuf = tmp_mbuf->next;
} while (tmp_mbuf);
- ppd->tp_len = mbuf->pkt_len;
- ppd->tp_snaplen = mbuf->pkt_len;
-
/* release incoming frame and advance ring buffer */
tpacket_write_status(&ppd->tp_status, TP_STATUS_SEND_REQUEST);
if (++framenum >= framecount)
framenum = 0;
- ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
-
num_tx++;
num_tx_bytes += mbuf->pkt_len;
- rte_pktmbuf_free(mbuf);
}
+ rte_pktmbuf_free_bulk(&bufs[0], i);
+
/* kick-off transmits */
- if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1 &&
- errno != ENOBUFS && errno != EAGAIN) {
+ if (unlikely(num_tx > 0 &&
+ sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1 &&
+ errno != ENOBUFS && errno != EAGAIN)) {
/*
* In case of a ENOBUFS/EAGAIN error all of the enqueued
* packets will be considered successful even though only some
--
2.39.5 (Apple Git-154)