[PATCH v11 2/2] net/af_xdp: Refactor af_xdp_tx_zc
Ariel Otilibili
ariel.otilibili at 6wind.com
Wed Feb 26 20:55:04 CET 2025
Both branches of the loop share the same logic: reserve a Tx
descriptor, then fill in its address and length. Move that common
code into reserve_and_fill().
Bugzilla ID: 1440
Suggested-by: Maryam Tahhan <mtahhan at redhat.com>
Signed-off-by: Ariel Otilibili <ariel.otilibili at 6wind.com>
Acked-by: Maryam Tahhan <mtahhan at redhat.com>
---
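Note for reviewers (not for the commit message): reserve_and_fill()
keeps the existing unaligned-chunk encoding, where desc->addr packs the
chunk-relative base address in the low 48 bits and the headroom offset
in the top 16 bits. Below is a minimal standalone sketch of that
encoding, with hypothetical sample values; the two macros match their
definitions in <linux/if_xdp.h>:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48
#define XSK_UNALIGNED_BUF_ADDR_MASK \
	((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)

int main(void)
{
	uint64_t addr = 0x2000;  /* hypothetical mbuf base, UMEM-relative */
	uint64_t offset = 0x100; /* hypothetical headroom to packet data */

	/* The encoding reserve_and_fill() writes into desc->addr. */
	uint64_t desc_addr = addr | (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);

	/* The consumer side recovers both fields. */
	printf("base=0x%" PRIx64 " off=0x%" PRIx64 "\n",
	       desc_addr & XSK_UNALIGNED_BUF_ADDR_MASK,
	       desc_addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	return 0;
}

This is also why the helper computes *pkt_ptr from addr + offset before
the shift: xsk_umem__get_data() needs the plain byte offset, while
desc->addr needs the packed form.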
drivers/net/af_xdp/rte_eth_af_xdp.c | 75 ++++++++++++++++-------------
1 file changed, 42 insertions(+), 33 deletions(-)
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 092bcb73aa0a..013f30dfc3bf 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -536,21 +536,49 @@ kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
}
}
+static inline struct xdp_desc *
+reserve_and_fill(struct pkt_tx_queue *txq, struct rte_mbuf *mbuf,
+ struct xsk_umem_info *umem, void **pkt_ptr)
+{
+ struct xdp_desc *desc = NULL;
+ uint64_t addr, offset;
+ uint32_t idx_tx;
+
+ if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx))
+ goto out;
+
+ desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
+ desc->len = mbuf->pkt_len;
+
+ addr = (uint64_t)mbuf - (uint64_t)umem->buffer
+ - umem->mb_pool->header_size;
+ offset = rte_pktmbuf_mtod(mbuf, uint64_t) - (uint64_t)mbuf
+ + umem->mb_pool->header_size;
+
+ if (pkt_ptr)
+ *pkt_ptr = xsk_umem__get_data(umem->buffer, addr + offset);
+
+ offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+ desc->addr = addr | offset;
+
+out:
+ return desc;
+}
+
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
struct pkt_tx_queue *txq = queue;
struct xsk_umem_info *umem = txq->umem;
- struct rte_mbuf *mbuf;
+ struct rte_mbuf *mbuf, *local_mbuf = NULL;
unsigned long tx_bytes = 0;
int i;
- uint32_t idx_tx;
uint16_t count = 0;
struct xdp_desc *desc;
- uint64_t addr, offset;
struct xsk_ring_cons *cq = &txq->pair->cq;
uint32_t free_thresh = cq->size >> 1;
+ void *pkt;
if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
@@ -559,51 +587,32 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
mbuf = bufs[i];
if (mbuf->pool == umem->mb_pool) {
- if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
+ desc = reserve_and_fill(txq, mbuf, umem, NULL);
+ if (!desc) {
kick_tx(txq, cq);
- if (!xsk_ring_prod__reserve(&txq->tx, 1,
- &idx_tx))
+ desc = reserve_and_fill(txq, mbuf, umem, NULL);
+ if (!desc)
goto out;
}
- desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
- desc->len = mbuf->pkt_len;
- addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
- umem->mb_pool->header_size;
- offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
- (uint64_t)mbuf +
- umem->mb_pool->header_size;
- offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
- desc->addr = addr | offset;
+
tx_bytes += desc->len;
count++;
} else {
- struct rte_mbuf *local_mbuf =
- rte_pktmbuf_alloc(umem->mb_pool);
- void *pkt;
-
- if (local_mbuf == NULL)
+ local_mbuf = rte_pktmbuf_alloc(umem->mb_pool);
+ if (!local_mbuf)
goto out;
- if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
+ desc = reserve_and_fill(txq, local_mbuf, umem, &pkt);
+ if (!desc) {
rte_pktmbuf_free(local_mbuf);
goto out;
}
- desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
desc->len = mbuf->pkt_len;
-
- addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
- umem->mb_pool->header_size;
- offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
- (uint64_t)local_mbuf +
- umem->mb_pool->header_size;
- pkt = xsk_umem__get_data(umem->buffer, addr + offset);
- offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
- desc->addr = addr | offset;
rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
- desc->len);
- tx_bytes += desc->len;
+ desc->len);
rte_pktmbuf_free(mbuf);
+ tx_bytes += desc->len;
count++;
}
}
--
2.30.2