[PATCH v3] net/af_xdp: fix umem map size for zero copy
Frank Du
frank.du at intel.com
Thu May 23 08:53:02 CEST 2024
The current calculation assumes that the mbufs are contiguous. However,
this assumption does not hold when the memory spans across hugepages.
Fix this by reading the size directly from the mempool's memory chunks.
Fixes: d8a210774e1d ("net/af_xdp: support unaligned umem chunks")
Cc: stable at dpdk.org
Signed-off-by: Frank Du <frank.du at intel.com>
---
v2:
* Add virtual contiguity detection for multiple memhdrs.
v3:
* Use RTE_ALIGN_FLOOR to get the aligned addr
* Add check on the first memhdr of memory chunks
---
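Reviewer note: the new helper's flow, restated as a self-contained sketch
for easier reading. This is simplified (logging dropped), and the name
umem_region_info() is illustrative rather than the patch's
get_memhdr_info(). It walks the mempool's memory chunks, verifies that
they form one virtually contiguous region, and returns the page-aligned
base address plus the total length:

#include <stdint.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_mempool.h>

static uintptr_t
umem_region_info(struct rte_mempool *mp, uint64_t *align, size_t *len)
{
        struct rte_mempool_memhdr *memhdr, *next;
        uintptr_t memhdr_addr, aligned_addr;
        size_t total;

        memhdr = STAILQ_FIRST(&mp->mem_list);
        if (memhdr == NULL)
                return 0; /* mempool not populated */

        /* xsk_umem__create() needs a page-aligned base address */
        memhdr_addr = (uintptr_t)memhdr->addr;
        aligned_addr = RTE_ALIGN_FLOOR(memhdr_addr, getpagesize());
        *align = memhdr_addr - aligned_addr;
        total = memhdr->len;

        /* every following chunk must start where the previous one ends */
        for (next = STAILQ_NEXT(memhdr, next); next != NULL;
             memhdr = next, next = STAILQ_NEXT(memhdr, next)) {
                if ((uintptr_t)next->addr !=
                    (uintptr_t)memhdr->addr + memhdr->len)
                        return 0; /* gap: cannot map as a single umem */
                total += next->len;
        }

        *len = total;
        return aligned_addr;
}

Returning 0 doubles as the error signal, matching the patch: a
page-aligned base address of 0 never occurs for a populated mempool.
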
drivers/net/af_xdp/rte_eth_af_xdp.c | 40 ++++++++++++++++++++++++-----
1 file changed, 33 insertions(+), 7 deletions(-)
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 6ba455bb9b..986665d1d4 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -1040,16 +1040,39 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
}
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
+static inline uintptr_t get_memhdr_info(struct rte_mempool *mp, uint64_t *align, size_t *len)
{
- struct rte_mempool_memhdr *memhdr;
+ struct rte_mempool_memhdr *memhdr, *next;
uintptr_t memhdr_addr, aligned_addr;
+ size_t memhdr_len = 0;
+ /* get the mempool base addr and align */
memhdr = STAILQ_FIRST(&mp->mem_list);
+ if (!memhdr) {
+ AF_XDP_LOG(ERR, "The mempool is not populated\n");
+ return 0;
+ }
memhdr_addr = (uintptr_t)memhdr->addr;
- aligned_addr = memhdr_addr & ~(getpagesize() - 1);
+ aligned_addr = RTE_ALIGN_FLOOR(memhdr_addr, getpagesize());
*align = memhdr_addr - aligned_addr;
+ memhdr_len += memhdr->len;
+
+ /* check that multiple memhdrs are virtually contiguous */
+ next = STAILQ_NEXT(memhdr, next);
+ while (next) {
+ if ((uintptr_t)next->addr != (uintptr_t)memhdr->addr + memhdr->len) {
+ AF_XDP_LOG(ERR, "Memory chunks not virtually contiguous, "
+ "next: %p, cur: %p (len: %zu)\n",
+ next->addr, memhdr->addr, memhdr->len);
+ return 0;
+ }
+ /* virtually contiguous, advance to the next chunk */
+ memhdr = next;
+ memhdr_len += memhdr->len;
+ next = STAILQ_NEXT(memhdr, next);
+ }
+ *len = memhdr_len;
return aligned_addr;
}
@@ -1126,6 +1149,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
void *base_addr = NULL;
struct rte_mempool *mb_pool = rxq->mb_pool;
uint64_t umem_size, align = 0;
+ size_t len = 0;
if (internals->shared_umem) {
if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
@@ -1157,10 +1181,12 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
}
umem->mb_pool = mb_pool;
- base_addr = (void *)get_base_addr(mb_pool, &align);
- umem_size = (uint64_t)mb_pool->populated_size *
- (uint64_t)usr_config.frame_size +
- align;
+ base_addr = (void *)get_memhdr_info(mb_pool, &align, &len);
+ if (!base_addr) {
+ AF_XDP_LOG(ERR, "The memory pool can't be mapped into umem\n");
+ goto err;
+ }
+ umem_size = (uint64_t)len + align;
ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
&rxq->fq, &rxq->cq, &usr_config);
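
Reviewer note: the arithmetic behind "umem_size = (uint64_t)len + align"
in a minimal standalone form. The addresses and lengths below are made up
for illustration, and ALIGN_FLOOR mirrors RTE_ALIGN_FLOOR for a
power-of-two alignment:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* same idea as RTE_ALIGN_FLOOR for a power-of-two alignment */
#define ALIGN_FLOOR(v, a) ((v) & ~((uintptr_t)(a) - 1))

int main(void)
{
        /* hypothetical: first chunk starts 0x100 bytes into a page */
        uintptr_t memhdr_addr = 0x7f0000001100;
        size_t len = 2 * 1024 * 1024; /* total length of all chunks */
        uintptr_t page = 4096;

        uintptr_t base = ALIGN_FLOOR(memhdr_addr, page); /* ...1000 */
        uint64_t align = memhdr_addr - base;             /* 0x100 */
        uint64_t umem_size = (uint64_t)len + align;

        /* the umem must cover [base, memhdr_addr + len), hence len + align */
        printf("base=%#llx align=%llu size=%llu\n",
               (unsigned long long)base,
               (unsigned long long)align,
               (unsigned long long)umem_size);
        return 0;
}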
--
2.34.1