patch 'net/gve: allocate Rx QPL pages using malloc' has been queued to stable release 22.11.11
luca.boccassi at gmail.com
Mon Oct 27 17:18:39 CET 2025
Hi,
FYI, your patch has been queued to stable release 22.11.11.
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 10/29/25, so please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://github.com/bluca/dpdk-stable
This queued commit can be viewed at:
https://github.com/bluca/dpdk-stable/commit/1d1ce9c3bf042be1763e2164ab6427f2d674a590
Thanks.
Luca Boccassi
---
From 1d1ce9c3bf042be1763e2164ab6427f2d674a590 Mon Sep 17 00:00:00 2001
From: Praveen Kaligineedi <pkaligineedi at google.com>
Date: Thu, 4 Sep 2025 13:59:32 -0700
Subject: [PATCH] net/gve: allocate Rx QPL pages using malloc
Allocating the QPL for an RX queue might fail if enough contiguous IOVA
memory cannot be allocated. This commonly occurs when using 2MB hugepages,
because 1024 4K buffers are allocated for each RX ring by default,
requiring 4MB of IOVA-contiguous memory per ring. However, the only
requirement for RX QPLs is that each individual 4K buffer be IOVA
contiguous, not the entire QPL. Therefore, malloc will be used to allocate
RX QPLs instead.

Note that TX queues require the entire QPL to be IOVA contiguous, so they
will continue to use the memzone-based allocation.
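
As a rough sketch of the two allocation paths described above (GVE_PAGE_SZ
and the function names are illustrative stand-ins for the driver's PAGE_SIZE
and its gve_alloc_using_malloc()/gve_alloc_using_mz() helpers):

	/* Sketch only: mirrors the RX (per-page malloc) and TX (single
	 * IOVA-contiguous memzone) allocation strategies. */
	#include <errno.h>
	#include <rte_malloc.h>
	#include <rte_memzone.h>
	#include <rte_memory.h>
	#include <rte_lcore.h>

	#define GVE_PAGE_SZ 4096

	/* RX: each 4K buffer only needs to be IOVA-contiguous with itself,
	 * so allocate pages individually and record each page's IOVA. */
	static int
	rx_qpl_alloc_sketch(void **bufs, rte_iova_t *iovas, uint32_t pages)
	{
		uint32_t i;

		for (i = 0; i < pages; i++) {
			bufs[i] = rte_malloc_socket(NULL, GVE_PAGE_SZ,
						    GVE_PAGE_SZ, rte_socket_id());
			if (bufs[i] == NULL)
				goto unwind;
			iovas[i] = rte_malloc_virt2iova(bufs[i]);
		}
		return 0;

	unwind:
		while (i > 0)
			rte_free(bufs[--i]);
		return -ENOMEM;
	}

	/* TX: the whole QPL must be IOVA contiguous, so a single
	 * RTE_MEMZONE_IOVA_CONTIG reservation is still required. */
	static const struct rte_memzone *
	tx_qpl_alloc_sketch(const char *name, uint32_t pages)
	{
		return rte_memzone_reserve_aligned(name,
						   (size_t)pages * GVE_PAGE_SZ,
						   rte_socket_id(),
						   RTE_MEMZONE_IOVA_CONTIG,
						   GVE_PAGE_SZ);
	}
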
Fixes: a46583cf43c8 ("net/gve: support Rx/Tx")
Signed-off-by: Praveen Kaligineedi <pkaligineedi at google.com>
Signed-off-by: Joshua Washington <joshwash at google.com>
---
drivers/net/gve/gve_ethdev.c | 142 +++++++++++++++++++++++++++++------
drivers/net/gve/gve_ethdev.h | 5 +-
drivers/net/gve/gve_rx.c | 4 +-
3 files changed, 126 insertions(+), 25 deletions(-)
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 0796d37760..43b2b8b2b0 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -28,13 +28,45 @@ gve_write_version(uint8_t *driver_version_register)
writeb('\n', driver_version_register);
}
+static const struct rte_memzone *
+gve_alloc_using_mz(const char *name, uint32_t num_pages)
+{
+ const struct rte_memzone *mz;
+ mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE,
+ rte_socket_id(),
+ RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
+ if (mz == NULL)
+ PMD_DRV_LOG(ERR, "Failed to alloc memzone %s.", name);
+ return mz;
+}
+
+static int
+gve_alloc_using_malloc(void **bufs, uint32_t num_entries)
+{
+ uint32_t i;
+
+ for (i = 0; i < num_entries; i++) {
+ bufs[i] = rte_malloc_socket(NULL, PAGE_SIZE, PAGE_SIZE, rte_socket_id());
+ if (bufs[i] == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to malloc");
+ goto free_bufs;
+ }
+ }
+ return 0;
+
+free_bufs:
+ while (i > 0)
+ rte_free(bufs[--i]);
+
+ return -ENOMEM;
+}
+
static int
-gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
+gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages,
+ bool is_rx)
{
- char z_name[RTE_MEMZONE_NAMESIZE];
struct gve_queue_page_list *qpl;
- const struct rte_memzone *mz;
- dma_addr_t page_bus;
+ int err = 0;
uint32_t i;
if (priv->num_registered_pages + pages >
@@ -45,31 +77,79 @@ gve_alloc_queue_page_list(struct gve_priv *priv, uint32_t id, uint32_t pages)
return -EINVAL;
}
qpl = &priv->qpl[id];
- snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
- mz = rte_memzone_reserve_aligned(z_name, pages * PAGE_SIZE,
- rte_socket_id(),
- RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
- if (mz == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc %s.", z_name);
- return -ENOMEM;
- }
+
qpl->page_buses = rte_zmalloc("qpl page buses", pages * sizeof(dma_addr_t), 0);
if (qpl->page_buses == NULL) {
PMD_DRV_LOG(ERR, "Failed to alloc qpl %u page buses", id);
return -ENOMEM;
}
- page_bus = mz->iova;
- for (i = 0; i < pages; i++) {
- qpl->page_buses[i] = page_bus;
- page_bus += PAGE_SIZE;
+
+ if (is_rx) {
+ /* RX QPL need not be IOVA contiguous.
+ * Allocate 4K size buffers using malloc
+ */
+ qpl->qpl_bufs = rte_zmalloc("qpl bufs",
+ pages * sizeof(void *), 0);
+ if (qpl->qpl_bufs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc qpl bufs");
+ err = -ENOMEM;
+ goto free_qpl_page_buses;
+ }
+
+ err = gve_alloc_using_malloc(qpl->qpl_bufs, pages);
+ if (err)
+ goto free_qpl_page_bufs;
+
+ /* Populate the IOVA addresses */
+ for (i = 0; i < pages; i++)
+ qpl->page_buses[i] =
+ rte_malloc_virt2iova(qpl->qpl_bufs[i]);
+ } else {
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ snprintf(z_name, sizeof(z_name), "gve_%s_qpl%d", priv->pci_dev->device.name, id);
+
+ /* TX QPL needs to be IOVA contiguous
+ * Allocate QPL using memzone
+ */
+ qpl->mz = gve_alloc_using_mz(z_name, pages);
+ if (!qpl->mz) {
+ err = -ENOMEM;
+ goto free_qpl_page_buses;
+ }
+
+ /* Populate the IOVA addresses */
+ for (i = 0; i < pages; i++)
+ qpl->page_buses[i] = qpl->mz->iova + i * PAGE_SIZE;
}
+
qpl->id = id;
- qpl->mz = mz;
qpl->num_entries = pages;
priv->num_registered_pages += pages;
return 0;
+
+free_qpl_page_bufs:
+ rte_free(qpl->qpl_bufs);
+free_qpl_page_buses:
+ rte_free(qpl->page_buses);
+ return err;
+}
+
+/*
+ * Free QPL bufs in RX QPLs. Should not be used on TX QPLs.
+ **/
+static void
+gve_free_qpl_bufs(struct gve_queue_page_list *qpl)
+{
+ uint32_t i;
+
+ for (i = 0; i < qpl->num_entries; i++)
+ rte_free(qpl->qpl_bufs[i]);
+
+ rte_free(qpl->qpl_bufs);
+ qpl->qpl_bufs = NULL;
}
static void
@@ -79,9 +159,22 @@ gve_free_qpls(struct gve_priv *priv)
uint16_t nb_rxqs = priv->max_nb_rxq;
uint32_t i;
- for (i = 0; i < nb_txqs + nb_rxqs; i++) {
- if (priv->qpl[i].mz != NULL)
+ if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+ return;
+
+ /* Free TX QPLs. */
+ for (i = 0; i < nb_txqs; i++) {
+ if (priv->qpl[i].mz) {
rte_memzone_free(priv->qpl[i].mz);
+ priv->qpl[i].mz = NULL;
+ }
+ rte_free(priv->qpl[i].page_buses);
+ }
+
+ /* Free RX QPLs. */
+ for (; i < nb_rxqs; i++) {
+ if (priv->qpl[i].qpl_bufs)
+ gve_free_qpl_bufs(&priv->qpl[i]);
rte_free(priv->qpl[i].page_buses);
}
@@ -562,11 +655,16 @@ gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
}
for (i = 0; i < priv->max_nb_txq + priv->max_nb_rxq; i++) {
- if (i < priv->max_nb_txq)
+ bool is_rx;
+
+ if (i < priv->max_nb_txq) {
pages = priv->tx_pages_per_qpl;
- else
+ is_rx = false;
+ } else {
pages = priv->rx_data_slot_cnt;
- err = gve_alloc_queue_page_list(priv, i, pages);
+ is_rx = true;
+ }
+ err = gve_alloc_queue_page_list(priv, i, pages, is_rx);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to alloc qpl %u.", i);
goto err_qpl;
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index b7702a1249..effacc2795 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -39,7 +39,10 @@ struct gve_queue_page_list {
uint32_t id; /* unique id */
uint32_t num_entries;
dma_addr_t *page_buses; /* the dma addrs of the pages */
- const struct rte_memzone *mz;
+ union {
+ const struct rte_memzone *mz; /* memzone allocated for TX queue */
+ void **qpl_bufs; /* RX qpl-buffer list allocated using malloc*/
+ };
};
/* A TX desc ring entry */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 50f9f5c370..e020b4af10 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -105,9 +105,9 @@ gve_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
len = rte_be_to_cpu_16(rxd->len) - GVE_RX_PAD;
rxe = rxq->sw_ring[rx_id];
if (rxq->is_gqi_qpl) {
- addr = (uint64_t)(rxq->qpl->mz->addr) + rx_id * PAGE_SIZE + GVE_RX_PAD;
+ addr = (uint64_t)rxq->qpl->qpl_bufs[rx_id] + GVE_RX_PAD;
rte_memcpy((void *)((size_t)rxe->buf_addr + rxe->data_off),
- (void *)(size_t)addr, len);
+ (void *)(size_t)addr, len);
}
rxe->pkt_len = len;
rxe->data_len = len;
--
2.47.3