[PATCH 23.11 1/3] net/gve: send whole packet when mbuf is large

Shani Peretz shperetz at nvidia.com
Thu Dec 25 09:51:22 CET 2025



> -----Original Message-----
> From: Joshua Washington <joshwash at google.com>
> Sent: Wednesday, 24 December 2025 0:31
> To: stable at dpdk.org; NBU-Contact-Thomas Monjalon (EXTERNAL)
> <thomas at monjalon.net>; Junfeng Guo <junfengg at nvidia.com>; Jeroen de
> Borst <jeroendb at google.com>; Rushil Gupta <rushilg at google.com>; Joshua
> Washington <joshwash at google.com>
> Cc: Ankit Garg <nktgrg at google.com>
> Subject: [PATCH 23.11 1/3] net/gve: send whole packet when mbuf is large
> 
> 
> Before this patch, only one descriptor would be written per mbuf in a packet.
> In cases like TSO, it is possible for a single mbuf to have more bytes than
> GVE_TX_MAX_BUF_SIZE_DQO. As such, instead of simply truncating the data
> down to this size, the driver should actually write descriptors for the rest
> of the data in the mbuf segment.
> 
> To that effect, the number of descriptors needed to send a packet must be
> corrected to account for the potential additional descriptors.
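
For anyone reading along: the descriptor count here is a per-segment
ceiling division. Below is a minimal standalone sketch of the arithmetic,
with simplified stand-in types (not rte_mbuf) and a 16 KiB cap assumed
purely for illustration; the real limit is GVE_TX_MAX_BUF_SIZE_DQO from
the driver headers:

  #include <stdint.h>
  #include <stdio.h>

  /* illustrative value only; the driver defines GVE_TX_MAX_BUF_SIZE_DQO */
  #define MAX_BUF_SIZE 16384

  /* simplified stand-in for a chained mbuf */
  struct seg {
          uint16_t data_len;
          struct seg *next;
  };

  /* one descriptor per MAX_BUF_SIZE chunk of every segment */
  static unsigned int nb_data_descs(const struct seg *s)
  {
          unsigned int n = 0;

          for (; s != NULL; s = s->next)
                  n += (s->data_len + MAX_BUF_SIZE - 1) / MAX_BUF_SIZE;
          return n;
  }

  int main(void)
  {
          /* a TSO mbuf carrying 40000 bytes in a single segment */
          struct seg big = { .data_len = 40000, .next = NULL };

          /* prints 3; the old code would have written only 1 descriptor */
          printf("%u descriptors\n", nb_data_descs(&big));
          return 0;
  }
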
> 
> Fixes: 4022f9999f56 ("net/gve: support basic Tx data path for DQO")
> Cc: stable at dpdk.org
> 
> Signed-off-by: Joshua Washington <joshwash at google.com>
> Reviewed-by: Ankit Garg <nktgrg at google.com>
> ---
>  .mailmap                     |  1 +
>  drivers/net/gve/gve_tx_dqo.c | 53 ++++++++++++++++++++++++++----------
>  2 files changed, 39 insertions(+), 15 deletions(-)
> 
> diff --git a/.mailmap b/.mailmap
> index 96b7809f89..eb6a3afa44 100644
> --- a/.mailmap
> +++ b/.mailmap
> @@ -119,6 +119,7 @@ Andy Green <andy at warmcat.com>
>  Andy Moreton <andy.moreton at amd.com> <amoreton at xilinx.com> <amoreton at solarflare.com>
>  Andy Pei <andy.pei at intel.com>
>  Anirudh Venkataramanan <anirudh.venkataramanan at intel.com>
> +Ankit Garg <nktgrg at google.com>
>  Ankur Dwivedi <adwivedi at marvell.com> <ankur.dwivedi at caviumnetworks.com> <ankur.dwivedi at cavium.com>
>  Anna Lukin <annal at silicom.co.il>
>  Anoob Joseph <anoobj at marvell.com> <anoob.joseph at caviumnetworks.com>
> diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
> index 95a02bab17..a4ba8c3536 100644
> --- a/drivers/net/gve/gve_tx_dqo.c
> +++ b/drivers/net/gve/gve_tx_dqo.c
> @@ -74,6 +74,19 @@ gve_tx_clean_dqo(struct gve_tx_queue *txq)
>         txq->complq_tail = next;
>  }
> 
> +static uint16_t
> +gve_tx_pkt_nb_data_descs(struct rte_mbuf *tx_pkt)
> +{
> +       int nb_descs = 0;
> +
> +       while (tx_pkt) {
> +               nb_descs += (GVE_TX_MAX_BUF_SIZE_DQO - 1 + tx_pkt->data_len) /
> +                       GVE_TX_MAX_BUF_SIZE_DQO;
> +               tx_pkt = tx_pkt->next;
> +       }
> +       return nb_descs;
> +}
> +
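
For the record, the expression above computes
ceil(data_len / GVE_TX_MAX_BUF_SIZE_DQO) for each segment; writing the
round-up as (max - 1 + len) / max avoids a separate remainder branch.
With the illustrative 16 KiB cap from the sketch above, a 40000-byte
segment yields (16383 + 40000) / 16384 = 3 descriptors.
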
>  uint16_t
>  gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  {
> @@ -88,7 +101,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>         const char *reason;
>         uint16_t nb_tx = 0;
>         uint64_t ol_flags;
> -       uint16_t nb_used;
> +       uint16_t nb_descs;
>         uint16_t tx_id;
>         uint16_t sw_id;
>         uint64_t bytes;
> @@ -122,11 +135,14 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>                 }
> 
>                 ol_flags = tx_pkt->ol_flags;
> -               nb_used = tx_pkt->nb_segs;
>                 first_sw_id = sw_id;
> 
>                 csum = !!(ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK_DQO);
> 
> +               nb_descs = gve_tx_pkt_nb_data_descs(tx_pkt);
> +               if (txq->nb_free < nb_descs)
> +                       break;
> +
>                 do {
>                         if (sw_ring[sw_id] != NULL)
>                                 PMD_DRV_LOG(DEBUG, "Overwriting an entry in sw_ring");
> @@ -135,22 +151,29 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>                         if (!tx_pkt->data_len)
>                                 goto finish_mbuf;
> 
> -                       txd = &txr[tx_id];
>                         sw_ring[sw_id] = tx_pkt;
> 
> -                       /* fill Tx descriptor */
> -                       txd->pkt.buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
> -                       txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO;
> -                       txd->pkt.compl_tag = rte_cpu_to_le_16(first_sw_id);
> -                       txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len, GVE_TX_MAX_BUF_SIZE_DQO);
> -                       txd->pkt.end_of_packet = 0;
> -                       txd->pkt.checksum_offload_enable = csum;
> +                       /* fill Tx descriptors */
> +                       int mbuf_offset = 0;
> +                       while (mbuf_offset < tx_pkt->data_len) {
> +                               uint64_t buf_addr = rte_mbuf_data_iova(tx_pkt) +
> +                                       mbuf_offset;
> +
> +                               txd = &txr[tx_id];
> +                               txd->pkt.buf_addr = rte_cpu_to_le_64(buf_addr);
> +                               txd->pkt.compl_tag = rte_cpu_to_le_16(first_sw_id);
> +                               txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO;
> +                               txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len - mbuf_offset,
> +                                                           GVE_TX_MAX_BUF_SIZE_DQO);
> +                               txd->pkt.end_of_packet = 0;
> +                               txd->pkt.checksum_offload_enable = csum;
> +
> +                               mbuf_offset += txd->pkt.buf_size;
> +                               tx_id = (tx_id + 1) & mask;
> +                       }
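
The splitting loop in isolation, as a runnable toy (again with the 16 KiB
cap assumed only for illustration, not the driver's actual value): it
prints the three chunks a 40000-byte segment is carved into.

  #include <stdio.h>

  /* illustrative value only; the driver defines GVE_TX_MAX_BUF_SIZE_DQO */
  #define MAX_BUF_SIZE 16384

  int main(void)
  {
          unsigned int len = 40000, off = 0;

          /* one descriptor per chunk of at most MAX_BUF_SIZE bytes */
          while (off < len) {
                  unsigned int chunk = len - off < MAX_BUF_SIZE ?
                          len - off : MAX_BUF_SIZE;

                  printf("desc: offset %u, size %u\n", off, chunk);
                  off += chunk;
          }
          return 0;
  }

Note that every descriptor of the packet keeps the same completion tag
(first_sw_id), and tx_id now advances once per descriptor while sw_id
still advances once per mbuf; this divergence is why the free-space check
earlier in the patch uses the computed nb_descs instead of tx_pkt->nb_segs.
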
> 
> -                       /* size of desc_ring and sw_ring could be different */
> -                       tx_id = (tx_id + 1) & mask;
>  finish_mbuf:
>                         sw_id = (sw_id + 1) & sw_mask;
> -
>                         bytes += tx_pkt->data_len;
>                         tx_pkt = tx_pkt->next;
>                 } while (tx_pkt);
> @@ -159,8 +182,8 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>                 txd = &txr[(tx_id - 1) & mask];
>                 txd->pkt.end_of_packet = 1;
> 
> -               txq->nb_free -= nb_used;
> -               txq->nb_used += nb_used;
> +               txq->nb_free -= nb_descs;
> +               txq->nb_used += nb_descs;
>         }
> 
>         /* update the tail pointer if any packets were processed */
> --
> 2.52.0.351.gbe84eed79e-goog


Thanks Joshua, I'll add the series to 23.11.


