[PATCH 2/7] net/gve: clean when there are insufficient Tx descs
Joshua Washington
joshwash at google.com
Tue Jul 8 01:18:06 CEST 2025
A single packet can technically require more than 32 (free_thresh)
descriptors to send. Count the number of descriptors needed to send out
a packet in DQO Tx, and ensure that there are enough free descriptors
in the ring before writing. If there are not enough, drop the
packet and increment drop counters.
Fixes: 4022f9999f56 ("net/gve: support basic Tx data path for DQO")
Cc: junfeng.guo at intel.com
Cc: stable at dpdk.org
Signed-off-by: Joshua Washington <joshwash at google.com>
Reviewed-by: Ankit Garg <nktgrg at google.com>
---
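Note for reviewers: a sketch of why a single packet can exceed free_thresh.
Each mbuf segment consumes at least one data descriptor, and a TSO packet
consumes an extra context descriptor. The helper below is illustrative
only; pkt_nb_descs_sketch and MAX_BUF_PER_DESC are made-up names for this
note, not the driver's gve_tx_pkt_nb_data_descs() or its real buffer-size
constant.

#include <rte_mbuf.h>

#define MAX_BUF_PER_DESC 16384	/* assumed per-descriptor limit, illustration only */

static uint16_t
pkt_nb_descs_sketch(const struct rte_mbuf *tx_pkt)
{
	uint16_t nb_descs = 0;
	const struct rte_mbuf *m;

	/* Each segment may span several data descriptors. */
	for (m = tx_pkt; m != NULL; m = m->next)
		nb_descs += (m->data_len + MAX_BUF_PER_DESC - 1) /
			    MAX_BUF_PER_DESC;

	/* A TSO packet additionally needs one context descriptor. */
	if (tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
		nb_descs++;

	return nb_descs;
}

Under this model, a packet chained across 40 small mbufs needs 40 data
descriptors plus a context descriptor, well above a free_thresh of 32.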
drivers/net/gve/gve_tx_dqo.c | 27 +++++++++++++++++++--------
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 6227fa73b0..652a0e5175 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -74,6 +74,12 @@ gve_tx_clean_dqo(struct gve_tx_queue *txq)
txq->complq_tail = next;
}
+static inline void
+gve_tx_clean_descs_dqo(struct gve_tx_queue *txq, uint16_t nb_descs) {
+	while (nb_descs--)
+ gve_tx_clean_dqo(txq);
+}
+
static uint16_t
gve_tx_pkt_nb_data_descs(struct rte_mbuf *tx_pkt)
{
@@ -107,7 +113,6 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct rte_mbuf **sw_ring;
struct rte_mbuf *tx_pkt;
uint16_t mask, sw_mask;
- uint16_t nb_to_clean;
uint16_t nb_tx = 0;
uint64_t ol_flags;
uint16_t nb_descs;
@@ -130,11 +135,9 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
tx_pkt = tx_pkts[nb_tx];
- if (txq->nb_free <= txq->free_thresh) {
- nb_to_clean = DQO_TX_MULTIPLIER * txq->rs_thresh;
- while (nb_to_clean--)
- gve_tx_clean_dqo(txq);
- }
+ if (txq->nb_free <= txq->free_thresh)
+ gve_tx_clean_descs_dqo(txq, DQO_TX_MULTIPLIER *
+ txq->rs_thresh);
ol_flags = tx_pkt->ol_flags;
first_sw_id = sw_id;
@@ -144,8 +147,16 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
nb_descs = gve_tx_pkt_nb_data_descs(tx_pkt);
nb_descs += tso;
- if (txq->nb_free < nb_descs)
- break;
+
+ /* Clean if there aren't enough descriptors to send the packet. */
+ if (unlikely(txq->nb_free < nb_descs)) {
+ int nb_to_clean = RTE_MAX(DQO_TX_MULTIPLIER * txq->rs_thresh,
+ nb_descs);
+
+ gve_tx_clean_descs_dqo(txq, nb_to_clean);
+ if (txq->nb_free < nb_descs)
+ break;
+ }
if (tso) {
txd = &txr[tx_id];
--
2.50.0.727.gbf7dc18ff4-goog
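A usage note on the new bail-out: when cleaning cannot free enough
descriptors, gve_tx_burst_dqo() returns early, which a caller observes as
a short return from rte_eth_tx_burst(). A minimal retry sketch, assuming
port_id, queue_id, and the mbuf array are set up elsewhere; a real
application would bound the retries or free the unsent mbufs:

#include <rte_ethdev.h>

static void
send_burst_retry(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	/* Resubmit the tail of the burst until everything is queued. */
	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 pkts + sent, nb_pkts - sent);
}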