[dpdk-dev] [PATCH 6/8] net/iavf: cleanup Tx buffers

Robin Zhang robinx.zhang at intel.com
Sun Sep 27 09:26:24 CEST 2020


Add support in the iavf driver for the rte_eth_tx_done_cleanup API,
which lets an application force the driver to free mbufs already
consumed on a Tx ring.

Signed-off-by: Robin Zhang <robinx.zhang at intel.com>
---
 drivers/net/iavf/iavf_ethdev.c |  1 +
 drivers/net/iavf/iavf_rxtx.c   | 68 +++++++++++++++++++++++++++++++++-
 drivers/net/iavf/iavf_rxtx.h   |  1 +
 3 files changed, 69 insertions(+), 1 deletion(-)
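
For context, a minimal application-side sketch of how this callback is
reached through the ethdev layer. The port/queue parameters and the
free_cnt of 32 below are illustrative, not part of this patch; with the
patch applied, the call is dispatched to iavf_dev_tx_done_cleanup() for
iavf ports.

#include <stdio.h>
#include <rte_ethdev.h>

static void
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	/* Ask the PMD to free up to 32 already-transmitted packets on
	 * this Tx queue; a free_cnt of 0 requests freeing as many as
	 * possible. Returns the number of packets freed on success, or
	 * a negative errno (-ENOTSUP without driver support).
	 */
	int nb = rte_eth_tx_done_cleanup(port_id, queue_id, 32);

	if (nb < 0)
		printf("tx_done_cleanup failed: %d\n", nb);
	else
		printf("freed %d packet(s)\n", nb);
}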

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index b32302c43..5b3c68f2e 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -121,6 +121,7 @@ static const struct eth_dev_ops iavf_eth_dev_ops = {
 	.rx_queue_intr_enable       = iavf_dev_rx_queue_intr_enable,
 	.rx_queue_intr_disable      = iavf_dev_rx_queue_intr_disable,
 	.filter_ctrl                = iavf_dev_filter_ctrl,
+	.tx_done_cleanup	    = iavf_dev_tx_done_cleanup,
 };
 
 static int
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 05a7dd898..1b0efe043 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1905,7 +1905,6 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		tx_offload.l3_len = tx_pkt->l3_len;
 		tx_offload.l4_len = tx_pkt->l4_len;
 		tx_offload.tso_segsz = tx_pkt->tso_segsz;
-
 		/* Calculate the number of context descriptors needed. */
 		nb_ctx = iavf_calc_context_desc(ol_flags);
 
@@ -2208,6 +2207,73 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 }
 
+static int
+iavf_tx_done_cleanup_full(struct iavf_tx_queue *txq,
+			uint32_t free_cnt)
+{
+	struct iavf_tx_entry *swr_ring = txq->sw_ring;
+	uint16_t i, tx_last, tx_id;
+	uint16_t nb_tx_free_last;
+	uint16_t nb_tx_to_clean;
+	uint32_t pkt_cnt;
+
+	/* Start freeing mbufs from the entry after tx_tail */
+	tx_last = txq->tx_tail;
+	tx_id  = swr_ring[tx_last].next_id;
+
+	if (txq->nb_free == 0 && iavf_xmit_cleanup(txq))
+		return 0;
+
+	nb_tx_to_clean = txq->nb_free;
+	nb_tx_free_last = txq->nb_free;
+	if (!free_cnt)
+		free_cnt = txq->nb_tx_desc;
+
+	/* Loop through swr_ring to count the number of
+	 * freeable mbufs and packets.
+	 */
+	for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+		for (i = 0; i < nb_tx_to_clean &&
+			pkt_cnt < free_cnt &&
+			tx_id != tx_last; i++) {
+			if (swr_ring[tx_id].mbuf != NULL) {
+				rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+				swr_ring[tx_id].mbuf = NULL;
+
+				/*
+				 * last segment in the packet,
+				 * increment packet count
+				 */
+				pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+			}
+
+			tx_id = swr_ring[tx_id].next_id;
+		}
+
+		if (txq->rs_thresh > txq->nb_tx_desc -
+			txq->nb_free || tx_id == tx_last)
+			break;
+
+		if (pkt_cnt < free_cnt) {
+			if (iavf_xmit_cleanup(txq))
+				break;
+
+			nb_tx_to_clean = txq->nb_free - nb_tx_free_last;
+			nb_tx_free_last = txq->nb_free;
+		}
+	}
+
+	return (int)pkt_cnt;
+}
+
+int
+iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+	struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
+
+	return iavf_tx_done_cleanup_full(q, free_cnt);
+}
+
 void
 iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 		     struct rte_eth_rxq_info *qinfo)
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 59625a979..3d02c6589 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -379,6 +379,7 @@ int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			   const struct rte_eth_txconf *tx_conf);
 int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
 void iavf_dev_tx_queue_release(void *txq);
 void iavf_stop_queues(struct rte_eth_dev *dev);
 uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-- 
2.17.1