[dpdk-dev] [PATCH v4 32/58] net/txgbe: add Rx and Tx queue info get

Jiawen Wu jiawenwu at trustnetic.com
Mon Oct 19 10:53:49 CEST 2020


Add Rx and Tx queue information get operations.

Signed-off-by: Jiawen Wu <jiawenwu at trustnetic.com>
---
 drivers/net/txgbe/txgbe_ethdev.c |  2 +
 drivers/net/txgbe/txgbe_ethdev.h |  6 +++
 drivers/net/txgbe/txgbe_rxtx.c   | 77 ++++++++++++++++++++++++++++++++
 3 files changed, 85 insertions(+)
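
A minimal usage sketch (not part of the patch, kept below the '---' marker),
assuming a configured and started port: the generic
rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get() calls dispatch to the
rxq_info_get/txq_info_get callbacks added here. The helper name and the
port/queue ids are placeholders.

#include <inttypes.h>
#include <stdio.h>

#include <rte_ethdev.h>

/* Print basic Rx/Tx queue configuration for one queue of a port.
 * The generic ethdev calls below end up in the PMD's
 * rxq_info_get/txq_info_get callbacks.
 */
static void
show_queue_info(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_rxq_info rx_qinfo;
        struct rte_eth_txq_info tx_qinfo;

        if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_qinfo) == 0)
                printf("rxq %u: %u desc, free_thresh %u, drop_en %u, "
                       "offloads 0x%" PRIx64 "\n",
                       queue_id, rx_qinfo.nb_desc,
                       rx_qinfo.conf.rx_free_thresh,
                       rx_qinfo.conf.rx_drop_en,
                       rx_qinfo.conf.offloads);

        if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_qinfo) == 0)
                printf("txq %u: %u desc, free_thresh %u, "
                       "offloads 0x%" PRIx64 "\n",
                       queue_id, tx_qinfo.nb_desc,
                       tx_qinfo.conf.tx_free_thresh,
                       tx_qinfo.conf.offloads);
}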

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 9151542ef..d74d822ad 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1743,6 +1743,8 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.uc_hash_table_set          = txgbe_uc_hash_table_set,
 	.uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
 	.set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
+	.rxq_info_get               = txgbe_rxq_info_get,
+	.txq_info_get               = txgbe_txq_info_get,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index f47c64ca6..ab1ffe9fc 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -117,6 +117,12 @@ int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
 int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
+void txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index f1b038013..fd6a3f436 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -13,6 +13,7 @@
 #include <unistd.h>
 #include <inttypes.h>
 
+#include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_log.h>
@@ -1946,9 +1947,48 @@ txgbe_dev_tx_queue_release(void *txq)
 	txgbe_tx_queue_release(txq);
 }
 
+/* (Re)set dynamic txgbe_tx_queue fields to defaults */
+static void __rte_cold
+txgbe_reset_tx_queue(struct txgbe_tx_queue *txq)
+{
+	static const struct txgbe_tx_desc zeroed_desc = {0};
+	struct txgbe_tx_entry *txe = txq->sw_ring;
+	uint16_t prev, i;
+
+	/* Zero out HW ring memory */
+	for (i = 0; i < txq->nb_tx_desc; i++)
+		txq->tx_ring[i] = zeroed_desc;
+
+	/* Initialize SW ring entries */
+	prev = (uint16_t)(txq->nb_tx_desc - 1);
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		volatile struct txgbe_tx_desc *txd = &txq->tx_ring[i];
+
+		txd->dw3 = rte_cpu_to_le_32(TXGBE_TXD_DD);
+		txe[i].mbuf = NULL;
+		txe[i].last_id = i;
+		txe[prev].next_id = i;
+		prev = i;
+	}
+
+	txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
+	txq->tx_tail = 0;
+
+	/*
+	 * Always allow 1 descriptor to be un-allocated to avoid
+	 * a H/W race condition
+	 */
+	txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+	txq->ctx_curr = 0;
+	memset((void *)&txq->ctx_cache, 0,
+		TXGBE_CTX_NUM * sizeof(struct txgbe_ctx_info));
+}
+
 static const struct txgbe_txq_ops def_txq_ops = {
 	.release_mbufs = txgbe_tx_queue_release_mbufs,
 	.free_swring = txgbe_tx_free_swring,
+	.reset = txgbe_reset_tx_queue,
 };
 
 /* Takes an ethdev and a queue and sets up the tx function to be used based on
@@ -3218,3 +3258,40 @@ txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	return 0;
 }
 
+void
+txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct txgbe_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+	qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct txgbe_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.offloads = txq->offloads;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
-- 
2.18.4