[dpdk-dev] [RFC 7/9] net/igb: add handler for tx queue descriptor count

Olivier Matz olivier.matz at 6wind.com
Thu Nov 24 10:54:19 CET 2016


Like for Rx, use a binary search algorithm to get the number of used Tx
descriptors.
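
For reference, the trick relies on the DD (Descriptor Done) status bit that
the NIC sets in each descriptor it has written back: starting just after the
ring tail, completed descriptors form a contiguous region (the hardware
completes them in order), so the boundary between completed and in-flight
entries can be located by bisection instead of a linear scan. The standalone
sketch below mirrors the logic of the eth_igb_tx_queue_count() handler added
by this patch on a simulated ring; all names and the ring simulation are
illustrative only and are not part of the patch:

/* Sketch of the DD-bit binary search on a simulated Tx ring.
 * Build with: cc -o dd_count dd_count.c
 */
#include <stdio.h>
#include <stdint.h>

#define NB_DESC 512	/* ring size, power of two like the HW ring */
#define STAT_DD 0x01	/* "descriptor done" write-back status bit */

static uint32_t status[NB_DESC];	/* simulated write-back status words */
static uint16_t tail;			/* next free slot (software tail) */

/* Approximate the number of in-flight (not yet written back) descriptors,
 * following the same steps as eth_igb_tx_queue_count(). */
static uint32_t
tx_queue_count(void)
{
	int32_t offset, interval, idx, resolution;

	/* ring empty: the most recently submitted descriptor is done */
	idx = (int32_t)tail - 1;
	if (idx < 0)
		idx += NB_DESC;
	if (status[idx] & STAT_DD)
		return 0;

	/* ring full: even the oldest descriptor is not done */
	idx = tail + 1;
	if (idx >= NB_DESC)
		idx -= NB_DESC;
	if (!(status[idx] & STAT_DD))
		return NB_DESC;

	/* accept a coarser answer on large rings to limit descriptor reads */
	resolution = (NB_DESC <= 256) ? 4 : 16;

	/* bisect between tail+1 (done) and tail-1 (not done) */
	interval = NB_DESC >> 1;
	offset = interval;
	do {
		interval >>= 1;
		idx = tail + offset;
		if (idx >= NB_DESC)
			idx -= NB_DESC;
		if (status[idx] & STAT_DD)
			offset += interval;	/* done: boundary is further */
		else
			offset -= interval;	/* in flight: boundary is closer */
	} while (interval >= resolution);

	/* offsets 1..offset (approx.) are done, the rest are in flight */
	return NB_DESC - offset;
}

int
main(void)
{
	uint32_t i;

	/* simulate a 512-entry ring with the 100 most recently submitted
	 * descriptors not yet written back by the NIC */
	tail = 300;
	for (i = 0; i < NB_DESC; i++)
		status[i] = STAT_DD;
	for (i = 0; i < 100; i++)
		status[(tail - 1 - i) % NB_DESC] = 0;

	printf("approx. in-flight Tx descriptors: %u (exact: 100)\n",
	       tx_queue_count());
	return 0;
}

The result is only accurate to 'resolution' descriptors, which is the
trade-off the patch makes to keep the number of descriptor ring reads small.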

PR=52423
Signed-off-by: Olivier Matz <olivier.matz at 6wind.com>
Acked-by: Ivan Boule <ivan.boule at 6wind.com>
---
 drivers/net/e1000/e1000_ethdev.h |  5 +++-
 drivers/net/e1000/igb_ethdev.c   |  1 +
 drivers/net/e1000/igb_rxtx.c     | 51 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 6c25c8d..ad9ddaf 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -300,7 +300,10 @@ int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		struct rte_mempool *mb_pool);
 
 uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
-		uint16_t rx_queue_id);
+				uint16_t rx_queue_id);
+
+uint32_t eth_igb_tx_queue_count(struct rte_eth_dev *dev,
+				uint16_t tx_queue_id);
 
 int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
 
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 08f2a68..a54d374 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -399,6 +399,7 @@ static const struct eth_dev_ops eth_igb_ops = {
 	.rx_queue_intr_disable = eth_igb_rx_queue_intr_disable,
 	.rx_queue_release     = eth_igb_rx_queue_release,
 	.rx_queue_count       = eth_igb_rx_queue_count,
+	.tx_queue_count       = eth_igb_tx_queue_count,
 	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
 	.tx_queue_setup       = eth_igb_tx_queue_setup,
 	.tx_queue_release     = eth_igb_tx_queue_release,
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 6b0111f..2ff2417 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1554,6 +1554,57 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	return offset;
 }
 
+uint32_t
+eth_igb_tx_queue_count(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+	volatile uint32_t *status;
+	struct igb_tx_queue *txq;
+	int32_t offset, interval, idx, resolution;
+
+	txq = dev->data->tx_queues[tx_queue_id];
+
+	/* check if ring empty */
+	idx = txq->tx_tail - 1;
+	if (idx < 0)
+		idx += txq->nb_tx_desc;
+	status = &txq->tx_ring[idx].wb.status;
+	if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
+		return 0;
+
+	/* check if ring full */
+	idx = txq->tx_tail + 1;
+	if (idx >= txq->nb_tx_desc)
+		idx -= txq->nb_tx_desc;
+	status = &txq->tx_ring[idx].wb.status;
+	if (!(*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD)))
+		return txq->nb_tx_desc;
+
+	/* decrease the precision if ring is large */
+	if (txq->nb_tx_desc <= 256)
+		resolution = 4;
+	else
+		resolution = 16;
+
+	/* use a binary search */
+	interval = txq->nb_tx_desc >> 1;
+	offset = interval;
+
+	do {
+		interval >>= 1;
+		idx = txq->tx_tail + offset;
+		if (idx >= txq->nb_tx_desc)
+			idx -= txq->nb_tx_desc;
+
+		status = &txq->tx_ring[idx].wb.status;
+		if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
+			offset += interval;
+		else
+			offset -= interval;
+	} while (interval >= resolution);
+
+	return txq->nb_tx_desc - offset;
+}
+
 int
 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
 {
-- 
2.8.1
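
Usage note: this patch registers the handler behind the tx_queue_count
dev_ops field, so applications would reach it through the generic ethdev
wrapper added earlier in this series. Assuming that wrapper is named
rte_eth_tx_queue_count() (an assumption here; only the driver-level handler
appears in this patch), a caller could throttle its transmit path roughly
like this:

	/* hypothetical: back off when Tx queue 0 of port 0 is nearly full;
	 * rte_eth_tx_queue_count() is the assumed ethdev wrapper name,
	 * drain_completions() is a hypothetical application helper */
	uint32_t used = rte_eth_tx_queue_count(0, 0);
	if (used + burst_size > ring_size)
		drain_completions();
	else
		rte_eth_tx_burst(0, 0, pkts, burst_size);

Keep in mind that the value is only accurate to the resolution chosen by the
driver (4 or 16 descriptors depending on ring size), so callers should leave
some headroom.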


