[PATCH v2] net/cnxk: support Tx queue descriptor count
skoteshwar at marvell.com
Fri Feb 23 16:33:57 CET 2024
From: Satha Rao <skoteshwar at marvell.com>
Add CNXK APIs to get the used Tx queue descriptor count.
Signed-off-by: Satha Rao <skoteshwar at marvell.com>
---
Depends-on: series-30833 ("ethdev: support Tx queue used count")
v2:
Updated release notes and fixed API for CPT queues.
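
For context, a minimal usage sketch of the fast path API this wires up
(hypothetical helper and port/queue ids; assumes the dependent ethdev
series above is applied and the port is configured and started):

#include <stdio.h>
#include <rte_ethdev.h>

/* Query how many Tx descriptors are still in flight on a queue.
 * rte_eth_tx_queue_count() returns the used count on success or a
 * negative errno (e.g. -ENOTSUP) if the driver lacks the callback.
 */
static void
report_txq_usage(uint16_t port_id, uint16_t queue_id)
{
        int used = rte_eth_tx_queue_count(port_id, queue_id);

        if (used < 0)
                printf("port %u txq %u: query failed: %d\n",
                       port_id, queue_id, used);
        else
                printf("port %u txq %u: %d descriptors in use\n",
                       port_id, queue_id, used);
}

On cnxk this resolves to the cn9k/cn10k callbacks added below; when
RTE_ETH_TX_OFFLOAD_SECURITY is enabled, the security variant is picked
so CPT queue occupancy is accounted for as well.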
doc/guides/nics/features/cnxk.ini | 1 +
doc/guides/rel_notes/release_24_03.rst | 1 +
drivers/net/cnxk/cn10k_tx_select.c | 22 ++++++++++++++++++++++
drivers/net/cnxk/cn9k_tx_select.c | 23 +++++++++++++++++++++++
drivers/net/cnxk/cnxk_ethdev.h | 24 ++++++++++++++++++++++++
5 files changed, 71 insertions(+)
diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini
index 94e7a6a..ab18f38 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -40,6 +40,7 @@ Timesync = Y
Timestamp offload = Y
Rx descriptor status = Y
Tx descriptor status = Y
+Tx queue count = Y
Basic stats = Y
Stats per queue = Y
Extended stats = Y
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 32d0ad8..5f8fb9e 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -110,6 +110,7 @@ New Features
* Added support for ``RTE_FLOW_ITEM_TYPE_PPPOES`` flow item.
* Added support for ``RTE_FLOW_ACTION_TYPE_SAMPLE`` flow action.
+ * Added support for fast path function ``rte_eth_tx_queue_count``.
* **Updated Marvell OCTEON EP driver.**
diff --git a/drivers/net/cnxk/cn10k_tx_select.c b/drivers/net/cnxk/cn10k_tx_select.c
index 404f5ba..aa0620e 100644
--- a/drivers/net/cnxk/cn10k_tx_select.c
+++ b/drivers/net/cnxk/cn10k_tx_select.c
@@ -20,6 +20,24 @@
eth_dev->tx_pkt_burst;
}
+#if defined(RTE_ARCH_ARM64)
+static int
+cn10k_nix_tx_queue_count(void *tx_queue)
+{
+ struct cn10k_eth_txq *txq = (struct cn10k_eth_txq *)tx_queue;
+
+ return cnxk_nix_tx_queue_count(txq->fc_mem, txq->sqes_per_sqb_log2);
+}
+
+static int
+cn10k_nix_tx_queue_sec_count(void *tx_queue)
+{
+ struct cn10k_eth_txq *txq = (struct cn10k_eth_txq *)tx_queue;
+
+ return cnxk_nix_tx_queue_sec_count(txq->fc_mem, txq->sqes_per_sqb_log2, txq->cpt_fc);
+}
+#endif
+
void
cn10k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
@@ -63,6 +81,10 @@
if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
}
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+ eth_dev->tx_queue_count = cn10k_nix_tx_queue_sec_count;
+ else
+ eth_dev->tx_queue_count = cn10k_nix_tx_queue_count;
rte_mb();
#else
diff --git a/drivers/net/cnxk/cn9k_tx_select.c b/drivers/net/cnxk/cn9k_tx_select.c
index e08883f..0e09ed6 100644
--- a/drivers/net/cnxk/cn9k_tx_select.c
+++ b/drivers/net/cnxk/cn9k_tx_select.c
@@ -20,6 +20,24 @@
eth_dev->tx_pkt_burst;
}
+#if defined(RTE_ARCH_ARM64)
+static int
+cn9k_nix_tx_queue_count(void *tx_queue)
+{
+ struct cn9k_eth_txq *txq = (struct cn9k_eth_txq *)tx_queue;
+
+ return cnxk_nix_tx_queue_count(txq->fc_mem, txq->sqes_per_sqb_log2);
+}
+
+static int
+cn9k_nix_tx_queue_sec_count(void *tx_queue)
+{
+ struct cn9k_eth_txq *txq = (struct cn9k_eth_txq *)tx_queue;
+
+ return cnxk_nix_tx_queue_sec_count(txq->fc_mem, txq->sqes_per_sqb_log2, txq->cpt_fc);
+}
+#endif
+
void
cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
@@ -60,6 +78,11 @@
pick_tx_func(eth_dev, nix_eth_tx_vec_burst_mseg);
}
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+ eth_dev->tx_queue_count = cn9k_nix_tx_queue_sec_count;
+ else
+ eth_dev->tx_queue_count = cn9k_nix_tx_queue_count;
+
rte_mb();
#else
RTE_SET_USED(eth_dev);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 37b6395..f810bb8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -458,6 +458,30 @@ struct cnxk_eth_txq_sp {
return ((struct cnxk_eth_txq_sp *)__txq) - 1;
}
+static inline int
+cnxk_nix_tx_queue_count(uint64_t *mem, uint16_t sqes_per_sqb_log2)
+{
+ uint64_t val;
+
+ val = rte_atomic_load_explicit(mem, rte_memory_order_relaxed);
+ val = (val << sqes_per_sqb_log2) - val;
+
+ return (val & 0xFFFF);
+}
+
+static inline int
+cnxk_nix_tx_queue_sec_count(uint64_t *mem, uint16_t sqes_per_sqb_log2, uint64_t *sec_fc)
+{
+ uint64_t sq_cnt, sec_cnt, val;
+
+ sq_cnt = rte_atomic_load_explicit(mem, rte_memory_order_relaxed);
+ sq_cnt = (sq_cnt << sqes_per_sqb_log2) - sq_cnt;
+ sec_cnt = rte_atomic_load_explicit(sec_fc, rte_memory_order_relaxed);
+ val = RTE_MAX(sq_cnt, sec_cnt);
+
+ return (val & 0xFFFF);
+}
+
/* Common ethdev ops */
extern struct eth_dev_ops cnxk_eth_dev_ops;
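
A note on the helper arithmetic above, as I read the cnxk flow-control
scheme (an interpretation, not part of the patch): fc_mem holds the
count of consumed SQBs, and each SQB carries 2^sqes_per_sqb_log2 SQEs
with one slot reserved as the next-SQB link, so
(val << sqes_per_sqb_log2) - val computes val * (2^sqes_per_sqb_log2 - 1),
i.e. the in-flight SQE count, and the 0xFFFF mask bounds it to the
16-bit queue depth. A standalone check with hypothetical values:

#include <stdint.h>
#include <assert.h>

int main(void)
{
        /* Hypothetical snapshot: 10 consumed SQBs, 32 SQEs per SQB
         * (sqes_per_sqb_log2 = 5), one SQE per SQB used as the link,
         * so 10 * 31 = 310 descriptors are counted as in use.
         */
        uint64_t val = 10;
        uint16_t sqes_per_sqb_log2 = 5;

        val = (val << sqes_per_sqb_log2) - val;
        assert((val & 0xFFFF) == 310);
        return 0;
}

The security variant takes RTE_MAX() of the NIX SQ count and the CPT
flow-control count, presumably because with inline IPsec offload a
packet may still occupy a CPT queue entry, so the larger of the two
is the conservative used-descriptor estimate.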
--
1.8.3.1