[PATCH v2] net/iavf: fix txq flags setting after Tx path selection

Ciara Loftus ciara.loftus at intel.com
Mon Feb 16 11:22:53 CET 2026


Ensure the txq flags "use_ctx" and "use_vec_entry" are set/unset
properly after Tx path selection. Prior to this commit, these flags were
only configured if a vector path was chosen. Fix this by making their
configuration unconditional. Also simplify how the "use_vec_entry" flag
is set: remove the dedicated function that sets the flag and instead
set it inline.

Fixes: ebcfb039afa8 ("net/iavf: use common Tx path selection infrastructure")

Signed-off-by: Ciara Loftus <ciara.loftus at intel.com>
Acked-by: Bruce Richardson <bruce.richardson at intel.com>
---
v2:
* Get a pointer to the selected tx path features and use it instead of
 directly referencing it twice in a row.
* Use >= SIMD_128 instead of != 0 to determine whether a selected path
 is vector (future-proofing).
---
 drivers/net/intel/iavf/iavf_rxtx.c          | 17 ++++++++---------
 drivers/net/intel/iavf/iavf_rxtx.h          |  1 -
 drivers/net/intel/iavf/iavf_rxtx_vec_avx2.c |  7 -------
 3 files changed, 8 insertions(+), 17 deletions(-)

diff --git a/drivers/net/intel/iavf/iavf_rxtx.c b/drivers/net/intel/iavf/iavf_rxtx.c
index e621d4bf47..4bcdd202c0 100644
--- a/drivers/net/intel/iavf/iavf_rxtx.c
+++ b/drivers/net/intel/iavf/iavf_rxtx.c
@@ -3871,6 +3871,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #ifdef RTE_ARCH_X86
 	struct ci_tx_queue *txq;
 	int i;
+	const struct ci_tx_path_features *selected_features;
 #endif
 	struct ci_tx_path_features req_features = {
 		.tx_offloads = dev->data->dev_conf.txmode.offloads,
@@ -3905,15 +3906,13 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 
 out:
 #ifdef RTE_ARCH_X86
-	if (iavf_tx_path_infos[adapter->tx_func_type].features.simd_width != 0) {
-		for (i = 0; i < dev->data->nb_tx_queues; i++) {
-			txq = dev->data->tx_queues[i];
-			if (!txq)
-				continue;
-			iavf_txq_vec_setup(txq);
-			txq->use_ctx =
-				iavf_tx_path_infos[adapter->tx_func_type].features.ctx_desc;
-		}
+	selected_features = &iavf_tx_path_infos[adapter->tx_func_type].features;
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		if (!txq)
+			continue;
+		txq->use_ctx = selected_features->ctx_desc;
+		txq->use_vec_entry = selected_features->simd_width >= RTE_VECT_SIMD_128;
 	}
 #endif
 
diff --git a/drivers/net/intel/iavf/iavf_rxtx.h b/drivers/net/intel/iavf/iavf_rxtx.h
index fe3385dcf6..80b06518b0 100644
--- a/drivers/net/intel/iavf/iavf_rxtx.h
+++ b/drivers/net/intel/iavf/iavf_rxtx.h
@@ -592,7 +592,6 @@ int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
 int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
 int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
 int iavf_rxq_vec_setup(struct ci_rx_queue *rxq);
-int iavf_txq_vec_setup(struct ci_tx_queue *txq);
 uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
 				   uint16_t nb_pkts);
 uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
diff --git a/drivers/net/intel/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/intel/iavf/iavf_rxtx_vec_avx2.c
index 2e18be3616..db0462f0f5 100644
--- a/drivers/net/intel/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/intel/iavf/iavf_rxtx_vec_avx2.c
@@ -1800,13 +1800,6 @@ iavf_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return iavf_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, true);
 }
 
-int __rte_cold
-iavf_txq_vec_setup(struct ci_tx_queue *txq)
-{
-	txq->use_vec_entry = true;
-	return 0;
-}
-
 void __rte_cold
 iavf_rx_queue_release_mbufs_vec(struct ci_rx_queue *rxq)
 {
-- 
2.43.0



More information about the dev mailing list