patch 'net/ice: fix path selection for QinQ Tx offload' has been queued to stable release 24.11.4
Kevin Traynor
ktraynor at redhat.com
Fri Nov 21 12:20:54 CET 2025
Hi,
FYI, your patch has been queued to stable release 24.11.4.
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/26/25. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply it to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.
Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable
This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/3fdaa456b2b0018fcdb5d0596e3216bd5e920958
Thanks.
Kevin
---
>From 3fdaa456b2b0018fcdb5d0596e3216bd5e920958 Mon Sep 17 00:00:00 2001
From: Bruce Richardson <bruce.richardson at intel.com>
Date: Wed, 12 Nov 2025 11:57:26 +0000
Subject: [PATCH] net/ice: fix path selection for QinQ Tx offload
[ upstream commit 61ccab85e3972d6e3ee61b3e6a6a6872a33e5ac3 ]
The capability flags for the vector offload path include the QinQ
offload capability, but in fact the offload path lacks any ability to
create context descriptors. This means it cannot insert multiple
VLAN tags for QinQ support, so move the offload from the VECTOR_OFFLOAD
list to the NO_VECTOR list. Similarly, remove the check for the QinQ
mbuf flag on packets being transmitted, since that offload is
invalid to request if the feature is not enabled.
Fixes: 808a17b3c1e6 ("net/ice: add Rx AVX512 offload path")
Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>
Acked-by: Ciara Loftus <ciara.loftus at intel.com>
---
drivers/net/intel/ice/ice_rxtx_vec_common.h | 207 ++++++++++++++++++++
1 file changed, 207 insertions(+)
create mode 100644 drivers/net/intel/ice/ice_rxtx_vec_common.h
diff --git a/drivers/net/intel/ice/ice_rxtx_vec_common.h b/drivers/net/intel/ice/ice_rxtx_vec_common.h
new file mode 100644
index 0000000000..39581cb7ae
--- /dev/null
+++ b/drivers/net/intel/ice/ice_rxtx_vec_common.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_RXTX_VEC_COMMON_H_
+#define _ICE_RXTX_VEC_COMMON_H_
+
+#include "../common/rx.h"
+#include "ice_rxtx.h"
+
+static inline int
+ice_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
+{
+ return (txq->ice_tx_ring[idx].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
+ rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
+}
+
+static inline void
+_ice_rx_queue_release_mbufs_vec(struct ci_rx_queue *rxq)
+{
+ const unsigned int mask = rxq->nb_rx_desc - 1;
+ unsigned int i;
+
+ if (unlikely(!rxq->sw_ring)) {
+ PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
+ return;
+ }
+
+ if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ } else {
+ for (i = rxq->rx_tail;
+ i != rxq->rxrearm_start;
+ i = (i + 1) & mask) {
+ if (rxq->sw_ring[i].mbuf)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ }
+
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+#define ICE_TX_NO_VECTOR_FLAGS ( \
+ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+ RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
+ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
+ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP)
+
+#define ICE_TX_VECTOR_OFFLOAD ( \
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
+ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
+
+#define ICE_VECTOR_PATH 0
+#define ICE_VECTOR_OFFLOAD_PATH 1
+
+static inline int
+ice_rx_vec_queue_default(struct ci_rx_queue *rxq)
+{
+ if (!rxq)
+ return -1;
+
+ if (!ci_rxq_vec_capable(rxq->nb_rx_desc, rxq->rx_free_thresh))
+ return -1;
+
+ if (rxq->proto_xtr != PROTO_XTR_NONE)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+ice_tx_vec_queue_default(struct ci_tx_queue *txq)
+{
+ if (!txq)
+ return -1;
+
+ if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
+ txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
+ return -1;
+
+ if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
+ return -1;
+
+ if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
+ return ICE_VECTOR_OFFLOAD_PATH;
+
+ return ICE_VECTOR_PATH;
+}
+
+static inline int
+ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
+{
+ int i;
+ struct ci_rx_queue *rxq;
+ int ret = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ ret = (ice_rx_vec_queue_default(rxq));
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static inline int
+ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
+{
+ int i;
+ struct ci_tx_queue *txq;
+ int ret = 0;
+ int result = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ ret = ice_tx_vec_queue_default(txq);
+ if (ret < 0)
+ return -1;
+ if (ret == ICE_VECTOR_OFFLOAD_PATH)
+ result = ret;
+ }
+
+ return result;
+}
+
+static inline void
+ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
+ uint64_t *txd_hi)
+{
+ uint64_t ol_flags = tx_pkt->ol_flags;
+ uint32_t td_cmd = 0;
+ uint32_t td_offset = 0;
+
+ /* Tx Checksum Offload */
+ /* SET MACLEN */
+ td_offset |= (tx_pkt->l2_len >> 1) <<
+ ICE_TX_DESC_LEN_MACLEN_S;
+
+ /* Enable L3 checksum offload */
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ td_offset |= (tx_pkt->l3_len >> 2) <<
+ ICE_TX_DESC_LEN_IPLEN_S;
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+ td_offset |= (tx_pkt->l3_len >> 2) <<
+ ICE_TX_DESC_LEN_IPLEN_S;
+ } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
+ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+ td_offset |= (tx_pkt->l3_len >> 2) <<
+ ICE_TX_DESC_LEN_IPLEN_S;
+ }
+
+ /* Enable L4 checksum offloads */
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
+ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+ td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
+ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+ td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ case RTE_MBUF_F_TX_UDP_CKSUM:
+ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+ td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ break;
+ default:
+ break;
+ }
+
+ *txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
+
+ /* Tx VLAN insertion Offload */
+ if (ol_flags & RTE_MBUF_F_TX_VLAN) {
+ td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
+ *txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
+ ICE_TXD_QW1_L2TAG1_S);
+ }
+
+ *txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
+}
+#endif
--
2.51.0
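
To illustrate the point made in the commit message above: the Tx data
descriptor filled in by the vector path (see ice_txd_enable_offload() in the
patch) has room for only one VLAN tag, set via ICE_TX_DESC_CMD_IL2TAG1, while
a second, outer tag would need an additional context descriptor that the
vector path never builds. The standalone C sketch below models that
distinction with made-up structures and flag values; it is not the driver's
real descriptor layout or API, only a simplified illustration of why QinQ
insertion has to be handled outside the vector path.

/*
 * Illustrative sketch only: field names and bit values are invented for
 * demonstration and do not match the ice hardware descriptor layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_F_TX_VLAN  (1ULL << 0)   /* stands in for RTE_MBUF_F_TX_VLAN */
#define DEMO_F_TX_QINQ  (1ULL << 1)   /* stands in for RTE_MBUF_F_TX_QINQ */

struct demo_pkt {
	uint64_t ol_flags;
	uint16_t vlan_tci;        /* inner tag */
	uint16_t vlan_tci_outer;  /* outer tag, only meaningful for QinQ */
};

/* One data descriptor: carries at most one tag (L2TAG1). */
struct demo_data_desc {
	uint16_t l2tag1;
	bool insert_l2tag1;
};

/* Context descriptor: needed for the second, outer tag (L2TAG2). */
struct demo_ctx_desc {
	uint16_t l2tag2;
	bool insert_l2tag2;
};

/* Vector-style path: writes only a data descriptor per packet. */
static int demo_vec_tx(const struct demo_pkt *p, struct demo_data_desc *d)
{
	if (p->ol_flags & DEMO_F_TX_QINQ)
		return -1;  /* no context descriptor available: cannot honour QinQ */
	if (p->ol_flags & DEMO_F_TX_VLAN) {
		d->insert_l2tag1 = true;
		d->l2tag1 = p->vlan_tci;
	}
	return 0;
}

/* Scalar-style path: may emit an extra context descriptor for QinQ. */
static void demo_scalar_tx(const struct demo_pkt *p,
			   struct demo_data_desc *d, struct demo_ctx_desc *c)
{
	if (p->ol_flags & (DEMO_F_TX_VLAN | DEMO_F_TX_QINQ)) {
		d->insert_l2tag1 = true;
		d->l2tag1 = p->vlan_tci;
	}
	if (p->ol_flags & DEMO_F_TX_QINQ) {
		c->insert_l2tag2 = true;
		c->l2tag2 = p->vlan_tci_outer;
	}
}

int main(void)
{
	struct demo_pkt qinq = { DEMO_F_TX_VLAN | DEMO_F_TX_QINQ, 100, 200 };
	struct demo_data_desc d = {0};
	struct demo_ctx_desc c = {0};

	if (demo_vec_tx(&qinq, &d) < 0)
		printf("vector path: QinQ not possible, needs a non-vector path\n");
	demo_scalar_tx(&qinq, &d, &c);
	printf("scalar path: L2TAG1=%u L2TAG2=%u\n",
	       (unsigned)d.l2tag1, (unsigned)c.l2tag2);
	return 0;
}

Compiled and run, the sketch reports that the vector-style path has to reject
the QinQ packet while the scalar-style path fills in both tags, which is why
RTE_ETH_TX_OFFLOAD_QINQ_INSERT now lives in ICE_TX_NO_VECTOR_FLAGS.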
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2025-11-21 11:05:11.759890234 +0000
+++ 0070-net-ice-fix-path-selection-for-QinQ-Tx-offload.patch 2025-11-21 11:05:09.546978177 +0000
@@ -1 +1 @@
-From 61ccab85e3972d6e3ee61b3e6a6a6872a33e5ac3 Mon Sep 17 00:00:00 2001
+From 3fdaa456b2b0018fcdb5d0596e3216bd5e920958 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 61ccab85e3972d6e3ee61b3e6a6a6872a33e5ac3 ]
+
@@ -15 +16,0 @@
-Cc: stable at dpdk.org
@@ -20,2 +21,3 @@
- drivers/net/intel/ice/ice_rxtx_vec_common.h | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
+ drivers/net/intel/ice/ice_rxtx_vec_common.h | 207 ++++++++++++++++++++
+ 1 file changed, 207 insertions(+)
+ create mode 100644 drivers/net/intel/ice/ice_rxtx_vec_common.h
@@ -24,2 +26,3 @@
-index a24694c0b1..39581cb7ae 100644
---- a/drivers/net/intel/ice/ice_rxtx_vec_common.h
+new file mode 100644
+index 0000000000..39581cb7ae
+--- /dev/null
@@ -27,3 +30,56 @@
-@@ -54,4 +54,5 @@ _ice_rx_queue_release_mbufs_vec(struct ci_rx_queue *rxq)
- #define ICE_TX_NO_VECTOR_FLAGS ( \
- RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
+@@ -0,0 +1,207 @@
++/* SPDX-License-Identifier: BSD-3-Clause
++ * Copyright(c) 2019 Intel Corporation
++ */
++
++#ifndef _ICE_RXTX_VEC_COMMON_H_
++#define _ICE_RXTX_VEC_COMMON_H_
++
++#include "../common/rx.h"
++#include "ice_rxtx.h"
++
++static inline int
++ice_tx_desc_done(struct ci_tx_queue *txq, uint16_t idx)
++{
++ return (txq->ice_tx_ring[idx].cmd_type_offset_bsz &
++ rte_cpu_to_le_64(ICE_TXD_QW1_DTYPE_M)) ==
++ rte_cpu_to_le_64(ICE_TX_DESC_DTYPE_DESC_DONE);
++}
++
++static inline void
++_ice_rx_queue_release_mbufs_vec(struct ci_rx_queue *rxq)
++{
++ const unsigned int mask = rxq->nb_rx_desc - 1;
++ unsigned int i;
++
++ if (unlikely(!rxq->sw_ring)) {
++ PMD_DRV_LOG(DEBUG, "sw_ring is NULL");
++ return;
++ }
++
++ if (rxq->rxrearm_nb >= rxq->nb_rx_desc)
++ return;
++
++ /* free all mbufs that are valid in the ring */
++ if (rxq->rxrearm_nb == 0) {
++ for (i = 0; i < rxq->nb_rx_desc; i++) {
++ if (rxq->sw_ring[i].mbuf)
++ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
++ }
++ } else {
++ for (i = rxq->rx_tail;
++ i != rxq->rxrearm_start;
++ i = (i + 1) & mask) {
++ if (rxq->sw_ring[i].mbuf)
++ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
++ }
++ }
++
++ rxq->rxrearm_nb = rxq->nb_rx_desc;
++
++ /* set all entries to NULL */
++ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
++}
++
++#define ICE_TX_NO_VECTOR_FLAGS ( \
++ RTE_ETH_TX_OFFLOAD_MULTI_SEGS | \
@@ -31,13 +87,141 @@
- RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- RTE_ETH_TX_OFFLOAD_TCP_TSO | \
-@@ -65,5 +66,4 @@ _ice_rx_queue_release_mbufs_vec(struct ci_rx_queue *rxq)
- #define ICE_TX_VECTOR_OFFLOAD ( \
- RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
-- RTE_ETH_TX_OFFLOAD_QINQ_INSERT | \
- RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
- RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
-@@ -196,6 +196,6 @@ ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
- *txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
-
-- /* Tx VLAN/QINQ insertion Offload */
-- if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
++ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
++ RTE_ETH_TX_OFFLOAD_TCP_TSO | \
++ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | \
++ RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | \
++ RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO | \
++ RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO | \
++ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM | \
++ RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP)
++
++#define ICE_TX_VECTOR_OFFLOAD ( \
++ RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
++ RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | \
++ RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | \
++ RTE_ETH_TX_OFFLOAD_UDP_CKSUM | \
++ RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
++
++#define ICE_VECTOR_PATH 0
++#define ICE_VECTOR_OFFLOAD_PATH 1
++
++static inline int
++ice_rx_vec_queue_default(struct ci_rx_queue *rxq)
++{
++ if (!rxq)
++ return -1;
++
++ if (!ci_rxq_vec_capable(rxq->nb_rx_desc, rxq->rx_free_thresh))
++ return -1;
++
++ if (rxq->proto_xtr != PROTO_XTR_NONE)
++ return -1;
++
++ return 0;
++}
++
++static inline int
++ice_tx_vec_queue_default(struct ci_tx_queue *txq)
++{
++ if (!txq)
++ return -1;
++
++ if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
++ txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
++ return -1;
++
++ if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
++ return -1;
++
++ if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
++ return ICE_VECTOR_OFFLOAD_PATH;
++
++ return ICE_VECTOR_PATH;
++}
++
++static inline int
++ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
++{
++ int i;
++ struct ci_rx_queue *rxq;
++ int ret = 0;
++
++ for (i = 0; i < dev->data->nb_rx_queues; i++) {
++ rxq = dev->data->rx_queues[i];
++ ret = (ice_rx_vec_queue_default(rxq));
++ if (ret < 0)
++ break;
++ }
++
++ return ret;
++}
++
++static inline int
++ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
++{
++ int i;
++ struct ci_tx_queue *txq;
++ int ret = 0;
++ int result = 0;
++
++ for (i = 0; i < dev->data->nb_tx_queues; i++) {
++ txq = dev->data->tx_queues[i];
++ ret = ice_tx_vec_queue_default(txq);
++ if (ret < 0)
++ return -1;
++ if (ret == ICE_VECTOR_OFFLOAD_PATH)
++ result = ret;
++ }
++
++ return result;
++}
++
++static inline void
++ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
++ uint64_t *txd_hi)
++{
++ uint64_t ol_flags = tx_pkt->ol_flags;
++ uint32_t td_cmd = 0;
++ uint32_t td_offset = 0;
++
++ /* Tx Checksum Offload */
++ /* SET MACLEN */
++ td_offset |= (tx_pkt->l2_len >> 1) <<
++ ICE_TX_DESC_LEN_MACLEN_S;
++
++ /* Enable L3 checksum offload */
++ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
++ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
++ td_offset |= (tx_pkt->l3_len >> 2) <<
++ ICE_TX_DESC_LEN_IPLEN_S;
++ } else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
++ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
++ td_offset |= (tx_pkt->l3_len >> 2) <<
++ ICE_TX_DESC_LEN_IPLEN_S;
++ } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
++ td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
++ td_offset |= (tx_pkt->l3_len >> 2) <<
++ ICE_TX_DESC_LEN_IPLEN_S;
++ }
++
++ /* Enable L4 checksum offloads */
++ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
++ case RTE_MBUF_F_TX_TCP_CKSUM:
++ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
++ td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
++ ICE_TX_DESC_LEN_L4_LEN_S;
++ break;
++ case RTE_MBUF_F_TX_SCTP_CKSUM:
++ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
++ td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
++ ICE_TX_DESC_LEN_L4_LEN_S;
++ break;
++ case RTE_MBUF_F_TX_UDP_CKSUM:
++ td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
++ td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
++ ICE_TX_DESC_LEN_L4_LEN_S;
++ break;
++ default:
++ break;
++ }
++
++ *txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
++
@@ -46,2 +230,8 @@
- td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
- *txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
++ td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
++ *txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
++ ICE_TXD_QW1_L2TAG1_S);
++ }
++
++ *txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
++}
++#endif