[PATCH v3] net/i40e: add diagnostic support in TX path
Mingjin Ye
mingjinx.ye at intel.com
Thu Jan 4 11:20:20 CET 2024
The only way to enable diagnostics for the TX path is to modify the
application source code, which makes it difficult to diagnose faults.
This patch introduces the devarg option "mbuf_check", whose
parameters enable the corresponding diagnostics.
Supported cases: mbuf, size, segment, offload.
1. mbuf: check for corrupted mbuf.
2. size: check min/max packet length according to the HW spec.
3. segment: check that the number of mbuf segments does not exceed
   the HW limit.
4. offload: check for any unsupported offload flags.
Parameter format: mbuf_check=[mbuf,<case1>,<case2>]
e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
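When a check fails, the burst is truncated before the offending mbuf
and the event is counted in the tx_mbuf_error_packets xstat, which
can be read e.g. from testpmd (illustrative, port 0 assumed):
testpmd> show port xstats 0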
Signed-off-by: Mingjin Ye <mingjinx.ye at intel.com>
---
v2: remove strict.
---
v3: optimised.
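Not part of the patch: a minimal sketch of how an application could
poll the new counter through the generic xstats API. The helper name,
port id handling and array bound are illustrative assumptions.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#include <rte_ethdev.h>

/* Illustrative only: find the "tx_mbuf_error_packets" xstat by name
 * and print its current value. Array bound and error handling are
 * simplified assumptions.
 */
static void
show_tx_mbuf_errors(uint16_t port_id)
{
	struct rte_eth_xstat_name names[256];
	struct rte_eth_xstat xstats[256];
	int i, n;

	n = rte_eth_xstats_get_names(port_id, names, RTE_DIM(names));
	if (n <= 0 || n > (int)RTE_DIM(names))
		return;
	if (rte_eth_xstats_get(port_id, xstats, n) != n)
		return;

	for (i = 0; i < n; i++) {
		if (!strcmp(names[i].name, "tx_mbuf_error_packets")) {
			printf("tx_mbuf_error_packets: %" PRIu64 "\n",
			       xstats[i].value);
			break;
		}
	}
}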
---
doc/guides/nics/i40e.rst | 11 +++
drivers/net/i40e/i40e_ethdev.c | 137 ++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 28 ++++++
drivers/net/i40e/i40e_rxtx.c | 153 +++++++++++++++++++++++++++++++--
drivers/net/i40e/i40e_rxtx.h | 2 +
5 files changed, 323 insertions(+), 8 deletions(-)
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 15689ac958..b15b5b61c5 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -275,6 +275,17 @@ Runtime Configuration
-a 84:00.0,vf_msg_cfg=80 at 120:180
+- ``Support TX diagnostics`` (default ``not enabled``)
+
+ Set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics. For example,
+ ``-a 18:01.0,mbuf_check=mbuf`` or ``-a 18:01.0,mbuf_check=[mbuf,size]``.
+ Supported cases:
+
+ * mbuf: Check for corrupted mbuf.
+ * size: Check min/max packet length according to the HW spec.
+ * segment: Check that the number of mbuf segments does not exceed the HW limit.
+ * offload: Check for any unsupported offload flags.
+
Vector RX Pre-conditions
~~~~~~~~~~~~~~~~~~~~~~~~
For Vector RX it is assumed that the number of descriptor rings will be a power
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 3ca226156b..e554bae1ab 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -48,6 +48,7 @@
#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
#define ETH_I40E_VF_MSG_CFG "vf_msg_cfg"
+#define ETH_I40E_MBUF_CHECK_ARG "mbuf_check"
#define I40E_CLEAR_PXE_WAIT_MS 200
#define I40E_VSI_TSR_QINQ_STRIP 0x4010
@@ -412,6 +413,7 @@ static const char *const valid_keys[] = {
ETH_I40E_SUPPORT_MULTI_DRIVER,
ETH_I40E_QUEUE_NUM_PER_VF_ARG,
ETH_I40E_VF_MSG_CFG,
+ ETH_I40E_MBUF_CHECK_ARG,
NULL};
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -545,6 +547,14 @@ static const struct rte_i40e_xstats_name_off rte_i40e_stats_strings[] = {
#define I40E_NB_ETH_XSTATS (sizeof(rte_i40e_stats_strings) / \
sizeof(rte_i40e_stats_strings[0]))
+static const struct rte_i40e_xstats_name_off i40e_mbuf_strings[] = {
+ {"tx_mbuf_error_packets", offsetof(struct i40e_mbuf_stats,
+ tx_pkt_errors)},
+};
+
+#define I40E_NB_MBUF_XSTATS (sizeof(i40e_mbuf_strings) / \
+ sizeof(i40e_mbuf_strings[0]))
+
static const struct rte_i40e_xstats_name_off rte_i40e_hw_port_strings[] = {
{"tx_link_down_dropped", offsetof(struct i40e_hw_port_stats,
tx_dropped_link_down)},
@@ -1373,6 +1383,88 @@ read_vf_msg_config(__rte_unused const char *key,
return 0;
}
+static int
+read_mbuf_check_config(__rte_unused const char *key, const char *value, void *args)
+{
+ char *cur;
+ char *tmp;
+ int str_len;
+ int valid_len;
+
+ int ret = 0;
+ uint64_t *mc_flags = args;
+ char *str2 = strdup(value);
+ if (str2 == NULL)
+ return -1;
+
+ str_len = strlen(str2);
+ if (str2[0] == '[' && str2[str_len - 1] == ']') {
+ if (str_len < 3) {
+ ret = -1;
+ goto err_end;
+ }
+ valid_len = str_len - 2;
+ memmove(str2, str2 + 1, valid_len);
+ memset(str2 + valid_len, '\0', 2);
+ }
+ cur = strtok_r(str2, ",", &tmp);
+ while (cur != NULL) {
+ if (!strcmp(cur, "mbuf"))
+ *mc_flags |= I40E_MBUF_CHECK_F_TX_MBUF;
+ else if (!strcmp(cur, "size"))
+ *mc_flags |= I40E_MBUF_CHECK_F_TX_SIZE;
+ else if (!strcmp(cur, "segment"))
+ *mc_flags |= I40E_MBUF_CHECK_F_TX_SEGMENT;
+ else if (!strcmp(cur, "offload"))
+ *mc_flags |= I40E_MBUF_CHECK_F_TX_OFFLOAD;
+ else
+ PMD_DRV_LOG(ERR, "Unsupported mbuf check type: %s", cur);
+ cur = strtok_r(NULL, ",", &tmp);
+ }
+
+err_end:
+ free(str2);
+ return ret;
+}
+
+static int
+i40e_parse_mbuf_check(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct rte_kvargs *kvlist;
+ int kvargs_count;
+ int ret = 0;
+
+ if (!dev->device->devargs)
+ return ret;
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_MBUF_CHECK_ARG);
+ if (!kvargs_count)
+ goto free_end;
+
+ if (kvargs_count > 1)
+ PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
+ "the first invalid or last valid one is used !",
+ ETH_I40E_MBUF_CHECK_ARG);
+
+ ret = rte_kvargs_process(kvlist, ETH_I40E_MBUF_CHECK_ARG,
+ read_mbuf_check_config, &ad->mc_flags);
+ if (ret)
+ goto free_end;
+
+ if (ad->mc_flags)
+ ad->devargs.mbuf_check = 1;
+
+free_end:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
static int
i40e_parse_vf_msg_config(struct rte_eth_dev *dev,
struct i40e_vf_msg_cfg *msg_cfg)
@@ -1488,6 +1580,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
}
i40e_parse_vf_msg_config(dev, &pf->vf_msg_cfg);
+ i40e_parse_mbuf_check(dev);
/* Check if need to support multi-driver */
i40e_support_multi_driver(dev);
@@ -2324,6 +2417,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
int ret, i;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
@@ -2483,6 +2578,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
max_frame_size = dev->data->mtu ?
dev->data->mtu + I40E_ETH_OVERHEAD :
I40E_FRAME_SIZE_MAX;
+ ad->max_pkt_len = max_frame_size;
/* Set the max frame size to HW*/
i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0, NULL);
@@ -3502,13 +3598,17 @@ i40e_dev_stats_reset(struct rte_eth_dev *dev)
/* read the stats, reading current register values into offset */
i40e_read_stats_registers(pf, hw);
+ memset(&pf->mbuf_stats, 0,
+ sizeof(struct i40e_mbuf_stats));
+
return 0;
}
static uint32_t
i40e_xstats_calc_num(void)
{
- return I40E_NB_ETH_XSTATS + I40E_NB_HW_PORT_XSTATS +
+ return I40E_NB_ETH_XSTATS + I40E_NB_MBUF_XSTATS +
+ I40E_NB_HW_PORT_XSTATS +
(I40E_NB_RXQ_PRIO_XSTATS * 8) +
(I40E_NB_TXQ_PRIO_XSTATS * 8);
}
@@ -3533,6 +3633,14 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
count++;
}
+ /* Get stats from i40e_mbuf_stats struct */
+ for (i = 0; i < I40E_NB_MBUF_XSTATS; i++) {
+ strlcpy(xstats_names[count].name,
+ i40e_mbuf_strings[i].name,
+ sizeof(xstats_names[count].name));
+ count++;
+ }
+
/* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
strlcpy(xstats_names[count].name,
@@ -3563,12 +3671,27 @@ static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
return count;
}
+static void
+i40e_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
+ struct i40e_mbuf_stats *mbuf_stats)
+{
+ uint16_t idx;
+ struct i40e_tx_queue *txq;
+
+ for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
+ txq = ethdev->data->tx_queues[idx];
+ mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
+ }
+}
+
static int
i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_adapter *adapter =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
unsigned i, count, prio;
struct i40e_hw_port_stats *hw_stats = &pf->stats;
@@ -3583,6 +3706,9 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count = 0;
+ if (adapter->devargs.mbuf_check)
+ i40e_dev_update_mbuf_stats(dev, &pf->mbuf_stats);
+
/* Get stats from i40e_eth_stats struct */
for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
@@ -3591,6 +3717,15 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count++;
}
+ /* Get stats from i40e_mbuf_stats struct */
+ for (i = 0; i < I40E_NB_MBUF_XSTATS; i++) {
+ xstats[count].value =
+ *(uint64_t *)((char *)&pf->mbuf_stats +
+ i40e_mbuf_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
/* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 1bbe7ad376..41f9aab6ce 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1108,6 +1108,10 @@ struct i40e_vf_msg_cfg {
uint32_t ignore_second;
};
+struct i40e_mbuf_stats {
+ uint64_t tx_pkt_errors;
+};
+
/*
* Structure to store private data specific for PF instance.
*/
@@ -1122,6 +1126,7 @@ struct i40e_pf {
struct i40e_hw_port_stats stats_offset;
struct i40e_hw_port_stats stats;
+ struct i40e_mbuf_stats mbuf_stats;
u64 rx_err1; /* rxerr1 */
u64 rx_err1_offset;
@@ -1224,6 +1229,25 @@ struct i40e_vsi_vlan_pvid_info {
#define I40E_MAX_PKT_TYPE 256
#define I40E_FLOW_TYPE_MAX 64
+#define I40E_MBUF_CHECK_F_TX_MBUF (1ULL << 0)
+#define I40E_MBUF_CHECK_F_TX_SIZE (1ULL << 1)
+#define I40E_MBUF_CHECK_F_TX_SEGMENT (1ULL << 2)
+#define I40E_MBUF_CHECK_F_TX_OFFLOAD (1ULL << 3)
+
+enum i40e_tx_burst_type {
+ I40E_TX_DEFAULT,
+ I40E_TX_SIMPLE,
+ I40E_TX_SSE,
+ I40E_TX_AVX2,
+ I40E_TX_AVX512,
+};
+
+/**
+ * Cache devargs parse result.
+ */
+struct i40e_devargs {
+ int mbuf_check;
+};
/*
* Structure to store private data for each PF/VF instance.
*/
@@ -1240,6 +1264,10 @@ struct i40e_adapter {
bool tx_simple_allowed;
bool tx_vec_allowed;
+ struct i40e_devargs devargs;
+ uint64_t mc_flags; /* mbuf check flags. */
+ uint16_t max_pkt_len; /* Maximum packet length */
+ enum i40e_tx_burst_type tx_burst_type;
/* For PTP */
struct rte_timecounter systime_tc;
struct rte_timecounter rx_tstamp_tc;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 9aa5facb53..6fed922253 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1536,6 +1536,138 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+static const eth_tx_burst_t i40e_tx_pkt_burst_ops[] = {
+ [I40E_TX_DEFAULT] = i40e_xmit_pkts,
+ [I40E_TX_SIMPLE] = i40e_xmit_pkts_simple,
+#ifdef RTE_ARCH_X86
+ [I40E_TX_SSE] = i40e_xmit_pkts_vec,
+ [I40E_TX_AVX2] = i40e_xmit_pkts_vec_avx2,
+#ifdef CC_AVX512_SUPPORT
+ [I40E_TX_AVX512] = i40e_xmit_pkts_vec_avx512,
+#endif
+#endif
+};
+
+/* Tx mbuf check */
+static uint16_t
+i40e_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = tx_queue;
+ uint16_t idx;
+ uint64_t ol_flags;
+ struct rte_mbuf *mb;
+ bool pkt_error = false;
+ const char *reason = NULL;
+ uint16_t good_pkts = nb_pkts;
+ struct i40e_adapter *adapter = txq->vsi->adapter;
+ enum i40e_tx_burst_type tx_burst_type =
+ adapter->tx_burst_type;
+
+ for (idx = 0; idx < nb_pkts; idx++) {
+ mb = tx_pkts[idx];
+ ol_flags = mb->ol_flags;
+
+ if ((adapter->mc_flags & I40E_MBUF_CHECK_F_TX_MBUF) &&
+ (rte_mbuf_check(mb, 0, &reason) != 0)) {
+ PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+ pkt_error = true;
+ break;
+ }
+
+ if ((adapter->mc_flags & I40E_MBUF_CHECK_F_TX_SIZE) &&
+ (mb->data_len > mb->pkt_len ||
+ mb->data_len < I40E_TX_MIN_PKT_LEN ||
+ mb->data_len > adapter->max_pkt_len)) {
+ PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+ "of range, reasonable range (%d - %u)\n", mb->data_len,
+ I40E_TX_MIN_PKT_LEN, adapter->max_pkt_len);
+ pkt_error = true;
+ break;
+ }
+
+ if (adapter->mc_flags & I40E_MBUF_CHECK_F_TX_SEGMENT) {
+ if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+ /**
+ * No TSO case: nb_segs and pkt_len must not exceed
+ * the limits.
+ */
+ if (mb->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+ "HW limit, maximum allowed value is %d\n", mb->nb_segs,
+ I40E_TX_MAX_MTU_SEG);
+ pkt_error = true;
+ break;
+ }
+ if (mb->pkt_len > I40E_FRAME_SIZE_MAX) {
+ PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%u) exceeds "
+ "HW limit, maximum allowed value is %d\n", mb->pkt_len,
+ I40E_FRAME_SIZE_MAX);
+ pkt_error = true;
+ break;
+ }
+ } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+ /** TSO case: tso_segsz, nb_segs and pkt_len must not
+ * exceed the limits.
+ */
+ if (mb->tso_segsz < I40E_MIN_TSO_MSS ||
+ mb->tso_segsz > I40E_MAX_TSO_MSS) {
+ /**
+ * An MSS outside this range is considered malicious.
+ */
+ PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+ "of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+ I40E_MIN_TSO_MSS, I40E_MAX_TSO_MSS);
+ pkt_error = true;
+ break;
+ }
+ if (mb->nb_segs > txq->nb_tx_desc) {
+ PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+ "of ring length\n");
+ pkt_error = true;
+ break;
+ }
+ if (mb->pkt_len > I40E_TSO_FRAME_SIZE_MAX) {
+ PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%u) exceeds "
+ "HW limit, maximum allowed value is %d\n", mb->pkt_len,
+ I40E_TSO_FRAME_SIZE_MAX);
+ pkt_error = true;
+ break;
+ }
+ }
+ }
+
+ if (adapter->mc_flags & I40E_MBUF_CHECK_F_TX_OFFLOAD) {
+ if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
+ PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+ "is not supported\n");
+ pkt_error = true;
+ break;
+ }
+
+ if (rte_validate_tx_offload(mb) != 0) {
+ PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+ "setup error\n");
+ pkt_error = true;
+ break;
+ }
+ }
+ }
+
+ if (pkt_error) {
+ txq->mbuf_errors++;
+ good_pkts = idx;
+ if (good_pkts == 0)
+ return 0;
+ }
+
+ return i40e_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+ tx_pkts, good_pkts);
+}
+
/*********************************************************************
*
* TX simple prep functions
@@ -3467,6 +3599,8 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ enum i40e_tx_burst_type tx_burst_type;
+ int mbuf_check = ad->devargs.mbuf_check;
int i;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
@@ -3501,34 +3635,39 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
dev->data->port_id);
- dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx512;
+ tx_burst_type = I40E_TX_AVX512;
#endif
} else {
PMD_INIT_LOG(DEBUG, "Using %sVector Tx (port %d).",
ad->tx_use_avx2 ? "avx2 " : "",
dev->data->port_id);
- dev->tx_pkt_burst = ad->tx_use_avx2 ?
- i40e_xmit_pkts_vec_avx2 :
- i40e_xmit_pkts_vec;
+ tx_burst_type = ad->tx_use_avx2 ? I40E_TX_AVX2 : I40E_TX_SSE;
dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
}
#else /* RTE_ARCH_X86 */
PMD_INIT_LOG(DEBUG, "Using Vector Tx (port %d).",
dev->data->port_id);
- dev->tx_pkt_burst = i40e_xmit_pkts_vec;
+ tx_burst_type = I40E_TX_SSE;
dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
#endif /* RTE_ARCH_X86 */
} else {
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
- dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ tx_burst_type = I40E_TX_SIMPLE;
dev->recycle_tx_mbufs_reuse = i40e_recycle_tx_mbufs_reuse_vec;
}
dev->tx_pkt_prepare = i40e_simple_prep_pkts;
} else {
PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
- dev->tx_pkt_burst = i40e_xmit_pkts;
+ tx_burst_type = I40E_TX_DEFAULT;
dev->tx_pkt_prepare = i40e_prep_pkts;
}
+
+ if (mbuf_check) {
+ ad->tx_burst_type = tx_burst_type;
+ dev->tx_pkt_burst = i40e_xmit_pkts_check;
+ } else {
+ dev->tx_pkt_burst = i40e_tx_pkt_burst_ops[tx_burst_type];
+ }
}
static const struct {
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index b191f23e1f..818bf9d859 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -167,6 +167,8 @@ struct i40e_tx_queue {
uint16_t tx_next_dd;
uint16_t tx_next_rs;
bool q_set; /**< indicate if tx queue has been configured */
+ uint64_t mbuf_errors;
+
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
--
2.25.1