[PATCH v3 2/3] net/bonding: support Tx prepare fail stats
Chengwen Feng
fengchengwen at huawei.com
Sat Sep 17 06:15:36 CEST 2022
If Tx prepare fails, the bonding driver frees the corresponding packets
internally, and only the packets for which Tx prepare succeeded are
transmitted. With this patch, the number of Tx prepare failures is
counted and the result is added to the oerrors field of
'struct rte_eth_stats'.
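
With this change, the per-queue prepare failure counters are folded into the
aggregate oerrors value reported for the bonded port. Below is a minimal
sketch (not part of the patch) of how an application could observe them
through the standard stats API; bond_port_id and print_bond_oerrors are
placeholder names used only for illustration.

/*
 * Minimal sketch (illustration only): the Tx prepare failures counted
 * per Tx queue are reported through rte_eth_stats_get() as part of
 * oerrors for the bonded port.
 */
#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

static void
print_bond_oerrors(uint16_t bond_port_id)
{
	struct rte_eth_stats stats;

	/* oerrors now includes the Tx prepare failure count */
	if (rte_eth_stats_get(bond_port_id, &stats) == 0)
		printf("port %u oerrors: %" PRIu64 "\n",
		       bond_port_id, stats.oerrors);
}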
Signed-off-by: Chengwen Feng <fengchengwen at huawei.com>
Reviewed-by: Min Hu (Connor) <humin29 at huawei.com>
---
drivers/net/bonding/eth_bond_private.h | 7 +++++++
drivers/net/bonding/rte_eth_bond_pmd.c | 16 ++++++++++++++++
2 files changed, 23 insertions(+)
diff --git a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h
index 976163b06b..077f180f94 100644
--- a/drivers/net/bonding/eth_bond_private.h
+++ b/drivers/net/bonding/eth_bond_private.h
@@ -72,6 +72,13 @@ struct bond_tx_queue {
/**< Number of TX descriptors available for the queue */
struct rte_eth_txconf tx_conf;
/**< Copy of TX configuration structure for queue */
+
+ /*
+ * The following fields are statistics and may be updated at
+ * runtime, so they start on a new cache line.
+ */
+ uint64_t prepare_fails __rte_cache_aligned;
+ /**< Tx prepare failure count */
};
/** Bonded slave devices structure */
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index ec9d7d7bab..72d97ab7c8 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -622,6 +622,7 @@ bond_ethdev_tx_user_wrap(struct bond_tx_queue *bd_tx_q, uint16_t slave_port_id,
rte_pktmbuf_free(fail_pkts[i]);
}
+ bd_tx_q->prepare_fails += fail_cnt;
if (fail_cnt == nb_pkts)
return nb_pkts;
tx_burst:
@@ -2423,6 +2424,8 @@ bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
bd_tx_q->nb_tx_desc = nb_tx_desc;
memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));
+ bd_tx_q->prepare_fails = 0;
+
dev->data->tx_queues[tx_queue_id] = bd_tx_q;
return 0;
@@ -2633,6 +2636,7 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct bond_dev_private *internals = dev->data->dev_private;
struct rte_eth_stats slave_stats;
+ struct bond_tx_queue *bd_tx_q;
int i, j;
for (i = 0; i < internals->slave_count; i++) {
@@ -2654,7 +2658,12 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->q_obytes[j] += slave_stats.q_obytes[j];
stats->q_errors[j] += slave_stats.q_errors[j];
}
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ bd_tx_q = (struct bond_tx_queue *)dev->data->tx_queues[i];
+ if (bd_tx_q)
+ stats->oerrors += bd_tx_q->prepare_fails;
}
return 0;
@@ -2664,6 +2673,7 @@ static int
bond_ethdev_stats_reset(struct rte_eth_dev *dev)
{
struct bond_dev_private *internals = dev->data->dev_private;
+ struct bond_tx_queue *bd_tx_q;
int i;
int err;
int ret;
@@ -2674,6 +2684,12 @@ bond_ethdev_stats_reset(struct rte_eth_dev *dev)
err = ret;
}
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ bd_tx_q = (struct bond_tx_queue *)dev->data->tx_queues[i];
+ if (bd_tx_q)
+ bd_tx_q->prepare_fails = 0;
+ }
+
return err;
}
--
2.17.1