basic stats ops implementations.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 doc/guides/nics/features/zxdh.ini  |   2 +
 doc/guides/nics/zxdh.rst           |   1 +
 drivers/net/zxdh/zxdh_ethdev.c     |   2 +
 drivers/net/zxdh/zxdh_ethdev_ops.c | 353 +++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_ethdev_ops.h |  27 +++
 drivers/net/zxdh/zxdh_msg.h        |  16 ++
 drivers/net/zxdh/zxdh_np.c         | 341 ++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_np.h         |  30 +++
 drivers/net/zxdh/zxdh_queue.h      |   2 +
 drivers/net/zxdh/zxdh_rxtx.c       |  83 ++++++-
 drivers/net/zxdh/zxdh_tables.h     |   5 +
 11 files changed, 859 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
index 415ca547d0..98c141cf95 100644
--- a/doc/guides/nics/features/zxdh.ini
+++ b/doc/guides/nics/features/zxdh.ini
@@ -22,3 +22,5 @@ QinQ offload         = Y
 RSS hash             = Y
 RSS reta update      = Y
 Inner RSS            = Y
+Basic stats          = Y
+Stats per queue      = Y
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
index 3cc6a1d348..c8a52b587c 100644
--- a/doc/guides/nics/zxdh.rst
+++ b/doc/guides/nics/zxdh.rst
@@ -32,6 +32,7 @@ Features of the ZXDH PMD are:
 - VLAN stripping and inserting
 - QINQ stripping and inserting
 - Receive Side Scaling (RSS)
+- Port hardware statistics
 
 
 Driver compilation and testing
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index e504b239c6..636a4bb4fb 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -1172,6 +1172,8 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
     .reta_query                 = zxdh_dev_rss_reta_query,
     .rss_hash_update         = zxdh_rss_hash_update,
     .rss_hash_conf_get         = zxdh_rss_hash_conf_get,
+    .stats_get                 = zxdh_dev_stats_get,
+    .stats_reset             = zxdh_dev_stats_reset,
 };
 
 static int32_t
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index d333717e87..32f948e844 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -11,6 +11,8 @@
 #include "zxdh_ethdev_ops.h"
 #include "zxdh_tables.h"
 #include "zxdh_logs.h"
+#include "zxdh_rxtx.h"
+#include "zxdh_np.h"
 
 #define ZXDH_VLAN_FILTER_GROUPS       64
 #define ZXDH_INVALID_LOGIC_QID        0xFFFFU
@@ -22,6 +24,108 @@
 #define ZXDH_HF_MAC_VLAN     4
 #define ZXDH_HF_ALL          0
 
+struct __rte_packed_begin zxdh_hw_mac_stats {
+    uint64_t rx_total;
+    uint64_t rx_pause;
+    uint64_t rx_unicast;
+    uint64_t rx_multicast;
+    uint64_t rx_broadcast;
+    uint64_t rx_vlan;
+    uint64_t rx_size_64;
+    uint64_t rx_size_65_127;
+    uint64_t rx_size_128_255;
+    uint64_t rx_size_256_511;
+    uint64_t rx_size_512_1023;
+    uint64_t rx_size_1024_1518;
+    uint64_t rx_size_1519_mru;
+    uint64_t rx_undersize;
+    uint64_t rx_oversize;
+    uint64_t rx_fragment;
+    uint64_t rx_jabber;
+    uint64_t rx_control;
+    uint64_t rx_eee;
+
+    uint64_t tx_total;
+    uint64_t tx_pause;
+    uint64_t tx_unicast;
+    uint64_t tx_multicast;
+    uint64_t tx_broadcast;
+    uint64_t tx_vlan;
+    uint64_t tx_size_64;
+    uint64_t tx_size_65_127;
+    uint64_t tx_size_128_255;
+    uint64_t tx_size_256_511;
+    uint64_t tx_size_512_1023;
+    uint64_t tx_size_1024_1518;
+    uint64_t tx_size_1519_mtu;
+    uint64_t tx_undersize;
+    uint64_t tx_oversize;
+    uint64_t tx_fragment;
+    uint64_t tx_jabber;
+    uint64_t tx_control;
+    uint64_t tx_eee;
+
+    uint64_t rx_error;
+    uint64_t rx_fcs_error;
+    uint64_t rx_drop;
+
+    uint64_t tx_error;
+    uint64_t tx_fcs_error;
+    uint64_t tx_drop;
+
+} __rte_packed_end;
+
+struct __rte_packed_begin zxdh_hw_mac_bytes {
+    uint64_t rx_total_bytes;
+    uint64_t rx_good_bytes;
+    uint64_t tx_total_bytes;
+    uint64_t tx_good_bytes;
+} __rte_packed_end;
+
+struct zxdh_np_stats_data {
+    uint64_t n_pkts_dropped;
+    uint64_t n_bytes_dropped;
+};
+
+struct zxdh_xstats_name_off {
+    char name[RTE_ETH_XSTATS_NAME_SIZE];
+    unsigned int offset;
+};
+
+static const struct zxdh_xstats_name_off zxdh_rxq_stat_strings[] = {
+    {"good_packets",           offsetof(struct zxdh_virtnet_rx, stats.packets)},
+    {"good_bytes",             offsetof(struct zxdh_virtnet_rx, stats.bytes)},
+    {"errors",                 offsetof(struct zxdh_virtnet_rx, stats.errors)},
+    {"multicast_packets",      offsetof(struct zxdh_virtnet_rx, stats.multicast)},
+    {"broadcast_packets",      offsetof(struct zxdh_virtnet_rx, stats.broadcast)},
+    {"truncated_err",          offsetof(struct zxdh_virtnet_rx, stats.truncated_err)},
+    {"undersize_packets",      offsetof(struct zxdh_virtnet_rx, stats.size_bins[0])},
+    {"size_64_packets",        offsetof(struct zxdh_virtnet_rx, stats.size_bins[1])},
+    {"size_65_127_packets",    offsetof(struct zxdh_virtnet_rx, stats.size_bins[2])},
+    {"size_128_255_packets",   offsetof(struct zxdh_virtnet_rx, stats.size_bins[3])},
+    {"size_256_511_packets",   offsetof(struct zxdh_virtnet_rx, stats.size_bins[4])},
+    {"size_512_1023_packets",  offsetof(struct zxdh_virtnet_rx, stats.size_bins[5])},
+    {"size_1024_1518_packets", offsetof(struct zxdh_virtnet_rx, stats.size_bins[6])},
+    {"size_1519_max_packets",  offsetof(struct zxdh_virtnet_rx, stats.size_bins[7])},
+};
+
+static const struct zxdh_xstats_name_off zxdh_txq_stat_strings[] = {
+    {"good_packets",           offsetof(struct zxdh_virtnet_tx, stats.packets)},
+    {"good_bytes",             offsetof(struct zxdh_virtnet_tx, stats.bytes)},
+    {"errors",                 offsetof(struct zxdh_virtnet_tx, stats.errors)},
+    {"multicast_packets",      offsetof(struct zxdh_virtnet_tx, stats.multicast)},
+    {"broadcast_packets",      offsetof(struct zxdh_virtnet_tx, stats.broadcast)},
+    {"truncated_err",          offsetof(struct zxdh_virtnet_tx, stats.truncated_err)},
+    {"undersize_packets",      offsetof(struct zxdh_virtnet_tx, stats.size_bins[0])},
+    {"size_64_packets",        offsetof(struct zxdh_virtnet_tx, stats.size_bins[1])},
+    {"size_65_127_packets",    offsetof(struct zxdh_virtnet_tx, stats.size_bins[2])},
+    {"size_128_255_packets",   offsetof(struct zxdh_virtnet_tx, stats.size_bins[3])},
+    {"size_256_511_packets",   offsetof(struct zxdh_virtnet_tx, stats.size_bins[4])},
+    {"size_512_1023_packets",  offsetof(struct zxdh_virtnet_tx, stats.size_bins[5])},
+    {"size_1024_1518_packets", offsetof(struct zxdh_virtnet_tx, stats.size_bins[6])},
+    {"size_1519_max_packets",  offsetof(struct zxdh_virtnet_tx, stats.size_bins[7])},
+};
+
 static int32_t zxdh_config_port_status(struct rte_eth_dev *dev, uint16_t link_status)
 {
     struct zxdh_hw *hw = dev->data->dev_private;
@@ -1153,3 +1257,252 @@ zxdh_rss_configure(struct rte_eth_dev *dev)
     }
     return 0;
 }
+
+static int32_t
+zxdh_hw_vqm_stats_get(struct rte_eth_dev *dev, enum zxdh_agent_msg_type opcode,
+            struct zxdh_hw_vqm_stats *hw_stats)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_msg_info msg_info = {0};
+    struct zxdh_msg_reply_info reply_info = {0};
+    enum ZXDH_BAR_MODULE_ID module_id;
+    int ret = 0;
+
+    switch (opcode) {
+    case ZXDH_VQM_DEV_STATS_GET:
+    case ZXDH_VQM_QUEUE_STATS_GET:
+    case ZXDH_VQM_QUEUE_STATS_RESET:
+        module_id = ZXDH_BAR_MODULE_VQM;
+        break;
+    case ZXDH_MAC_STATS_GET:
+    case ZXDH_MAC_STATS_RESET:
+        module_id = ZXDH_BAR_MODULE_MAC;
+        break;
+    default:
+        PMD_DRV_LOG(ERR, "invalid opcode %u", opcode);
+        return -1;
+    }
+
+    zxdh_agent_msg_build(hw, opcode, &msg_info);
+
+    ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info),
+                &reply_info, sizeof(struct zxdh_msg_reply_info), module_id);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "Failed to get hw stats");
+        return -1;
+    }
+    struct zxdh_msg_reply_body *reply_body = &reply_info.reply_body;
+
+    rte_memcpy(hw_stats, &reply_body->vqm_stats, sizeof(struct zxdh_hw_vqm_stats));
+    return 0;
+}
+
+static int zxdh_hw_mac_stats_get(struct rte_eth_dev *dev,
+                struct zxdh_hw_mac_stats *mac_stats,
+                struct zxdh_hw_mac_bytes *mac_bytes)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MAC_OFFSET);
+    uint64_t stats_addr =  0;
+    uint64_t bytes_addr =  0;
+
+    if (hw->speed <= RTE_ETH_SPEED_NUM_25G) {
+        stats_addr = virt_addr + ZXDH_MAC_STATS_OFFSET + 352 * (hw->phyport % 4);
+        bytes_addr = virt_addr + ZXDH_MAC_BYTES_OFFSET + 32 * (hw->phyport % 4);
+    } else {
+        stats_addr = virt_addr + ZXDH_MAC_STATS_OFFSET + 352 * 4;
+        bytes_addr = virt_addr + ZXDH_MAC_BYTES_OFFSET + 32 * 4;
+    }
+
+    rte_memcpy(mac_stats, (void *)stats_addr, sizeof(struct zxdh_hw_mac_stats));
+    rte_memcpy(mac_bytes, (void *)bytes_addr, sizeof(struct zxdh_hw_mac_bytes));
+    return 0;
+}
+
+static void zxdh_data_hi_to_lo(uint64_t *data)
+{
+    uint32_t n_data_hi;
+    uint32_t n_data_lo;
+
+    n_data_lo = *data >> 32;
+    n_data_hi = *data;
+    *data =  (uint64_t)(rte_le_to_cpu_32(n_data_hi)) << 32 |
+                rte_le_to_cpu_32(n_data_lo);
+}
+
+static int zxdh_np_stats_get(struct rte_eth_dev *dev, struct zxdh_hw_np_stats *np_stats)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_np_stats_data stats_data;
+    uint32_t stats_id = zxdh_vport_to_vfid(hw->vport);
+    uint32_t idx = 0;
+    int ret = 0;
+
+    idx = stats_id + ZXDH_BROAD_STATS_EGRESS_BASE;
+    ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+                0, idx, (uint32_t *)&np_stats->np_tx_broadcast);
+    if (ret)
+        return ret;
+    zxdh_data_hi_to_lo(&np_stats->np_tx_broadcast);
+
+    idx = stats_id + ZXDH_BROAD_STATS_INGRESS_BASE;
+    memset(&stats_data, 0, sizeof(stats_data));
+    ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+                0, idx, (uint32_t *)&np_stats->np_rx_broadcast);
+    if (ret)
+        return ret;
+    zxdh_data_hi_to_lo(&np_stats->np_rx_broadcast);
+
+    idx = stats_id + ZXDH_MTU_STATS_EGRESS_BASE;
+    memset(&stats_data, 0, sizeof(stats_data));
+    ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+                1, idx, (uint32_t *)&stats_data);
+    if (ret)
+        return ret;
+
+    np_stats->np_tx_mtu_drop_pkts = stats_data.n_pkts_dropped;
+    np_stats->np_tx_mtu_drop_bytes = stats_data.n_bytes_dropped;
+    zxdh_data_hi_to_lo(&np_stats->np_tx_mtu_drop_pkts);
+    zxdh_data_hi_to_lo(&np_stats->np_tx_mtu_drop_bytes);
+
+    idx = stats_id + ZXDH_MTU_STATS_INGRESS_BASE;
+    memset(&stats_data, 0, sizeof(stats_data));
+    ret = zxdh_np_dtb_stats_get(ZXDH_DEVICE_NO, g_dtb_data.queueid,
+                1, idx, (uint32_t *)&stats_data);
+    if (ret)
+        return ret;
+    np_stats->np_rx_mtu_drop_pkts = stats_data.n_pkts_dropped;
+    np_stats->np_rx_mtu_drop_bytes = stats_data.n_bytes_dropped;
+    zxdh_data_hi_to_lo(&np_stats->np_rx_mtu_drop_pkts);
+    zxdh_data_hi_to_lo(&np_stats->np_rx_mtu_drop_bytes);
+
+    return 0;
+}
+
+static int
+zxdh_hw_np_stats_get(struct rte_eth_dev *dev,  struct zxdh_hw_np_stats *np_stats)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_msg_info msg_info = {0};
+    struct zxdh_msg_reply_info reply_info = {0};
+    int ret = 0;
+
+    if (hw->is_pf) {
+        ret = zxdh_np_stats_get(dev, np_stats);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "get np stats failed");
+            return -1;
+        }
+    } else {
+        zxdh_msg_head_build(hw, ZXDH_GET_NP_STATS, &msg_info);
+        ret = zxdh_vf_send_msg_to_pf(dev, &msg_info, sizeof(struct zxdh_msg_info),
+                    &reply_info, sizeof(struct zxdh_msg_reply_info));
+        if (ret) {
+            PMD_DRV_LOG(ERR,
+                "Failed to send msg: port 0x%x msg type ZXDH_PORT_METER_STAT_GET",
+                hw->vport.vport);
+            return -1;
+        }
+        memcpy(np_stats, &reply_info.reply_body.np_stats, sizeof(struct zxdh_hw_np_stats));
+    }
+    return ret;
+}
+
+int
+zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_hw_vqm_stats vqm_stats = {0};
+    struct zxdh_hw_np_stats np_stats = {0};
+    struct zxdh_hw_mac_stats mac_stats = {0};
+    struct zxdh_hw_mac_bytes mac_bytes = {0};
+    uint32_t i = 0;
+
+    zxdh_hw_vqm_stats_get(dev, ZXDH_VQM_DEV_STATS_GET,  &vqm_stats);
+    if (hw->is_pf)
+        zxdh_hw_mac_stats_get(dev, &mac_stats, &mac_bytes);
+
+    zxdh_hw_np_stats_get(dev, &np_stats);
+
+    stats->ipackets = vqm_stats.rx_total;
+    stats->opackets = vqm_stats.tx_total;
+    stats->ibytes = vqm_stats.rx_bytes;
+    stats->obytes = vqm_stats.tx_bytes;
+    stats->imissed = vqm_stats.rx_drop + mac_stats.rx_drop;
+    stats->ierrors = vqm_stats.rx_error + mac_stats.rx_error + np_stats.np_rx_mtu_drop_pkts;
+    stats->oerrors = vqm_stats.tx_error + mac_stats.tx_error + np_stats.np_tx_mtu_drop_pkts;
+
+    stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+    for (i = 0; (i < dev->data->nb_rx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
+        struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[i];
+
+        if (rxvq == NULL)
+            continue;
+        stats->q_ipackets[i] = *(uint64_t *)(((char *)rxvq) +
+                zxdh_rxq_stat_strings[0].offset);
+        stats->q_ibytes[i] = *(uint64_t *)(((char *)rxvq) +
+                zxdh_rxq_stat_strings[1].offset);
+        stats->q_errors[i] = *(uint64_t *)(((char *)rxvq) +
+                zxdh_rxq_stat_strings[2].offset);
+        stats->q_errors[i] += *(uint64_t *)(((char *)rxvq) +
+                zxdh_rxq_stat_strings[5].offset);
+    }
+
+    for (i = 0; (i < dev->data->nb_tx_queues) && (i < RTE_ETHDEV_QUEUE_STAT_CNTRS); i++) {
+        struct zxdh_virtnet_tx *txvq = dev->data->tx_queues[i];
+
+        if (txvq == NULL)
+            continue;
+        stats->q_opackets[i] = *(uint64_t *)(((char *)txvq) +
+                zxdh_txq_stat_strings[0].offset);
+        stats->q_obytes[i] = *(uint64_t *)(((char *)txvq) +
+                zxdh_txq_stat_strings[1].offset);
+        stats->q_errors[i] += *(uint64_t *)(((char *)txvq) +
+                zxdh_txq_stat_strings[2].offset);
+        stats->q_errors[i] += *(uint64_t *)(((char *)txvq) +
+                zxdh_txq_stat_strings[5].offset);
+    }
+    return 0;
+}
+
+static int zxdh_hw_stats_reset(struct rte_eth_dev *dev, enum zxdh_agent_msg_type opcode)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_msg_info msg_info = {0};
+    struct zxdh_msg_reply_info reply_info = {0};
+    enum ZXDH_BAR_MODULE_ID module_id;
+    int ret = 0;
+
+    switch (opcode) {
+    case ZXDH_VQM_DEV_STATS_RESET:
+        module_id = ZXDH_BAR_MODULE_VQM;
+        break;
+    case ZXDH_MAC_STATS_RESET:
+        module_id = ZXDH_BAR_MODULE_MAC;
+        break;
+    default:
+        PMD_DRV_LOG(ERR, "invalid opcode %u", opcode);
+        return -1;
+    }
+
+    zxdh_agent_msg_build(hw, opcode, &msg_info);
+
+    ret = zxdh_send_msg_to_riscv(dev, &msg_info, sizeof(struct zxdh_msg_info),
+                &reply_info, sizeof(struct zxdh_msg_reply_info), module_id);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "Failed to reset hw stats");
+        return -1;
+    }
+    return 0;
+}
+
+int zxdh_dev_stats_reset(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    zxdh_hw_stats_reset(dev, ZXDH_VQM_DEV_STATS_RESET);
+    if (hw->is_pf)
+        zxdh_hw_stats_reset(dev, ZXDH_MAC_STATS_RESET);
+
+    return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h
index 860716d079..eb722d7244 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.h
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h
@@ -5,6 +5,8 @@
 #ifndef ZXDH_ETHDEV_OPS_H
 #define ZXDH_ETHDEV_OPS_H
 
+#include <stdint.h>
+
 #include <rte_ether.h>
 
 #include "zxdh_ethdev.h"
@@ -24,6 +26,29 @@
 #define ZXDH_HF_MAC_VLAN_ETH  ZXDH_ETH_RSS_L2
 #define ZXDH_RSS_HF  ((ZXDH_HF_MAC_VLAN_ETH | ZXDH_HF_F3_ETH | ZXDH_HF_F5_ETH))
 
+struct __rte_packed_begin zxdh_hw_vqm_stats {
+    uint64_t rx_total;
+    uint64_t tx_total;
+    uint64_t rx_bytes;
+    uint64_t tx_bytes;
+    uint64_t rx_error;
+    uint64_t tx_error;
+    uint64_t rx_drop;
+} __rte_packed_end;
+
+struct zxdh_hw_np_stats {
+    uint64_t np_rx_broadcast;
+    uint64_t np_tx_broadcast;
+    uint64_t np_rx_mtu_drop_pkts;
+    uint64_t np_tx_mtu_drop_pkts;
+    uint64_t np_rx_mtu_drop_bytes;
+    uint64_t np_tx_mtu_drop_bytes;
+    uint64_t np_rx_mtr_drop_pkts;
+    uint64_t np_tx_mtr_drop_pkts;
+    uint64_t np_rx_mtr_drop_bytes;
+    uint64_t np_tx_mtr_drop_bytes;
+};
+
 int zxdh_dev_set_link_up(struct rte_eth_dev *dev);
 int zxdh_dev_set_link_down(struct rte_eth_dev *dev);
 int32_t zxdh_dev_link_update(struct rte_eth_dev *dev, int32_t wait_to_complete __rte_unused);
@@ -46,5 +71,7 @@ int zxdh_dev_rss_reta_query(struct rte_eth_dev *dev,
 int zxdh_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf);
 int zxdh_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf);
 int zxdh_rss_configure(struct rte_eth_dev *dev);
+int zxdh_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+int zxdh_dev_stats_reset(struct rte_eth_dev *dev);
 
 #endif /* ZXDH_ETHDEV_OPS_H */
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index dec0a95ded..176073fc70 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -9,10 +9,16 @@
 
 #include <ethdev_driver.h>
 
+#include "zxdh_ethdev_ops.h"
+
 #define ZXDH_BAR0_INDEX                 0
 #define ZXDH_CTRLCH_OFFSET              (0x2000)
 #define ZXDH_MSG_CHAN_PFVFSHARE_OFFSET  (ZXDH_CTRLCH_OFFSET + 0x1000)
 
+#define ZXDH_MAC_OFFSET                 (0x24000)
+#define ZXDH_MAC_STATS_OFFSET           (0x1408)
+#define ZXDH_MAC_BYTES_OFFSET           (0xb000)
+
 #define ZXDH_MSIX_INTR_MSG_VEC_BASE   1
 #define ZXDH_MSIX_INTR_MSG_VEC_NUM    3
 #define ZXDH_MSIX_INTR_DTB_VEC        (ZXDH_MSIX_INTR_MSG_VEC_BASE + ZXDH_MSIX_INTR_MSG_VEC_NUM)
@@ -173,7 +179,13 @@ enum pciebar_layout_type {
 
 /* riscv msg opcodes */
 enum zxdh_agent_msg_type {
+    ZXDH_MAC_STATS_GET = 10,
+    ZXDH_MAC_STATS_RESET,
     ZXDH_MAC_LINK_GET = 14,
+    ZXDH_VQM_DEV_STATS_GET = 21,
+    ZXDH_VQM_DEV_STATS_RESET,
+    ZXDH_VQM_QUEUE_STATS_GET = 24,
+    ZXDH_VQM_QUEUE_STATS_RESET,
 };
 
 enum zxdh_msg_type {
@@ -195,6 +207,8 @@ enum zxdh_msg_type {
     ZXDH_PORT_ATTRS_SET = 25,
     ZXDH_PORT_PROMISC_SET = 26,
 
+    ZXDH_GET_NP_STATS = 31,
+
     ZXDH_MSG_TYPE_END,
 };
 
@@ -319,9 +333,11 @@ struct __rte_packed_begin zxdh_msg_reply_body {
     enum zxdh_reps_flag flag;
     union __rte_packed_begin {
         uint8_t reply_data[ZXDH_MSG_REPLY_BODY_MAX_LEN - sizeof(enum zxdh_reps_flag)];
+        struct zxdh_hw_np_stats np_stats;
         struct zxdh_link_info_msg link_msg;
         struct zxdh_rss_hf rss_hf;
         struct zxdh_rss_reta rss_reta;
+        struct zxdh_hw_vqm_stats vqm_stats;
     } __rte_packed_end;
 } __rte_packed_end;
 
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
index 1f06539263..42679635f4 100644
--- a/drivers/net/zxdh/zxdh_np.c
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -26,6 +26,7 @@ ZXDH_TLB_MGR_T *g_p_dpp_tlb_mgr[ZXDH_DEV_CHANNEL_MAX];
 ZXDH_REG_T g_dpp_reg_info[4];
 ZXDH_DTB_TABLE_T g_dpp_dtb_table_info[4];
 ZXDH_SDT_TBL_DATA_T g_sdt_info[ZXDH_DEV_CHANNEL_MAX][ZXDH_DEV_SDT_ID_MAX];
+ZXDH_PPU_STAT_CFG_T g_ppu_stat_cfg;
 
 #define ZXDH_SDT_MGR_PTR_GET()    (&g_sdt_mgr)
 #define ZXDH_SDT_SOFT_TBL_GET(id) (g_sdt_mgr.sdt_tbl_array[id])
@@ -117,6 +118,18 @@ do {\
 #define ZXDH_COMM_CONVERT16(w_data) \
             (((w_data) & 0xff) << 8)
 
+#define ZXDH_DTB_TAB_UP_WR_INDEX_GET(DEV_ID, QUEUE_ID)       \
+        (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.wr_index)
+
+#define ZXDH_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET(DEV_ID, QUEUE_ID, INDEX)     \
+    (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.user_addr[(INDEX)].user_flag)
+
+#define ZXDH_DTB_TAB_UP_USER_PHY_ADDR_GET(DEV_ID, QUEUE_ID, INDEX)     \
+        (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.user_addr[(INDEX)].phy_addr)
+
+#define ZXDH_DTB_TAB_UP_DATA_LEN_GET(DEV_ID, QUEUE_ID, INDEX)       \
+        (p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.data_len[(INDEX)])
+
 #define ZXDH_DTB_TAB_UP_VIR_ADDR_GET(DEV_ID, QUEUE_ID, INDEX)     \
         ((INDEX) * p_dpp_dtb_mgr[(DEV_ID)]->queue_info[(QUEUE_ID)].tab_up.item_size)
 
@@ -1717,3 +1730,331 @@ zxdh_np_dtb_table_entry_get(uint32_t dev_id,
 
     return 0;
 }
+
+static void
+zxdh_np_stat_cfg_soft_get(uint32_t dev_id,
+                ZXDH_PPU_STAT_CFG_T *p_stat_cfg)
+{
+    ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_stat_cfg);
+
+    p_stat_cfg->ddr_base_addr = g_ppu_stat_cfg.ddr_base_addr;
+    p_stat_cfg->eram_baddr = g_ppu_stat_cfg.eram_baddr;
+    p_stat_cfg->eram_depth = g_ppu_stat_cfg.eram_depth;
+    p_stat_cfg->ppu_addr_offset = g_ppu_stat_cfg.ppu_addr_offset;
+}
+
+static uint32_t
+zxdh_np_dtb_tab_up_info_set(uint32_t dev_id,
+            uint32_t queue_id,
+            uint32_t item_index,
+            uint32_t int_flag,
+            uint32_t data_len,
+            uint32_t desc_len,
+            uint32_t *p_desc_data)
+{
+    ZXDH_DTB_QUEUE_ITEM_INFO_T item_info = {0};
+    uint32_t queue_en = 0;
+    uint32_t rc;
+
+    zxdh_np_dtb_queue_enable_get(dev_id, queue_id, &queue_en);
+    if (!queue_en) {
+        PMD_DRV_LOG(ERR, "the queue %d is not enable!", queue_id);
+        return ZXDH_RC_DTB_QUEUE_NOT_ENABLE;
+    }
+
+    if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
+        PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id);
+        return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
+    }
+
+    if (desc_len % 4 != 0)
+        return ZXDH_RC_DTB_PARA_INVALID;
+
+    zxdh_np_dtb_item_buff_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE,
+        item_index, 0, desc_len, p_desc_data);
+
+    ZXDH_DTB_TAB_UP_DATA_LEN_GET(dev_id, queue_id, item_index) = data_len;
+
+    item_info.cmd_vld = 1;
+    item_info.cmd_type = ZXDH_DTB_DIR_UP_TYPE;
+    item_info.int_en = int_flag;
+    item_info.data_len = desc_len / 4;
+
+    if (zxdh_np_dev_get_dev_type(dev_id) == ZXDH_DEV_TYPE_SIM)
+        return 0;
+
+    rc = zxdh_np_dtb_queue_item_info_set(dev_id, queue_id, &item_info);
+
+    return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_write_dump_desc_info(uint32_t dev_id,
+        uint32_t queue_id,
+        uint32_t queue_element_id,
+        uint32_t *p_dump_info,
+        uint32_t data_len,
+        uint32_t desc_len,
+        uint32_t *p_dump_data)
+{
+    uint32_t dtb_interrupt_status = 0;
+    uint32_t rc;
+
+    ZXDH_COMM_CHECK_POINT(p_dump_data);
+    rc = zxdh_np_dtb_tab_up_info_set(dev_id,
+                queue_id,
+                queue_element_id,
+                dtb_interrupt_status,
+                data_len,
+                desc_len,
+                p_dump_info);
+    if (rc != 0) {
+        PMD_DRV_LOG(ERR, "the queue %d element id %d dump"
+            " info set failed!", queue_id, queue_element_id);
+        zxdh_np_dtb_item_ack_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE,
+            queue_element_id, 0, ZXDH_DTB_TAB_ACK_UNUSED_MASK);
+    }
+
+    return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_tab_up_free_item_get(uint32_t dev_id,
+                    uint32_t queue_id,
+                    uint32_t *p_item_index)
+{
+    uint32_t ack_vale = 0;
+    uint32_t item_index = 0;
+    uint32_t unused_item_num = 0;
+    uint32_t i;
+
+    if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
+        PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id);
+        return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
+    }
+
+    zxdh_np_dtb_queue_unused_item_num_get(dev_id, queue_id, &unused_item_num);
+
+    if (unused_item_num == 0)
+        return ZXDH_RC_DTB_QUEUE_ITEM_HW_EMPTY;
+
+    for (i = 0; i < ZXDH_DTB_QUEUE_ITEM_NUM_MAX; i++) {
+        item_index = ZXDH_DTB_TAB_UP_WR_INDEX_GET(dev_id, queue_id) %
+            ZXDH_DTB_QUEUE_ITEM_NUM_MAX;
+
+        zxdh_np_dtb_item_ack_rd(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE, item_index,
+            0, &ack_vale);
+
+        ZXDH_DTB_TAB_UP_WR_INDEX_GET(dev_id, queue_id)++;
+
+        if ((ack_vale >> 8) == ZXDH_DTB_TAB_ACK_UNUSED_MASK)
+            break;
+    }
+
+    if (i == ZXDH_DTB_QUEUE_ITEM_NUM_MAX)
+        return ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY;
+
+    zxdh_np_dtb_item_ack_wr(dev_id, queue_id, ZXDH_DTB_DIR_UP_TYPE, item_index,
+        0, ZXDH_DTB_TAB_ACK_IS_USING_MASK);
+
+    *p_item_index = item_index;
+
+
+    return 0;
+}
+
+static uint32_t
+zxdh_np_dtb_tab_up_item_addr_get(uint32_t dev_id,
+                    uint32_t queue_id,
+                    uint32_t item_index,
+                    uint32_t *p_phy_haddr,
+                    uint32_t *p_phy_laddr)
+{
+    uint32_t rc = 0;
+    uint64_t addr;
+
+    if (ZXDH_DTB_QUEUE_INIT_FLAG_GET(dev_id, queue_id) == 0) {
+        PMD_DRV_LOG(ERR, "dtb queue %d is not init.", queue_id);
+        return ZXDH_RC_DTB_QUEUE_IS_NOT_INIT;
+    }
+
+    if (ZXDH_DTB_TAB_UP_USER_PHY_ADDR_FLAG_GET(dev_id, queue_id, item_index) ==
+        ZXDH_DTB_TAB_UP_USER_ADDR_TYPE)
+        addr = ZXDH_DTB_TAB_UP_USER_PHY_ADDR_GET(dev_id, queue_id, item_index);
+    else
+        addr = ZXDH_DTB_ITEM_ACK_SIZE;
+
+    *p_phy_haddr = (addr >> 32) & 0xffffffff;
+    *p_phy_laddr = addr & 0xffffffff;
+
+    return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_se_smmu0_dma_dump(uint32_t dev_id,
+        uint32_t queue_id,
+        uint32_t base_addr,
+        uint32_t depth,
+        uint32_t *p_data,
+        uint32_t *element_id)
+{
+    uint8_t form_buff[ZXDH_DTB_TABLE_CMD_SIZE_BIT / 8] = {0};
+    uint32_t dump_dst_phy_haddr = 0;
+    uint32_t dump_dst_phy_laddr = 0;
+    uint32_t queue_item_index = 0;
+    uint32_t data_len;
+    uint32_t desc_len;
+    uint32_t rc;
+
+    rc = zxdh_np_dtb_tab_up_free_item_get(dev_id, queue_id, &queue_item_index);
+    if (rc != 0) {
+        PMD_DRV_LOG(ERR, "dpp_dtb_tab_up_free_item_get failed = %d!", base_addr);
+        return ZXDH_RC_DTB_QUEUE_ITEM_SW_EMPTY;
+    }
+
+    *element_id = queue_item_index;
+
+    rc = zxdh_np_dtb_tab_up_item_addr_get(dev_id, queue_id, queue_item_index,
+        &dump_dst_phy_haddr, &dump_dst_phy_laddr);
+    ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_tab_up_item_addr_get");
+
+    data_len = depth * 128 / 32;
+    desc_len = ZXDH_DTB_LEN_POS_SETP / 4;
+
+    rc = zxdh_np_dtb_write_dump_desc_info(dev_id, queue_id, queue_item_index,
+        (uint32_t *)form_buff, data_len, desc_len, p_data);
+    ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_write_dump_desc_info");
+
+    return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_se_smmu0_ind_read(uint32_t dev_id,
+        uint32_t queue_id,
+        uint32_t base_addr,
+        uint32_t index,
+        uint32_t rd_mode,
+        uint32_t *p_data)
+{
+    uint32_t temp_data[4] = {0};
+    uint32_t element_id = 0;
+    uint32_t row_index = 0;
+    uint32_t col_index = 0;
+    uint32_t eram_dump_base_addr;
+    uint32_t rc;
+
+    switch (rd_mode) {
+    case ZXDH_ERAM128_OPR_128b:
+    {
+        row_index = index;
+        break;
+    }
+    case ZXDH_ERAM128_OPR_64b:
+    {
+        row_index = (index >> 1);
+        col_index = index & 0x1;
+        break;
+    }
+    case ZXDH_ERAM128_OPR_1b:
+    {
+        row_index = (index >> 7);
+        col_index = index & 0x7F;
+        break;
+    }
+    }
+
+    eram_dump_base_addr = base_addr + row_index;
+    rc = zxdh_np_dtb_se_smmu0_dma_dump(dev_id,
+            queue_id,
+            eram_dump_base_addr,
+            1,
+            temp_data,
+            &element_id);
+    ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_np_dtb_se_smmu0_dma_dump");
+
+    switch (rd_mode) {
+    case ZXDH_ERAM128_OPR_128b:
+    {
+        memcpy(p_data, temp_data, (128 / 8));
+        break;
+    }
+
+    case ZXDH_ERAM128_OPR_64b:
+    {
+        memcpy(p_data, temp_data + ((1 - col_index) << 1), (64 / 8));
+        break;
+    }
+
+    case ZXDH_ERAM128_OPR_1b:
+    {
+        ZXDH_COMM_UINT32_GET_BITS(p_data[0], *(temp_data +
+            (3 - col_index / 32)), (col_index % 32), 1);
+        break;
+    }
+    }
+
+    return rc;
+}
+
+static uint32_t
+zxdh_np_dtb_stat_smmu0_int_read(uint32_t dev_id,
+        uint32_t queue_id,
+        uint32_t smmu0_base_addr,
+        ZXDH_STAT_CNT_MODE_E rd_mode,
+        uint32_t index,
+        uint32_t *p_data)
+{
+    uint32_t eram_rd_mode;
+    uint32_t rc;
+
+    ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_data);
+
+    if (rd_mode == ZXDH_STAT_128_MODE)
+        eram_rd_mode = ZXDH_ERAM128_OPR_128b;
+    else
+        eram_rd_mode = ZXDH_ERAM128_OPR_64b;
+
+    rc = zxdh_np_dtb_se_smmu0_ind_read(dev_id,
+                                   queue_id,
+                                   smmu0_base_addr,
+                                   index,
+                                   eram_rd_mode,
+                                   p_data);
+    ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_np_dtb_se_smmu0_ind_read");
+
+    return rc;
+}
+
+int
+zxdh_np_dtb_stats_get(uint32_t dev_id,
+        uint32_t queue_id,
+        ZXDH_STAT_CNT_MODE_E rd_mode,
+        uint32_t index,
+        uint32_t *p_data)
+{
+    ZXDH_PPU_STAT_CFG_T stat_cfg = {0};
+    uint32_t ppu_eram_baddr;
+    uint32_t ppu_eram_depth;
+    uint32_t rc = 0;
+
+    ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_data);
+
+    memset(&stat_cfg, 0x0, sizeof(stat_cfg));
+
+    zxdh_np_stat_cfg_soft_get(dev_id, &stat_cfg);
+
+    ppu_eram_depth = stat_cfg.eram_depth;
+    ppu_eram_baddr = stat_cfg.eram_baddr;
+
+    if ((index >> (ZXDH_STAT_128_MODE - rd_mode)) < ppu_eram_depth) {
+        rc = zxdh_np_dtb_stat_smmu0_int_read(dev_id,
+                                    queue_id,
+                                    ppu_eram_baddr,
+                                    rd_mode,
+                                    index,
+                                    p_data);
+        ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "dpp_dtb_stat_smmu0_int_read");
+    }
+
+    return rc;
+}
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
index 19d1f03f59..7da29cf7bd 100644
--- a/drivers/net/zxdh/zxdh_np.h
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -432,6 +432,18 @@ typedef enum zxdh_sdt_table_type_e {
     ZXDH_SDT_TBLT_MAX     = 7,
 } ZXDH_SDT_TABLE_TYPE_E;
 
+typedef enum zxdh_dtb_dir_type_e {
+    ZXDH_DTB_DIR_DOWN_TYPE    = 0,
+    ZXDH_DTB_DIR_UP_TYPE    = 1,
+    ZXDH_DTB_DIR_TYPE_MAX,
+} ZXDH_DTB_DIR_TYPE_E;
+
+typedef enum zxdh_dtb_tab_up_user_addr_type_e {
+    ZXDH_DTB_TAB_UP_NOUSER_ADDR_TYPE     = 0,
+    ZXDH_DTB_TAB_UP_USER_ADDR_TYPE       = 1,
+    ZXDH_DTB_TAB_UP_USER_ADDR_TYPE_MAX,
+} ZXDH_DTB_TAB_UP_USER_ADDR_TYPE_E;
+
 typedef struct zxdh_dtb_lpm_entry_t {
     uint32_t dtb_len0;
     uint8_t *p_data_buff0;
@@ -537,6 +549,19 @@ typedef struct zxdh_dtb_hash_entry_info_t {
     uint8_t *p_rst;
 } ZXDH_DTB_HASH_ENTRY_INFO_T;
 
+typedef struct zxdh_ppu_stat_cfg_t {
+    uint32_t eram_baddr;
+    uint32_t eram_depth;
+    uint32_t ddr_base_addr;
+    uint32_t ppu_addr_offset;
+} ZXDH_PPU_STAT_CFG_T;
+
+typedef enum zxdh_stat_cnt_mode_e {
+    ZXDH_STAT_64_MODE  = 0,
+    ZXDH_STAT_128_MODE = 1,
+    ZXDH_STAT_MAX_MODE,
+} ZXDH_STAT_CNT_MODE_E;
+
 int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
 int zxdh_np_online_uninit(uint32_t dev_id, char *port_name, uint32_t queue_id);
 int zxdh_np_dtb_table_entry_write(uint32_t dev_id, uint32_t queue_id,
@@ -545,5 +570,10 @@ int zxdh_np_dtb_table_entry_delete(uint32_t dev_id, uint32_t queue_id,
              uint32_t entrynum, ZXDH_DTB_USER_ENTRY_T *delete_entries);
 int zxdh_np_dtb_table_entry_get(uint32_t dev_id, uint32_t queue_id,
             ZXDH_DTB_USER_ENTRY_T *get_entry, uint32_t srh_mode);
+int zxdh_np_dtb_stats_get(uint32_t dev_id,
+            uint32_t queue_id,
+            ZXDH_STAT_CNT_MODE_E rd_mode,
+            uint32_t index,
+            uint32_t *p_data);
 
 #endif /* ZXDH_NP_H */
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index daabb3530c..1d89703ccb 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -53,6 +53,8 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 #define ZXDH_PI_HDR_SIZE          sizeof(struct zxdh_pi_hdr)
 #define ZXDH_DL_NET_HDR_SIZE      sizeof(struct zxdh_net_hdr_dl)
 #define ZXDH_UL_NET_HDR_SIZE      sizeof(struct zxdh_net_hdr_ul)
+#define ZXDH_PD_HDR_SIZE_MAX              256
+#define ZXDH_PD_HDR_SIZE_MIN              ZXDH_TYPE_HDR_SIZE
 
 /*
  * ring descriptors: 16 bytes.
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
index 0ffce50042..27a61d46dd 100644
--- a/drivers/net/zxdh/zxdh_rxtx.c
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -406,6 +406,40 @@ static inline void zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq,
     zxdh_queue_store_flags_packed(head_dp, head_flags, vq->hw->weak_barriers);
 }
 
+static void
+zxdh_update_packet_stats(struct zxdh_virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+    uint32_t s = mbuf->pkt_len;
+    struct rte_ether_addr *ea = NULL;
+
+    stats->bytes += s;
+
+    if (s == 64) {
+        stats->size_bins[1]++;
+    } else if (s > 64 && s < 1024) {
+        uint32_t bin;
+
+        /* count zeros, and offset into correct bin */
+        bin = (sizeof(s) * 8) - rte_clz32(s) - 5;
+        stats->size_bins[bin]++;
+    } else {
+        if (s < 64)
+            stats->size_bins[0]++;
+        else if (s < 1519)
+            stats->size_bins[6]++;
+        else
+            stats->size_bins[7]++;
+    }
+
+    ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+    if (rte_is_multicast_ether_addr(ea)) {
+        if (rte_is_broadcast_ether_addr(ea))
+            stats->broadcast++;
+        else
+            stats->multicast++;
+    }
+}
+
 uint16_t
 zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -459,12 +493,19 @@ zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkt
                 break;
             }
         }
+        if (txm->nb_segs > ZXDH_TX_MAX_SEGS) {
+            PMD_TX_LOG(ERR, "%d segs  dropped", txm->nb_segs);
+            txvq->stats.truncated_err += nb_pkts - nb_tx;
+            break;
+        }
         /* Enqueue Packet buffers */
         if (can_push)
             zxdh_enqueue_xmit_packed_fast(txvq, txm, in_order);
         else
             zxdh_enqueue_xmit_packed(txvq, txm, slots, use_indirect, in_order);
+        zxdh_update_packet_stats(&txvq->stats, txm);
     }
+    txvq->stats.packets += nb_tx;
     if (likely(nb_tx)) {
         if (unlikely(zxdh_queue_kick_prepare_packed(vq))) {
             zxdh_queue_notify(vq);
@@ -474,9 +515,10 @@ zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkt
     return nb_tx;
 }
 
-uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+uint16_t zxdh_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,
                 uint16_t nb_pkts)
 {
+    struct zxdh_virtnet_tx *txvq = tx_queue;
     uint16_t nb_tx;
 
     for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
@@ -496,6 +538,12 @@ uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **t
             rte_errno = -error;
             break;
         }
+        if (m->nb_segs > ZXDH_TX_MAX_SEGS) {
+            PMD_TX_LOG(ERR, "%d segs dropped", m->nb_segs);
+            txvq->stats.truncated_err += nb_pkts - nb_tx;
+            rte_errno = ENOMEM;
+            break;
+        }
     }
     return nb_tx;
 }
@@ -571,7 +619,7 @@ static int32_t zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *h
     return 0;
 }
 
-static inline void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m)
+static void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m)
 {
     int32_t error = 0;
     /*
@@ -613,7 +661,13 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 
     for (i = 0; i < num; i++) {
         rxm = rcv_pkts[i];
-
+        if (unlikely(len[i] < ZXDH_UL_NET_HDR_SIZE)) {
+            nb_enqueued++;
+            PMD_RX_LOG(ERR, "RX, len:%u err", len[i]);
+            zxdh_discard_rxbuf(vq, rxm);
+            rxvq->stats.errors++;
+            continue;
+        }
         struct zxdh_net_hdr_ul *header =
             (struct zxdh_net_hdr_ul *)((char *)rxm->buf_addr +
             RTE_PKTMBUF_HEADROOM);
@@ -623,8 +677,22 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
             PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num);
             seg_num = 1;
         }
+        if (seg_num > ZXDH_RX_MAX_SEGS) {
+            PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num);
+            nb_enqueued++;
+            zxdh_discard_rxbuf(vq, rxm);
+            rxvq->stats.errors++;
+            continue;
+        }
         /* bit[0:6]-pd_len unit:2B */
         uint16_t pd_len = header->type_hdr.pd_len << 1;
+        if (pd_len > ZXDH_PD_HDR_SIZE_MAX || pd_len < ZXDH_PD_HDR_SIZE_MIN) {
+            PMD_RX_LOG(ERR, "pd_len:%d is invalid", pd_len);
+            nb_enqueued++;
+            zxdh_discard_rxbuf(vq, rxm);
+            rxvq->stats.errors++;
+            continue;
+        }
         /* Private queue only handle type hdr */
         hdr_size = pd_len;
         rxm->data_off = RTE_PKTMBUF_HEADROOM + hdr_size;
@@ -639,6 +707,7 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
         /* Update rte_mbuf according to pi/pd header */
         if (zxdh_rx_update_mbuf(rxm, header) < 0) {
            zxdh_discard_rxbuf(vq, rxm);
+            rxvq->stats.errors++;
            continue;
        }
        seg_res = seg_num - 1;
@@ -661,8 +730,11 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
                 PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.",
                     rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);
                 zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);
+                rxvq->stats.errors++;
+                rxvq->stats.truncated_err++;
                 continue;
             }
+            zxdh_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
             nb_rx++;
         }
     }
@@ -675,6 +747,7 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
         if (unlikely(rcv_cnt == 0)) {
             PMD_RX_LOG(ERR, "No enough segments for packet.");
             rte_pktmbuf_free(rx_pkts[nb_rx]);
+            rxvq->stats.errors++;
             break;
         }
         while (extra_idx < rcv_cnt) {
@@ -694,11 +767,15 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
                 PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.",
                     rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);
                 zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);
+                rxvq->stats.errors++;
+                rxvq->stats.truncated_err++;
                 continue;
             }
+            zxdh_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
             nb_rx++;
         }
     }
+    rxvq->stats.packets += nb_rx;
 
     /* Allocate new mbuf for the used descriptor */
     if (likely(!zxdh_queue_full(vq))) {
diff --git a/drivers/net/zxdh/zxdh_tables.h b/drivers/net/zxdh/zxdh_tables.h
index 7bac39375c..c7da40f294 100644
--- a/drivers/net/zxdh/zxdh_tables.h
+++ b/drivers/net/zxdh/zxdh_tables.h
@@ -11,6 +11,11 @@
 #define ZXDH_PORT_BASE_QID_FLAG           10
 #define ZXDH_PORT_ATTR_IS_UP_FLAG         35
 
+#define ZXDH_MTU_STATS_EGRESS_BASE        0x8481
+#define ZXDH_MTU_STATS_INGRESS_BASE       0x8981
+#define ZXDH_BROAD_STATS_EGRESS_BASE      0xC902
+#define ZXDH_BROAD_STATS_INGRESS_BASE     0xD102
+
 extern struct zxdh_dtb_shared_data g_dtb_data;
 
 struct zxdh_port_attr_table {
-- 
2.27.0