Provide initialization for the zxdh PMD driver,
including the message channel, NP init, etc.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
V4: Resolve compilation issues
V3: Resolve compilation issues
V2: Resolve compilation issues and modify doc (zxdh.ini, zxdh.rst)
V1: Provide zxdh basic init and open-source NPSDK lib
---
 doc/guides/nics/features/zxdh.ini |   10 +
 doc/guides/nics/index.rst         |    1 +
 doc/guides/nics/zxdh.rst          |   34 +
 drivers/net/meson.build           |    1 +
 drivers/net/zxdh/meson.build      |   23 +
 drivers/net/zxdh/zxdh_common.c    |   59 ++
 drivers/net/zxdh/zxdh_common.h    |   32 +
 drivers/net/zxdh/zxdh_ethdev.c    | 1328 +++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_ethdev.h    |  202 +++++
 drivers/net/zxdh/zxdh_logs.h      |   38 +
 drivers/net/zxdh/zxdh_msg.c       | 1177 +++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_msg.h       |  408 +++++++++
 drivers/net/zxdh/zxdh_npsdk.c     |  158 ++++
 drivers/net/zxdh/zxdh_npsdk.h     |  216 +++++
 drivers/net/zxdh/zxdh_pci.c       |  462 ++++++++++
 drivers/net/zxdh/zxdh_pci.h       |  259 ++++++
 drivers/net/zxdh/zxdh_queue.c     |  138 +++
 drivers/net/zxdh/zxdh_queue.h     |   85 ++
 drivers/net/zxdh/zxdh_ring.h      |   87 ++
 drivers/net/zxdh/zxdh_rxtx.h      |   48 ++
 20 files changed, 4766 insertions(+)
 create mode 100644 doc/guides/nics/features/zxdh.ini
 create mode 100644 doc/guides/nics/zxdh.rst
 create mode 100644 drivers/net/zxdh/meson.build
 create mode 100644 drivers/net/zxdh/zxdh_common.c
 create mode 100644 drivers/net/zxdh/zxdh_common.h
 create mode 100644 drivers/net/zxdh/zxdh_ethdev.c
 create mode 100644 drivers/net/zxdh/zxdh_ethdev.h
 create mode 100644 drivers/net/zxdh/zxdh_logs.h
 create mode 100644 drivers/net/zxdh/zxdh_msg.c
 create mode 100644 drivers/net/zxdh/zxdh_msg.h
 create mode 100644 drivers/net/zxdh/zxdh_npsdk.c
 create mode 100644 drivers/net/zxdh/zxdh_npsdk.h
 create mode 100644 drivers/net/zxdh/zxdh_pci.c
 create mode 100644 drivers/net/zxdh/zxdh_pci.h
 create mode 100644 drivers/net/zxdh/zxdh_queue.c
 create mode 100644 drivers/net/zxdh/zxdh_queue.h
 create mode 100644 drivers/net/zxdh/zxdh_ring.h
 create mode 100644 drivers/net/zxdh/zxdh_rxtx.h

diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
new file mode 100644
index 0000000000..083c75511b
--- /dev/null
+++ b/doc/guides/nics/features/zxdh.ini
@@ -0,0 +1,10 @@
+;
+; Supported features of the 'zxdh' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Linux                = Y
+x86-64               = Y
+ARMv8                = Y
+
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index c14bc7988a..8e371ac4a5 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -69,3 +69,4 @@ Network Interface Controller Drivers
     vhost
     virtio
     vmxnet3
+    zxdh
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
new file mode 100644
index 0000000000..e878058b7b
--- /dev/null
+++ b/doc/guides/nics/zxdh.rst
@@ -0,0 +1,34 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright(c) 2023 ZTE Corporation.
+
+ZXDH Poll Mode Driver
+======================
+
+The ZXDH PMD (**librte_net_zxdh**) provides poll mode driver support
+for 25/100 Gbps ZXDH NX Series Ethernet Controllers based on
+the ZTE Ethernet Controller E310/E312.
+
+
+Features
+--------
+
+Features of the zxdh PMD are:
+
+- Multi arch support: x86_64, ARMv8.
+
+Prerequisites
+-------------
+
+- Learn about ZXDH NX Series Ethernet Controller NICs at
+  `<https://enterprise.zte.com.cn/sup-detail.html?id=271&suptype=1>`_.
+
+Driver compilation and testing
+------------------------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
+Limitations or Known issues
+---------------------------
+X86-32, Power8, ARMv7 and BSD are not supported yet.
+
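Usage note: the only devarg registered by this patch is q_depth (see
RTE_PMD_REGISTER_PARAM_STRING at the end of zxdh_ethdev.c). A minimal
sketch of passing it through the EAL allowlist follows; the PCI address
0000:01:00.0 is a placeholder, not anything defined by this patch.

/* Hedged sketch: pass the q_depth devarg to net_zxdh via the EAL
 * allowlist. The BDF below is a placeholder.
 */
#include <rte_eal.h>

int main(void)
{
    char *argv[] = {
        "zxdh_app",
        "-a", "0000:01:00.0,q_depth=2048",
    };

    if (rte_eal_init(3, argv) < 0)
        return -1;
    /* any port probed from net_zxdh now has hw->q_depth == 2048 */
    return rte_eal_cleanup();
}
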
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index fb6d34b782..1a3db8a04d 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -62,6 +62,7 @@ drivers = [
         'vhost',
         'virtio',
         'vmxnet3',
+        'zxdh',
 ]
 std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
 std_deps += ['bus_pci']         # very many PMDs depend on PCI, so make std
diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
new file mode 100644
index 0000000000..593e3c5933
--- /dev/null
+++ b/drivers/net/zxdh/meson.build
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2024 ZTE Corporation
+
+if not is_linux
+    build = false
+    reason = 'only supported on Linux'
+    subdir_done()
+endif
+
+if arch_subdir != 'x86' and arch_subdir != 'arm' or not dpdk_conf.get('RTE_ARCH_64')
+    build = false
+    reason = 'only supported on x86_64 and aarch64'
+    subdir_done()
+endif
+
+sources = files(
+    'zxdh_ethdev.c',
+    'zxdh_common.c',
+    'zxdh_pci.c',
+    'zxdh_msg.c',
+    'zxdh_queue.c',
+    'zxdh_npsdk.c',
+    )
diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c
new file mode 100644
index 0000000000..55497f8a24
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_common.c
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#include <stdint.h>
+#include <ethdev_driver.h>
+
+#include "zxdh_ethdev.h"
+#include "zxdh_common.h"
+
+uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);
+    uint32_t val      = *((volatile uint32_t *)(baseaddr + reg));
+    return val;
+}
+
+void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);
+    *((volatile uint32_t *)(baseaddr + reg)) = val;
+}
+
+int32_t zxdh_acquire_lock(struct zxdh_hw *hw)
+{
+    uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG);
+
+    /* check whether the lock is available */
+    if (!(var & ZXDH_VF_LOCK_ENABLE_MASK))
+        return -1;
+
+    return 0;
+}
+
+int32_t zxdh_release_lock(struct zxdh_hw *hw)
+{
+    uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG);
+
+    if (var & ZXDH_VF_LOCK_ENABLE_MASK) {
+        var &= ~ZXDH_VF_LOCK_ENABLE_MASK;
+        zxdh_write_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG, var);
+        return 0;
+    }
+
+    return -1;
+}
+
+uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg)
+{
+    uint32_t val = *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg));
+    return val;
+}
+
+void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val)
+{
+    *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg)) = val;
+}
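Note that zxdh_acquire_lock() tests the lock bit exactly once and
returns -1 when it is not available, so callers need their own retry
policy. A hedged sketch of such a loop, bounded by
ZXDH_ACQUIRE_CHANNEL_NUM_MAX from zxdh_common.h; the 1 ms back-off is
an assumption, not something this patch defines.

/* Hedged sketch: bounded retry around the BAR lock helpers declared in
 * zxdh_common.h. The 1 ms back-off between attempts is assumed.
 */
#include <rte_cycles.h>
#include "zxdh_common.h"

static int zxdh_acquire_lock_retry(struct zxdh_hw *hw)
{
    int i;

    for (i = 0; i < ZXDH_ACQUIRE_CHANNEL_NUM_MAX; i++) {
        if (zxdh_acquire_lock(hw) == 0)
            return 0;    /* lock bit was available */
        rte_delay_ms(1); /* assumed back-off */
    }
    return -1;           /* still held elsewhere, give up */
}
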
diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h
new file mode 100644
index 0000000000..912eb9ad42
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_common.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_COMMON_H_
+#define _ZXDH_COMMON_H_
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+
+#include "zxdh_ethdev.h"
+
+#define ZXDH_VF_LOCK_ENABLE_MASK     0x1
+#define ZXDH_ACQUIRE_CHANNEL_NUM_MAX 10
+#define ZXDH_VF_LOCK_REG             0x90
+
+uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg);
+void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val);
+int32_t zxdh_release_lock(struct zxdh_hw *hw);
+int32_t zxdh_acquire_lock(struct zxdh_hw *hw);
+uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg);
+void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ZXDH_COMMON_H_ */
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
new file mode 100644
index 0000000000..813ced24cd
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -0,0 +1,1328 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
+#include <ethdev_pci.h>
+#include <rte_kvargs.h>
+#include <rte_hexdump.h>
+
+#include "zxdh_ethdev.h"
+#include "zxdh_pci.h"
+#include "zxdh_logs.h"
+#include "zxdh_queue.h"
+#include "zxdh_rxtx.h"
+#include "zxdh_ethdev.h"
+#include "zxdh_msg.h"
+#include "zxdh_npsdk.h"
+
+struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
+struct zxdh_shared_data *zxdh_shared_data;
+const char *MZ_ZXDH_PMD_SHARED_DATA = "zxdh_pmd_shared_data";
+rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+struct zxdh_dtb_shared_data g_dtb_data = {0};
+
+#define ZXDH_PMD_DEFAULT_HOST_FEATURES   \
+    (1ULL << ZXDH_NET_F_MRG_RXBUF     | \
+     1ULL << ZXDH_NET_F_STATUS        | \
+     1ULL << ZXDH_NET_F_MQ            | \
+     1ULL << ZXDH_F_ANY_LAYOUT        | \
+     1ULL << ZXDH_F_VERSION_1         | \
+     1ULL << ZXDH_F_RING_PACKED       | \
+     1ULL << ZXDH_F_IN_ORDER          | \
+     1ULL << ZXDH_F_ORDER_PLATFORM    | \
+     1ULL << ZXDH_F_NOTIFICATION_DATA | \
+     1ULL << ZXDH_NET_F_MAC           | \
+     1ULL << ZXDH_NET_F_CSUM          | \
+     1ULL << ZXDH_NET_F_GUEST_CSUM    | \
+     1ULL << ZXDH_NET_F_GUEST_TSO4    | \
+     1ULL << ZXDH_NET_F_GUEST_TSO6    | \
+     1ULL << ZXDH_NET_F_HOST_TSO4     | \
+     1ULL << ZXDH_NET_F_HOST_TSO6     | \
+     1ULL << ZXDH_NET_F_GUEST_UFO     | \
+     1ULL << ZXDH_NET_F_HOST_UFO)
+
+#define ZXDH_PMD_DEFAULT_GUEST_FEATURES   \
+    (1ULL << ZXDH_NET_F_MRG_RXBUF     | \
+     1ULL << ZXDH_NET_F_STATUS        | \
+     1ULL << ZXDH_NET_F_MQ            | \
+     1ULL << ZXDH_F_ANY_LAYOUT        | \
+     1ULL << ZXDH_F_VERSION_1         | \
+     1ULL << ZXDH_F_RING_PACKED       | \
+     1ULL << ZXDH_F_IN_ORDER          | \
+     1ULL << ZXDH_F_NOTIFICATION_DATA | \
+     1ULL << ZXDH_NET_F_MAC)
+
+#define ZXDH_RX_QUEUES_MAX  128U
+#define ZXDH_TX_QUEUES_MAX  128U
+
+static unsigned int
+log2above(unsigned int v)
+{
+    unsigned int l;
+    unsigned int r;
+
+    for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
+        r |= (v & 1);
+    return l + r;
+}
+
+static uint16_t zxdh_queue_desc_pre_setup(uint16_t desc)
+{
+    uint32_t nb_desc = desc;
+
+    if (desc < ZXDH_MIN_QUEUE_DEPTH) {
+        PMD_RX_LOG(WARNING,
+            "nb_desc(%u) is below the min queue depth, using %u instead",
+            desc, ZXDH_MIN_QUEUE_DEPTH);
+        return ZXDH_MIN_QUEUE_DEPTH;
+    }
+
+    if (desc > ZXDH_MAX_QUEUE_DEPTH) {
+        PMD_RX_LOG(WARNING,
+            "nb_desc(%u) can't be greater than the max queue depth (%d), using the max",
+            desc, ZXDH_MAX_QUEUE_DEPTH);
+        return ZXDH_MAX_QUEUE_DEPTH;
+    }
+
+    if (!rte_is_power_of_2(desc)) {
+        nb_desc = 1 << log2above(desc);
+        if (nb_desc > ZXDH_MAX_QUEUE_DEPTH)
+            nb_desc = ZXDH_MAX_QUEUE_DEPTH;
+
+        PMD_RX_LOG(WARNING,
+            "nb_desc(%u) is not a power of two, rounding up to %d",
+            desc, nb_desc);
+    }
+
+    return nb_desc;
+}
+
+static int32_t hw_q_depth_handler(const char *key __rte_unused,
+                const char *value, void *ret_val)
+{
+    uint16_t val = 0;
+    struct zxdh_hw *hw = ret_val;
+
+    val = strtoul(value, NULL, 0);
+    uint16_t q_depth = zxdh_queue_desc_pre_setup(val);
+
+    hw->q_depth = q_depth;
+    return 0;
+}
+
+static int32_t zxdh_dev_devargs_parse(struct rte_devargs *devargs, struct zxdh_hw *hw)
+{
+    struct rte_kvargs *kvlist = NULL;
+    int32_t ret = 0;
+
+    if (devargs == NULL)
+        return 0;
+
+    kvlist = rte_kvargs_parse(devargs->args, NULL);
+    if (kvlist == NULL) {
+        PMD_INIT_LOG(ERR, "error when parsing devargs");
+        return 0;
+    }
+
+    ret = rte_kvargs_process(kvlist, "q_depth", hw_q_depth_handler, hw);
+    if (ret < 0) {
+        PMD_INIT_LOG(ERR, "Failed to parse q_depth");
+        goto exit;
+    }
+    if (!hw->q_depth)
+        hw->q_depth = ZXDH_MIN_QUEUE_DEPTH;
+
+exit:
+    rte_kvargs_free(kvlist);
+    return ret;
+}
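The q_depth value is clamped to [ZXDH_MIN_QUEUE_DEPTH,
ZXDH_MAX_QUEUE_DEPTH] and rounded up to a power of two. A standalone
sketch of the same arithmetic, mirroring log2above() and
zxdh_queue_desc_pre_setup() above, with the limits copied from
zxdh_ethdev.h:

/* Standalone sketch of the q_depth normalization rule above.
 * Limits copied from zxdh_ethdev.h.
 */
#include <assert.h>
#include <stdint.h>

#define ZXDH_MIN_QUEUE_DEPTH 1024
#define ZXDH_MAX_QUEUE_DEPTH 32768

static uint16_t q_depth_normalize(uint32_t desc)
{
    uint32_t n;

    if (desc < ZXDH_MIN_QUEUE_DEPTH)
        return ZXDH_MIN_QUEUE_DEPTH;
    if (desc > ZXDH_MAX_QUEUE_DEPTH)
        return ZXDH_MAX_QUEUE_DEPTH;
    for (n = ZXDH_MIN_QUEUE_DEPTH; n < desc; n <<= 1)
        ; /* next power of two >= desc */
    return n;
}

int main(void)
{
    assert(q_depth_normalize(100)   == 1024);  /* below min */
    assert(q_depth_normalize(1500)  == 2048);  /* rounded up */
    assert(q_depth_normalize(2048)  == 2048);  /* already pow2 */
    assert(q_depth_normalize(65536) == 32768); /* above max */
    return 0;
}
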
+
+static int zxdh_init_shared_data(void)
+{
+    const struct rte_memzone *mz;
+    int ret = 0;
+
+    rte_spinlock_lock(&zxdh_shared_data_lock);
+    if (zxdh_shared_data == NULL) {
+        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+            /* Allocate shared memory. */
+            mz = rte_memzone_reserve(MZ_ZXDH_PMD_SHARED_DATA,
+                    sizeof(*zxdh_shared_data), SOCKET_ID_ANY, 0);
+            if (mz == NULL) {
+                PMD_INIT_LOG(ERR, "Cannot allocate zxdh shared data");
+                ret = -rte_errno;
+                goto error;
+            }
+            zxdh_shared_data = mz->addr;
+            memset(zxdh_shared_data, 0, sizeof(*zxdh_shared_data));
+            rte_spinlock_init(&zxdh_shared_data->lock);
+        } else { /* Lookup allocated shared memory. */
+            mz = rte_memzone_lookup(MZ_ZXDH_PMD_SHARED_DATA);
+            if (mz == NULL) {
+                PMD_INIT_LOG(ERR, "Cannot attach zxdh shared data");
+                ret = -rte_errno;
+                goto error;
+            }
+            zxdh_shared_data = mz->addr;
+        }
+    }
+
+error:
+    rte_spinlock_unlock(&zxdh_shared_data_lock);
+    return ret;
+}
+
+static int zxdh_init_once(struct rte_eth_dev *eth_dev)
+{
+    PMD_INIT_LOG(DEBUG, "port 0x%x init...", eth_dev->data->port_id);
+    if (zxdh_init_shared_data())
+        return -rte_errno;
+
+    struct zxdh_shared_data *sd = zxdh_shared_data;
+    int ret = 0;
+
+    rte_spinlock_lock(&sd->lock);
+    if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+        if (!sd->init_done) {
+            ++sd->secondary_cnt;
+            sd->init_done = true;
+        }
+        goto out;
+    }
+
+    sd->dev_refcnt++;
+out:
+    rte_spinlock_unlock(&sd->lock);
+    return ret;
+}
+
+static int32_t zxdh_get_pci_dev_config(struct zxdh_hw *hw)
+{
+    hw->host_features = zxdh_vtpci_get_features(hw);
+    hw->host_features = ZXDH_PMD_DEFAULT_HOST_FEATURES;
+
+    uint64_t guest_features = (uint64_t)ZXDH_PMD_DEFAULT_GUEST_FEATURES;
+    uint64_t nego_features = guest_features & hw->host_features;
+
+    hw->guest_features = nego_features;
+
+    if (hw->guest_features & (1ULL << ZXDH_NET_F_MAC)) {
+        zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, mac),
+                &hw->mac_addr, RTE_ETHER_ADDR_LEN);
+        PMD_INIT_LOG(DEBUG, "get dev mac: %02X:%02X:%02X:%02X:%02X:%02X",
+                hw->mac_addr[0], hw->mac_addr[1],
+                hw->mac_addr[2], hw->mac_addr[3],
+                hw->mac_addr[4], hw->mac_addr[5]);
+    } else {
+        rte_eth_random_addr(&hw->mac_addr[0]);
+        PMD_INIT_LOG(DEBUG, "random dev mac: %02X:%02X:%02X:%02X:%02X:%02X",
+                hw->mac_addr[0], hw->mac_addr[1],
+                hw->mac_addr[2], hw->mac_addr[3],
+                hw->mac_addr[4], hw->mac_addr[5]);
+    }
+    uint32_t max_queue_pairs;
+
+    zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, max_virtqueue_pairs),
+            &max_queue_pairs, sizeof(max_queue_pairs));
+    PMD_INIT_LOG(DEBUG, "get max queue pairs %u", max_queue_pairs);
+    if (max_queue_pairs == 0)
+        hw->max_queue_pairs = ZXDH_RX_QUEUES_MAX;
+    else
+        hw->max_queue_pairs = RTE_MIN(ZXDH_RX_QUEUES_MAX, max_queue_pairs);
+
+    PMD_INIT_LOG(DEBUG, "set max queue pairs %d", hw->max_queue_pairs);
+
+    hw->weak_barriers = !vtpci_with_feature(hw, ZXDH_F_ORDER_PLATFORM);
+    return 0;
+}
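Negotiation above is a plain AND of the guest and host masks; only
features offered by both sides survive. A standalone sketch of that
step; the bit positions below follow the virtio convention and are
assumptions, the authoritative values live in zxdh_pci.h:

/* Standalone sketch of the guest & host feature negotiation.
 * Bit positions are assumed (virtio-style); see zxdh_pci.h.
 */
#include <assert.h>
#include <stdint.h>

#define NET_F_MRG_RXBUF 15 /* assumed bit position */
#define NET_F_HOST_TSO4 11 /* assumed bit position */

int main(void)
{
    uint64_t host  = (1ULL << NET_F_MRG_RXBUF) | (1ULL << NET_F_HOST_TSO4);
    uint64_t guest = (1ULL << NET_F_MRG_RXBUF);
    uint64_t nego  = guest & host;

    assert(nego & (1ULL << NET_F_MRG_RXBUF));    /* offered by both */
    assert(!(nego & (1ULL << NET_F_HOST_TSO4))); /* host-only, dropped */
    return 0;
}
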
+
+static void zxdh_dev_free_mbufs(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint16_t nr_vq = hw->queue_num;
+    uint32_t i, mbuf_num = 0;
+
+    const char *type __rte_unused;
+    struct virtqueue *vq = NULL;
+    struct rte_mbuf *buf = NULL;
+    int32_t queue_type = 0;
+
+    if (hw->vqs == NULL)
+        return;
+
+    for (i = 0; i < nr_vq; i++) {
+        vq = hw->vqs[i];
+        if (!vq)
+            continue;
+
+        queue_type = get_queue_type(i);
+        if (queue_type == VTNET_RQ)
+            type = "rxq";
+        else if (queue_type == VTNET_TQ)
+            type = "txq";
+        else
+            continue;
+
+        PMD_INIT_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i);
+
+        while ((buf = zxdh_virtqueue_detach_unused(vq)) != NULL) {
+            rte_pktmbuf_free(buf);
+            mbuf_num++;
+        }
+
+        PMD_INIT_LOG(DEBUG, "After freeing %s[%d] used and unused buf", type, i);
+    }
+
+    PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
+}
+
+static int32_t zxdh_init_device(struct rte_eth_dev *eth_dev)
+{
+    struct zxdh_hw *hw = eth_dev->data->dev_private;
+    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+    int ret = zxdh_read_pci_caps(pci_dev, hw);
+
+    if (ret) {
+        PMD_INIT_LOG(ERR, "port 0x%x pci caps read failed.", hw->vport.vport);
+        goto err;
+    }
+    zxdh_hw_internal[hw->port_id].vtpci_ops = &zxdh_modern_ops;
+    zxdh_vtpci_reset(hw);
+    zxdh_get_pci_dev_config(hw);
+    if (hw->vqs) { /* not reachable? */
+        zxdh_dev_free_mbufs(eth_dev);
+        ret = zxdh_free_queues(eth_dev);
+        if (ret < 0) {
+            PMD_INIT_LOG(ERR, "port 0x%x free queue failed.", hw->vport.vport);
+            goto err;
+        }
+    }
+    eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+    hw->speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+    hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
+
+    rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, &eth_dev->data->mac_addrs[0]);
+    PMD_INIT_LOG(DEBUG, "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
+        eth_dev->data->mac_addrs->addr_bytes[0],
+        eth_dev->data->mac_addrs->addr_bytes[1],
+        eth_dev->data->mac_addrs->addr_bytes[2],
+        eth_dev->data->mac_addrs->addr_bytes[3],
+        eth_dev->data->mac_addrs->addr_bytes[4],
+        eth_dev->data->mac_addrs->addr_bytes[5]);
+    /* If host does not support both status and MSI-X then disable LSC */
+    if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS) && (hw->use_msix != ZXDH_MSIX_NONE)) {
+        eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+        PMD_INIT_LOG(DEBUG, "LSC enable");
+    } else {
+        eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+    }
+    return 0;
+
+err:
+    PMD_INIT_LOG(ERR, "port %d init device failed", eth_dev->data->port_id);
+    return ret;
+}
+
+
+static void zxdh_queues_unbind_intr(struct rte_eth_dev *dev)
+{
+    PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
+    struct zxdh_hw *hw = dev->data->dev_private;
+    int32_t i;
+
+    for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+        VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);
+        VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2 + 1], ZXDH_MSI_NO_VECTOR);
+    }
+}
+
+static int32_t zxdh_intr_unmask(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    if (rte_intr_ack(dev->intr_handle) < 0)
+        return -1;
+
+    hw->use_msix = zxdh_vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
+
+    return 0;
+}
+
+
+static void zxdh_devconf_intr_handler(void *param)
+{
+    struct rte_eth_dev *dev = param;
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint16_t status = 0;
+    /* Read interrupt status which clears interrupt */
+    uint8_t isr = zxdh_vtpci_isr(hw);
+
+    if (zxdh_intr_unmask(dev) < 0)
+        PMD_DRV_LOG(ERR, "interrupt enable failed");
+    if (isr & ZXDH_PCI_ISR_CONFIG) {
+        /* todo: provided later
+         * if (zxdh_dev_link_update(dev, 0) == 0)
+         *     rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+         */
+
+        if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS)) {
+            zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, status),
+                    &status, sizeof(status));
+            if (status & ZXDH_NET_S_ANNOUNCE)
+                zxdh_notify_peers(dev);
+        }
+    }
+}
+
+/* Interrupt handler triggered by NIC for handling specific interrupt. */
+static void zxdh_frompfvf_intr_handler(void *param)
+{
+    struct rte_eth_dev *dev = param;
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint64_t virt_addr = 0;
+
+    virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MSG_CHAN_PFVFSHARE_OFFSET);
+    if (hw->is_pf) {
+        PMD_INIT_LOG(INFO, "zxdh_frompfvf_intr_handler PF");
+        zxdh_bar_irq_recv(MSG_CHAN_END_VF, MSG_CHAN_END_PF, virt_addr, dev);
+    } else {
+        PMD_INIT_LOG(INFO, "zxdh_frompfvf_intr_handler VF");
+        zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_VF, virt_addr, dev);
+    }
+}
+
+/* Interrupt handler triggered by NIC for handling specific interrupt. */
+static void zxdh_fromriscv_intr_handler(void *param)
+{
+    struct rte_eth_dev *dev = param;
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint64_t virt_addr = 0;
+
+    virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);
+    if (hw->is_pf) {
+        PMD_INIT_LOG(INFO, "zxdh_fromriscv_intr_handler PF");
+        zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_PF, virt_addr, dev);
+    } else {
+        PMD_INIT_LOG(INFO, "zxdh_fromriscv_intr_handler VF");
+        zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_VF, virt_addr, dev);
+    }
+}
+
+static void zxdh_intr_cb_unreg(struct rte_eth_dev *dev)
+{
+    PMD_INIT_LOG(DEBUG, "intr callbacks unregister");
+    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+        rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    /* unregister the dev config intr update callback */
+    rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+    /* Unregister the risc_v to pf interrupt callbacks */
+    struct rte_intr_handle *tmp = hw->risc_intr +
+            (MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+
+    rte_intr_callback_unregister(tmp, zxdh_frompfvf_intr_handler, dev);
+    tmp = hw->risc_intr + (MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+    rte_intr_callback_unregister(tmp, zxdh_fromriscv_intr_handler, dev);
+}
+
+static int32_t zxdh_intr_disable(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    if (!hw->intr_enabled)
+        return 0;
+
+    zxdh_intr_cb_unreg(dev);
+    if (rte_intr_disable(dev->intr_handle) < 0)
+        return -1;
+
+    hw->intr_enabled = 0;
+    return 0;
+}
+
+static int32_t zxdh_intr_release(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+        VTPCI_OPS(hw)->set_config_irq(hw, ZXDH_MSI_NO_VECTOR);
+
+    zxdh_queues_unbind_intr(dev);
+    zxdh_intr_disable(dev);
+
+    rte_intr_efd_disable(dev->intr_handle);
+    rte_intr_vec_list_free(dev->intr_handle);
+    rte_free(hw->risc_intr);
+    hw->risc_intr = NULL;
+    rte_free(hw->dtb_intr);
+    hw->dtb_intr = NULL;
+    return 0;
+}
+
+static int32_t zxdh_setup_risc_interrupts(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint8_t i;
+
+    if (!hw->risc_intr) {
+        PMD_INIT_LOG(DEBUG, "allocating risc_intr");
+        hw->risc_intr = rte_zmalloc("risc_intr",
+            ZXDH_MSIX_INTR_MSG_VEC_NUM * sizeof(struct rte_intr_handle), 0);
+        if (hw->risc_intr == NULL) {
+            PMD_INIT_LOG(ERR, "Failed to allocate risc_intr");
+            return -ENOMEM;
+        }
+    }
+
+    for (i = 0; i < ZXDH_MSIX_INTR_MSG_VEC_NUM; i++) {
+        if (dev->intr_handle->efds[i] < 0) {
+            PMD_INIT_LOG(ERR, "[%u]risc interrupt fd is invalid", i);
+            rte_free(hw->risc_intr);
+            hw->risc_intr = NULL;
+            return -1;
+        }
+
+        struct rte_intr_handle *intr_handle = hw->risc_intr + i;
+
+        intr_handle->fd = dev->intr_handle->efds[i];
+        intr_handle->type = dev->intr_handle->type;
+    }
+
+    return 0;
+}
+
+static int32_t zxdh_setup_dtb_interrupts(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    if (!hw->dtb_intr) {
+        hw->dtb_intr = rte_zmalloc("dtb_intr", sizeof(struct rte_intr_handle), 0);
+        if (hw->dtb_intr == NULL) {
+            PMD_INIT_LOG(ERR, "Failed to allocate dtb_intr");
+            return -ENOMEM;
+        }
+    }
+
+    if (dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1] < 0) {
+        PMD_INIT_LOG(ERR, "[%d]dtb interrupt fd is invalid", ZXDH_MSIX_INTR_DTB_VEC - 1);
+        rte_free(hw->dtb_intr);
+        hw->dtb_intr = NULL;
+        return -1;
+    }
+    hw->dtb_intr->fd = dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1];
+    hw->dtb_intr->type = dev->intr_handle->type;
+    return 0;
+}
+
+static int32_t zxdh_queues_bind_intr(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    int32_t i;
+    uint16_t vec;
+
+    if (!dev->data->dev_conf.intr_conf.rxq) {
+        PMD_INIT_LOG(INFO, "queue/interrupt mask, nb_rx_queues %u",
+                dev->data->nb_rx_queues);
+        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+            vec = VTPCI_OPS(hw)->set_queue_irq(hw,
+                    hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);
+            PMD_INIT_LOG(INFO, "vq%d irq set 0x%x, get 0x%x",
+                    i * 2, ZXDH_MSI_NO_VECTOR, vec);
+        }
+    } else {
+        PMD_INIT_LOG(DEBUG, "queue/interrupt binding, nb_rx_queues %u",
+                dev->data->nb_rx_queues);
+        for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+            vec = VTPCI_OPS(hw)->set_queue_irq(hw,
+                    hw->vqs[i * 2], i + ZXDH_QUE_INTR_VEC_BASE);
+            PMD_INIT_LOG(INFO, "vq%d irq set %d, get %d",
+                    i * 2, i + ZXDH_QUE_INTR_VEC_BASE, vec);
+        }
+    }
+    /* mask all txq intr */
+    for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+        vec = VTPCI_OPS(hw)->set_queue_irq(hw,
+                hw->vqs[(i * 2) + 1], ZXDH_MSI_NO_VECTOR);
+        PMD_INIT_LOG(INFO, "vq%d irq set 0x%x, get 0x%x",
+                (i * 2) + 1, ZXDH_MSI_NO_VECTOR, vec);
+    }
+    return 0;
+}
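Throughout the binding code above, logical queue pairs are interleaved
in hw->vqs: rx queue i lives at index 2*i, its tx partner at 2*i+1, and
rxq vectors start at ZXDH_QUE_INTR_VEC_BASE. A standalone sketch of the
mapping; the base value 8 is a placeholder for ZXDH_QUE_INTR_VEC_BASE,
whose real value is defined elsewhere in this series:

/* Standalone sketch of the vq/vector layout used above:
 * rxq i -> vq 2i, txq i -> vq 2i+1, rxq vector = base + i.
 */
#include <assert.h>
#include <stdint.h>

#define QUE_INTR_VEC_BASE 8 /* placeholder value */

static inline uint16_t rxq_to_vq(uint16_t i)  { return i * 2; }
static inline uint16_t txq_to_vq(uint16_t i)  { return i * 2 + 1; }
static inline uint16_t rxq_to_vec(uint16_t i) { return QUE_INTR_VEC_BASE + i; }

int main(void)
{
    assert(rxq_to_vq(3) == 6);   /* rx queue 3 -> vq 6 */
    assert(txq_to_vq(3) == 7);   /* tx queue 3 -> vq 7 */
    assert(rxq_to_vec(3) == 11); /* vector 8 + 3 */
    return 0;
}
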
+
+/*
+ * Should be called only after device is paused.
+ */
+int32_t zxdh_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts, int32_t nb_pkts)
+{
+    struct zxdh_hw    *hw   = dev->data->dev_private;
+    struct virtnet_tx *txvq = dev->data->tx_queues[0];
+    int32_t ret = 0;
+
+    hw->inject_pkts = tx_pkts;
+    ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
+    hw->inject_pkts = NULL;
+
+    return ret;
+}
+
+int32_t zxdh_dev_pause(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    if (hw->started == 0) {
+        /* Device is just stopped. */
+        return -1;
+    }
+    hw->started = 0;
+    hw->admin_status = 0;
+    /*
+     * Prevent the worker threads from touching queues to avoid contention,
+     * 1 ms should be enough for the ongoing Tx function to finish.
+     */
+    rte_delay_ms(1);
+    return 0;
+}
+
+void zxdh_notify_peers(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct virtnet_rx *rxvq = NULL;
+    struct rte_mbuf *rarp_mbuf = NULL;
+
+    if (!dev->data->rx_queues)
+        return;
+
+    rxvq = dev->data->rx_queues[0];
+    if (!rxvq)
+        return;
+
+    rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool, (struct rte_ether_addr *)hw->mac_addr);
+    if (rarp_mbuf == NULL) {
+        PMD_DRV_LOG(ERR, "failed to make RARP packet.");
+        return;
+    }
+
+    /* If virtio port just stopped, no need to send RARP */
+    rte_spinlock_lock(&hw->state_lock);
+    if (zxdh_dev_pause(dev) < 0) {
+        rte_pktmbuf_free(rarp_mbuf);
+        rte_spinlock_unlock(&hw->state_lock);
+        return;
+    }
+    zxdh_inject_pkts(dev, &rarp_mbuf, 1);
+    hw->started = 1;
+    hw->admin_status = 1;
+    rte_spinlock_unlock(&hw->state_lock);
+}
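zxdh_dev_pause() and zxdh_inject_pkts() are exported as a pair: pause
first under the state lock, inject on tx queue 0, then restore the
started/admin flags. A hedged sketch of a caller following the same
sequence as zxdh_notify_peers() above; the prototypes are assumed
visible (both functions are defined non-static in this file):

/* Hedged sketch: inject one control frame with the pause/inject pair,
 * mirroring the sequence in zxdh_notify_peers().
 */
#include "zxdh_ethdev.h"

int32_t zxdh_dev_pause(struct rte_eth_dev *dev);
int32_t zxdh_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts, int32_t nb_pkts);

static int zxdh_inject_one(struct rte_eth_dev *dev, struct rte_mbuf *m)
{
    struct zxdh_hw *hw = dev->data->dev_private;
    int32_t ret;

    rte_spinlock_lock(&hw->state_lock);
    if (zxdh_dev_pause(dev) < 0) {      /* device already stopped */
        rte_spinlock_unlock(&hw->state_lock);
        return -1;
    }
    ret = zxdh_inject_pkts(dev, &m, 1); /* tx through queue 0 */
    hw->started = 1;                    /* resume, as notify_peers does */
    hw->admin_status = 1;
    rte_spinlock_unlock(&hw->state_lock);
    return ret == 1 ? 0 : -1;
}
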
+
+static void zxdh_intr_cb_reg(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+        rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+
+    /* register callback to update dev config intr */
+    rte_intr_callback_register(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+    /* Register risc_v to pf interrupt callback */
+    struct rte_intr_handle *tmp = hw->risc_intr +
+            (MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+
+    rte_intr_callback_register(tmp, zxdh_frompfvf_intr_handler, dev);
+
+    tmp = hw->risc_intr + (MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+    rte_intr_callback_register(tmp, zxdh_fromriscv_intr_handler, dev);
+}
+
+static int32_t zxdh_intr_enable(struct rte_eth_dev *dev)
+{
+    int ret = 0;
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    if (!hw->intr_enabled) {
+        zxdh_intr_cb_reg(dev);
+        ret = rte_intr_enable(dev->intr_handle);
+        if (unlikely(ret))
+            PMD_INIT_LOG(ERR, "Failed to enable %s intr", dev->data->name);
+
+        hw->intr_enabled = 1;
+    }
+    return ret;
+}
+
+static int32_t zxdh_configure_intr(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    int32_t ret = 0;
+
+    if (!rte_intr_cap_multiple(dev->intr_handle)) {
+        PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
+        return -ENOTSUP;
+    }
+    zxdh_intr_release(dev);
+    uint8_t nb_efd = ZXDH_MSIX_INTR_DTB_VEC_NUM + ZXDH_MSIX_INTR_MSG_VEC_NUM;
+
+    if (dev->data->dev_conf.intr_conf.rxq)
+        nb_efd += dev->data->nb_rx_queues;
+
+    if (rte_intr_efd_enable(dev->intr_handle, nb_efd)) {
+        PMD_INIT_LOG(ERR, "Failed to create eventfd");
+        return -1;
+    }
+
+    if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
+                    hw->max_queue_pairs + ZXDH_INTR_NONQUE_NUM)) {
+        PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
+                    hw->max_queue_pairs + ZXDH_INTR_NONQUE_NUM);
+        return -ENOMEM;
+    }
+    PMD_INIT_LOG(INFO, "allocate %u rxq vectors", dev->intr_handle->vec_list_size);
+    if (zxdh_setup_risc_interrupts(dev) != 0) {
+        PMD_INIT_LOG(ERR, "Error setting up risc_v interrupts!");
+        ret = -1;
+        goto free_intr_vec;
+    }
+    if (zxdh_setup_dtb_interrupts(dev) != 0) {
+        PMD_INIT_LOG(ERR, "Error setting up dtb interrupts!");
+        ret = -1;
+        goto free_intr_vec;
+    }
+
+    if (zxdh_queues_bind_intr(dev) < 0) {
+        PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
+        ret = -1;
+        goto free_intr_vec;
+    }
+    /* DO NOT try to remove this! This function will enable msix,
+     * or QEMU will encounter SIGSEGV when DRIVER_OK is sent.
+     * And for legacy devices, this should be done before queue/vec
+     * binding to change the config size from 20 to 24, or
+     * ZXDH_MSI_QUEUE_VECTOR (22) will be ignored.
+     */
+    if (zxdh_intr_enable(dev) < 0) {
+        PMD_DRV_LOG(ERR, "interrupt enable failed");
+        ret = -1;
+        goto free_intr_vec;
+    }
+    return 0;
+
+free_intr_vec:
+    zxdh_intr_release(dev);
+    return ret;
+}
+
+/* dev_ops for zxdh, bare necessities for basic operation */
+static const struct eth_dev_ops zxdh_eth_dev_ops = {
+    .dev_configure         = NULL,
+    .dev_start             = NULL,
+    .dev_stop              = NULL,
+    .dev_close             = NULL,
+
+    .rx_queue_setup        = NULL,
+    .rx_queue_intr_enable  = NULL,
+    .rx_queue_intr_disable = NULL,
+
+    .tx_queue_setup        = NULL,
+};
+
+
+static int32_t set_rxtx_funcs(struct rte_eth_dev *eth_dev)
+{
+    /* todo: provided later
+     * eth_dev->tx_pkt_prepare = zxdh_xmit_pkts_prepare;
+     */
+
+    struct zxdh_hw *hw = eth_dev->data->dev_private;
+
+    if (!vtpci_packed_queue(hw)) {
+        PMD_INIT_LOG(ERR, "port %u does not support packed queue", eth_dev->data->port_id);
+        return -1;
+    }
+    if (!vtpci_with_feature(hw, ZXDH_NET_F_MRG_RXBUF)) {
+        PMD_INIT_LOG(ERR, "port %u does not support rx mergeable", eth_dev->data->port_id);
+        return -1;
+    }
+    /* todo: rx/tx provided later
+     * eth_dev->tx_pkt_burst = &zxdh_xmit_pkts_packed;
+     * eth_dev->rx_pkt_burst = &zxdh_recv_mergeable_pkts_packed;
+     */
+
+    return 0;
+}
+
+static void zxdh_msg_cb_reg(struct zxdh_hw *hw)
+{
+    if (hw->is_pf)
+        zxdh_bar_chan_msg_recv_register(MODULE_BAR_MSG_TO_PF, pf_recv_bar_msg);
+    else
+        zxdh_bar_chan_msg_recv_register(MODULE_BAR_MSG_TO_VF, vf_recv_bar_msg);
+}
+
+static void zxdh_priv_res_init(struct zxdh_hw *hw)
+{
+    hw->vlan_fiter = (uint64_t *)rte_malloc("vlan_filter", 64 * sizeof(uint64_t), 1);
+    memset(hw->vlan_fiter, 0, 64 * sizeof(uint64_t));
+    if (hw->is_pf)
+        hw->vfinfo = rte_zmalloc("vfinfo", ZXDH_MAX_VF * sizeof(struct vfinfo), 4);
+    else
+        hw->vfinfo = NULL;
+}
+
+static void set_vfs_pcieid(struct zxdh_hw *hw)
+{
+    if (hw->pfinfo.vf_nums > ZXDH_MAX_VF) {
+        PMD_DRV_LOG(ERR, "vf nums %u out of range", hw->pfinfo.vf_nums);
+        return;
+    }
+    if (hw->vfinfo == NULL) {
+        PMD_DRV_LOG(ERR, "vfinfo is uninitialized");
+        return;
+    }
+
+    PMD_DRV_LOG(INFO, "vf nums %d", hw->pfinfo.vf_nums);
+    int vf_idx;
+
+    for (vf_idx = 0; vf_idx < hw->pfinfo.vf_nums; vf_idx++)
+        hw->vfinfo[vf_idx].pcieid = VF_PCIE_ID(hw->pcie_id, vf_idx);
+}
+
+
+static void zxdh_sriovinfo_init(struct zxdh_hw *hw)
+{
+    hw->pfinfo.pcieid = PF_PCIE_ID(hw->pcie_id);
+
+    if (hw->is_pf)
+        set_vfs_pcieid(hw);
+}
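set_vfs_pcieid() derives every VF's pcieid from the PF's via the
bit-packing macros in zxdh_ethdev.h (later in this patch). A standalone
sketch of that arithmetic with the macros copied verbatim; the sample
pcie_id value is hypothetical:

/* Standalone sketch of the PF/VF pcieid packing; macros copied from
 * zxdh_ethdev.h. The sample pcie_id is hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PF_PCIE_ID(pcie_id)         ((pcie_id & 0xff00) | 1 << 11)
#define VF_PCIE_ID(pcie_id, vf_idx) ((pcie_id & 0xff00) | (1 << 11) | (vf_idx & 0xff))

int main(void)
{
    uint16_t pcie_id = 0x2340; /* hypothetical PF pcie_id */

    assert(PF_PCIE_ID(pcie_id) == 0x2B00);    /* high byte kept, bit 11 set */
    assert(VF_PCIE_ID(pcie_id, 5) == 0x2B05); /* vf index in the low byte */
    printf("pf 0x%04x vf5 0x%04x\n", PF_PCIE_ID(pcie_id), VF_PCIE_ID(pcie_id, 5));
    return 0;
}
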
+
+static int zxdh_tbl_entry_offline_destroy(struct zxdh_hw *hw)
+{
+    int ret = 0;
+    uint32_t sdt_no;
+
+    if (!g_dtb_data.init_done)
+        return ret;
+
+    if (hw->is_pf) {
+        sdt_no = MK_SDT_NO(L2_ENTRY, hw->hash_search_index);
+        ret = dpp_dtb_hash_offline_delete(0, g_dtb_data.queueid, sdt_no, 0);
+        PMD_DRV_LOG(DEBUG, "%d dpp_dtb_hash_offline_delete sdt_no %d",
+                hw->port_id, sdt_no);
+        if (ret)
+            PMD_DRV_LOG(ERR, "%d dpp_dtb_hash_offline_delete sdt_no %d failed",
+                    hw->port_id, sdt_no);
+
+        sdt_no = MK_SDT_NO(MC, hw->hash_search_index);
+        ret = dpp_dtb_hash_offline_delete(0, g_dtb_data.queueid, sdt_no, 0);
+        PMD_DRV_LOG(DEBUG, "%d dpp_dtb_hash_offline_delete sdt_no %d",
+                hw->port_id, sdt_no);
+        if (ret)
+            PMD_DRV_LOG(ERR, "%d dpp_dtb_hash_offline_delete sdt_no %d failed",
+                hw->port_id, sdt_no);
+    }
+    return ret;
+}
+
+static inline int zxdh_dtb_dump_res_init(struct zxdh_hw *hw __rte_unused,
+            DPP_DEV_INIT_CTRL_T *dpp_ctrl)
+{
+    int ret = 0;
+    int i;
+
+    struct zxdh_dtb_bulk_dump_info dtb_dump_baseres[] = {
+    /* eram */
+    {"zxdh_sdt_vport_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_VPORT_ATT_TABLE, NULL},
+    {"zxdh_sdt_panel_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_PANEL_ATT_TABLE, NULL},
+    {"zxdh_sdt_rss_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_RSS_ATT_TABLE, NULL},
+    {"zxdh_sdt_vlan_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_VLAN_ATT_TABLE, NULL},
+    /* hash */
+    {"zxdh_sdt_l2_entry_table0", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE0, NULL},
+    {"zxdh_sdt_l2_entry_table1", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE1, NULL},
+    {"zxdh_sdt_l2_entry_table2", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE2, NULL},
+    {"zxdh_sdt_l2_entry_table3", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE3, NULL},
+    {"zxdh_sdt_mc_table0", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE0, NULL},
+    {"zxdh_sdt_mc_table1", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE1, NULL},
+    {"zxdh_sdt_mc_table2", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE2, NULL},
+    {"zxdh_sdt_mc_table3", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE3, NULL},
+    };
+    for (i = 0; i < (int)RTE_DIM(dtb_dump_baseres); i++) {
+        struct zxdh_dtb_bulk_dump_info *p = dtb_dump_baseres + i;
+        const struct rte_memzone *generic_dump_mz = rte_memzone_reserve_aligned(p->mz_name,
+                    p->mz_size, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+
+        if (generic_dump_mz == NULL) {
+            PMD_DRV_LOG(ERR,
+                "Cannot alloc mem for dtb tbl bulk dump, mz_name is %s, mz_size is %u",
+                p->mz_name, p->mz_size);
+            ret = -ENOMEM;
+            return ret;
+        }
+        p->mz = generic_dump_mz;
+        dpp_ctrl->dump_addr_info[i].vir_addr = generic_dump_mz->addr_64;
+        dpp_ctrl->dump_addr_info[i].phy_addr = generic_dump_mz->iova;
+        dpp_ctrl->dump_addr_info[i].sdt_no   = p->sdt_no;
+        dpp_ctrl->dump_addr_info[i].size     = p->mz_size;
+
+        g_dtb_data.dtb_table_bulk_dump_mz[dpp_ctrl->dump_sdt_num] = generic_dump_mz;
+        dpp_ctrl->dump_sdt_num++;
+    }
+    return ret;
+}
+
+static void dtb_data_res_free(struct zxdh_hw *hw)
+{
+    struct rte_eth_dev *dev = hw->eth_dev;
+
+    if ((g_dtb_data.init_done) && (g_dtb_data.bind_device == dev))  {
+        PMD_DRV_LOG(INFO, "%s g_dtb_data free queue %d",
+                dev->data->name, g_dtb_data.queueid);
+
+        int ret = 0;
+
+        ret = dpp_np_online_uninstall(0, dev->data->name, g_dtb_data.queueid);
+        if (ret)
+            PMD_DRV_LOG(ERR, "%s dpp_np_online_uninstall failed", dev->data->name);
+
+        if (g_dtb_data.dtb_table_conf_mz) {
+            rte_memzone_free(g_dtb_data.dtb_table_conf_mz);
+            PMD_DRV_LOG(INFO, "%s free dtb_table_conf_mz", dev->data->name);
+            g_dtb_data.dtb_table_conf_mz = NULL;
+        }
+        if (g_dtb_data.dtb_table_dump_mz) {
+            PMD_DRV_LOG(INFO, "%s free dtb_table_dump_mz", dev->data->name);
+            rte_memzone_free(g_dtb_data.dtb_table_dump_mz);
+            g_dtb_data.dtb_table_dump_mz = NULL;
+        }
+        int i;
+
+        for (i = 0; i < ZXDH_MAX_BASE_DTB_TABLE_COUNT; i++) {
+            if (g_dtb_data.dtb_table_bulk_dump_mz[i]) {
+                rte_memzone_free(g_dtb_data.dtb_table_bulk_dump_mz[i]);
+                PMD_DRV_LOG(INFO, "%s free dtb_table_bulk_dump_mz[%d]",
+                        dev->data->name, i);
+                g_dtb_data.dtb_table_bulk_dump_mz[i] = NULL;
+            }
+        }
+        g_dtb_data.init_done = 0;
+        g_dtb_data.bind_device = NULL;
+    }
+    if (zxdh_shared_data != NULL)
+        zxdh_shared_data->npsdk_init_done = 0;
+}
+
+static inline int npsdk_dtb_res_init(struct rte_eth_dev *dev)
+{
+    int ret = 0;
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    if (g_dtb_data.init_done) {
+        PMD_INIT_LOG(DEBUG, "DTB res already init done, dev %s no need init",
+            dev->device->name);
+        return 0;
+    }
+    g_dtb_data.queueid = INVALID_DTBQUE;
+    g_dtb_data.bind_device = dev;
+    g_dtb_data.dev_refcnt++;
+    g_dtb_data.init_done = 1;
+    DPP_DEV_INIT_CTRL_T *dpp_ctrl = rte_malloc(NULL, sizeof(*dpp_ctrl) +
+            sizeof(DPP_DTB_ADDR_INFO_T) * 256, 0);
+
+    if (dpp_ctrl == NULL) {
+        PMD_INIT_LOG(ERR, "dev %s cannot allocate memory for dpp_ctrl", dev->device->name);
+        ret = -ENOMEM;
+        goto free_res;
+    }
+    memset(dpp_ctrl, 0, sizeof(*dpp_ctrl) + sizeof(DPP_DTB_ADDR_INFO_T) * 256);
+
+    dpp_ctrl->queue_id = 0xff;
+    dpp_ctrl->vport    = hw->vport.vport;
+    dpp_ctrl->vector   = ZXDH_MSIX_INTR_DTB_VEC;
+    strcpy((char *)dpp_ctrl->port_name, dev->device->name);
+    dpp_ctrl->pcie_vir_addr = (uint32_t)hw->bar_addr[0];
+
+    struct bar_offset_params param = {0};
+    struct bar_offset_res  res = {0};
+
+    param.pcie_id = hw->pcie_id;
+    param.virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET;
+    param.type = URI_NP;
+
+    ret = zxdh_get_bar_offset(&param, &res);
+    if (ret) {
+        PMD_INIT_LOG(ERR, "dev %s get npbar offset failed", dev->device->name);
+        goto free_res;
+    }
+    dpp_ctrl->np_bar_len = res.bar_length;
+    dpp_ctrl->np_bar_offset = res.bar_offset;
+    if (!g_dtb_data.dtb_table_conf_mz) {
+        const struct rte_memzone *conf_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_conf_mz",
+                ZXDH_DTB_TABLE_CONF_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+
+        if (conf_mz == NULL) {
+            PMD_INIT_LOG(ERR,
+                "dev %s cannot allocate memory for dtb table conf",
+                dev->device->name);
+            ret = -ENOMEM;
+            goto free_res;
+        }
+        dpp_ctrl->down_vir_addr = conf_mz->addr_64;
+        dpp_ctrl->down_phy_addr = conf_mz->iova;
+        g_dtb_data.dtb_table_conf_mz = conf_mz;
+    }
+    /* dump table */
+    if (!g_dtb_data.dtb_table_dump_mz) {
+        const struct rte_memzone *dump_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_dump_mz",
+                ZXDH_DTB_TABLE_DUMP_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+
+        if (dump_mz == NULL) {
+            PMD_INIT_LOG(ERR,
+                "dev %s cannot allocate memory for dtb table dump",
+                dev->device->name);
+            ret = -ENOMEM;
+            goto free_res;
+        }
+        dpp_ctrl->dump_vir_addr = dump_mz->addr_64;
+        dpp_ctrl->dump_phy_addr = dump_mz->iova;
+        g_dtb_data.dtb_table_dump_mz = dump_mz;
+    }
+    /* init bulk dump */
+    zxdh_dtb_dump_res_init(hw, dpp_ctrl);
+
+    ret = dpp_host_np_init(0, dpp_ctrl);
+    if (ret) {
+        PMD_INIT_LOG(ERR, "dev %s dpp host np init failed, ret %d", dev->device->name, ret);
+        goto free_res;
+    }
+
+    PMD_INIT_LOG(INFO, "dev %s dpp host np init ok, dtb queue %d",
+        dev->device->name, dpp_ctrl->queue_id);
+    g_dtb_data.queueid = dpp_ctrl->queue_id;
+    rte_free(dpp_ctrl);
+    return 0;
+
+free_res:
+    dtb_data_res_free(hw);
+    rte_free(dpp_ctrl);
+    return -ret;
+}
+
+static int32_t dpp_res_uni_init(uint32_t type)
+{
+    uint32_t ret = 0;
+    uint32_t dev_id = 0;
+    DPP_APT_HASH_RES_INIT_T HashResInit = {0};
+    DPP_APT_ERAM_RES_INIT_T EramResInit = {0};
+    DPP_APT_STAT_RES_INIT_T StatResInit = {0};
+
+    memset(&HashResInit, 0x0, sizeof(DPP_APT_HASH_RES_INIT_T));
+    memset(&EramResInit, 0x0, sizeof(DPP_APT_ERAM_RES_INIT_T));
+    memset(&StatResInit, 0x0, sizeof(DPP_APT_STAT_RES_INIT_T));
+
+    ret = dpp_apt_hash_res_get(type, &HashResInit);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s hash_res_get failed!", __func__);
+        return -1;
+    }
+    ret = dpp_apt_eram_res_get(type, &EramResInit);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s eram_res_get failed!", __func__);
+        return -1;
+    }
+    ret = dpp_apt_stat_res_get(type, &StatResInit);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s stat_res_get failed!", __func__);
+        return -1;
+    }
+    ret = dpp_apt_hash_global_res_init(dev_id);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s hash_global_res_init failed!", __func__);
+        return -1;
+    }
+
+    ret = dpp_apt_hash_func_res_init(dev_id, HashResInit.func_num, HashResInit.func_res);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s hash_func_res_init failed!", __func__);
+        return -1;
+    }
+
+    ret = dpp_apt_hash_bulk_res_init(dev_id, HashResInit.bulk_num, HashResInit.bulk_res);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s hash_bulk_res_init failed!", __func__);
+        return -1;
+    }
+    ret = dpp_apt_hash_tbl_res_init(dev_id, HashResInit.tbl_num, HashResInit.tbl_res);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s hash_tbl_res_init failed!", __func__);
+        return -1;
+    }
+    ret = dpp_apt_eram_res_init(dev_id, EramResInit.tbl_num, EramResInit.eram_res);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s eram_res_init failed!", __func__);
+        return -1;
+    }
+    ret = dpp_stat_ppu_eram_baddr_set(dev_id, StatResInit.eram_baddr);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s stat_ppu_eram_baddr_set failed!", __func__);
+        return -1;
+    }
+    ret = dpp_stat_ppu_eram_depth_set(dev_id, StatResInit.eram_depth); /* unit: 128bits */
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s stat_ppu_eram_depth_set failed!", __func__);
+        return -1;
+    }
+    ret = dpp_se_cmmu_smmu1_cfg_set(dev_id, StatResInit.ddr_baddr);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s dpp_se_cmmu_smmu1_cfg_set failed!", __func__);
+        return -1;
+    }
+    ret = dpp_stat_ppu_ddr_baddr_set(dev_id, StatResInit.ppu_ddr_offset); /* unit: 128bits */
+    if (ret) {
+        PMD_DRV_LOG(ERR, "%s stat_ppu_ddr_baddr_set failed!", __func__);
+        return -1;
+    }
+
+    return 0;
+}
+
+static inline int npsdk_apt_res_init(struct rte_eth_dev *dev __rte_unused)
+{
+    int32_t ret = 0;
+
+    ret = dpp_res_uni_init(SE_NIC_RES_TYPE);
+    if (ret) {
+        PMD_INIT_LOG(ERR, "init stand dpp res failed");
+        return -1;
+    }
+
+    return ret;
+}
+static int zxdh_np_init(struct rte_eth_dev *eth_dev)
+{
+    uint32_t ret = 0;
+    struct zxdh_hw *hw = eth_dev->data->dev_private;
+
+    if ((zxdh_shared_data != NULL) && zxdh_shared_data->npsdk_init_done) {
+        g_dtb_data.dev_refcnt++;
+        zxdh_tbl_entry_offline_destroy(hw);
+        PMD_DRV_LOG(DEBUG, "no need to init dtb, dtb channel %d devref %d",
+                g_dtb_data.queueid, g_dtb_data.dev_refcnt);
+        return 0;
+    }
+
+    if (hw->is_pf) {
+        ret = npsdk_dtb_res_init(eth_dev);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "dpp dtb init failed, ret:%d", ret);
+            return -ret;
+        }
+
+        ret = npsdk_apt_res_init(eth_dev);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "dpp apt init failed, ret:%d", ret);
+            return -ret;
+        }
+    }
+    if (zxdh_shared_data != NULL)
+        zxdh_shared_data->npsdk_init_done = 1;
+
+    return 0;
+}
+
+static void zxdh_priv_res_free(struct zxdh_hw *priv)
+{
+    rte_free(priv->vlan_fiter);
+    priv->vlan_fiter = NULL;
+    rte_free(priv->vfinfo);
+    priv->vfinfo = NULL;
+}
+
+static int zxdh_tbl_entry_destroy(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint32_t sdt_no;
+    int ret = 0;
+
+    if (!g_dtb_data.init_done)
+        return ret;
+
+    if (hw->is_pf) {
+        sdt_no = MK_SDT_NO(L2_ENTRY, hw->hash_search_index);
+        ret = dpp_dtb_hash_online_delete(0, g_dtb_data.queueid, sdt_no);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "%s dpp_dtb_hash_online_delete sdt_no %d failed",
+                dev->data->name, sdt_no);
+            return -1;
+        }
+
+        sdt_no = MK_SDT_NO(MC, hw->hash_search_index);
+        ret = dpp_dtb_hash_online_delete(0, g_dtb_data.queueid, sdt_no);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "%s dpp_dtb_hash_online_delete sdt_no %d failed",
+                dev->data->name, sdt_no);
+            return -1;
+        }
+    }
+    return ret;
+}
+
+static void zxdh_np_destroy(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    int ret;
+
+    ret = zxdh_tbl_entry_destroy(dev);
+    if (ret)
+        return;
+
+    if ((!g_dtb_data.init_done) && (!g_dtb_data.dev_refcnt))
+        return;
+
+    if (--g_dtb_data.dev_refcnt == 0)
+        dtb_data_res_free(hw);
+
+    PMD_DRV_LOG(DEBUG, "g_dtb_data dev_refcnt %d", g_dtb_data.dev_refcnt);
+}
+
+static int32_t zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+    int32_t ret;
+
+    eth_dev->dev_ops = &zxdh_eth_dev_ops;
+
+    /*
+     * Primary process does the whole initialization; for secondary
+     * processes, we just select the same Rx and Tx function as primary.
+     */
+    struct zxdh_hw *hw = eth_dev->data->dev_private;
+
+    if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+        VTPCI_OPS(hw) = &zxdh_modern_ops;
+        set_rxtx_funcs(eth_dev);
+        return 0;
+    }
+    /* Allocate memory for storing MAC addresses */
+    eth_dev->data->mac_addrs = rte_zmalloc("zxdh_mac",
+            ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
+    if (eth_dev->data->mac_addrs == NULL) {
+        PMD_INIT_LOG(ERR, "Failed to allocate %d bytes to store MAC addresses",
+                ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
+        return -ENOMEM;
+    }
+    memset(hw, 0, sizeof(*hw));
+    ret = zxdh_dev_devargs_parse(eth_dev->device->devargs, hw);
+    if (ret < 0) {
+        PMD_INIT_LOG(ERR, "dev args parse failed");
+        return -EINVAL;
+    }
+
+    hw->bar_addr[0] = (uint64_t)pci_dev->mem_resource[0].addr;
+    if (hw->bar_addr[0] == 0) {
+        PMD_INIT_LOG(ERR, "Bad mem resource.");
+        return -EIO;
+    }
+    hw->device_id = pci_dev->id.device_id;
+    hw->port_id = eth_dev->data->port_id;
+    hw->eth_dev = eth_dev;
+    hw->speed = RTE_ETH_SPEED_NUM_UNKNOWN;
+    hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
+    hw->is_pf = 0;
+
+    rte_spinlock_init(&hw->state_lock);
+    if (pci_dev->id.device_id == ZXDH_E310_PF_DEVICEID ||
+        pci_dev->id.device_id == ZXDH_E312_PF_DEVICEID) {
+        hw->is_pf = 1;
+        hw->pfinfo.vf_nums = pci_dev->max_vfs;
+    }
+
+    /* reset device and get dev config */
+    ret = zxdh_init_once(eth_dev);
+    if (ret != 0)
+        goto err_zxdh_init;
+
+    ret = zxdh_init_device(eth_dev);
+    if (ret < 0)
+        goto err_zxdh_init;
+
+    ret = zxdh_np_init(eth_dev);
+    if (ret)
+        goto err_zxdh_init;
+
+    zxdh_priv_res_init(hw);
+    zxdh_sriovinfo_init(hw);
+    zxdh_msg_cb_reg(hw);
+    zxdh_configure_intr(eth_dev);
+    return 0;
+
+err_zxdh_init:
+    zxdh_intr_release(eth_dev);
+    zxdh_np_destroy(eth_dev);
+    zxdh_bar_msg_chan_exit();
+    zxdh_priv_res_free(hw);
+    rte_free(eth_dev->data->mac_addrs);
+    eth_dev->data->mac_addrs = NULL;
+    return ret;
+}
+
+int32_t zxdh_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+            struct rte_pci_device *pci_dev)
+{
+    return rte_eth_dev_pci_generic_probe(pci_dev,
+                        sizeof(struct zxdh_hw),
+                        zxdh_eth_dev_init);
+}
+
+
+static int32_t zxdh_eth_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
+{
+    if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+        return 0;
+    /* todo: provided later
+     * zxdh_dev_close(eth_dev);
+     */
+    return 0;
+}
+
+int32_t zxdh_eth_pci_remove(struct rte_pci_device *pci_dev)
+{
+    int32_t ret = rte_eth_dev_pci_generic_remove(pci_dev, zxdh_eth_dev_uninit);
+
+    if (ret == -ENODEV) { /* Port has already been released by close. */
+        ret = 0;
+    }
+    return ret;
+}
+static int32_t zxdh_eth_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)<br />+{<br />+    if (rte_eal_process_type() == RTE_PROC_SECONDARY)<br />+        return 0;<br />+    /* TODO later:<br />+     * zxdh_dev_close(eth_dev);<br />+     */<br />+    return 0;<br />+}<br />+<br />+int32_t zxdh_eth_pci_remove(struct rte_pci_device *pci_dev)<br />+{<br />+    int32_t ret = rte_eth_dev_pci_generic_remove(pci_dev, zxdh_eth_dev_uninit);<br />+<br />+    if (ret == -ENODEV) { /* Port has already been released by close. */<br />+        ret = 0;<br />+    }<br />+    return ret;<br />+}<br />+<br />+static const struct rte_pci_id pci_id_zxdh_map[] = {<br />+    {RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_E310_PF_DEVICEID)},<br />+    {RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_E310_VF_DEVICEID)},<br />+    {RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_E312_PF_DEVICEID)},<br />+    {RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_E312_VF_DEVICEID)},<br />+    {.vendor_id = 0, /* sentinel */ },<br />+};<br />+static struct rte_pci_driver zxdh_pmd = {<br />+    .driver = {.name = "net_zxdh", },<br />+    .id_table = pci_id_zxdh_map,<br />+    .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,<br />+    .probe = zxdh_eth_pci_probe,<br />+    .remove = zxdh_eth_pci_remove,<br />+};<br />+<br />+RTE_PMD_REGISTER_PCI(net_zxdh, zxdh_pmd);<br />+RTE_PMD_REGISTER_PCI_TABLE(net_zxdh, pci_id_zxdh_map);<br />+RTE_PMD_REGISTER_KMOD_DEP(net_zxdh, "* vfio-pci");<br />+RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_init, init, NOTICE);<br />+RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_driver, driver, NOTICE);<br />+RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_rx, rx, DEBUG);<br />+RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_tx, tx, DEBUG);<br />+RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_msg, msg, DEBUG);<br />+RTE_PMD_REGISTER_PARAM_STRING(net_zxdh,<br />+    "q_depth=<int>");<br />+<br />diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h<br />new file mode 100644<br />index 0000000000..6683ec5edc<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_ethdev.h<br />@@ -0,0 +1,202 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2024 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZXDH_ETHDEV_H_<br />+#define _ZXDH_ETHDEV_H_<br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+#include <stdint.h> <br />+#include "ethdev_pci.h" <br />+<br />+extern struct zxdh_dtb_shared_data g_dtb_data;<br />+#define PF_PCIE_ID(pcie_id)         ((pcie_id & 0xff00) | 1 << 11)<br />+#define VF_PCIE_ID(pcie_id, vf_idx) ((pcie_id & 0xff00) | (1 << 11) | (vf_idx & 0xff))<br />+<br />+#define ZXDH_QUEUES_NUM_MAX          256<br />+<br />+/* ZXDH PCI vendor/device ID. */<br />+#define PCI_VENDOR_ID_ZTE        0x1cf2<br />+<br />+#define ZXDH_E310_PF_DEVICEID     0x8061<br />+#define ZXDH_E310_VF_DEVICEID     0x8062<br />+#define ZXDH_E312_PF_DEVICEID     0x8049<br />+#define ZXDH_E312_VF_DEVICEID     0x8060<br />+<br />+#define ZXDH_MAX_UC_MAC_ADDRS  32<br />+#define ZXDH_MAX_MC_MAC_ADDRS  32<br />+#define ZXDH_MAX_MAC_ADDRS     (ZXDH_MAX_UC_MAC_ADDRS + ZXDH_MAX_MC_MAC_ADDRS)<br />+<br />+/* BAR definitions */<br />+#define ZXDH_NUM_BARS    2<br />+#define ZXDH_BAR0_INDEX  0<br />+<br />+#define ZXDH_MIN_QUEUE_DEPTH 1024<br />+#define ZXDH_MAX_QUEUE_DEPTH 32768<br />+<br />+#define ZXDH_MAX_VF 256<br />+<br />+#define ZXDH_TBL_ERAM_DUMP_SIZE  (4 * 1024 * 1024)<br />+#define ZXDH_TBL_ZCAM_DUMP_SIZE  (5 * 1024 * 1024)<br />+<br />+#define INVALID_DTBQUE  0xFFFF<br />+#define ZXDH_MAX_BASE_DTB_TABLE_COUNT 30<br />+#define ZXDH_DTB_TABLE_CONF_SIZE  (32 * (16 + 16 * 1024))<br />+#define ZXDH_DTB_TABLE_DUMP_SIZE  (32 * (16 + 16 * 1024))<br />+<br />+/*<br />+ * Process dev config changed interrupt. Call the callback<br />+ * if the link state changed, and generate a gratuitous RARP packet if<br />+ * the status indicates an ANNOUNCE.<br />+ */<br />
+#define ZXDH_NET_S_LINK_UP   1 /* Link is up */<br />+#define ZXDH_NET_S_ANNOUNCE  2 /* Announcement is needed */<br />+<br />+struct pfinfo {<br />+    uint16_t pcieid;<br />+    uint16_t vf_nums;<br />+};<br />+struct vfinfo {<br />+    uint16_t vf_idx;<br />+    uint16_t pcieid;<br />+    uint16_t vport;<br />+    uint8_t flag;<br />+    uint8_t state;<br />+    uint8_t rsv;<br />+    struct rte_ether_addr mac_addr;<br />+    struct rte_ether_addr vf_mac[ZXDH_MAX_MAC_ADDRS];<br />+};<br />+<br />+union VPORT {<br />+    uint16_t vport;<br />+<br />+    __extension__<br />+    struct {<br />+        uint16_t vfid:8;<br />+        uint16_t pfid:3;<br />+        uint16_t vf_flag:1;<br />+        uint16_t epid:3;<br />+        uint16_t direct_flag:1;<br />+    };<br />+};<br />+<br />+struct chnl_context {<br />+    uint16_t valid;<br />+    uint16_t ph_chno;<br />+}; /* 4B */<br />+<br />+struct zxdh_hw {<br />+    uint64_t host_features;<br />+    uint64_t guest_features;<br />+    uint32_t max_queue_pairs;<br />+    uint16_t max_mtu;<br />+    uint8_t  vtnet_hdr_size;<br />+    uint8_t  vlan_strip;<br />+    uint8_t  use_msix;<br />+    uint8_t  intr_enabled;<br />+    uint8_t  started;<br />+    uint8_t  weak_barriers;<br />+<br />+    bool has_tx_offload;<br />+    bool has_rx_offload;<br />+<br />+    uint8_t  mac_addr[RTE_ETHER_ADDR_LEN];<br />+    uint16_t port_id;<br />+<br />+    uint32_t  notify_off_multiplier;<br />+    uint32_t  speed;  /* link speed in Mbps */<br />+    uint32_t  speed_mode;  /* link speed mode: 1x, 2x, 3x */<br />+    uint8_t   duplex;<br />+    uint8_t  *isr;<br />+    uint16_t *notify_base;<br />+<br />+    struct zxdh_pci_common_cfg *common_cfg;<br />+    struct zxdh_net_config     *dev_cfg;<br />+<br />+    uint16_t queue_num;<br />+    uint16_t device_id;<br />+<br />+    uint16_t pcie_id;<br />+    uint8_t  phyport;<br />+    bool     msg_chan_init;<br />+<br />+    uint8_t panel_id;<br />+    uint8_t rsv[1];<br />+<br />+    /**<br />+     * App management thread and virtio interrupt handler<br />+     * thread both can change device state,<br />+     * this lock is meant to avoid such a contention.<br />+     */<br />+    rte_spinlock_t     state_lock;<br />+    struct rte_mbuf  **inject_pkts;<br />+    struct virtqueue **vqs;<br />+<br />+    uint64_t bar_addr[ZXDH_NUM_BARS];<br />+    struct rte_intr_handle *risc_intr;  /* Interrupt handle of risc_v to host */<br />+    struct rte_intr_handle *dtb_intr;  /* Interrupt handle of dtb to host */<br />+<br />+    struct chnl_context channel_context[ZXDH_QUEUES_NUM_MAX];<br />+    union VPORT vport;<br />+<br />+    uint8_t is_pf         : 1,<br />+            switchoffload : 1;<br />+    uint8_t hash_search_index;<br />+    uint8_t admin_status;<br />+<br />+    uint16_t vfid;<br />+    uint16_t q_depth;<br />+    uint64_t *vlan_fiter;<br />+    struct pfinfo pfinfo;<br />+    struct vfinfo *vfinfo;<br />+    struct rte_eth_dev *eth_dev;<br />+};<br />+<br />
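Editor's note: the union VPORT above overlays the 16-bit vport id with its component bit fields so callers can decode it without shifts and masks. A minimal sketch, assuming the conventional little-endian layout (first bit-field member in the least significant bits) and a made-up example value:<br /><br />    union VPORT v = { .vport = 0x1802 }; /* hypothetical value for illustration */<br />    /* vfid = bits 0-7 (0x02), pfid = bits 8-10 (0), vf_flag = bit 11 (1), epid = bits 12-14 (1) */<br />    PMD_DRV_LOG(DEBUG, "ep %u pf %u vf %u is_vf %u", v.epid, v.pfid, v.vfid, v.vf_flag);<br /><br />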
+/* Shared data between primary and secondary processes. */<br />+struct zxdh_shared_data {<br />+    rte_spinlock_t lock; /* Global spinlock for primary and secondary processes. */<br />+    int init_done;       /* Whether primary has done initialization. */<br />+    unsigned int secondary_cnt; /* Number of secondary processes init'd. */<br />+<br />+    int npsdk_init_done;<br />+    uint32_t  dev_refcnt;<br />+    struct zxdh_dtb_shared_data *dtb_data;<br />+};<br />+<br />+struct zxdh_dtb_shared_data {<br />+    int init_done;<br />+    char name[32];<br />+    uint16_t queueid;<br />+    uint16_t vport;<br />+    uint32_t vector;<br />+    const struct rte_memzone *dtb_table_conf_mz;<br />+    const struct rte_memzone *dtb_table_dump_mz;<br />+    const struct rte_memzone *dtb_table_bulk_dump_mz[ZXDH_MAX_BASE_DTB_TABLE_COUNT];<br />+    struct rte_eth_dev *bind_device;<br />+    uint32_t dev_refcnt;<br />+};<br />+<br />+struct zxdh_dtb_bulk_dump_info {<br />+    const char *mz_name;<br />+    uint32_t mz_size;<br />+    uint32_t sdt_no;        /**< @brief sdt no 0~255 */<br />+    const struct rte_memzone *mz;<br />+};<br />+<br />+void zxdh_interrupt_handler(void *param);<br />+int32_t zxdh_dev_pause(struct rte_eth_dev *dev);<br />+int32_t zxdh_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts, int32_t nb_pkts);<br />+void zxdh_notify_peers(struct rte_eth_dev *dev);<br />+<br />+int32_t zxdh_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,<br />+            struct rte_pci_device *pci_dev);<br />+int32_t zxdh_eth_pci_remove(struct rte_pci_device *pci_dev);<br />+<br />+#ifdef __cplusplus<br />+}<br />+#endif<br />+<br />+#endif /* _ZXDH_ETHDEV_H_ */<br />diff --git a/drivers/net/zxdh/zxdh_logs.h b/drivers/net/zxdh/zxdh_logs.h<br />new file mode 100644<br />index 0000000000..fb9b2d452f<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_logs.h<br />@@ -0,0 +1,38 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2024 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZXDH_LOGS_H_<br />+#define _ZXDH_LOGS_H_<br />+<br />+#include <rte_log.h> <br />+<br />+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")<br />+<br />+extern int32_t zxdh_logtype_init;<br />+#define PMD_INIT_LOG(level, fmt, args...) \<br />+    rte_log(RTE_LOG_ ## level, zxdh_logtype_init, \<br />+    "offload_zxdh %s(): " fmt "\n", __func__, ## args)<br />+<br />+extern int32_t zxdh_logtype_driver;<br />+#define PMD_DRV_LOG(level, fmt, args...) \<br />+    rte_log(RTE_LOG_ ## level, zxdh_logtype_driver, \<br />+    "offload_zxdh %s(): " fmt "\n", __func__, ## args)<br />+<br />+extern int zxdh_logtype_rx;<br />+#define PMD_RX_LOG(level, fmt, args...) \<br />+    rte_log(RTE_LOG_ ## level, zxdh_logtype_rx, \<br />+    "offload_zxdh %s(): " fmt "\n", __func__, ## args)<br />+<br />+extern int zxdh_logtype_tx;<br />+#define PMD_TX_LOG(level, fmt, args...) \<br />+    rte_log(RTE_LOG_ ## level, zxdh_logtype_tx, \<br />+    "offload_zxdh %s(): " fmt "\n", __func__, ## args)<br />+<br />+extern int32_t zxdh_logtype_msg;<br />+#define PMD_MSG_LOG(level, fmt, args...) 
\<br />+    rte_log(RTE_LOG_ ## level, zxdh_logtype_msg, \<br />+    "offload_zxdh %s(): " fmt "\n", __func__, ## args)<br />+<br />+#endif /* _ZXDH_LOGS_H_ */<br />+<br />diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c<br />new file mode 100644<br />index 0000000000..e625cbea82<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_msg.c<br />@@ -0,0 +1,1177 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#include <stdbool.h> <br />+<br />+#include <rte_common.h> <br />+#include <rte_memcpy.h> <br />+#include <pthread.h> <br />+#include <rte_cycles.h> <br />+#include <inttypes.h> <br />+#include <rte_malloc.h> <br />+<br />+#include "zxdh_logs.h" <br />+#include "zxdh_msg.h" <br />+<br />+#define REPS_INFO_FLAG_USABLE  0x00<br />+#define REPS_INFO_FLAG_USED    0xa0<br />+<br />+#define BDF_ECAM(bus, devid, func)   (((bus & 0xff) << 8) | (func & 0x07) | ((devid & 0x1f) << 3))<br />+<br />+/**<br />+ * common.ko will work in 5 scenarios<br />+ * 1: SCENE_HOST_IN_DPU  : host in DPU card<br />+ * 2: SCENE_ZF_IN_DPU    : zf   in DPU card<br />+ * 3: SCENE_NIC_WITH_DDR : inic with DDR<br />+ * 4: SCENE_NIC_NO_DDR   : inic without DDR<br />+ * 5: SCENE_STD_NIC      : std card<br />+ */<br />+#ifdef SCENE_HOST_IN_DPU<br />+#define BAR_PF_NUM             31<br />+#define BAR_VF_NUM             1024<br />+#define BAR_INDEX_PF_TO_VF     1<br />+#define BAR_INDEX_MPF_TO_MPF   1<br />+#define BAR_INDEX_MPF_TO_PFVF  0xff<br />+#define BAR_INDEX_PFVF_TO_MPF  0xff<br />+#endif<br />+<br />+#ifdef SCENE_ZF_IN_DPU<br />+#define BAR_PF_NUM             7<br />+#define BAR_VF_NUM             128<br />+#define BAR_INDEX_PF_TO_VF     0xff<br />+#define BAR_INDEX_MPF_TO_MPF   1<br />+#define BAR_INDEX_MPF_TO_PFVF  0xff<br />+#define BAR_INDEX_PFVF_TO_MPF  0xff<br />+#endif<br />+<br />+#ifdef SCENE_NIC_WITH_DDR<br />+#define BAR_PF_NUM             31<br />+#define BAR_VF_NUM             1024<br />+#define BAR_INDEX_PF_TO_VF     1<br />+#define BAR_INDEX_MPF_TO_MPF   0xff<br />+#define BAR_INDEX_MPF_TO_PFVF  0xff<br />+#define BAR_INDEX_PFVF_TO_MPF  0xff<br />+#endif<br />+<br />+#ifdef SCENE_NIC_NO_DDR<br />+#define BAR_PF_NUM             31<br />+#define BAR_VF_NUM             1024<br />+#define BAR_INDEX_PF_TO_VF     1<br />+#define BAR_INDEX_MPF_TO_MPF   0xff<br />+#define BAR_INDEX_MPF_TO_PFVF  1<br />+#define BAR_INDEX_PFVF_TO_MPF  2<br />+#endif<br />+<br />+#ifdef SCENE_STD_NIC<br />+#define BAR_PF_NUM             7<br />+#define BAR_VF_NUM             256<br />+#define BAR_INDEX_PF_TO_VF     1<br />+#define BAR_INDEX_MPF_TO_MPF   0xff<br />+#define BAR_INDEX_MPF_TO_PFVF  1<br />+#define BAR_INDEX_PFVF_TO_MPF  2<br />+#endif<br />+<br />+#define SCENE_TEST<br />+#ifdef SCENE_TEST<br />+#define BAR_PF_NUM             7<br />+#define BAR_VF_NUM             256<br />+#define BAR_INDEX_PF_TO_VF     0<br />+#define BAR_INDEX_MPF_TO_MPF   0xff<br />+#define BAR_INDEX_MPF_TO_PFVF  0<br />+#define BAR_INDEX_PFVF_TO_MPF  0<br />+#endif<br />+<br />+/**<br />+ * 0: left 2K,    1: right 2K<br />+ * src/dst: TO_RISC, TO_PFVF, TO_MPF<br />+ * MPF:       0         0       0<br />+ * PF:        0         0       1<br />+ * VF:        0         1       1<br />+ **/<br />+#define BAR_MSG_SRC_NUM   3<br />+#define BAR_MSG_SRC_MPF   0<br />+#define BAR_MSG_SRC_PF    1<br />+#define BAR_MSG_SRC_VF    2<br />+#define BAR_MSG_SRC_ERR   0xff<br />+<br />+#define BAR_MSG_DST_NUM   3<br />+#define BAR_MSG_DST_RISC  0<br />+#define 
BAR_MSG_DST_MPF   2<br />+#define BAR_MSG_DST_PFVF  1<br />+#define BAR_MSG_DST_ERR   0xff<br />+<br />+#define BAR_SUBCHAN_INDEX_SEND  0<br />+#define BAR_SUBCHAN_INDEX_RECV  1<br />+#define BAR_SEQID_NUM_MAX  256<br />+<br />+#define BAR_ALIGN_WORD_MASK  0xfffffffc<br />+#define BAR_MSG_VALID_MASK    1<br />+#define BAR_MSG_VALID_OFFSET  0<br />+<br />+#define BAR_MSG_CHAN_USABLE  0<br />+#define BAR_MSG_CHAN_USED    1<br />+<br />+#define LOCK_TYPE_HARD  (1)<br />+#define LOCK_TYPE_SOFT  (0)<br />+#define BAR_INDEX_TO_RISC  0<br />+<br />+#define BAR_MSG_POL_MASK    (0x10)<br />+#define BAR_MSG_POL_OFFSET  (4)<br />+<br />+#define REPS_HEADER_LEN_OFFSET      1<br />+#define REPS_HEADER_PAYLOAD_OFFSET  4<br />+#define REPS_HEADER_REPLYED         0xff<br />+<br />+#define READ_CHECK  1<br />+<br />+uint8_t subchan_id_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = {<br />+    {BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND},<br />+    {BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_RECV},<br />+    {BAR_SUBCHAN_INDEX_SEND, BAR_SUBCHAN_INDEX_RECV, BAR_SUBCHAN_INDEX_RECV}<br />+};<br />+<br />+uint8_t chan_id_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = {<br />+    {BAR_INDEX_TO_RISC, BAR_INDEX_MPF_TO_PFVF, BAR_INDEX_MPF_TO_MPF},<br />+    {BAR_INDEX_TO_RISC, BAR_INDEX_PF_TO_VF,    BAR_INDEX_PFVF_TO_MPF},<br />+    {BAR_INDEX_TO_RISC, BAR_INDEX_PF_TO_VF,    BAR_INDEX_PFVF_TO_MPF}<br />+};<br />+<br />+uint8_t lock_type_tbl[BAR_MSG_SRC_NUM][BAR_MSG_DST_NUM] = {<br />+    {LOCK_TYPE_HARD, LOCK_TYPE_HARD, LOCK_TYPE_HARD},<br />+    {LOCK_TYPE_SOFT, LOCK_TYPE_SOFT, LOCK_TYPE_HARD},<br />+    {LOCK_TYPE_HARD, LOCK_TYPE_HARD, LOCK_TYPE_HARD}<br />+};<br />+<br />+#define PCIEID_IS_PF_MASK   (0x0800)<br />+#define PCIEID_PF_IDX_MASK  (0x0700)<br />+#define PCIEID_VF_IDX_MASK  (0x00ff)<br />+#define PCIEID_EP_IDX_MASK  (0x7000)<br />+/* PCIEID bit field offset */<br />+#define PCIEID_PF_IDX_OFFSET  (8)<br />+#define PCIEID_EP_IDX_OFFSET  (12)<br />+<br />+#define MAX_EP_NUM     (4)<br />+#define PF_NUM_PER_EP  (8)<br />+#define VF_NUM_PER_PF  (32)<br />+<br />+#define MULTIPLY_BY_8(x)    ((x) << 3)<br />+#define MULTIPLY_BY_32(x)   ((x) << 5)<br />+#define MULTIPLY_BY_256(x)  ((x) << 8)<br />+<br />+#define MAX_HARD_SPINLOCK_NUM        (511)<br />+#define MAX_HARD_SPINLOCK_ASK_TIMES  (1000)<br />+#define SPINLOCK_POLLING_SPAN_US     (100)<br />+<br />+#define LOCK_MASTER_ID_MASK                (0x8000)<br />+/* bar offset */<br />+#define BAR0_CHAN_RISC_OFFSET              (0x2000)<br />+#define BAR0_CHAN_PFVF_OFFSET              (0x3000)<br />+#define BAR0_SPINLOCK_OFFSET               (0x4000)<br />+#define FW_SHRD_OFFSET                     (0x5000)<br />+#define FW_SHRD_INNER_HW_LABEL_PAT         (0x800)<br />+#define HW_LABEL_OFFSET                    (FW_SHRD_OFFSET + FW_SHRD_INNER_HW_LABEL_PAT)<br />+<br />+#define CHAN_RISC_SPINLOCK_OFFSET          (BAR0_SPINLOCK_OFFSET - BAR0_CHAN_RISC_OFFSET)<br />+#define CHAN_PFVF_SPINLOCK_OFFSET          (BAR0_SPINLOCK_OFFSET - BAR0_CHAN_PFVF_OFFSET)<br />+#define CHAN_RISC_LABEL_OFFSET             (HW_LABEL_OFFSET - BAR0_CHAN_RISC_OFFSET)<br />+#define CHAN_PFVF_LABEL_OFFSET             (HW_LABEL_OFFSET - BAR0_CHAN_PFVF_OFFSET)<br />+<br />+#define RSC_TBL_CONTENT_LEN_MAX  (257 * 2)<br />+#define TBL_MSG_PRO_SUCCESS  0xaa<br />+<br />+zxdh_bar_chan_msg_recv_callback msg_recv_func_tbl[BAR_MSG_MODULE_NUM];<br />+<br />+struct dev_stat {<br />+    bool is_mpf_scanned;<br />+    bool is_res_init;<br />+    int16_t dev_cnt; /* 
probe cnt */<br />+};<br />+struct dev_stat g_dev_stat = {0};<br />+<br />+static uint8_t __bar_msg_src_index_trans(uint8_t src)<br />+{<br />+    uint8_t src_index = 0;<br />+<br />+    switch (src) {<br />+    case MSG_CHAN_END_MPF:<br />+        src_index = BAR_MSG_SRC_MPF;<br />+        break;<br />+    case MSG_CHAN_END_PF:<br />+        src_index = BAR_MSG_SRC_PF;<br />+        break;<br />+    case MSG_CHAN_END_VF:<br />+        src_index = BAR_MSG_SRC_VF;<br />+        break;<br />+    default:<br />+        src_index = BAR_MSG_SRC_ERR;<br />+        break;<br />+    }<br />+    return src_index;<br />+}<br />+<br />+static uint8_t __bar_msg_dst_index_trans(uint8_t dst)<br />+{<br />+    uint8_t dst_index = 0;<br />+<br />+    switch (dst) {<br />+    case MSG_CHAN_END_MPF:<br />+        dst_index = BAR_MSG_DST_MPF;<br />+        break;<br />+    case MSG_CHAN_END_PF:<br />+        dst_index = BAR_MSG_DST_PFVF;<br />+        break;<br />+    case MSG_CHAN_END_VF:<br />+        dst_index = BAR_MSG_DST_PFVF;<br />+        break;<br />+    case MSG_CHAN_END_RISC:<br />+        dst_index = BAR_MSG_DST_RISC;<br />+        break;<br />+    default:<br />+        dst_index = BAR_MSG_DST_ERR;<br />+        break;<br />+    }<br />+    return dst_index;<br />+}<br />+<br />+struct seqid_item {<br />+    void *reps_addr;<br />+    uint16_t id;<br />+    uint16_t buffer_len;<br />+    uint16_t flag;<br />+};<br />+<br />+struct seqid_ring {<br />+    uint16_t cur_id;<br />+    pthread_spinlock_t lock;<br />+    struct seqid_item reps_info_tbl[BAR_SEQID_NUM_MAX];<br />+};<br />+struct seqid_ring g_seqid_ring = {0};<br />+<br />+static int __bar_chan_msgid_allocate(uint16_t *msgid)<br />+{<br />+    struct seqid_item *seqid_reps_info = NULL;<br />+<br />+    pthread_spin_lock(&g_seqid_ring.lock);<br />+    uint16_t g_id = g_seqid_ring.cur_id;<br />+    uint16_t count = 0;<br />+<br />+    do {<br />+        count++;<br />+        ++g_id;<br />+        g_id %= BAR_SEQID_NUM_MAX;<br />+        seqid_reps_info = &g_seqid_ring.reps_info_tbl[g_id];<br />+    } while ((seqid_reps_info->flag != REPS_INFO_FLAG_USABLE) && (count < BAR_SEQID_NUM_MAX));<br />+    int rc;<br />+<br />+    if (count >= BAR_SEQID_NUM_MAX) {<br />+        rc = -1;<br />+        goto out;<br />+    }<br />+    seqid_reps_info->flag = REPS_INFO_FLAG_USED;<br />+    g_seqid_ring.cur_id = g_id;<br />+    *msgid = g_id;<br />+    rc = BAR_MSG_OK;<br />+<br />+out:<br />+    pthread_spin_unlock(&g_seqid_ring.lock);<br />+    return rc;<br />+}<br />+<br />+static uint16_t __bar_chan_save_recv_info(struct zxdh_msg_recviver_mem *result, uint16_t *msg_id)<br />+{<br />+    int ret = __bar_chan_msgid_allocate(msg_id);<br />+<br />+    if (ret != BAR_MSG_OK)<br />+        return BAR_MSG_ERR_MSGID;<br />+<br />+    PMD_MSG_LOG(DEBUG, "allocate msg_id: %u", *msg_id);<br />+    struct seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[*msg_id];<br />+<br />+    reps_info->reps_addr = result->recv_buffer;<br />+    reps_info->buffer_len = result->buffer_len;<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static void __bar_chan_msgid_free(uint16_t msg_id)<br />+{<br />+    struct seqid_item *seqid_reps_info = &g_seqid_ring.reps_info_tbl[msg_id];<br />+<br />+    pthread_spin_lock(&g_seqid_ring.lock);<br />+    seqid_reps_info->flag = REPS_INFO_FLAG_USABLE;<br />+    PMD_MSG_LOG(DEBUG, "free msg_id: %u", msg_id);<br />+    pthread_spin_unlock(&g_seqid_ring.lock);<br />+}<br />+<br />
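Editor's note: the helpers above manage the ring of BAR_SEQID_NUM_MAX (256) sequence ids. Each in-flight synchronous request reserves one slot, the slot remembers the caller's reply buffer, and it returns to the USABLE state once the reply has been consumed. The send path in zxdh_bar_chan_sync_msg_send() below brackets every message this way (a sketch of the calls already present there):<br /><br />    uint16_t seq_id;<br /><br />    if (__bar_chan_save_recv_info(result, &seq_id) != BAR_MSG_OK) /* reserve a slot */<br />        return BAR_MSG_ERR_MSGID;<br />    /* ... write the message, poll for completion, copy the reply out ... */<br />    __bar_chan_msgid_free(seq_id); /* slot becomes USABLE again */<br /><br />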
+static uint64_t subchan_addr_cal(uint64_t virt_addr, uint8_t chan_id, uint8_t subchan_id)<br />+{<br />+    return virt_addr + (2 * chan_id + subchan_id) * BAR_MSG_ADDR_CHAN_INTERVAL;<br />+}<br />+<br />+static uint16_t __bar_chan_subchan_addr_get(struct zxdh_pci_bar_msg *in, uint64_t *subchan_addr)<br />+{<br />+    uint8_t src_index = __bar_msg_src_index_trans(in->src);<br />+    uint8_t dst_index = __bar_msg_dst_index_trans(in->dst);<br />+    uint16_t chan_id = chan_id_tbl[src_index][dst_index];<br />+    uint16_t subchan_id = subchan_id_tbl[src_index][dst_index];<br />+<br />+    *subchan_addr = subchan_addr_cal(in->virt_addr, chan_id, subchan_id);<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static int __bar_chan_reg_write(uint64_t subchan_addr, uint32_t offset, uint32_t data)<br />+{<br />+    uint32_t align_offset = (offset & BAR_ALIGN_WORD_MASK);<br />+<br />+    if (unlikely(align_offset >= BAR_MSG_ADDR_CHAN_INTERVAL)) {<br />+        PMD_MSG_LOG(ERR, "subchan addr: %" PRIu64 " offset: %" PRIu32,<br />+            subchan_addr, align_offset);<br />+        return -1;<br />+    }<br />+    *(uint32_t *)(subchan_addr + align_offset) = data;<br />+    return 0;<br />+}<br />+<br />+static int __bar_chan_reg_read(uint64_t subchan_addr, uint32_t offset, uint32_t *pdata)<br />+{<br />+    uint32_t align_offset = (offset & BAR_ALIGN_WORD_MASK);<br />+<br />+    if (unlikely(align_offset >= BAR_MSG_ADDR_CHAN_INTERVAL)) {<br />+        PMD_MSG_LOG(ERR, "subchan addr: %" PRIu64 " offset: %" PRIu32,<br />+            subchan_addr, align_offset);<br />+        return -1;<br />+    }<br />+    *pdata = *(uint32_t *)(subchan_addr + align_offset);<br />+    return 0;<br />+}<br />+<br />+static uint16_t __bar_chan_msg_header_set(uint64_t subchan_addr, struct bar_msg_header *msg_header)<br />+{<br />+    uint32_t *data = (uint32_t *)msg_header;<br />+    uint16_t idx;<br />+<br />+    for (idx = 0; idx < (BAR_MSG_PLAYLOAD_OFFSET >> 2); idx++)<br />+        __bar_chan_reg_write(subchan_addr, idx * 4, *(data + idx));<br />+<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static uint16_t __bar_chan_msg_header_get(uint64_t subchan_addr, struct bar_msg_header *msg_header)<br />+{<br />+    uint32_t *data = (uint32_t *)msg_header;<br />+    uint16_t idx;<br />+<br />+    for (idx = 0; idx < (BAR_MSG_PLAYLOAD_OFFSET >> 2); idx++)<br />+        __bar_chan_reg_read(subchan_addr, idx * 4, data + idx);<br />+<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static uint16_t __bar_chan_msg_payload_set(uint64_t subchan_addr, uint8_t *msg, uint16_t len)<br />+{<br />+    uint32_t *data = (uint32_t *)msg;<br />+    uint32_t count = (len >> 2); /* 4B unit */<br />+    uint32_t ix;<br />+<br />+    for (ix = 0; ix < count; ix++)<br />+        __bar_chan_reg_write(subchan_addr, 4 * ix + BAR_MSG_PLAYLOAD_OFFSET, *(data + ix));<br />+<br />+    /* not 4B align part */<br />+    uint32_t remain = (len & 0x3);<br />+<br />+    if (remain) {<br />+        uint32_t remain_data = 0;<br />+<br />+        for (ix = 0; ix < remain; ix++)<br />+            remain_data |= *((uint8_t *)(msg + len - remain + ix)) << (8 * ix);<br />+<br />+        __bar_chan_reg_write(subchan_addr, 4 * count +<br />+                BAR_MSG_PLAYLOAD_OFFSET, remain_data);<br />+    }<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static uint16_t __bar_chan_msg_payload_get(uint64_t subchan_addr, uint8_t *msg, uint16_t len)<br />+{<br />+    uint32_t *data = (uint32_t *)msg;<br />+    uint32_t count = (len >> 2);<br />+    uint32_t ix;<br />+<br />+    for (ix = 0; ix < count; ix++)<br />+   
     __bar_chan_reg_read(subchan_addr, 4 * ix + BAR_MSG_PLAYLOAD_OFFSET, (data + ix));<br />+<br />+    uint32_t remain = (len & 0x3);<br />+<br />+    if (remain) {<br />+        uint32_t remain_data = 0;<br />+<br />+        __bar_chan_reg_read(subchan_addr, 4 * count +<br />+                BAR_MSG_PLAYLOAD_OFFSET, &remain_data);<br />+        for (ix = 0; ix < remain; ix++)<br />+            *((uint8_t *)(msg + (len - remain + ix))) = remain_data >> (8 * ix);<br />+    }<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static uint16_t __bar_chan_msg_valid_set(uint64_t subchan_addr, uint8_t valid_label)<br />+{<br />+    uint32_t data;<br />+<br />+    __bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data);<br />+    data &= (~BAR_MSG_VALID_MASK);<br />+    data |= (uint32_t)valid_label;<br />+    __bar_chan_reg_write(subchan_addr, BAR_MSG_VALID_OFFSET, data);<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static uint16_t __bar_msg_valid_stat_get(uint64_t subchan_addr)<br />+{<br />+    uint32_t data;<br />+<br />+    __bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data);<br />+    if (BAR_MSG_CHAN_USABLE == (data & BAR_MSG_VALID_MASK))<br />+        return BAR_MSG_CHAN_USABLE;<br />+<br />+    return BAR_MSG_CHAN_USED;<br />+}<br />+<br />+#if READ_CHECK<br />+static uint8_t temp_msg[BAR_MSG_ADDR_CHAN_INTERVAL];<br />+#endif<br />+static uint16_t __bar_chan_msg_send(uint64_t subchan_addr, void *payload_addr,<br />+                    uint16_t payload_len, struct bar_msg_header *msg_header)<br />+{<br />+    __bar_chan_msg_header_set(subchan_addr, msg_header);<br />+#if READ_CHECK<br />+    __bar_chan_msg_header_get(subchan_addr, (struct bar_msg_header *)temp_msg);<br />+#endif<br />+    __bar_chan_msg_payload_set(subchan_addr, (uint8_t *)(payload_addr), payload_len);<br />+#if READ_CHECK<br />+    __bar_chan_msg_payload_get(subchan_addr, temp_msg, payload_len);<br />+#endif<br />+    __bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USED);<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static uint16_t __bar_chan_msg_poltag_set(uint64_t subchan_addr, uint8_t label)<br />+{<br />+    uint32_t data;<br />+<br />+    __bar_chan_reg_read(subchan_addr, BAR_MSG_VALID_OFFSET, &data);<br />+    data &= (~(uint32_t)BAR_MSG_POL_MASK);<br />+    data |= ((uint32_t)label << BAR_MSG_POL_OFFSET);<br />+    __bar_chan_reg_write(subchan_addr, BAR_MSG_VALID_OFFSET, data);<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static uint16_t __bar_chan_sync_msg_reps_get(uint64_t subchan_addr,<br />+                    uint64_t recv_buffer, uint16_t buffer_len)<br />+{<br />+    struct bar_msg_header msg_header = {0};<br />+    uint16_t msg_id = 0;<br />+    uint16_t msg_len = 0;<br />+<br />+    __bar_chan_msg_header_get(subchan_addr, &msg_header);<br />+    msg_id = msg_header.msg_id;<br />+    struct seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[msg_id];<br />+<br />+    if (reps_info->flag != REPS_INFO_FLAG_USED) {<br />+        PMD_MSG_LOG(ERR, "msg_id %u unused", msg_id);<br />+        return BAR_MSG_ERR_REPLY;<br />+    }<br />+    msg_len = msg_header.len;<br />+<br />+    if (msg_len > buffer_len - 4) {<br />+        PMD_MSG_LOG(ERR, "recv buffer len is: %u, but reply msg len is: %u",<br />+                buffer_len, msg_len + 4);<br />+        return BAR_MSG_ERR_REPSBUFF_LEN;<br />+    }<br />+    uint8_t *recv_msg = (uint8_t *)recv_buffer;<br />+<br />+    __bar_chan_msg_payload_get(subchan_addr, recv_msg + REPS_HEADER_PAYLOAD_OFFSET, msg_len);<br />+    *(uint16_t 
*)(recv_msg + REPS_HEADER_LEN_OFFSET) = msg_len;<br />+    *recv_msg = REPS_HEADER_REPLYED; /* set the reply's valid flag */<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static int __bar_chan_send_para_check(struct zxdh_pci_bar_msg *in,<br />+                    struct zxdh_msg_recviver_mem *result)<br />+{<br />+    if (in == NULL || result == NULL) {<br />+        PMD_MSG_LOG(ERR, "send para ERR: null para.");<br />+        return BAR_MSG_ERR_NULL_PARA;<br />+    }<br />+    uint8_t src_index = __bar_msg_src_index_trans(in->src);<br />+    uint8_t dst_index = __bar_msg_dst_index_trans(in->dst);<br />+<br />+    if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) {<br />+        PMD_MSG_LOG(ERR, "send para ERR: chan doesn't exist.");<br />+        return BAR_MSG_ERR_TYPE;<br />+    }<br />+    if (in->module_id >= BAR_MSG_MODULE_NUM) {<br />+        PMD_MSG_LOG(ERR, "send para ERR: invalid module_id: %d.", in->module_id);<br />+        return BAR_MSG_ERR_MODULE;<br />+    }<br />+    if (in->payload_addr == NULL) {<br />+        PMD_MSG_LOG(ERR, "send para ERR: null message.");<br />+        return BAR_MSG_ERR_BODY_NULL;<br />+    }<br />+    if (in->payload_len > BAR_MSG_PAYLOAD_MAX_LEN) {<br />+        PMD_MSG_LOG(ERR, "send para ERR: len %d is too long.", in->payload_len);<br />+        return BAR_MSG_ERR_LEN;<br />+    }<br />+    if (in->virt_addr == 0 || result->recv_buffer == NULL) {<br />+        PMD_MSG_LOG(ERR, "send para ERR: virt_addr or recv_buffer is NULL.");<br />+        return BAR_MSG_ERR_VIRTADDR_NULL;<br />+    }<br />+    if (result->buffer_len < REPS_HEADER_PAYLOAD_OFFSET)<br />+        PMD_MSG_LOG(ERR,<br />+            "recv buffer len: %" PRIu64 " is shorter than the minimal 4 bytes",<br />+            result->buffer_len);<br />+<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static uint16_t pcie_id_to_hard_lock(uint16_t src_pcieid, uint8_t dst)<br />+{<br />+    uint16_t lock_id = 0;<br />+    uint16_t pf_idx = (src_pcieid & PCIEID_PF_IDX_MASK) >> PCIEID_PF_IDX_OFFSET;<br />+    uint16_t ep_idx = (src_pcieid & PCIEID_EP_IDX_MASK) >> PCIEID_EP_IDX_OFFSET;<br />+<br />+    switch (dst) {<br />+    /* msg to risc */<br />+    case MSG_CHAN_END_RISC:<br />+        lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx;<br />+        break;<br />+    /* msg to pf/vf */<br />+    case MSG_CHAN_END_VF:<br />+    case MSG_CHAN_END_PF:<br />+        lock_id = MULTIPLY_BY_8(ep_idx) + pf_idx + MULTIPLY_BY_8(1 + MAX_EP_NUM);<br />+        break;<br />+    default:<br />+        lock_id = 0;<br />+        break;<br />+    }<br />+    if (lock_id >= MAX_HARD_SPINLOCK_NUM)<br />+        lock_id = 0;<br />+<br />+    return lock_id;<br />+}<br />+<br />+static uint8_t spinlock_read(uint64_t virt_lock_addr, uint32_t lock_id)<br />+{<br />+    return *(volatile uint8_t *)((uint64_t)virt_lock_addr + (uint64_t)lock_id);<br />+}<br />+<br />+static void spinlock_write(uint64_t virt_lock_addr, uint32_t lock_id, uint8_t data)<br />+{<br />+    *(volatile uint8_t *)((uint64_t)virt_lock_addr + (uint64_t)lock_id) = data;<br />+}<br />+<br />+static void label_write(uint64_t label_lock_addr, uint32_t lock_id, uint16_t value)<br />+{<br />+    *(volatile uint16_t *)(label_lock_addr + lock_id * 2) = value;<br />+}<br />+<br />
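Editor's note: the accessors above expose the hardware spinlock region of BAR0. Each lock is one byte whose read both returns the previous state and marks the lock taken (the "read to lock" semantics zxdh_spinlock_lock() below relies on), and label_write() records the owner's pcie id next to it. A sketch of the acquire/release protocol under those assumed hardware semantics:<br /><br />    if (spinlock_read(virt_addr, lock_id) == 0) { /* 0 means we now own the lock */<br />        label_write(label_addr, lock_id, master_id); /* advertise the owner */<br />        /* ... critical section: exchange one message on the channel ... */<br />        zxdh_spinlock_unlock(lock_id, virt_addr, label_addr);<br />    }<br /><br />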
+static int32_t zxdh_spinlock_lock(uint32_t virt_lock_id, uint64_t virt_addr,<br />+                    uint64_t label_addr, uint16_t master_id)<br />+{<br />+    uint32_t lock_rd_cnt = 0;<br />+<br />+    do {<br />+        /* read to lock */<br />+        uint8_t spl_val = spinlock_read(virt_addr, virt_lock_id);<br />+<br />+        if (spl_val == 0) {<br />+            label_write((uint64_t)label_addr, virt_lock_id, master_id);<br />+            break;<br />+        }<br />+        rte_delay_us_block(SPINLOCK_POLLING_SPAN_US);<br />+        lock_rd_cnt++;<br />+    } while (lock_rd_cnt < MAX_HARD_SPINLOCK_ASK_TIMES);<br />+    if (lock_rd_cnt >= MAX_HARD_SPINLOCK_ASK_TIMES)<br />+        return -1;<br />+<br />+    return 0;<br />+}<br />+<br />+static int32_t zxdh_spinlock_unlock(uint32_t virt_lock_id, uint64_t virt_addr, uint64_t label_addr)<br />+{<br />+    label_write((uint64_t)label_addr, virt_lock_id, 0);<br />+    spinlock_write(virt_addr, virt_lock_id, 0);<br />+    return 0;<br />+}<br />+<br />+int pf_recv_bar_msg(void *pay_load __rte_unused,<br />+                    uint16_t len __rte_unused,<br />+                    void *reps_buffer __rte_unused,<br />+                    uint16_t *reps_len __rte_unused,<br />+                    void *eth_dev __rte_unused)<br />+{<br />+    /* TODO: to be provided later */<br />+    return 0;<br />+}<br />+<br />+int vf_recv_bar_msg(void *pay_load __rte_unused,<br />+                    uint16_t len __rte_unused,<br />+                    void *reps_buffer __rte_unused,<br />+                    uint16_t *reps_len __rte_unused,<br />+                    void *eth_dev __rte_unused)<br />+{<br />+    /* TODO: to be provided later */<br />+    return 0;<br />+}<br />+<br />+static int bar_hard_lock(uint16_t src_pcieid, uint8_t dst, uint64_t virt_addr)<br />+{<br />+    int ret = 0;<br />+    uint16_t lockid = pcie_id_to_hard_lock(src_pcieid, dst);<br />+<br />+    PMD_MSG_LOG(DEBUG, "dev pcieid: 0x%x lock, get hardlockid: %u", src_pcieid, lockid);<br />+    if (dst == MSG_CHAN_END_RISC)<br />+        ret = zxdh_spinlock_lock(lockid, virt_addr + CHAN_RISC_SPINLOCK_OFFSET,<br />+                    virt_addr + CHAN_RISC_LABEL_OFFSET,<br />+                    src_pcieid | LOCK_MASTER_ID_MASK);<br />+    else<br />+        ret = zxdh_spinlock_lock(lockid, virt_addr + CHAN_PFVF_SPINLOCK_OFFSET,<br />+                    virt_addr + CHAN_PFVF_LABEL_OFFSET,<br />+                    src_pcieid | LOCK_MASTER_ID_MASK);<br />+<br />+    return ret;<br />+}<br />+<br />+static void bar_hard_unlock(uint16_t src_pcieid, uint8_t dst, uint64_t virt_addr)<br />+{<br />+    uint16_t lockid = pcie_id_to_hard_lock(src_pcieid, dst);<br />+<br />+    PMD_MSG_LOG(DEBUG, "dev pcieid: 0x%x unlock, get hardlockid: %u", src_pcieid, lockid);<br />+    if (dst == MSG_CHAN_END_RISC)<br />+        zxdh_spinlock_unlock(lockid, virt_addr + CHAN_RISC_SPINLOCK_OFFSET,<br />+                virt_addr + CHAN_RISC_LABEL_OFFSET);<br />+    else<br />+        zxdh_spinlock_unlock(lockid, virt_addr + CHAN_PFVF_SPINLOCK_OFFSET,<br />+                virt_addr + CHAN_PFVF_LABEL_OFFSET);<br />+}<br />+/**<br />+ * Fun: PF init hard_spinlock addr<br />+ * @pcie_id: pf's pcie_id<br />+ * @bar_base_addr:<br />+ */<br />+int bar_chan_pf_init_spinlock(uint16_t pcie_id, uint64_t bar_base_addr)<br />+{<br />+    int lock_id = pcie_id_to_hard_lock(pcie_id, MSG_CHAN_END_RISC);<br />+<br />+    zxdh_spinlock_unlock(lock_id, bar_base_addr + BAR0_SPINLOCK_OFFSET,<br />+            bar_base_addr + HW_LABEL_OFFSET);<br />+    lock_id = pcie_id_to_hard_lock(pcie_id, MSG_CHAN_END_VF);<br />+    zxdh_spinlock_unlock(lock_id, bar_base_addr + BAR0_SPINLOCK_OFFSET,<br />+            bar_base_addr + HW_LABEL_OFFSET);<br />+    return 0;<br />+}<br />+<br />+/**<br />+ * Fun: lock 
the channel<br />+ */<br />+pthread_spinlock_t chan_lock;<br />+static int bar_chan_lock(uint8_t src, uint8_t dst, uint16_t src_pcieid, uint64_t virt_addr)<br />+{<br />+    int ret = 0;<br />+    uint8_t src_index = __bar_msg_src_index_trans(src);<br />+    uint8_t dst_index = __bar_msg_dst_index_trans(dst);<br />+<br />+    if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) {<br />+        PMD_MSG_LOG(ERR, "lock ERR: chan doesn't exist.\n");<br />+        return BAR_MSG_ERR_TYPE;<br />+    }<br />+    uint16_t idx = lock_type_tbl[src_index][dst_index];<br />+<br />+    if (idx == LOCK_TYPE_SOFT)<br />+        pthread_spin_lock(&chan_lock);<br />+    else<br />+        ret = bar_hard_lock(src_pcieid, dst, virt_addr);<br />+<br />+    if (ret != 0)<br />+        PMD_MSG_LOG(ERR, "dev: 0x%x failed to lock.\n", src_pcieid);<br />+<br />+    return ret;<br />+}<br />+/**<br />+ * Fun: unlock the channel<br />+ */<br />+static int bar_chan_unlock(uint8_t src, uint8_t dst, uint16_t src_pcieid, uint64_t virt_addr)<br />+{<br />+    uint8_t src_index = __bar_msg_src_index_trans(src);<br />+    uint8_t dst_index = __bar_msg_dst_index_trans(dst);<br />+<br />+    if (src_index == BAR_MSG_SRC_ERR || dst_index == BAR_MSG_DST_ERR) {<br />+        PMD_MSG_LOG(ERR, "unlock ERR: chan doesn't exist.\n");<br />+        return BAR_MSG_ERR_TYPE;<br />+    }<br />+    uint16_t idx = lock_type_tbl[src_index][dst_index];<br />+<br />+    if (idx == LOCK_TYPE_SOFT)<br />+        pthread_spin_unlock(&chan_lock);<br />+    else<br />+        bar_hard_unlock(src_pcieid, dst, virt_addr);<br />+<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result)<br />+{<br />+    uint16_t ret = __bar_chan_send_para_check(in, result);<br />+<br />+    if (ret != BAR_MSG_OK)<br />+        goto exit;<br />+<br />+    uint16_t seq_id;<br />+<br />+    ret = __bar_chan_save_recv_info(result, &seq_id);<br />+    if (ret != BAR_MSG_OK)<br />+        goto exit;<br />+<br />+    uint64_t subchan_addr;<br />+<br />+    __bar_chan_subchan_addr_get(in, &subchan_addr);<br />+    struct bar_msg_header msg_header = {0};<br />+<br />+    msg_header.sync = BAR_CHAN_MSG_SYNC;<br />+    msg_header.emec = in->emec;<br />+    msg_header.usr  = 0;<br />+    msg_header.rsv  = 0;<br />+    msg_header.module_id  = in->module_id;<br />+    msg_header.len        = in->payload_len;<br />+    msg_header.msg_id     = seq_id;<br />+    msg_header.src_pcieid = in->src_pcieid;<br />+    msg_header.dst_pcieid = in->dst_pcieid;<br />+<br />+    ret = bar_chan_lock(in->src, in->dst, in->src_pcieid, in->virt_addr);<br />+    if (ret != BAR_MSG_OK) {<br />+        __bar_chan_msgid_free(seq_id);<br />+        goto exit;<br />+    }<br />+    __bar_chan_msg_send(subchan_addr, in->payload_addr, in->payload_len, &msg_header);<br />+    /* wait unset valid */<br />+    uint32_t time_out_cnt = 0;<br />+    uint16_t valid;<br />+<br />+    do {<br />+        rte_delay_us_block(BAR_MSG_POLLING_SPAN);<br />+        valid = __bar_msg_valid_stat_get(subchan_addr);<br />+        ++time_out_cnt;<br />+    } while ((time_out_cnt < BAR_MSG_TIMEOUT_TH) && (valid == BAR_MSG_CHAN_USED));<br />+<br />+    if ((time_out_cnt == BAR_MSG_TIMEOUT_TH) && (valid != BAR_MSG_CHAN_USABLE)) {<br />+        __bar_chan_msg_valid_set(subchan_addr, BAR_MSG_CHAN_USABLE);<br />+        __bar_chan_msg_poltag_set(subchan_addr, 0);<br />+        PMD_MSG_LOG(ERR, "BAR MSG ERR: chan type time 
out.");<br />+        ret = BAR_MSG_ERR_TIME_OUT;<br />+    } else {<br />+        ret = __bar_chan_sync_msg_reps_get(subchan_addr,<br />+                    (uint64_t)result->recv_buffer, result->buffer_len);<br />+    }<br />+    __bar_chan_msgid_free(seq_id);<br />+    bar_chan_unlock(in->src, in->dst, in->src_pcieid, in->virt_addr);<br />+<br />+exit:<br />+    return ret;<br />+}<br />+<br />+static uint64_t recv_addr_get(uint8_t src_type, uint8_t dst_type, uint64_t virt_addr)<br />+{<br />+    uint8_t src = __bar_msg_dst_index_trans(src_type);<br />+    uint8_t dst = __bar_msg_src_index_trans(dst_type);<br />+<br />+    if (src == BAR_MSG_SRC_ERR || dst == BAR_MSG_DST_ERR)<br />+        return 0;<br />+<br />+    uint8_t chan_id = chan_id_tbl[dst][src];<br />+    uint8_t subchan_id = 1 - subchan_id_tbl[dst][src];<br />+<br />+    return subchan_addr_cal(virt_addr, chan_id, subchan_id);<br />+}<br />+<br />+static uint64_t reply_addr_get(uint8_t sync, uint8_t src_type, uint8_t dst_type, uint64_t virt_addr)<br />+{<br />+    uint8_t src = __bar_msg_dst_index_trans(src_type);<br />+    uint8_t dst = __bar_msg_src_index_trans(dst_type);<br />+<br />+    if (src == BAR_MSG_SRC_ERR || dst == BAR_MSG_DST_ERR)<br />+        return 0;<br />+<br />+    uint8_t chan_id = chan_id_tbl[dst][src];<br />+    uint8_t subchan_id = 1 - subchan_id_tbl[dst][src];<br />+    uint64_t recv_rep_addr;<br />+<br />+    if (sync == BAR_CHAN_MSG_SYNC)<br />+        recv_rep_addr = subchan_addr_cal(virt_addr, chan_id, subchan_id);<br />+    else<br />+        recv_rep_addr = subchan_addr_cal(virt_addr, chan_id, 1 - subchan_id);<br />+<br />+    return recv_rep_addr;<br />+}<br />+<br />+static uint16_t __bar_chan_msg_header_check(struct bar_msg_header *msg_header)<br />+{<br />+    uint8_t module_id = 0;<br />+    uint16_t len = 0;<br />+<br />+    if (msg_header->valid != BAR_MSG_CHAN_USED) {<br />+        PMD_MSG_LOG(ERR, "recv header ERR: valid label is not used.");<br />+        return BAR_MSG_ERR_MODULE;<br />+    }<br />+<br />+    module_id = msg_header->module_id;<br />+    if (module_id >= (uint8_t)BAR_MSG_MODULE_NUM) {<br />+        PMD_MSG_LOG(ERR, "recv header ERR: invalid module_id: %u.", module_id);<br />+        return BAR_MSG_ERR_MODULE;<br />+    }<br />+<br />+    len = msg_header->len;<br />+    if (len > BAR_MSG_PAYLOAD_MAX_LEN) {<br />+        PMD_MSG_LOG(ERR, "recv header ERR: invalid mesg len: %u.", len);<br />+        return BAR_MSG_ERR_LEN;<br />+    }<br />+    if (msg_recv_func_tbl[msg_header->module_id] == NULL) {<br />+        PMD_MSG_LOG(ERR, "recv header ERR: module:%s(%u) doesn't register",<br />+                module_id_name(module_id), module_id);<br />+        return BAR_MSG_ERR_MODULE_NOEXIST;<br />+    }<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static void __bar_msg_sync_msg_proc(uint64_t reply_addr, struct bar_msg_header *msg_header,<br />+                    uint8_t *reciver_buff, void *dev)<br />+{<br />+    uint8_t *reps_buffer = rte_malloc(NULL, BAR_MSG_PAYLOAD_MAX_LEN, 0);<br />+<br />+    if (reps_buffer == NULL)<br />+        return;<br />+<br />+    zxdh_bar_chan_msg_recv_callback recv_func = msg_recv_func_tbl[msg_header->module_id];<br />+    uint16_t reps_len = 0;<br />+<br />+    recv_func(reciver_buff, msg_header->len, reps_buffer, &reps_len, dev);<br />+    msg_header->ack = BAR_CHAN_MSG_ACK;<br />+    msg_header->len = reps_len;<br />+    __bar_chan_msg_header_set(reply_addr, msg_header);<br />+    __bar_chan_msg_payload_set(reply_addr, reps_buffer, 
reps_len);<br />+    __bar_chan_msg_valid_set(reply_addr, BAR_MSG_CHAN_USABLE);<br />+    rte_free(reps_buffer);<br />+}<br />+<br />+static void __bar_msg_ack_async_msg_proc(struct bar_msg_header *msg_header, uint8_t *reciver_buff)<br />+{<br />+    struct seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[msg_header->msg_id];<br />+<br />+    if (reps_info->flag != REPS_INFO_FLAG_USED) {<br />+        PMD_MSG_LOG(ERR, "msg_id: %u is released", msg_header->msg_id);<br />+        return;<br />+    }<br />+    if (msg_header->len > reps_info->buffer_len - 4) {<br />+        PMD_MSG_LOG(ERR, "reps_buf_len is %u, but reps_msg_len is %u",<br />+                reps_info->buffer_len, msg_header->len + 4);<br />+        goto free_id;<br />+    }<br />+    uint8_t *reps_buffer = (uint8_t *)reps_info->reps_addr;<br />+<br />+    rte_memcpy(reps_buffer + 4, reciver_buff, msg_header->len);<br />+    *(uint16_t *)(reps_buffer + 1) = msg_header->len;<br />+    *(uint8_t *)(reps_info->reps_addr) = REPS_HEADER_REPLYED;<br />+<br />+free_id:<br />+    __bar_chan_msgid_free(msg_header->msg_id);<br />+}<br />+<br />+int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev)<br />+{<br />+    uint64_t recv_addr = recv_addr_get(src, dst, virt_addr);<br />+<br />+    if (recv_addr == 0) {<br />+        PMD_MSG_LOG(ERR, "invalid driver type(src:%u, dst:%u).", src, dst);<br />+        return -1;<br />+    }<br />+<br />+    struct bar_msg_header msg_header  = {0};<br />+<br />+    __bar_chan_msg_header_get(recv_addr, &msg_header);<br />+    uint16_t ret = __bar_chan_msg_header_check(&msg_header);<br />+<br />+    if (ret != BAR_MSG_OK) {<br />+        PMD_MSG_LOG(ERR, "recv msg_head err, ret: %u.", ret);<br />+        return -1;<br />+    }<br />+    uint8_t *recved_msg = rte_malloc(NULL, msg_header.len, 0);<br />+<br />+    if (recved_msg == NULL) {<br />+        PMD_MSG_LOG(ERR, "malloc temp buff failed.");<br />+        return -1;<br />+    }<br />+    __bar_chan_msg_payload_get(recv_addr, recved_msg, msg_header.len);<br />+<br />+    uint64_t reps_addr = reply_addr_get(msg_header.sync, src, dst, virt_addr);<br />+<br />+    if (msg_header.sync == BAR_CHAN_MSG_SYNC) {<br />+        __bar_msg_sync_msg_proc(reps_addr, &msg_header, recved_msg, dev);<br />+        goto exit;<br />+    }<br />+    __bar_chan_msg_valid_set(recv_addr, BAR_MSG_CHAN_USABLE);<br />+    if (msg_header.ack == BAR_CHAN_MSG_ACK) {<br />+        __bar_msg_ack_async_msg_proc(&msg_header, recved_msg);<br />+        goto exit;<br />+    }<br />+<br />+exit:<br />+    rte_free(recved_msg);<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+int zxdh_bar_chan_msg_recv_register(uint8_t module_id, zxdh_bar_chan_msg_recv_callback callback)<br />+{<br />+    if (module_id >= (uint16_t)BAR_MSG_MODULE_NUM) {<br />+        PMD_MSG_LOG(ERR, "register ERR: invalid module_id: %u.", module_id);<br />+        return BAR_MSG_ERR_MODULE;<br />+    }<br />+    if (callback == NULL) {<br />+        PMD_MSG_LOG(ERR, "register %s(%u) error: null callback.",<br />+            module_id_name(module_id), module_id);<br />+        return BAR_MEG_ERR_NULL_FUNC;<br />+    }<br />+    if (msg_recv_func_tbl[module_id] != NULL) {<br />+        PMD_MSG_LOG(ERR, "register warning, event:%s(%u) already be registered.",<br />+            module_id_name(module_id), module_id);<br />+        return BAR_MSG_ERR_REPEAT_REGISTER;<br />+    }<br />+    msg_recv_func_tbl[module_id] = callback;<br />+    PMD_MSG_LOG(DEBUG, "register module: %s(%u) success.",<br 
/>+            module_id_name(module_id), module_id);<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+int zxdh_bar_chan_msg_recv_unregister(uint8_t module_id)<br />+{<br />+    if (module_id >= (uint16_t)BAR_MSG_MODULE_NUM) {<br />+        PMD_MSG_LOG(ERR, "unregister ERR: invalid module_id: %u.", module_id);<br />+        return BAR_MSG_ERR_MODULE;<br />+    }<br />+    if (msg_recv_func_tbl[module_id] == NULL) {<br />+        PMD_MSG_LOG(ERR, "unregister warning, event: %s(%d) has already been unregistered.",<br />+            module_id_name(module_id), module_id);<br />+        return BAR_MSG_ERR_UNREGISTER;<br />+    }<br />+    msg_recv_func_tbl[module_id] = NULL;<br />+    PMD_MSG_LOG(DEBUG, "unregister module %s(%d) success.",<br />+        module_id_name(module_id), module_id);<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+static int bar_get_sum(uint8_t *ptr, uint8_t len)<br />+{<br />+    uint64_t sum = 0;<br />+    int idx;<br />+<br />+    for (idx = 0; idx < len; idx++)<br />+        sum += *(ptr + idx);<br />+<br />+    return (uint16_t)sum;<br />+}<br />+<br />
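Editor's note: bar_get_sum() computes a 16-bit additive checksum over the request body; the firmware echoes it back in the reply's check field, and zxdh_bar_chan_enable() and zxdh_get_bar_offset() below accept a reply only when the echoed token matches. A sketch of the pattern used there:<br /><br />    uint16_t sum_res = bar_get_sum((uint8_t *)&msix_msg, sizeof(msix_msg));<br /><br />    if (recv_msg.msix_reps.check != sum_res)<br />        return BAR_MSG_ERR_REPLY; /* reply did not echo our checksum */<br /><br />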
+static int zxdh_get_res_info(struct zxdh_res_para *dev, uint8_t field, uint8_t *res, uint16_t *len)<br />+{<br />+    if (!res || !dev)<br />+        return BAR_MSG_ERR_NULL;<br />+<br />+    struct tbl_msg_header tbl_msg = {<br />+        .type = TBL_TYPE_READ,<br />+        .field = field,<br />+        .pcieid = dev->pcie_id,<br />+        .slen = 0,<br />+        .rsv = 0,<br />+    };<br />+<br />+    struct zxdh_pci_bar_msg in = {0};<br />+<br />+    in.virt_addr = dev->virt_addr;<br />+    in.payload_addr = &tbl_msg;<br />+    in.payload_len = sizeof(tbl_msg);<br />+    in.src = dev->src_type;<br />+    in.dst = MSG_CHAN_END_RISC;<br />+    in.module_id = BAR_MODULE_TBL;<br />+    in.src_pcieid = dev->pcie_id;<br />+<br />+    uint8_t recv_buf[RSC_TBL_CONTENT_LEN_MAX + 8] = {0};<br />+    struct zxdh_msg_recviver_mem result = {<br />+        .recv_buffer = recv_buf,<br />+        .buffer_len = sizeof(recv_buf),<br />+    };<br />+    int ret = zxdh_bar_chan_sync_msg_send(&in, &result);<br />+<br />+    if (ret != BAR_MSG_OK) {<br />+        PMD_MSG_LOG(ERR,<br />+            "send sync_msg failed. pcieid: 0x%x, ret: %d.", dev->pcie_id, ret);<br />+        return ret;<br />+    }<br />+    struct tbl_msg_reps_header *tbl_reps =<br />+        (struct tbl_msg_reps_header *)(recv_buf + REPS_HEADER_PAYLOAD_OFFSET);<br />+<br />+    if (tbl_reps->check != TBL_MSG_PRO_SUCCESS) {<br />+        PMD_MSG_LOG(ERR,<br />+            "get resource_field failed. pcieid: 0x%x, check: 0x%x.",<br />+            dev->pcie_id, tbl_reps->check);<br />+        return BAR_MSG_ERR_REPLY;<br />+    }<br />+    *len = tbl_reps->len;<br />+    rte_memcpy(res,<br />+        (recv_buf + REPS_HEADER_PAYLOAD_OFFSET + sizeof(struct tbl_msg_reps_header)), *len);<br />+    return ret;<br />+}<br />+<br />+int zxdh_get_res_panel_id(struct zxdh_res_para *in, uint8_t *panel_id)<br />+{<br />+    uint8_t reps = 0;<br />+    uint16_t reps_len = 0;<br />+<br />+    if (zxdh_get_res_info(in, TBL_FIELD_PNLID, &reps, &reps_len) != BAR_MSG_OK)<br />+        return -1;<br />+<br />+    *panel_id = reps;<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+int zxdh_get_res_hash_id(struct zxdh_res_para *in, uint8_t *hash_id)<br />+{<br />+    uint8_t reps = 0;<br />+    uint16_t reps_len = 0;<br />+<br />+    if (zxdh_get_res_info(in, TBL_FIELD_HASHID, &reps, &reps_len) != BAR_MSG_OK)<br />+        return -1;<br />+<br />+    *hash_id = reps;<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+int zxdh_bar_chan_enable(struct msix_para *_msix_para, uint16_t *vport)<br />+{<br />+    int ret = 0;<br />+    uint16_t check_token = 0;<br />+    uint16_t sum_res = 0;<br />+<br />+    if (!_msix_para)<br />+        return BAR_MSG_ERR_NULL;<br />+<br />+    struct msix_msg msix_msg = {<br />+        .pcie_id = _msix_para->pcie_id,<br />+        .vector_risc = _msix_para->vector_risc,<br />+        .vector_pfvf = _msix_para->vector_pfvf,<br />+        .vector_mpf = _msix_para->vector_mpf,<br />+    };<br />+    struct zxdh_pci_bar_msg in = {<br />+        .virt_addr = _msix_para->virt_addr,<br />+        .payload_addr = &msix_msg,<br />+        .payload_len = sizeof(msix_msg),<br />+        .emec = 0,<br />+        .src = _msix_para->driver_type,<br />+        .dst = MSG_CHAN_END_RISC,<br />+        .module_id = BAR_MODULE_MISX,<br />+        .src_pcieid = _msix_para->pcie_id,<br />+        .dst_pcieid = 0,<br />+        .usr = 0,<br />+    };<br />+<br />+    struct bar_recv_msg recv_msg = {0};<br />+    struct zxdh_msg_recviver_mem result = {<br />+        .recv_buffer = &recv_msg,<br />+        .buffer_len = sizeof(recv_msg),<br />+    };<br />+<br />+    ret = zxdh_bar_chan_sync_msg_send(&in, &result);<br />+<br />+    if (ret != BAR_MSG_OK)<br />+        return -ret;<br />+<br />+    check_token = recv_msg.msix_reps.check;<br />+    sum_res = bar_get_sum((uint8_t *)&msix_msg, sizeof(msix_msg));<br />+<br />+    if (check_token != sum_res) {<br />+        PMD_MSG_LOG(ERR, "expect token: 0x%x, get token: 0x%x.", sum_res, check_token);<br />+        return BAR_MSG_ERR_REPLY;<br />+    }<br />+    *vport = recv_msg.msix_reps.vport;<br />+<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+int zxdh_get_bar_offset(struct bar_offset_params *paras, struct bar_offset_res *res)<br />+{<br />+    uint16_t check_token = 0;<br />+    uint16_t sum_res = 0;<br />+    int ret = 0;<br />+<br />+    if (!paras)<br />+        return BAR_MSG_ERR_NULL;<br />+<br />+    struct offset_get_msg send_msg = {<br />+        .pcie_id = paras->pcie_id,<br />+        .type = paras->type,<br />+    };<br />+    struct zxdh_pci_bar_msg in = {0};<br />+<br />+    in.payload_addr = &send_msg;<br />+    in.payload_len = sizeof(send_msg);<br />+    in.virt_addr = paras->virt_addr;<br />+    in.src = MSG_CHAN_END_PF;<br />+    in.dst = MSG_CHAN_END_RISC;<br />+    in.module_id = BAR_MODULE_OFFSET_GET;<br />+    in.src_pcieid = paras->pcie_id;<br />+<br />+    struct bar_recv_msg recv_msg = {0};<br />+    
struct zxdh_msg_recviver_mem result = {<br />+        .recv_buffer = &recv_msg,<br />+        .buffer_len = sizeof(recv_msg),<br />+    };<br />+    ret = zxdh_bar_chan_sync_msg_send(&in, &result);<br />+    if (ret != BAR_MSG_OK)<br />+        return -ret;<br />+<br />+    check_token = recv_msg.offset_reps.check;<br />+    sum_res = bar_get_sum((uint8_t *)&send_msg, sizeof(send_msg));<br />+<br />+    if (check_token != sum_res) {<br />+        PMD_MSG_LOG(ERR, "expect token: 0x%x, get token: 0x%x.\n", sum_res, check_token);<br />+        return BAR_MSG_ERR_REPLY;<br />+    }<br />+    res->bar_offset = recv_msg.offset_reps.offset;<br />+    res->bar_length = recv_msg.offset_reps.length;<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+int zxdh_msg_chan_init(void)<br />+{<br />+    g_dev_stat.dev_cnt++;<br />+    if (g_dev_stat.is_res_init)<br />+        return BAR_MSG_OK;<br />+<br />+    pthread_spin_init(&chan_lock, 0);<br />+    g_seqid_ring.cur_id = 0;<br />+    pthread_spin_init(&g_seqid_ring.lock, 0);<br />+    uint16_t seq_id;<br />+<br />+    for (seq_id = 0; seq_id < BAR_SEQID_NUM_MAX; seq_id++) {<br />+        struct seqid_item *reps_info = &(g_seqid_ring.reps_info_tbl[seq_id]);<br />+<br />+        reps_info->id = seq_id;<br />+        reps_info->flag = REPS_INFO_FLAG_USABLE;<br />+    }<br />+    g_dev_stat.is_res_init = true;<br />+    return BAR_MSG_OK;<br />+}<br />+<br />+int zxdh_bar_msg_chan_exit(void)<br />+{<br />+    if (!g_dev_stat.is_res_init || (--g_dev_stat.dev_cnt > 0))<br />+        return BAR_MSG_OK;<br />+<br />+    g_dev_stat.is_res_init = false;<br />+    PMD_MSG_LOG(DEBUG, "%s exit success!", __func__);<br />+    return BAR_MSG_OK;<br />+}<br />diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h<br />new file mode 100644<br />index 0000000000..07c4a1b1da<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_msg.h<br />@@ -0,0 +1,408 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZXDH_MSG_CHAN_H_<br />+#define _ZXDH_MSG_CHAN_H_<br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+#include <stdint.h> <br />+<br />+#define ZXDH_MSG_CHAN_PFVFSHARE_OFFSET  (ZXDH_CTRLCH_OFFSET + 0x1000)<br />+#define ZXDH_MSIX_INTR_MSG_VEC_BASE  1<br />+#define ZXDH_MSIX_INTR_MSG_VEC_NUM   3<br />+<br />+#define BAR_MSG_POLLING_SPAN     100 /* sleep us */<br />+#define BAR_MSG_POLL_CNT_PER_MS  (1 * 1000 / BAR_MSG_POLLING_SPAN)<br />+#define BAR_MSG_POLL_CNT_PER_S   (1 * 1000 * 1000 / BAR_MSG_POLLING_SPAN)<br />+#define BAR_MSG_TIMEOUT_TH       (10 * 1000 * 1000 / BAR_MSG_POLLING_SPAN) /* 10s */<br />+<br />+#define BAR_CHAN_MSG_SYNC     0<br />+#define BAR_CHAN_MSG_ASYNC    1<br />+#define BAR_CHAN_MSG_NO_EMEC  0<br />+#define BAR_CHAN_MSG_EMEC     1<br />+#define BAR_CHAN_MSG_NO_ACK   0<br />+#define BAR_CHAN_MSG_ACK      1<br />+<br />+#define ZXDH_MSIX_INTR_DTB_VEC      (ZXDH_MSIX_INTR_MSG_VEC_BASE + ZXDH_MSIX_INTR_MSG_VEC_NUM)<br />+#define ZXDH_MSIX_INTR_DTB_VEC_NUM  1<br />+#define ZXDH_INTR_NONQUE_NUM        (ZXDH_MSIX_INTR_MSG_VEC_NUM + ZXDH_MSIX_INTR_DTB_VEC_NUM + 1)<br />+#define ZXDH_QUE_INTR_VEC_BASE      (ZXDH_MSIX_INTR_DTB_VEC + ZXDH_MSIX_INTR_DTB_VEC_NUM) /* 5 */<br />+#define ZXDH_QUE_INTR_VEC_NUM       256<br />+<br />+#define BAR_MSG_ADDR_CHAN_INTERVAL  (2 * 1024) /* channel size */<br />+#define BAR_MSG_PLAYLOAD_OFFSET     (sizeof(struct bar_msg_header))<br />+#define BAR_MSG_PAYLOAD_MAX_LEN     (BAR_MSG_ADDR_CHAN_INTERVAL - sizeof(struct 
bar_msg_header))<br />+<br />+#define MSG_CHAN_RET_ERR_RECV_FAIL              (-11)<br />+#define ZXDH_INDIR_RQT_SIZE 256<br />+#define MODULE_EEPROM_DATA_LEN 128<br />+<br />+enum BAR_MSG_RTN {<br />+    BAR_MSG_OK = 0,<br />+    BAR_MSG_ERR_MSGID,<br />+    BAR_MSG_ERR_NULL,<br />+    BAR_MSG_ERR_TYPE, /* Message type exception */<br />+    BAR_MSG_ERR_MODULE, /* Module ID exception */<br />+    BAR_MSG_ERR_BODY_NULL, /* Message body exception */<br />+    BAR_MSG_ERR_LEN, /* Message length exception */<br />+    BAR_MSG_ERR_TIME_OUT, /* Message sending timed out */<br />+    BAR_MSG_ERR_NOT_READY, /* Abnormal message sending conditions */<br />+    BAR_MEG_ERR_NULL_FUNC, /* Empty receive processing function pointer */<br />+    BAR_MSG_ERR_REPEAT_REGISTER, /* Module duplicate registration */<br />+    BAR_MSG_ERR_UNREGISTER, /* Repeated deregistration */<br />+    /**<br />+     * The sending interface parameter boundary structure pointer is empty<br />+     */<br />+    BAR_MSG_ERR_NULL_PARA,<br />+    BAR_MSG_ERR_REPSBUFF_LEN, /* The length of reps_buff is too short */<br />+    /**<br />+     * Unable to find the corresponding message processing function for this module<br />+     */<br />+    BAR_MSG_ERR_MODULE_NOEXIST,<br />+    /**<br />+     * The virtual address in the parameters passed in by the sending interface is empty<br />+     */<br />+    BAR_MSG_ERR_VIRTADDR_NULL,<br />+    BAR_MSG_ERR_REPLY, /* sync msg resp_error */<br />+    BAR_MSG_ERR_MPF_NOT_SCANNED,<br />+    BAR_MSG_ERR_KERNEL_READY,<br />+    BAR_MSG_ERR_USR_RET_ERR,<br />+    BAR_MSG_ERR_ERR_PCIEID,<br />+    BAR_MSG_ERR_SOCKET, /* netlink socket err */<br />+};<br />+<br />+enum bar_module_id {<br />+    BAR_MODULE_DBG = 0, /* 0:  debug */<br />+    BAR_MODULE_TBL,     /* 1:  resource table */<br />+    BAR_MODULE_MISX,    /* 2:  config msix */<br />+    BAR_MODULE_SDA,     /* 3: */<br />+    BAR_MODULE_RDMA,    /* 4: */<br />+    BAR_MODULE_DEMO,    /* 5:  channel test */<br />+    BAR_MODULE_SMMU,    /* 6: */<br />+    BAR_MODULE_MAC,     /* 7:  mac rx/tx stats */<br />+    BAR_MODULE_VDPA,    /* 8:  vdpa live migration */<br />+    BAR_MODULE_VQM,     /* 9:  vqm live migration */<br />+    BAR_MODULE_NP,      /* 10: vf msg callback np */<br />+    BAR_MODULE_VPORT,   /* 11: get vport */<br />+    BAR_MODULE_BDF,     /* 12: get bdf */<br />+    BAR_MODULE_RISC_READY, /* 13: */<br />+    BAR_MODULE_REVERSE,    /* 14: byte stream reverse */<br />+    BAR_MDOULE_NVME,       /* 15: */<br />+    BAR_MDOULE_NPSDK,      /* 16: */<br />+    BAR_MODULE_NP_TODO,    /* 17: */<br />+    MODULE_BAR_MSG_TO_PF,  /* 18: */<br />+    MODULE_BAR_MSG_TO_VF,  /* 19: */<br />+<br />+    MODULE_FLASH = 32,<br />+    BAR_MODULE_OFFSET_GET = 33,<br />+    BAR_EVENT_OVS_WITH_VCB = 36, /* ovs<-->vcb */<br />+<br />+    BAR_MSG_MODULE_NUM = 100,<br />+};<br />+<br />+static inline const char *module_id_name(int val)<br />+{<br />+    switch (val) {<br />+    case BAR_MODULE_DBG:        return "BAR_MODULE_DBG";<br />+    case BAR_MODULE_TBL:        return "BAR_MODULE_TBL";<br />+    case BAR_MODULE_MISX:       return "BAR_MODULE_MISX";<br />+    case BAR_MODULE_SDA:        return "BAR_MODULE_SDA";<br />+    case BAR_MODULE_RDMA:       return "BAR_MODULE_RDMA";<br />+    case BAR_MODULE_DEMO:       return "BAR_MODULE_DEMO";<br />+    case BAR_MODULE_SMMU:       return "BAR_MODULE_SMMU";<br />+    case BAR_MODULE_MAC:        return "BAR_MODULE_MAC";<br />+    case BAR_MODULE_VDPA:       return "BAR_MODULE_VDPA";<br />+    case 
BAR_MODULE_VQM:        return "BAR_MODULE_VQM";<br />+    case BAR_MODULE_NP:         return "BAR_MODULE_NP";<br />+    case BAR_MODULE_VPORT:      return "BAR_MODULE_VPORT";<br />+    case BAR_MODULE_BDF:        return "BAR_MODULE_BDF";<br />+    case BAR_MODULE_RISC_READY: return "BAR_MODULE_RISC_READY";<br />+    case BAR_MODULE_REVERSE:    return "BAR_MODULE_REVERSE";<br />+    case BAR_MDOULE_NVME:       return "BAR_MDOULE_NVME";<br />+    case BAR_MDOULE_NPSDK:      return "BAR_MDOULE_NPSDK";<br />+    case BAR_MODULE_NP_TODO:    return "BAR_MODULE_NP_TODO";<br />+    case MODULE_BAR_MSG_TO_PF:  return "MODULE_BAR_MSG_TO_PF";<br />+    case MODULE_BAR_MSG_TO_VF:  return "MODULE_BAR_MSG_TO_VF";<br />+    case MODULE_FLASH:          return "MODULE_FLASH";<br />+    case BAR_MODULE_OFFSET_GET: return "BAR_MODULE_OFFSET_GET";<br />+    case BAR_EVENT_OVS_WITH_VCB: return "BAR_EVENT_OVS_WITH_VCB";<br />+    default: return "NA";<br />+    }<br />+}<br />+<br />+struct bar_msg_header {<br />+    uint8_t valid : 1; /* used by __bar_chan_msg_valid_set/get */<br />+    uint8_t sync  : 1;<br />+    uint8_t emec  : 1; /* emergency? */<br />+    uint8_t ack   : 1; /* ack msg? */<br />+    uint8_t poll  : 1;<br />+    uint8_t usr   : 1;<br />+    uint8_t rsv;<br />+    uint16_t module_id;<br />+    uint16_t len;<br />+    uint16_t msg_id;<br />+    uint16_t src_pcieid;<br />+    uint16_t dst_pcieid; /* used in PF-->VF */<br />+}; /* 12B */<br />+<br />+struct zxdh_pci_bar_msg {<br />+    uint64_t virt_addr; /* bar addr */<br />+    void    *payload_addr;<br />+    uint16_t payload_len;<br />+    uint16_t emec;<br />+    uint16_t src; /* refer to BAR_DRIVER_TYPE */<br />+    uint16_t dst; /* refer to BAR_DRIVER_TYPE */<br />+    uint16_t module_id;<br />+    uint16_t src_pcieid;<br />+    uint16_t dst_pcieid;<br />+    uint16_t usr;<br />+}; /* 32B */<br />+<br />+struct zxdh_msg_recviver_mem {<br />+    void    *recv_buffer; /* first 4B is head, followed by payload */<br />+    uint64_t buffer_len;<br />+}; /* 16B */<br />+<br />+struct msix_msg {<br />+    uint16_t pcie_id;<br />+    uint16_t vector_risc;<br />+    uint16_t vector_pfvf;<br />+    uint16_t vector_mpf;<br />+};<br />+/* private reps struct */<br />+struct bar_msix_reps {<br />+    uint16_t pcie_id;<br />+    uint16_t check;<br />+    uint16_t vport;<br />+    uint16_t rsv;<br />+} __rte_packed; /* 8B */<br />+<br />+struct bar_offset_reps {<br />+    uint16_t check;<br />+    uint16_t rsv;<br />+    uint32_t offset;<br />+    uint32_t length;<br />+} __rte_packed; /* 12B */<br />+<br />+struct bar_recv_msg {<br />+    /* fix 4B */<br />+    uint8_t  reps_ok;<br />+    uint16_t reps_len;<br />+    uint8_t  rsv;<br />+    union {<br />+        struct bar_msix_reps   msix_reps;   /* 8B */<br />+        struct bar_offset_reps offset_reps; /* 12B */<br />+    } __rte_packed;<br />+} __rte_packed;<br />+<br />+enum pciebar_layout_type {<br />+    URI_VQM      = 0,<br />+    URI_SPINLOCK = 1,<br />+    URI_FWCAP    = 2,<br />+    URI_FWSHR    = 3,<br />+    URI_DRS_SEC  = 4,<br />+    URI_RSV      = 5,<br />+    URI_CTRLCH   = 6,<br />+    URI_1588     = 7,<br />+    URI_QBV      = 8,<br />+    URI_MACPCS   = 9,<br />+    URI_RDMA     = 10,<br />+/* DEBUG PF */<br />+    URI_MNP      = 11,<br />+    URI_MSPM     = 12,<br />+    URI_MVQM     = 13,<br />+    URI_MDPI     = 14,<br />+    URI_NP       = 15,<br />+/* END DEBUG PF */<br />+    URI_MAX,<br />+};<br />+<br />+enum RES_TBL_FILED {<br />+    TBL_FIELD_PCIEID     = 0,<br />+    
TBL_FIELD_BDF        = 1,<br />+    TBL_FIELD_MSGCH      = 2,<br />+    TBL_FIELD_DATACH     = 3,<br />+    TBL_FIELD_VPORT      = 4,<br />+    TBL_FIELD_PNLID      = 5,<br />+    TBL_FIELD_PHYPORT    = 6,<br />+    TBL_FIELD_SERDES_NUM = 7,<br />+    TBL_FIELD_NP_PORT    = 8,<br />+    TBL_FIELD_SPEED      = 9,<br />+    TBL_FIELD_HASHID     = 10,<br />+    TBL_FIELD_NON,<br />+};<br />+<br />+struct tbl_msg_header {<br />+    uint8_t  type;  /* r/w */<br />+    uint8_t  field; /* which table? */<br />+    uint16_t pcieid;<br />+    uint16_t slen;<br />+    uint16_t rsv;<br />+}; /* 8B */<br />+struct tbl_msg_reps_header {<br />+    uint8_t  check;<br />+    uint8_t  rsv;<br />+    uint16_t len;<br />+}; /* 4B */<br />+<br />+enum TBL_MSG_TYPE {<br />+    TBL_TYPE_READ,<br />+    TBL_TYPE_WRITE,<br />+    TBL_TYPE_NON,<br />+};<br />+<br />+struct bar_offset_params {<br />+    uint64_t virt_addr;  /* Bar space control space virtual address */<br />+    uint16_t pcie_id;<br />+    uint16_t type;  /* Module types corresponding to PCIBAR planning */<br />+};<br />+struct bar_offset_res {<br />+    uint32_t bar_offset;<br />+    uint32_t bar_length;<br />+};<br />+<br />+/* vec0  : dev  interrupt<br />+ * vec1~3: risc interrupt<br />+ * vec4  : dtb  interrupt<br />+ */<br />+enum {<br />+    MSIX_FROM_PFVF = ZXDH_MSIX_INTR_MSG_VEC_BASE, /* 1 */<br />+    MSIX_FROM_MPF,   /* 2 */<br />+    MSIX_FROM_RISCV, /* 3 */<br />+    MSG_VEC_NUM      /* 4 */<br />+};<br />+<br />+enum DRIVER_TYPE {<br />+    MSG_CHAN_END_MPF = 0,<br />+    MSG_CHAN_END_PF,<br />+    MSG_CHAN_END_VF,<br />+    MSG_CHAN_END_RISC,<br />+};<br />+<br />+enum MSG_TYPE {<br />+    /* loopback test type */<br />+    TYPE_DEBUG = 0,<br />+    DST_RISCV,<br />+    DST_MPF,<br />+    DST_PF_OR_VF,<br />+    DST_ZF,<br />+    MSG_TYPE_NUM,<br />+};<br />+<br />+struct msg_header {<br />+    bool is_async;<br />+    enum MSG_TYPE msg_type;<br />+    enum bar_module_id msg_module_id;<br />+    uint8_t msg_priority;<br />+    uint16_t vport_dst;<br />+    uint16_t qid_dst;<br />+};<br />+<br />+struct zxdh_res_para {<br />+    uint64_t virt_addr;<br />+    uint16_t pcie_id;<br />+    uint16_t src_type; /* refer to BAR_DRIVER_TYPE */<br />+};<br />+<br />+struct msix_para {<br />+    uint16_t pcie_id;<br />+    uint16_t vector_risc;<br />+    uint16_t vector_pfvf;<br />+    uint16_t vector_mpf;<br />+    uint64_t virt_addr;<br />+    uint16_t driver_type; /* refer to DRIVER_TYPE */<br />+};<br />+<br />+struct offset_get_msg {<br />+    uint16_t pcie_id;<br />+    uint16_t type;<br />+}; /* 4B */<br />+<br />+typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len, void *reps_buffer,<br />+                    uint16_t *reps_len, void *dev);<br />+<br />+/**<br />+ * Init msg_chan_pkt in probe()<br />+ * @return zero for success, negative for failure<br />+ */<br />+int16_t zxdh_msg_chan_pkt_init(void);<br />+void zxdh_msg_chan_pkt_remove(void); /* Remove msg_chan_pkt in remove() */<br />+<br />+/**<br />+ * Get the offset value of the specified module<br />+ * @bar_offset_params:  input parameter<br />+ * @bar_offset_res: Module offset and length<br />+ */<br />+int zxdh_get_bar_offset(struct bar_offset_params *paras, struct bar_offset_res *res);<br />+<br />+/**<br />+ * Send synchronization messages through PCIE BAR space<br />+ * @in: Message 
sending information<br />+ * @result: Message result feedback<br />+ * @return: 0 on success, other values on failure<br />+ */<br />+int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in, struct zxdh_msg_recviver_mem *result);<br />+<br />+/**<br />+ * PCIE BAR space message method: register a message receive callback<br />+ * @module_id: Registration module ID<br />+ * @callback: Pointer to the receive processing function implemented by the module<br />+ * @return: 0 on success, other values on failure<br />+ * Usually called during driver initialization<br />+ */<br />+int zxdh_bar_chan_msg_recv_register(uint8_t module_id, zxdh_bar_chan_msg_recv_callback callback);<br />+<br />+/**<br />+ * PCIE BAR space message method: unregister a message receive callback<br />+ * @module_id: Registration module ID<br />+ * @return: 0 on success, other values on failure<br />+ * Called during driver removal<br />+ */<br />+int zxdh_bar_chan_msg_recv_unregister(uint8_t module_id);<br />+<br />+/**<br />+ * Provide a message receiving interface for device driver interrupt handling functions<br />+ * @src:  Driver type for sending interrupts<br />+ * @dst:  Device driver's own driver type<br />+ * @virt_addr: The communication bar address of the device<br />+ * @return: 0 on success, other values on failure<br />+ */<br />+int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev);<br />+<br />+/**<br />+ * Initialize the spinlock and clear its hardware lock address<br />+ * @pcie_id: PCIE_id of PF device<br />+ * @bar_base_addr: Bar0 initial base address<br />+ */<br />+int bar_chan_pf_init_spinlock(uint16_t pcie_id, uint64_t bar_base_addr);<br />+<br />+int zxdh_bar_chan_enable(struct msix_para *_msix_para, uint16_t *vport);<br />+int zxdh_msg_chan_init(void);<br />+int zxdh_bar_msg_chan_exit(void);<br />+<br />+int zxdh_get_res_panel_id(struct zxdh_res_para *in, uint8_t *panel_id);<br />+int zxdh_get_res_hash_id(struct zxdh_res_para *in, uint8_t *hash_id);<br />+<br />+int pf_recv_bar_msg(void *pay_load __rte_unused,<br />+                    uint16_t len __rte_unused,<br />+                    void *reps_buffer __rte_unused,<br />+                    uint16_t *reps_len __rte_unused,<br />+                    void *eth_dev __rte_unused);<br />+int vf_recv_bar_msg(void *pay_load __rte_unused,<br />+                    uint16_t len __rte_unused,<br />+                    void *reps_buffer __rte_unused,<br />+                    uint16_t *reps_len __rte_unused,<br />+                    void *eth_dev __rte_unused);<br />+<br />+#ifdef __cplusplus<br />+}<br />+#endif<br />+<br />+#endif /* _ZXDH_MSG_CHAN_H_  */<br />diff --git a/drivers/net/zxdh/zxdh_npsdk.c b/drivers/net/zxdh/zxdh_npsdk.c<br />new file mode 100644<br />index 0000000000..eec644b01e<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_npsdk.c<br />@@ -0,0 +1,158 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#include <rte_common.h> <br />+#include "zxdh_npsdk.h" <br />+<br />+int dpp_dtb_hash_offline_delete(uint32_t dev_id __rte_unused,<br />+                                uint32_t queue_id __rte_unused,<br />+                                uint32_t sdt_no __rte_unused,<br />+                                uint32_t flush_mode __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_dtb_hash_online_delete(uint32_t dev_id __rte_unused,<br />+                               uint32_t queue_id 
__rte_unused,<br />+                               uint32_t sdt_no __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_apt_hash_res_get(uint32_t type __rte_unused,<br />+                DPP_APT_HASH_RES_INIT_T *HashResInit __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_apt_eram_res_get(uint32_t type __rte_unused,<br />+                DPP_APT_ERAM_RES_INIT_T *EramResInit __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_apt_stat_res_get(uint32_t type __rte_unused,<br />+                DPP_APT_STAT_RES_INIT_T *StatResInit __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_apt_hash_global_res_init(uint32_t dev_id __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_apt_hash_func_res_init(uint32_t dev_id __rte_unused,<br />+                    uint32_t func_num __rte_unused,<br />+                    DPP_APT_HASH_FUNC_RES_T *HashFuncRes __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_apt_hash_bulk_res_init(uint32_t dev_id __rte_unused,<br />+                    uint32_t bulk_num __rte_unused,<br />+                    DPP_APT_HASH_BULK_RES_T *BulkRes __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_apt_hash_tbl_res_init(uint32_t dev_id __rte_unused,<br />+                    uint32_t tbl_num __rte_unused,<br />+                    DPP_APT_HASH_TABLE_T *HashTbl __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_apt_eram_res_init(uint32_t dev_id __rte_unused,<br />+                uint32_t tbl_num __rte_unused,<br />+                DPP_APT_ERAM_TABLE_T *EramTbl __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_stat_ppu_eram_baddr_set(uint32_t dev_id __rte_unused,<br />+                    uint32_t ppu_eram_baddr __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+int dpp_stat_ppu_eram_depth_set(uint32_t dev_id __rte_unused,<br />+                    uint32_t ppu_eram_depth __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+int dpp_se_cmmu_smmu1_cfg_set(uint32_t dev_id __rte_unused,<br />+                    uint32_t base_addr __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+int dpp_stat_ppu_ddr_baddr_set(uint32_t dev_id __rte_unused,<br />+                    uint32_t ppu_ddr_baddr __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_host_np_init(uint32_t dev_id __rte_unused,<br />+            DPP_DEV_INIT_CTRL_T *p_dev_init_ctrl __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+int dpp_np_online_uninstall(uint32_t dev_id __rte_unused,<br />+            char *port_name __rte_unused,<br />+            uint32_t queue_id __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_dtb_stat_ppu_cnt_get(uint32_t dev_id __rte_unused,<br />+             uint32_t queue_id __rte_unused,<br />+             STAT_CNT_MODE_E rd_mode __rte_unused,<br />+             uint32_t index __rte_unused,<br />+             uint32_t *p_data 
__rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+int dpp_dtb_entry_get(uint32_t dev_id __rte_unused,<br />+         uint32_t queue_id __rte_unused,<br />+         DPP_DTB_USER_ENTRY_T *GetEntry __rte_unused,<br />+         uint32_t srh_mode __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+int dpp_dtb_table_entry_write(uint32_t dev_id __rte_unused,<br />+            uint32_t queue_id __rte_unused,<br />+            uint32_t entryNum __rte_unused,<br />+            DPP_DTB_USER_ENTRY_T *DownEntrys __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+int dpp_dtb_table_entry_delete(uint32_t dev_id __rte_unused,<br />+             uint32_t queue_id __rte_unused,<br />+             uint32_t entryNum __rte_unused,<br />+             DPP_DTB_USER_ENTRY_T *DeleteEntrys __rte_unused)<br />+{<br />+    /* todo provided later */<br />+    return 0;<br />+}<br />+<br />+<br />diff --git a/drivers/net/zxdh/zxdh_npsdk.h b/drivers/net/zxdh/zxdh_npsdk.h<br />new file mode 100644<br />index 0000000000..265f79d132<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_npsdk.h<br />@@ -0,0 +1,216 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#include <stdint.h> <br />+<br />+#define DPP_PORT_NAME_MAX                   (32)<br />+#define DPP_SMMU1_READ_REG_MAX_NUM          (16)<br />+#define DPP_DIR_TBL_BUF_MAX_NUM             (DPP_SMMU1_READ_REG_MAX_NUM)<br />+#define DPP_ETCAM_BLOCK_NUM                 (8)<br />+#define DPP_SMMU0_LPM_AS_TBL_ID_NUM         (8)<br />+#define SE_NIC_RES_TYPE                      0<br />+<br />+#define ZXDH_SDT_VPORT_ATT_TABLE            ((uint32_t)(1))<br />+#define ZXDH_SDT_PANEL_ATT_TABLE            ((uint32_t)(2))<br />+#define ZXDH_SDT_RSS_ATT_TABLE              ((uint32_t)(3))<br />+#define ZXDH_SDT_VLAN_ATT_TABLE             ((uint32_t)(4))<br />+#define ZXDH_SDT_BROCAST_ATT_TABLE          ((uint32_t)(6))<br />+#define ZXDH_SDT_UNICAST_ATT_TABLE          ((uint32_t)(10))<br />+#define ZXDH_SDT_MULTICAST_ATT_TABLE        ((uint32_t)(11))<br />+<br />+#define ZXDH_SDT_L2_ENTRY_TABLE0            ((uint32_t)(64))<br />+#define ZXDH_SDT_L2_ENTRY_TABLE1            ((uint32_t)(65))<br />+#define ZXDH_SDT_L2_ENTRY_TABLE2            ((uint32_t)(66))<br />+#define ZXDH_SDT_L2_ENTRY_TABLE3            ((uint32_t)(67))<br />+#define ZXDH_SDT_L2_ENTRY_TABLE4            ((uint32_t)(68))<br />+#define ZXDH_SDT_L2_ENTRY_TABLE5            ((uint32_t)(69))<br />+<br />+#define ZXDH_SDT_MC_TABLE0                  ((uint32_t)(76))<br />+#define ZXDH_SDT_MC_TABLE1                  ((uint32_t)(77))<br />+#define ZXDH_SDT_MC_TABLE2                  ((uint32_t)(78))<br />+#define ZXDH_SDT_MC_TABLE3                  ((uint32_t)(79))<br />+#define ZXDH_SDT_MC_TABLE4                  ((uint32_t)(80))<br />+#define ZXDH_SDT_MC_TABLE5                  ((uint32_t)(81))<br />+<br />+#define MK_SDT_NO(table, hash_idx) \<br />+    (ZXDH_SDT_##table##_TABLE0 + hash_idx)<br />+<br />+typedef struct dpp_dtb_addr_info_t {<br />+    uint32_t sdt_no;<br />+    uint32_t size;<br />+    uint32_t phy_addr;<br />+    uint32_t vir_addr;<br />+} DPP_DTB_ADDR_INFO_T;<br />+<br />+typedef struct dpp_dev_init_ctrl_t {<br />+    uint32_t vport;<br />+    char  port_name[DPP_PORT_NAME_MAX];<br />+    uint32_t vector;<br />+    uint32_t queue_id;<br />+    uint32_t np_bar_offset;<br />+    uint32_t np_bar_len;<br />+    
uint32_t pcie_vir_addr;<br />+    uint32_t down_phy_addr;<br />+    uint32_t down_vir_addr;<br />+    uint32_t dump_phy_addr;<br />+    uint32_t dump_vir_addr;<br />+    uint32_t dump_sdt_num;<br />+    DPP_DTB_ADDR_INFO_T dump_addr_info[];<br />+} DPP_DEV_INIT_CTRL_T;<br />+<br />+typedef struct dpp_apt_hash_func_res_t {<br />+    uint32_t func_id;<br />+    uint32_t zblk_num;<br />+    uint32_t zblk_bitmap;<br />+    uint32_t ddr_dis;<br />+} DPP_APT_HASH_FUNC_RES_T;<br />+<br />+typedef enum dpp_hash_ddr_width_mode {<br />+    DDR_WIDTH_INVALID = 0,<br />+    DDR_WIDTH_256b,<br />+    DDR_WIDTH_512b,<br />+} DPP_HASH_DDR_WIDTH_MODE;<br />+<br />+typedef struct dpp_apt_hash_bulk_res_t {<br />+    uint32_t func_id;<br />+    uint32_t bulk_id;<br />+    uint32_t zcell_num;<br />+    uint32_t zreg_num;<br />+    uint32_t ddr_baddr;<br />+    uint32_t ddr_item_num;<br />+    DPP_HASH_DDR_WIDTH_MODE ddr_width_mode;<br />+    uint32_t ddr_crc_sel;<br />+    uint32_t ddr_ecc_en;<br />+} DPP_APT_HASH_BULK_RES_T;<br />+<br />+<br />+typedef struct dpp_sdt_tbl_hash_t {<br />+    uint32_t table_type;<br />+    uint32_t hash_id;<br />+    uint32_t hash_table_width;<br />+    uint32_t key_size;<br />+    uint32_t hash_table_id;<br />+    uint32_t learn_en;<br />+    uint32_t keep_alive;<br />+    uint32_t keep_alive_baddr;<br />+    uint32_t rsp_mode;<br />+    uint32_t hash_clutch_en;<br />+} DPP_SDTTBL_HASH_T;<br />+<br />+typedef struct dpp_hash_entry {<br />+    uint8_t *p_key;<br />+    uint8_t *p_rst;<br />+} DPP_HASH_ENTRY;<br />+<br />+<br />+typedef uint32_t (*DPP_APT_HASH_ENTRY_SET_FUNC)(void *Data, DPP_HASH_ENTRY *Entry);<br />+typedef uint32_t (*DPP_APT_HASH_ENTRY_GET_FUNC)(void *Data, DPP_HASH_ENTRY *Entry);<br />+<br />+typedef struct dpp_apt_hash_table_t {<br />+    uint32_t sdtNo;<br />+    uint32_t sdt_partner;<br />+    DPP_SDTTBL_HASH_T hashSdt;<br />+    uint32_t tbl_flag;<br />+    DPP_APT_HASH_ENTRY_SET_FUNC hash_set_func;<br />+    DPP_APT_HASH_ENTRY_GET_FUNC hash_get_func;<br />+} DPP_APT_HASH_TABLE_T;<br />+<br />+typedef struct dpp_apt_hash_res_init_t {<br />+    uint32_t func_num;<br />+    uint32_t bulk_num;<br />+    uint32_t tbl_num;<br />+    DPP_APT_HASH_FUNC_RES_T *func_res;<br />+    DPP_APT_HASH_BULK_RES_T *bulk_res;<br />+    DPP_APT_HASH_TABLE_T  *tbl_res;<br />+} DPP_APT_HASH_RES_INIT_T;<br />+<br />+typedef struct dpp_sdt_tbl_eram_t {<br />+    uint32_t table_type;<br />+    uint32_t eram_mode;<br />+    uint32_t eram_base_addr;<br />+    uint32_t eram_table_depth;<br />+    uint32_t eram_clutch_en;<br />+} DPP_SDTTBL_ERAM_T;<br />+<br />+typedef uint32_t (*DPP_APT_ERAM_SET_FUNC)(void *Data, uint32_t buf[4]);<br />+typedef uint32_t (*DPP_APT_ERAM_GET_FUNC)(void *Data, uint32_t buf[4]);<br />+<br />+typedef struct dpp_apt_eram_table_t {<br />+    uint32_t sdtNo;<br />+    DPP_SDTTBL_ERAM_T ERamSdt;<br />+    uint32_t opr_mode;<br />+    uint32_t rd_mode;<br />+    DPP_APT_ERAM_SET_FUNC  eram_set_func;<br />+    DPP_APT_ERAM_GET_FUNC  eram_get_func;<br />+} DPP_APT_ERAM_TABLE_T;<br />+<br />+<br />+typedef struct dpp_apt_eram_res_init_t {<br />+    uint32_t tbl_num;<br />+    DPP_APT_ERAM_TABLE_T *eram_res;<br />+} DPP_APT_ERAM_RES_INIT_T;<br />+<br />+typedef struct dpp_apt_stat_res_init_t {<br />+    uint32_t eram_baddr;<br />+    uint32_t eram_depth;<br />+    uint32_t ddr_baddr;<br />+    uint32_t ppu_ddr_offset;<br />+} DPP_APT_STAT_RES_INIT_T;<br />+<br />+typedef enum stat_cnt_mode_e {<br />+    STAT_64_MODE  = 0,<br />+    STAT_128_MODE = 1,<br />+    
STAT_MAX_MODE,<br />+} STAT_CNT_MODE_E;<br />+<br />+typedef struct dpp_dtb_user_entry_t {<br />+    uint32_t sdt_no;<br />+    void *p_entry_data;<br />+} DPP_DTB_USER_ENTRY_T;<br />+<br />+<br />+int dpp_dtb_hash_offline_delete(uint32_t dev_id, uint32_t queue_id,<br />+                        uint32_t sdt_no, uint32_t flush_mode);<br />+int dpp_dtb_hash_online_delete(uint32_t dev_id, uint32_t queue_id, uint32_t sdt_no);<br />+int dpp_apt_hash_res_get(uint32_t type, DPP_APT_HASH_RES_INIT_T *HashResInit);<br />+int dpp_apt_eram_res_get(uint32_t type, DPP_APT_ERAM_RES_INIT_T *EramResInit);<br />+<br />+int dpp_apt_stat_res_get(uint32_t type, DPP_APT_STAT_RES_INIT_T *StatResInit);<br />+int dpp_apt_hash_global_res_init(uint32_t dev_id);<br />+int dpp_apt_hash_func_res_init(uint32_t dev_id, uint32_t func_num,<br />+                               DPP_APT_HASH_FUNC_RES_T *HashFuncRes);<br />+int dpp_apt_hash_bulk_res_init(uint32_t dev_id, uint32_t bulk_num,<br />+                               DPP_APT_HASH_BULK_RES_T *BulkRes);<br />+int dpp_apt_hash_tbl_res_init(uint32_t dev_id, uint32_t tbl_num,<br />+                               DPP_APT_HASH_TABLE_T *HashTbl);<br />+int dpp_apt_eram_res_init(uint32_t dev_id, uint32_t tbl_num,<br />+                          DPP_APT_ERAM_TABLE_T *EramTbl);<br />+int dpp_stat_ppu_eram_baddr_set(uint32_t dev_id, uint32_t ppu_eram_baddr);<br />+int dpp_stat_ppu_eram_depth_set(uint32_t dev_id, uint32_t ppu_eram_depth);<br />+int dpp_se_cmmu_smmu1_cfg_set(uint32_t dev_id, uint32_t base_addr);<br />+int dpp_stat_ppu_ddr_baddr_set(uint32_t dev_id, uint32_t ppu_ddr_baddr);<br />+<br />+int dpp_host_np_init(uint32_t dev_id, DPP_DEV_INIT_CTRL_T *p_dev_init_ctrl);<br />+int dpp_np_online_uninstall(uint32_t dev_id,<br />+                            char *port_name,<br />+                            uint32_t queue_id);<br />+<br />+int dpp_dtb_stat_ppu_cnt_get(uint32_t dev_id,<br />+                            uint32_t queue_id,<br />+                            STAT_CNT_MODE_E rd_mode,<br />+                            uint32_t index,<br />+                            uint32_t *p_data);<br />+<br />+int dpp_dtb_entry_get(uint32_t dev_id,<br />+                    uint32_t queue_id,<br />+                    DPP_DTB_USER_ENTRY_T *GetEntry,<br />+                    uint32_t srh_mode);<br />+int dpp_dtb_table_entry_write(uint32_t dev_id,<br />+                            uint32_t queue_id,<br />+                            uint32_t entryNum,<br />+                            DPP_DTB_USER_ENTRY_T *DownEntrys);<br />+int dpp_dtb_table_entry_delete(uint32_t dev_id,<br />+                            uint32_t queue_id,<br />+                            uint32_t entryNum,<br />+                            DPP_DTB_USER_ENTRY_T *DeleteEntrys);<br />diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c<br />new file mode 100644<br />index 0000000000..b32c2e7955<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_pci.c<br />@@ -0,0 +1,462 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#include <stdint.h> <br />+#include <unistd.h> <br />+<br />+#ifdef RTE_EXEC_ENV_LINUX<br />+ #include <dirent.h> <br />+ #include <fcntl.h> <br />+#endif<br />+<br />+#include <rte_io.h> <br />+#include <rte_bus.h> <br />+#include <rte_common.h> <br />+<br />+#include "zxdh_pci.h" <br />+#include "zxdh_logs.h" <br />+#include "zxdh_queue.h" <br />+<br />+/*<br />+ * Following macros are derived from 
linux/pci_regs.h, however,<br />+ * we can't simply include that header here, as there is no such<br />+ * file on non-Linux platforms.<br />+ */<br />+#define PCI_CAPABILITY_LIST             0x34<br />+#define PCI_CAP_ID_VNDR                 0x09<br />+#define PCI_CAP_ID_MSIX                 0x11<br />+<br />+/*<br />+ * The remaining space is defined by each driver as the per-driver<br />+ * configuration space.<br />+ */<br />+#define ZXDH_PCI_CONFIG(hw)  (((hw)->use_msix == ZXDH_MSIX_ENABLED) ? 24 : 20)<br />+#define PCI_MSIX_ENABLE 0x8000<br />+<br />+static inline int32_t check_vq_phys_addr_ok(struct virtqueue *vq)<br />+{<br />+    /**<br />+     * Virtio PCI device ZXDH_PCI_QUEUE_PF register is 32bit,<br />+     * and only accepts 32 bit page frame number.<br />+     * Check if the allocated physical memory exceeds 16TB.<br />+     */<br />+    if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> (ZXDH_PCI_QUEUE_ADDR_SHIFT + 32)) {<br />+        PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");<br />+        return 0;<br />+    }<br />+    return 1;<br />+}<br />+static inline void io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)<br />+{<br />+    rte_write32(val & ((1ULL << 32) - 1), lo);<br />+    rte_write32(val >> 32, hi);<br />+}<br />+<br />+static void modern_read_dev_config(struct zxdh_hw *hw,<br />+                                   size_t offset,<br />+                                   void *dst,<br />+                                   int32_t length)<br />+{<br />+    int32_t i       = 0;<br />+    uint8_t *p      = NULL;<br />+    uint8_t old_gen = 0;<br />+    uint8_t new_gen = 0;<br />+<br />+    do {<br />+        old_gen = rte_read8(&hw->common_cfg->config_generation);<br />+<br />+        p = dst;<br />+        for (i = 0;  i < length; i++)<br />+            *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);<br />+<br />+        new_gen = rte_read8(&hw->common_cfg->config_generation);<br />+    } while (old_gen != new_gen);<br />+}<br />+<br />+static void modern_write_dev_config(struct zxdh_hw *hw,<br />+                                    size_t offset,<br />+                                    const void *src,<br />+                                    int32_t length)<br />+{<br />+    int32_t i = 0;<br />+    const uint8_t *p = src;<br />+<br />+    for (i = 0;  i < length; i++)<br />+        rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));<br />+}<br />+<br />+static uint64_t modern_get_features(struct zxdh_hw *hw)<br />+{<br />+    uint32_t features_lo = 0;<br />+    uint32_t features_hi = 0;<br />+<br />+    rte_write32(0, &hw->common_cfg->device_feature_select);<br />+    features_lo = rte_read32(&hw->common_cfg->device_feature);<br />+<br />+    rte_write32(1, &hw->common_cfg->device_feature_select);<br />+    features_hi = rte_read32(&hw->common_cfg->device_feature);<br />+<br />+    return ((uint64_t)features_hi << 32) | features_lo;<br />+}<br />+<br />+static void modern_set_features(struct zxdh_hw *hw, uint64_t features)<br />+{<br />+    rte_write32(0, &hw->common_cfg->guest_feature_select);<br />+    rte_write32(features & ((1ULL << 32) - 1), &hw->common_cfg->guest_feature);<br />+    rte_write32(1, &hw->common_cfg->guest_feature_select);<br />+    rte_write32(features >> 32, &hw->common_cfg->guest_feature);<br />+}<br />+<br />+static uint8_t modern_get_status(struct zxdh_hw *hw)<br />+{<br />+    return rte_read8(&hw->common_cfg->device_status);<br />+}<br />+<br />+static void modern_set_status(struct zxdh_hw 
*hw, uint8_t status)<br />+{<br />+    rte_write8(status, &hw->common_cfg->device_status);<br />+}<br />+<br />+static uint8_t modern_get_isr(struct zxdh_hw *hw)<br />+{<br />+    return rte_read8(hw->isr);<br />+}<br />+<br />+static uint16_t modern_set_config_irq(struct zxdh_hw *hw, uint16_t vec)<br />+{<br />+    rte_write16(vec, &hw->common_cfg->msix_config);<br />+    return rte_read16(&hw->common_cfg->msix_config);<br />+}<br />+<br />+static uint16_t modern_set_queue_irq(struct zxdh_hw *hw, struct virtqueue *vq, uint16_t vec)<br />+{<br />+    rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);<br />+    rte_write16(vec, &hw->common_cfg->queue_msix_vector);<br />+    return rte_read16(&hw->common_cfg->queue_msix_vector);<br />+}<br />+<br />+static uint16_t modern_get_queue_num(struct zxdh_hw *hw, uint16_t queue_id)<br />+{<br />+    rte_write16(queue_id, &hw->common_cfg->queue_select);<br />+    return rte_read16(&hw->common_cfg->queue_size);<br />+}<br />+<br />+static void modern_set_queue_num(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size)<br />+{<br />+    rte_write16(queue_id, &hw->common_cfg->queue_select);<br />+    rte_write16(vq_size, &hw->common_cfg->queue_size);<br />+}<br />+<br />+static int32_t modern_setup_queue(struct zxdh_hw *hw, struct virtqueue *vq)<br />+{<br />+    uint64_t desc_addr  = 0;<br />+    uint64_t avail_addr = 0;<br />+    uint64_t used_addr  = 0;<br />+    uint16_t notify_off = 0;<br />+<br />+    if (!check_vq_phys_addr_ok(vq))<br />+        return -1;<br />+<br />+    desc_addr = vq->vq_ring_mem;<br />+    avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);<br />+    if (vtpci_packed_queue(vq->hw)) {<br />+        used_addr = RTE_ALIGN_CEIL((avail_addr + sizeof(struct vring_packed_desc_event)),<br />+                            ZXDH_PCI_VRING_ALIGN);<br />+    } else {<br />+        used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,<br />+                        ring[vq->vq_nentries]), ZXDH_PCI_VRING_ALIGN);<br />+    }<br />+<br />+    rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);<br />+<br />+    io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,<br />+                       &hw->common_cfg->queue_desc_hi);<br />+    io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,<br />+                       &hw->common_cfg->queue_avail_hi);<br />+    io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,<br />+                       &hw->common_cfg->queue_used_hi);<br />+<br />+    notify_off = rte_read16(&hw->common_cfg->queue_notify_off); /* default 0 */<br />+    notify_off = 0;<br />+    vq->notify_addr = (void *)((uint8_t *)hw->notify_base +<br />+            notify_off * hw->notify_off_multiplier);<br />+<br />+    rte_write16(1, &hw->common_cfg->queue_enable);<br />+<br />+    return 0;<br />+}<br />+<br />+static void modern_del_queue(struct zxdh_hw *hw, struct virtqueue *vq)<br />+{<br />+    rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);<br />+<br />+    io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,<br />+                       &hw->common_cfg->queue_desc_hi);<br />+    io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,<br />+                       &hw->common_cfg->queue_avail_hi);<br />+    io_write64_twopart(0, &hw->common_cfg->queue_used_lo,<br />+                       &hw->common_cfg->queue_used_hi);<br />+<br />+    rte_write16(0, &hw->common_cfg->queue_enable);<br />+}<br />+<br />+static void 
modern_notify_queue(struct zxdh_hw *hw, struct virtqueue *vq)<br />+{<br />+    uint32_t notify_data = 0;<br />+<br />+    if (!vtpci_with_feature(hw, ZXDH_F_NOTIFICATION_DATA)) {<br />+        rte_write16(vq->vq_queue_index, vq->notify_addr);<br />+        return;<br />+    }<br />+<br />+    if (vtpci_with_feature(hw, ZXDH_F_RING_PACKED)) {<br />+        /*<br />+         * Bit[0:15]: vq queue index<br />+         * Bit[16:30]: avail index<br />+         * Bit[31]: avail wrap counter<br />+         */<br />+        notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags & <br />+                        VRING_PACKED_DESC_F_AVAIL)) << 31) |<br />+                        ((uint32_t)vq->vq_avail_idx << 16) |<br />+                        vq->vq_queue_index;<br />+    } else {<br />+        /*<br />+         * Bit[0:15]: vq queue index<br />+         * Bit[16:31]: avail index<br />+         */<br />+        notify_data = ((uint32_t)vq->vq_avail_idx << 16) | vq->vq_queue_index;<br />+    }<br />+    PMD_DRV_LOG(DEBUG, "queue:%d notify_data 0x%x notify_addr %p",<br />+                 vq->vq_queue_index, notify_data, vq->notify_addr);<br />+    rte_write32(notify_data, vq->notify_addr);<br />+}<br />+<br />+const struct zxdh_pci_ops zxdh_modern_ops = {<br />+    .read_dev_cfg   = modern_read_dev_config,<br />+    .write_dev_cfg  = modern_write_dev_config,<br />+    .get_status     = modern_get_status,<br />+    .set_status     = modern_set_status,<br />+    .get_features   = modern_get_features,<br />+    .set_features   = modern_set_features,<br />+    .get_isr        = modern_get_isr,<br />+    .set_config_irq = modern_set_config_irq,<br />+    .set_queue_irq  = modern_set_queue_irq,<br />+    .get_queue_num  = modern_get_queue_num,<br />+    .set_queue_num  = modern_set_queue_num,<br />+    .setup_queue    = modern_setup_queue,<br />+    .del_queue      = modern_del_queue,<br />+    .notify_queue   = modern_notify_queue,<br />+};<br />+<br />+void zxdh_vtpci_read_dev_config(struct zxdh_hw *hw, size_t offset, void *dst, int32_t length)<br />+{<br />+    VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);<br />+}<br />+void zxdh_vtpci_write_dev_config(struct zxdh_hw *hw, size_t offset, const void *src, int32_t length)<br />+{<br />+    VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);<br />+}<br />+<br />+uint64_t zxdh_vtpci_get_features(struct zxdh_hw *hw)<br />+{<br />+    return VTPCI_OPS(hw)->get_features(hw);<br />+}<br />+<br />+void zxdh_vtpci_reset(struct zxdh_hw *hw)<br />+{<br />+    PMD_INIT_LOG(INFO, "port %u device start reset, just wait...", hw->port_id);<br />+    uint32_t retry = 0;<br />+<br />+    VTPCI_OPS(hw)->set_status(hw, ZXDH_CONFIG_STATUS_RESET);<br />+    /* Flush status write and wait device ready max 3 seconds. 
*/<br />+    while (VTPCI_OPS(hw)->get_status(hw) != ZXDH_CONFIG_STATUS_RESET && retry < 3000) {<br />+        ++retry;<br />+        usleep(1000L);<br />+    }<br />+    PMD_INIT_LOG(INFO, "port %u device reset %u ms done", hw->port_id, retry);<br />+}<br />+<br />+void zxdh_vtpci_reinit_complete(struct zxdh_hw *hw)<br />+{<br />+    zxdh_vtpci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER_OK);<br />+}<br />+<br />+void zxdh_vtpci_set_status(struct zxdh_hw *hw, uint8_t status)<br />+{<br />+    if (status != ZXDH_CONFIG_STATUS_RESET)<br />+        status |= VTPCI_OPS(hw)->get_status(hw);<br />+<br />+    VTPCI_OPS(hw)->set_status(hw, status);<br />+}<br />+<br />+uint8_t zxdh_vtpci_get_status(struct zxdh_hw *hw)<br />+{<br />+    return VTPCI_OPS(hw)->get_status(hw);<br />+}<br />+<br />+uint8_t zxdh_vtpci_isr(struct zxdh_hw *hw)<br />+{<br />+    return VTPCI_OPS(hw)->get_isr(hw);<br />+}<br />+<br />+static void *get_cfg_addr(struct rte_pci_device *dev, struct zxdh_pci_cap *cap)<br />+{<br />+    uint8_t  bar    = cap->bar;<br />+    uint32_t length = cap->length;<br />+    uint32_t offset = cap->offset;<br />+<br />+    if (bar >= PCI_MAX_RESOURCE) {<br />+        PMD_INIT_LOG(ERR, "invalid bar: %u", bar);<br />+        return NULL;<br />+    }<br />+    if (offset + length < offset) {<br />+        PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows", offset, length);<br />+        return NULL;<br />+    }<br />+    if (offset + length > dev->mem_resource[bar].len) {<br />+        PMD_INIT_LOG(ERR, "invalid cap: overflows bar space: %u > %" PRIu64,<br />+            offset + length, dev->mem_resource[bar].len);<br />+        return NULL;<br />+    }<br />+    uint8_t *base = dev->mem_resource[bar].addr;<br />+<br />+    if (base == NULL) {<br />+        PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);<br />+        return NULL;<br />+    }<br />+    return base + offset;<br />+}<br />+<br />+int32_t zxdh_read_pci_caps(struct rte_pci_device *dev, struct zxdh_hw *hw)<br />+{<br />+    if (dev->mem_resource[0].addr == NULL) {<br />+        PMD_INIT_LOG(ERR, "bar0 base addr is NULL");<br />+        return -1;<br />+    }<br />+    uint8_t pos = 0;<br />+    int32_t ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);<br />+<br />+    if (ret != 1) {<br />+        PMD_INIT_LOG(DEBUG, "failed to read pci capability list, ret %d", ret);<br />+        return -1;<br />+    }<br />+    while (pos) {<br />+        struct zxdh_pci_cap cap;<br />+<br />+        ret = rte_pci_read_config(dev, &cap, 2, pos);<br />+        if (ret != 2) {<br />+            PMD_INIT_LOG(DEBUG, "failed to read pci cap at pos: %x ret %d", pos, ret);<br />+            break;<br />+        }<br />+        if (cap.cap_vndr == PCI_CAP_ID_MSIX) {<br />+            /**<br />+             * Transitional devices would also have this capability,<br />+             * that's why we also check if msix is enabled.<br />+             * 1st byte is cap ID; 2nd byte is the position of next cap;<br />+             * next two bytes are the flags.<br />+             */<br />+            uint16_t flags = 0;<br />+<br />+            ret = rte_pci_read_config(dev, &flags, sizeof(flags), pos + 2);<br />+            if (ret != sizeof(flags)) {<br />+                PMD_INIT_LOG(ERR, "failed to read pci cap at pos: %x ret %d",<br />+                    pos + 2, ret);<br />+                break;<br />+            }<br />+            hw->use_msix = (flags & PCI_MSIX_ENABLE) ?<br />+                    ZXDH_MSIX_ENABLED : ZXDH_MSIX_DISABLED;<br />+       
 }<br />+        if (cap.cap_vndr != PCI_CAP_ID_VNDR) {<br />+            PMD_INIT_LOG(DEBUG, "[%2x] skipping non VNDR cap id: %02x",<br />+                pos, cap.cap_vndr);<br />+            goto next;<br />+        }<br />+        ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);<br />+        if (ret != sizeof(cap)) {<br />+            PMD_INIT_LOG(ERR, "failed to read pci cap at pos: %x ret %d", pos, ret);<br />+            break;<br />+        }<br />+        PMD_INIT_LOG(DEBUG, "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",<br />+            pos, cap.cfg_type, cap.bar, cap.offset, cap.length);<br />+        switch (cap.cfg_type) {<br />+        case ZXDH_PCI_CAP_COMMON_CFG:<br />+            hw->common_cfg = get_cfg_addr(dev, &cap);<br />+            break;<br />+        case ZXDH_PCI_CAP_NOTIFY_CFG: {<br />+            ret = rte_pci_read_config(dev, &hw->notify_off_multiplier,<br />+                        4, pos + sizeof(cap));<br />+            if (ret != 4)<br />+                PMD_INIT_LOG(ERR,<br />+                    "failed to read notify_off_multiplier, ret %d", ret);<br />+            else<br />+                hw->notify_base = get_cfg_addr(dev, &cap);<br />+            break;<br />+        }<br />+        case ZXDH_PCI_CAP_DEVICE_CFG:<br />+            hw->dev_cfg = get_cfg_addr(dev, &cap);<br />+            break;<br />+        case ZXDH_PCI_CAP_ISR_CFG:<br />+            hw->isr = get_cfg_addr(dev, &cap);<br />+            break;<br />+        case ZXDH_PCI_CAP_PCI_CFG: {<br />+            hw->pcie_id = *(uint16_t *)&cap.padding[1];<br />+            PMD_INIT_LOG(DEBUG, "get pcie id 0x%x", hw->pcie_id);<br />+            uint16_t pcie_id = hw->pcie_id;<br />+<br />+            if ((pcie_id >> 11) & 0x1) /* PF */ {<br />+                PMD_INIT_LOG(DEBUG, "EP %u PF %u",<br />+                    pcie_id >> 12, (pcie_id >> 8) & 0x7);<br />+            } else { /* VF */<br />+                PMD_INIT_LOG(DEBUG, "EP %u PF %u VF %u",<br />+                    pcie_id >> 12, (pcie_id >> 8) & 0x7, pcie_id & 0xff);<br />+            }<br />+            break;<br />+        }<br />+        }<br />+next:<br />+    pos = cap.cap_next;<br />+    }<br />+    if (hw->common_cfg == NULL || hw->notify_base == NULL ||<br />+        hw->dev_cfg == NULL || hw->isr == NULL) {<br />+        PMD_INIT_LOG(ERR, "no modern pci device found.");<br />+        return -1;<br />+    }<br />+    return 0;<br />+}<br />+<br />+enum zxdh_msix_status zxdh_vtpci_msix_detect(struct rte_pci_device *dev)<br />+{<br />+    uint8_t pos = 0;<br />+    int32_t ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);<br />+<br />+    if (ret != 1) {<br />+        PMD_INIT_LOG(ERR, "failed to read pci capability list, ret %d", ret);<br />+        return ZXDH_MSIX_NONE;<br />+    }<br />+    while (pos) {<br />+        uint8_t cap[2] = {0};<br />+<br />+        ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);<br />+        if (ret != sizeof(cap)) {<br />+            PMD_INIT_LOG(ERR, "failed to read pci cap at pos: %x ret %d", pos, ret);<br />+            break;<br />+        }<br />+        if (cap[0] == PCI_CAP_ID_MSIX) {<br />+            uint16_t flags = 0;<br />+<br />+            ret = rte_pci_read_config(dev, &flags, sizeof(flags), pos + sizeof(cap));<br />+            if (ret != sizeof(flags)) {<br />+                PMD_INIT_LOG(ERR,<br />+                    "failed to read pci cap at pos: %x ret %d", pos + 2, ret);<br />+                break;<br />+            }<br />+      
     if (flags & PCI_MSIX_ENABLE)<br />+                return ZXDH_MSIX_ENABLED;<br />+            else<br />+                return ZXDH_MSIX_DISABLED;<br />+        }<br />+        pos = cap[1];<br />+    }<br />+    return ZXDH_MSIX_NONE;<br />+}<br />diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h<br />new file mode 100644<br />index 0000000000..d6f3c552ad<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_pci.h<br />@@ -0,0 +1,259 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2024 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZXDH_PCI_H_<br />+#define _ZXDH_PCI_H_<br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+#include <stdint.h> <br />+#include <stdbool.h> <br />+#include <rte_pci.h> <br />+#include <rte_bus_pci.h> <br />+#include <bus_pci_driver.h> <br />+#include <ethdev_driver.h> <br />+<br />+#include "zxdh_ethdev.h" <br />+<br />+/* The bit of the ISR which indicates a device has an interrupt. */<br />+#define ZXDH_PCI_ISR_INTR    0x1<br />+/* The bit of the ISR which indicates a device configuration change. */<br />+#define ZXDH_PCI_ISR_CONFIG  0x2<br />+/* Vector value used to disable MSI for queue. */<br />+#define ZXDH_MSI_NO_VECTOR   0x7F<br />+<br />+/* Status byte for guest to report progress. */<br />+#define ZXDH_CONFIG_STATUS_RESET           0x00<br />+#define ZXDH_CONFIG_STATUS_ACK             0x01<br />+#define ZXDH_CONFIG_STATUS_DRIVER          0x02<br />+#define ZXDH_CONFIG_STATUS_DRIVER_OK       0x04<br />+#define ZXDH_CONFIG_STATUS_FEATURES_OK     0x08<br />+#define ZXDH_CONFIG_STATUS_DEV_NEED_RESET  0x40<br />+#define ZXDH_CONFIG_STATUS_FAILED          0x80<br />+<br />+/* The feature bitmap for net */<br />+#define ZXDH_NET_F_CSUM              0   /* Host handles pkts w/ partial csum */<br />+#define ZXDH_NET_F_GUEST_CSUM        1   /* Guest handles pkts w/ partial csum */<br />+#define ZXDH_NET_F_MTU               3   /* Initial MTU advice. */<br />+#define ZXDH_NET_F_MAC               5   /* Host has given MAC address. */<br />+#define ZXDH_NET_F_GUEST_TSO4        7   /* Guest can handle TSOv4 in. */<br />+#define ZXDH_NET_F_GUEST_TSO6        8   /* Guest can handle TSOv6 in. */<br />+#define ZXDH_NET_F_GUEST_ECN         9   /* Guest can handle TSO[6] w/ ECN in. */<br />+#define ZXDH_NET_F_GUEST_UFO         10  /* Guest can handle UFO in. */<br />+#define ZXDH_NET_F_HOST_TSO4         11  /* Host can handle TSOv4 in. */<br />+#define ZXDH_NET_F_HOST_TSO6         12  /* Host can handle TSOv6 in. */<br />+#define ZXDH_NET_F_HOST_ECN          13  /* Host can handle TSO[6] w/ ECN in. */<br />+#define ZXDH_NET_F_HOST_UFO          14  /* Host can handle UFO in. */<br />+#define ZXDH_NET_F_MRG_RXBUF         15  /* Host can merge receive buffers. 
*/<br />+#define ZXDH_NET_F_STATUS            16  /* zxdh_net_config.status available */<br />+#define ZXDH_NET_F_CTRL_VQ           17  /* Control channel available */<br />+#define ZXDH_NET_F_CTRL_RX           18  /* Control channel RX mode support */<br />+#define ZXDH_NET_F_CTRL_VLAN         19  /* Control channel VLAN filtering */<br />+#define ZXDH_NET_F_CTRL_RX_EXTRA     20  /* Extra RX mode control support */<br />+#define ZXDH_NET_F_GUEST_ANNOUNCE    21  /* Guest can announce device on the network */<br />+#define ZXDH_NET_F_MQ                22  /* Device supports Receive Flow Steering */<br />+#define ZXDH_NET_F_CTRL_MAC_ADDR     23  /* Set MAC address */<br />+/* Do we get callbacks when the ring is completely used, even if we've suppressed them? */<br />+#define ZXDH_F_NOTIFY_ON_EMPTY       24<br />+#define ZXDH_F_ANY_LAYOUT            27 /* Can the device handle any descriptor layout? */<br />+#define VIRTIO_RING_F_INDIRECT_DESC  28 /* We support indirect buffer descriptors */<br />+#define ZXDH_F_VERSION_1             32<br />+#define ZXDH_F_IOMMU_PLATFORM        33<br />+#define ZXDH_F_RING_PACKED           34<br />+/* Inorder feature indicates that all buffers are used by the device<br />+ * in the same order in which they have been made available.<br />+ */<br />+#define ZXDH_F_IN_ORDER              35<br />+/** This feature indicates that memory accesses by the driver<br />+ * and the device are ordered in a way described by the platform.<br />+ */<br />+#define ZXDH_F_ORDER_PLATFORM        36<br />+/**<br />+ * This feature indicates that the driver passes extra data<br />+ * (besides identifying the virtqueue) in its device notifications.<br />+ */<br />+#define ZXDH_F_NOTIFICATION_DATA     38<br />+#define ZXDH_NET_F_SPEED_DUPLEX      63 /* Device set linkspeed and duplex */<br />+<br />+/* The Guest publishes the used index for which it expects an interrupt<br />+ * at the end of the avail ring. Host should ignore the avail->flags field.<br />+ */<br />+/* The Host publishes the avail index for which it expects a kick<br />+ * at the end of the used ring. Guest should ignore the used->flags field.<br />+ */<br />+#define ZXDH_RING_F_EVENT_IDX                       29<br />+<br />+/* Maximum number of virtqueues per device. */<br />+#define ZXDH_MAX_VIRTQUEUE_PAIRS  8<br />+#define ZXDH_MAX_VIRTQUEUES       (ZXDH_MAX_VIRTQUEUE_PAIRS * 2 + 1)<br />+<br />+<br />+#define ZXDH_PCI_CAP_COMMON_CFG  1 /* Common configuration */<br />+#define ZXDH_PCI_CAP_NOTIFY_CFG  2 /* Notifications */<br />+#define ZXDH_PCI_CAP_ISR_CFG     3 /* ISR Status */<br />+#define ZXDH_PCI_CAP_DEVICE_CFG  4 /* Device specific configuration */<br />+#define ZXDH_PCI_CAP_PCI_CFG     5 /* PCI configuration access */<br />+<br />+#define VTPCI_OPS(hw)  (zxdh_hw_internal[(hw)->port_id].vtpci_ops)<br />+#define VTPCI_IO(hw)   (&zxdh_hw_internal[(hw)->port_id].io)<br />+<br />+/*<br />+ * How many bits to shift physical queue address written to QUEUE_PFN.<br />+ * 12 is historical, and due to x86 page size.<br />+ */<br />+#define ZXDH_PCI_QUEUE_ADDR_SHIFT                   12<br />+<br />+/* The alignment to use between consumer and producer parts of vring. 
*/<br />+#define ZXDH_PCI_VRING_ALIGN                        4096<br />+<br />+/******BAR0  SPACE********************************************************************/<br />+#define ZXDH_VQMREG_OFFSET    0x0000<br />+#define ZXDH_FWCAP_OFFSET     0x1000<br />+#define ZXDH_CTRLCH_OFFSET    0x2000<br />+#define ZXDH_MAC_OFFSET       0x24000<br />+#define ZXDH_SPINLOCK_OFFSET  0x4000<br />+#define ZXDH_FWSHRD_OFFSET    0x5000<br />+#define ZXDH_QUERES_SHARE_BASE   (ZXDH_FWSHRD_OFFSET)<br />+#define ZXDH_QUERES_SHARE_SIZE   512<br />+<br />+enum zxdh_msix_status {<br />+    ZXDH_MSIX_NONE     = 0,<br />+    ZXDH_MSIX_DISABLED = 1,<br />+    ZXDH_MSIX_ENABLED  = 2<br />+};<br />+<br />+static inline int32_t vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit)<br />+{<br />+    return (hw->guest_features & (1ULL << bit)) != 0;<br />+}<br />+<br />+static inline int32_t vtpci_packed_queue(struct zxdh_hw *hw)<br />+{<br />+    return vtpci_with_feature(hw, ZXDH_F_RING_PACKED);<br />+}<br />+<br />+/*<br />+ * While zxdh_hw is stored in shared memory, this structure stores<br />+ * some information that may vary locally in the multi-process model.<br />+ * For example, the vtpci_ops pointer.<br />+ */<br />+struct zxdh_hw_internal {<br />+    const struct zxdh_pci_ops *vtpci_ops;<br />+    struct rte_pci_ioport io;<br />+};<br />+<br />+/* Fields in ZXDH_PCI_CAP_COMMON_CFG: */<br />+struct zxdh_pci_common_cfg {<br />+    /* About the whole device. */<br />+    uint32_t device_feature_select; /* read-write */<br />+    uint32_t device_feature;    /* read-only */<br />+    uint32_t guest_feature_select;  /* read-write */<br />+    uint32_t guest_feature;     /* read-write */<br />+    uint16_t msix_config;       /* read-write */<br />+    uint16_t num_queues;        /* read-only */<br />+    uint8_t  device_status;     /* read-write */<br />+    uint8_t  config_generation; /* read-only */<br />+<br />+    /* About a specific virtqueue. */<br />+    uint16_t queue_select;      /* read-write */<br />+    uint16_t queue_size;        /* read-write, power of 2. */<br />+    uint16_t queue_msix_vector; /* read-write */<br />+    uint16_t queue_enable;      /* read-write */<br />+    uint16_t queue_notify_off;  /* read-only */<br />+    uint32_t queue_desc_lo;     /* read-write */<br />+    uint32_t queue_desc_hi;     /* read-write */<br />+    uint32_t queue_avail_lo;    /* read-write */<br />+    uint32_t queue_avail_hi;    /* read-write */<br />+    uint32_t queue_used_lo;     /* read-write */<br />+    uint32_t queue_used_hi;     /* read-write */<br />+};<br />+<br />+/*<br />+ * This structure is just a reference to read<br />+ * net device specific config space; it is just a shadow structure.<br />+ *<br />+ */<br />+struct zxdh_net_config {<br />+    /* The config defining mac address (if ZXDH_NET_F_MAC) */<br />+    uint8_t    mac[RTE_ETHER_ADDR_LEN];<br />+    /* See ZXDH_NET_F_STATUS and ZXDH_NET_S_* above */<br />+    uint16_t   status;<br />+    uint16_t   max_virtqueue_pairs;<br />+    uint16_t   mtu;<br />+    /*<br />+     * speed, in units of 1Mb. 
All values 0 to INT_MAX are legal.<br />+     * Any other value stands for unknown.<br />+     */<br />+    uint32_t   speed;<br />+    /* 0x00 - half duplex<br />+     * 0x01 - full duplex<br />+     * Any other value stands for unknown.<br />+     */<br />+    uint8_t    duplex;<br />+} __rte_packed;<br />+<br />+/* This is the PCI capability header: */<br />+struct zxdh_pci_cap {<br />+    uint8_t  cap_vndr;   /* Generic PCI field: PCI_CAP_ID_VNDR */<br />+    uint8_t  cap_next;   /* Generic PCI field: next ptr. */<br />+    uint8_t  cap_len;    /* Generic PCI field: capability length */<br />+    uint8_t  cfg_type;   /* Identifies the structure. */<br />+    uint8_t  bar;        /* Where to find it. */<br />+    uint8_t  padding[3]; /* Pad to full dword. */<br />+    uint32_t offset;     /* Offset within bar. */<br />+    uint32_t length;     /* Length of the structure, in bytes. */<br />+};<br />+struct zxdh_pci_notify_cap {<br />+    struct zxdh_pci_cap cap;<br />+    uint32_t notify_off_multiplier;  /* Multiplier for queue_notify_off. */<br />+};<br />+<br />+struct zxdh_pci_ops {<br />+    void     (*read_dev_cfg)(struct zxdh_hw *hw, size_t offset, void *dst, int32_t len);<br />+    void     (*write_dev_cfg)(struct zxdh_hw *hw, size_t offset, const void *src, int32_t len);<br />+<br />+    uint8_t  (*get_status)(struct zxdh_hw *hw);<br />+    void     (*set_status)(struct zxdh_hw *hw, uint8_t status);<br />+<br />+    uint64_t (*get_features)(struct zxdh_hw *hw);<br />+    void     (*set_features)(struct zxdh_hw *hw, uint64_t features);<br />+<br />+    uint8_t  (*get_isr)(struct zxdh_hw *hw);<br />+<br />+    uint16_t (*set_config_irq)(struct zxdh_hw *hw, uint16_t vec);<br />+<br />+    uint16_t (*set_queue_irq)(struct zxdh_hw *hw, struct virtqueue *vq, uint16_t vec);<br />+<br />+    uint16_t (*get_queue_num)(struct zxdh_hw *hw, uint16_t queue_id);<br />+    void     (*set_queue_num)(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size);<br />+<br />+    int32_t  (*setup_queue)(struct zxdh_hw *hw, struct virtqueue *vq);<br />+    void     (*del_queue)(struct zxdh_hw *hw, struct virtqueue *vq);<br />+    void     (*notify_queue)(struct zxdh_hw *hw, struct virtqueue *vq);<br />+};<br />+<br />+extern struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];<br />+extern const struct zxdh_pci_ops zxdh_modern_ops;<br />+<br />+void zxdh_vtpci_reset(struct zxdh_hw *hw);<br />+void zxdh_vtpci_reinit_complete(struct zxdh_hw *hw);<br />+uint8_t zxdh_vtpci_get_status(struct zxdh_hw *hw);<br />+void zxdh_vtpci_set_status(struct zxdh_hw *hw, uint8_t status);<br />+uint64_t zxdh_vtpci_get_features(struct zxdh_hw *hw);<br />+void zxdh_vtpci_write_dev_config(struct zxdh_hw *hw, size_t offset,<br />+        const void *src, int32_t length);<br />+void zxdh_vtpci_read_dev_config(struct zxdh_hw *hw, size_t offset,<br />+        void *dst, int32_t length);<br />+uint8_t zxdh_vtpci_isr(struct zxdh_hw *hw);<br />+enum zxdh_msix_status zxdh_vtpci_msix_detect(struct rte_pci_device *dev);<br />+<br />+int32_t zxdh_read_pci_caps(struct rte_pci_device *dev, struct zxdh_hw *hw);<br />+<br />+#ifdef __cplusplus<br />+}<br />+#endif<br />+<br />+#endif /* _ZXDH_PCI_H_ */<br />diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c<br />new file mode 100644<br />index 0000000000..b6dd487a9d<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_queue.c<br />@@ -0,0 +1,138 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />
+#include <stdint.h> <br />+<br />+#include <rte_mbuf.h> <br />+<br />+#include "zxdh_queue.h" <br />+#include "zxdh_logs.h" <br />+#include "zxdh_pci.h" <br />+#include "zxdh_common.h" <br />+<br />+/**<br />+ * Two types of mbuf to be cleaned:<br />+ * 1) mbuf that has been consumed by backend but not used by virtio.<br />+ * 2) mbuf that hasn't been consumed by backend.<br />+ */<br />+struct rte_mbuf *zxdh_virtqueue_detach_unused(struct virtqueue *vq)<br />+{<br />+    struct rte_mbuf *cookie = NULL;<br />+    int32_t          idx    = 0;<br />+<br />+    if (vq == NULL)<br />+        return NULL;<br />+<br />+    for (idx = 0; idx < vq->vq_nentries; idx++) {<br />+        cookie = vq->vq_descx[idx].cookie;<br />+        if (cookie != NULL) {<br />+            vq->vq_descx[idx].cookie = NULL;<br />+            return cookie;<br />+        }<br />+    }<br />+<br />+    return NULL;<br />+}<br />+<br />+static int32_t zxdh_release_channel(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint16_t nr_vq = hw->queue_num;<br />+    uint32_t var  = 0;<br />+    uint32_t addr = 0;<br />+    uint32_t widx = 0;<br />+    uint32_t bidx = 0;<br />+    uint16_t pch  = 0;<br />+    uint16_t lch  = 0;<br />+    uint16_t timeout = 0;<br />+<br />+    while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {<br />+        if (zxdh_acquire_lock(hw) != 0) {<br />+            PMD_INIT_LOG(ERR,<br />+                "Could not acquire lock to release channel, timeout %d", timeout);<br />+            continue;<br />+        }<br />+        break;<br />+    }<br />+<br />+    if (timeout >= ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {<br />+        PMD_INIT_LOG(ERR, "Acquire lock timeout");<br />+        return -1;<br />+    }<br />+<br />+    for (lch = 0; lch < nr_vq; lch++) {<br />+        if (hw->channel_context[lch].valid == 0) {<br />+            PMD_INIT_LOG(DEBUG, "Logic channel %d does not need to be released", lch);<br />+            continue;<br />+        }<br />+<br />+        /* get coi table offset and index */<br />+        pch  = hw->channel_context[lch].ph_chno;<br />+        widx = pch / 32;<br />+        bidx = pch % 32;<br />+<br />+        addr = ZXDH_QUERES_SHARE_BASE + (widx * sizeof(uint32_t));<br />+        var  = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);<br />+        var &= ~(1 << bidx);<br />+        zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);<br />+<br />+        hw->channel_context[lch].valid = 0;<br />+        hw->channel_context[lch].ph_chno = 0;<br />+    }<br />+<br />+    zxdh_release_lock(hw);<br />+<br />+    return 0;<br />+}<br />+<br />+int32_t zxdh_free_queues(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint16_t nr_vq = hw->queue_num;<br />+    struct virtqueue *vq = NULL;<br />+    int32_t queue_type = 0;<br />+    uint16_t i = 0;<br />+<br />+    if (hw->vqs == NULL)<br />+        return 0;<br />+<br />+    /* Clear COI table */<br />+    if (zxdh_release_channel(dev) < 0) {<br />+        PMD_INIT_LOG(ERR, "Failed to clear coi table");<br />+        return -1;<br />+    }<br />+<br />+    for (i = 0; i < nr_vq; i++) {<br />+        vq = hw->vqs[i];<br />+        if (vq == NULL)<br />+            continue;<br />+<br />+        VTPCI_OPS(hw)->del_queue(hw, vq);<br />+        queue_type = get_queue_type(i);<br />+        if (queue_type == VTNET_RQ) {<br />+            rte_free(vq->sw_ring);<br />+            rte_memzone_free(vq->rxq.mz);<br />+        } else if (queue_type 
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
new file mode 100644
index 0000000000..c2d7bbe889
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_QUEUE_H_
+#define _ZXDH_QUEUE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_net.h>
+#include <ethdev_driver.h>
+
+#include "zxdh_pci.h"
+#include "zxdh_ring.h"
+#include "zxdh_rxtx.h"
+
+
+enum {
+    VTNET_RQ = 0,
+    VTNET_TQ = 1
+};
+
+struct vq_desc_extra {
+    void *cookie;
+    uint16_t ndescs;
+    uint16_t next;
+};
+
+struct virtqueue {
+    struct zxdh_hw  *hw; /**< zxdh_hw structure pointer. */
+    struct {
+        /** vring keeping descs and events */
+        struct vring_packed ring;
+        bool used_wrap_counter;
+        uint8_t rsv;
+        uint16_t cached_flags; /**< cached flags for descs */
+        uint16_t event_flags_shadow;
+        uint16_t rsv1;
+    } __rte_packed vq_packed;
+    uint16_t vq_used_cons_idx; /**< last consumed descriptor */
+    uint16_t vq_nentries;  /**< vring desc numbers */
+    uint16_t vq_free_cnt;  /**< num of desc available */
+    uint16_t vq_avail_idx; /**< sync until needed */
+    uint16_t vq_free_thresh; /**< free threshold */
+    uint16_t rsv2;
+
+    void *vq_ring_virt_mem;  /**< linear address of vring */
+    uint32_t vq_ring_size;
+
+    union {
+        struct virtnet_rx rxq;
+        struct virtnet_tx txq;
+    };
+
+    /** Physical address of vring,
+     * or virtual address for virtio_user.
+     **/
+    rte_iova_t vq_ring_mem;
+
+    /**
+     * Head of the free chain in the descriptor table. If
+     * there are no free descriptors, this will be set to
+     * VQ_RING_DESC_CHAIN_END.
+     **/
+    uint16_t  vq_desc_head_idx;
+    uint16_t  vq_desc_tail_idx;
+    uint16_t  vq_queue_index;   /**< PCI queue index */
+    uint16_t  offset; /**< relative offset to obtain addr in mbuf */
+    uint16_t *notify_addr;
+    struct rte_mbuf **sw_ring;  /**< RX software ring. */
+    struct vq_desc_extra vq_descx[0];
+};
+
+struct rte_mbuf *zxdh_virtqueue_detach_unused(struct virtqueue *vq);
+int32_t zxdh_free_queues(struct rte_eth_dev *dev);
+int32_t get_queue_type(uint16_t vtpci_queue_idx);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
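
Note: struct virtqueue ends in the zero-length array vq_descx[0], so the
per-descriptor extras must be co-allocated with the queue itself. A hedged
sketch of the intended sizing; vq_size and socket_id are illustrative, and
the real allocation lives elsewhere in the driver (not in this hunk):

    /* one vq_desc_extra entry per descriptor follows the struct */
    size_t sz = sizeof(struct virtqueue) +
                vq_size * sizeof(struct vq_desc_extra);
    struct virtqueue *vq = rte_zmalloc_socket("zxdh_vq", sz,
                RTE_CACHE_LINE_SIZE, socket_id);
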
diff --git a/drivers/net/zxdh/zxdh_ring.h b/drivers/net/zxdh/zxdh_ring.h
new file mode 100644
index 0000000000..bd7c997993
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_ring.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_RING_H_
+#define _ZXDH_RING_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_common.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT                                   1
+
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE                                  2
+
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT                               4
+
+/* This flag means the descriptor was made available by the driver. */
+#define VRING_PACKED_DESC_F_AVAIL                           (1 << 7)
+/* This flag means the descriptor was used by the device. */
+#define VRING_PACKED_DESC_F_USED                            (1 << 15)
+
+/* Frequently used combinations */
+#define VRING_PACKED_DESC_F_AVAIL_USED \
+            (VRING_PACKED_DESC_F_AVAIL | VRING_PACKED_DESC_F_USED)
+
+/** The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer.  It's unreliable, so it's simply an
+ * optimization.  Guest will still kick if it's out of buffers.
+ **/
+#define VRING_USED_F_NO_NOTIFY                              1
+
+/** The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer.  It's unreliable, so it's
+ * simply an optimization.
+ **/
+#define VRING_AVAIL_F_NO_INTERRUPT                          1
+
+#define RING_EVENT_FLAGS_ENABLE                             0x0
+#define RING_EVENT_FLAGS_DISABLE                            0x1
+#define RING_EVENT_FLAGS_DESC                               0x2
+
+/** VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next".
+ **/
+struct vring_desc {
+    uint64_t addr;  /* Address (guest-physical). */
+    uint32_t len;   /* Length. */
+    uint16_t flags; /* The flags as indicated above. */
+    uint16_t next;  /* We chain unused descriptors via this. */
+};
+
+struct vring_avail {
+    uint16_t flags;
+    uint16_t idx;
+    uint16_t ring[0];
+};
+
+/** For packed virtqueues in VirtIO 1.1, the descriptor format
+ * looks like this.
+ **/
+struct vring_packed_desc {
+    uint64_t addr;
+    uint32_t len;
+    uint16_t id;
+    uint16_t flags;
+};
+
+struct vring_packed_desc_event {
+    uint16_t desc_event_off_wrap;
+    uint16_t desc_event_flags;
+};
+
+struct vring_packed {
+    uint32_t num;
+    struct vring_packed_desc *desc;
+    struct vring_packed_desc_event *driver;
+    struct vring_packed_desc_event *device;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
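
Note: on a packed ring, VRING_PACKED_DESC_F_AVAIL/USED are meaningful only
relative to the driver's wrap counter (used_wrap_counter in struct
virtqueue); a descriptor is "used" when both flags match that counter. A
minimal sketch of the conventional check, with an illustrative helper name
that is not part of this patch:

    static inline int desc_is_used(const struct vring_packed_desc *desc,
                bool used_wrap_counter)
    {
        uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
        bool avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
        bool used  = !!(flags & VRING_PACKED_DESC_F_USED);

        /* both flags equal the current wrap counter => device is done */
        return avail == used && used == used_wrap_counter;
    }
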
diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h
new file mode 100644
index 0000000000..7aedf568fe
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_rxtx.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef _ZXDH_RXTX_H_
+#define _ZXDH_RXTX_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+struct virtnet_stats {
+    uint64_t packets;
+    uint64_t bytes;
+    uint64_t errors;
+    uint64_t multicast;
+    uint64_t broadcast;
+    uint64_t truncated_err;
+    uint64_t size_bins[8]; /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc. */
+};
+
+struct virtnet_rx {
+    struct virtqueue         *vq;
+
+    /* dummy mbuf, for wraparound when processing RX ring. */
+    struct rte_mbuf           fake_mbuf;
+
+    uint64_t                  mbuf_initializer; /* value to init mbufs. */
+    struct rte_mempool       *mpool;            /* mempool for mbuf allocation */
+    uint16_t                  queue_id;         /* DPDK queue index. */
+    uint16_t                  port_id;          /* Device port identifier. */
+    struct virtnet_stats      stats;
+    const struct rte_memzone *mz;               /* mem zone to populate RX ring. */
+};
+
+struct virtnet_tx {
+    struct virtqueue         *vq;
+    const struct rte_memzone *virtio_net_hdr_mz;  /* memzone to populate hdr. */
+    rte_iova_t                virtio_net_hdr_mem; /* hdr for each xmit packet */
+    uint16_t                  queue_id;           /* DPDK queue index. */
+    uint16_t                  port_id;            /* Device port identifier. */
+    struct virtnet_stats      stats;
+    const struct rte_memzone *mz;                 /* mem zone to populate TX ring. */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
-- 
2.43.0
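
Note on the stats structure above: the size_bins[8] comment implies the
usual RFC 2819 frame-size buckets. A hedged sketch of the intended binning,
with an illustrative helper name that is not part of this patch; the bin
boundaries are assumed from the comment, not taken from the driver:

    static inline void
    stats_update_size_bins(struct virtnet_stats *stats, uint32_t len)
    {
        if (len < 64)
            stats->size_bins[0]++;      /* undersized */
        else if (len == 64)
            stats->size_bins[1]++;
        else if (len <= 127)
            stats->size_bins[2]++;
        else if (len <= 255)
            stats->size_bins[3]++;
        else if (len <= 511)
            stats->size_bins[4]++;
        else if (len <= 1023)
            stats->size_bins[5]++;
        else if (len <= 1518)
            stats->size_bins[6]++;
        else
            stats->size_bins[7]++;      /* oversized */
    }
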