v1: zxdh basic init function code.<br /> <br />Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn> <br />---<br /> drivers/net/zxdh/meson.build   |    3 +<br /> drivers/net/zxdh/zxdh_common.c |   59 ++<br /> drivers/net/zxdh/zxdh_common.h |   32 +<br /> drivers/net/zxdh/zxdh_ethdev.c | 1310 ++++++++++++++++++++++++++++++++<br /> drivers/net/zxdh/zxdh_ethdev.h |  203 +++++<br /> drivers/net/zxdh/zxdh_pci.c    |  462 +++++++++++<br /> drivers/net/zxdh/zxdh_pci.h    |  259 +++++++<br /> drivers/net/zxdh/zxdh_queue.c  |  138 ++++<br /> drivers/net/zxdh/zxdh_queue.h  |   85 +++<br /> drivers/net/zxdh/zxdh_ring.h   |   87 +++<br /> drivers/net/zxdh/zxdh_rxtx.h   |   48 ++<br /> 11 files changed, 2686 insertions(+)<br /> create mode 100644 drivers/net/zxdh/zxdh_common.c<br /> create mode 100644 drivers/net/zxdh/zxdh_common.h<br /> create mode 100644 drivers/net/zxdh/zxdh_ethdev.h<br /> create mode 100644 drivers/net/zxdh/zxdh_pci.c<br /> create mode 100644 drivers/net/zxdh/zxdh_pci.h<br /> create mode 100644 drivers/net/zxdh/zxdh_queue.c<br /> create mode 100644 drivers/net/zxdh/zxdh_queue.h<br /> create mode 100644 drivers/net/zxdh/zxdh_ring.h<br /> create mode 100644 drivers/net/zxdh/zxdh_rxtx.h<br /> <br />diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build<br />index 217d8920cd..0810073e09 100644<br />--- a/drivers/net/zxdh/meson.build<br />+++ b/drivers/net/zxdh/meson.build<br />@@ -9,6 +9,9 @@ endif<br />  <br /> sources = files(<br />     'zxdh_ethdev.c',<br />+    'zxdh_common.c',<br />+    'zxdh_pci.c',<br />     'zxdh_msg.c',<br />+    'zxdh_queue.c',<br />     'zxdh_npsdk.c',<br />     )<br />diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c<br />new file mode 100644<br />index 0000000000..55497f8a24<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_common.c<br />@@ -0,0 +1,59 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#include <stdint.h> <br />+#include <ethdev_driver.h> <br />+<br />+#include "zxdh_ethdev.h" <br />+#include "zxdh_common.h" <br />+<br />+uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);<br />+    uint32_t val      = *((volatile uint32_t *)(baseaddr + reg));<br />+    return val;<br />+}<br />+<br />+void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);<br />+    *((volatile uint32_t *)(baseaddr + reg)) = val;<br />+}<br />+<br />+int32_t zxdh_acquire_lock(struct zxdh_hw *hw)<br />+{<br />+    uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG);<br />+<br />+    /* check whether lock is used */<br />+    if (!(var & ZXDH_VF_LOCK_ENABLE_MASK))<br />+        return -1;<br />+<br />+    return 0;<br />+}<br />+<br />+int32_t zxdh_release_lock(struct zxdh_hw *hw)<br />+{<br />+    uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG);<br />+<br />+    if (var & ZXDH_VF_LOCK_ENABLE_MASK) {<br />+        var &= ~ZXDH_VF_LOCK_ENABLE_MASK;<br />+        zxdh_write_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG, var);<br />+        return 0;<br />+    }<br />+<br />+    return -1;<br />+}<br />+<br />+uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg)<br />+{<br />+    
uint32_t val = *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg));<br />+    return val;<br />+}<br />+<br />+void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val)<br />+{<br />+    *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg)) = val;<br />+}<br />diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h<br />new file mode 100644<br />index 0000000000..912eb9ad42<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_common.h<br />@@ -0,0 +1,32 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZXDH_COMMON_H_<br />+#define _ZXDH_COMMON_H_<br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+#include <stdint.h> <br />+#include <rte_ethdev.h> <br />+<br />+#include "zxdh_ethdev.h" <br />+<br />+#define ZXDH_VF_LOCK_ENABLE_MASK      0x1<br />+#define ZXDH_ACQUIRE_CHANNEL_NUM_MAX   10<br />+#define ZXDH_VF_LOCK_REG             0x90<br />+<br />+uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg);<br />+void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val);<br />+int32_t zxdh_release_lock(struct zxdh_hw *hw);<br />+int32_t zxdh_acquire_lock(struct zxdh_hw *hw);<br />+uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg);<br />+void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val);<br />+<br />+#ifdef __cplusplus<br />+}<br />+#endif<br />+<br />+#endif /* _ZXDH_COMMON_H_ */<br />diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c<br />index a3c05f9809..425a818109 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev.c<br />+++ b/drivers/net/zxdh/zxdh_ethdev.c<br />@@ -10,6 +10,1316 @@<br /> #include <rte_kvargs.h> <br /> #include <rte_hexdump.h> <br />  <br />+struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];<br />+struct zxdh_shared_data *zxdh_shared_data;<br />+const char *MZ_ZXDH_PMD_SHARED_DATA = "zxdh_pmd_shared_data";<br />+rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER;<br />+struct zxdh_dtb_shared_data g_dtb_data = {0};<br />+<br />+#define ZXDH_PMD_DEFAULT_HOST_FEATURES   \<br />+    (1ULL << ZXDH_NET_F_MRG_RXBUF | \<br />+     1ULL << ZXDH_NET_F_STATUS    | \<br />+     1ULL << ZXDH_NET_F_MQ        | \<br />+     1ULL << ZXDH_F_ANY_LAYOUT    | \<br />+     1ULL << ZXDH_F_VERSION_1   | \<br />+     1ULL << ZXDH_F_RING_PACKED | \<br />+     1ULL << ZXDH_F_IN_ORDER    | \<br />+     1ULL << ZXDH_F_ORDER_PLATFORM | \<br />+     1ULL << ZXDH_F_NOTIFICATION_DATA |\<br />+     1ULL << ZXDH_NET_F_MAC | \<br />+     1ULL << ZXDH_NET_F_CSUM |\<br />+     1ULL << ZXDH_NET_F_GUEST_CSUM |\<br />+     1ULL << ZXDH_NET_F_GUEST_TSO4 |\<br />+     1ULL << ZXDH_NET_F_GUEST_TSO6 |\<br />+     1ULL << ZXDH_NET_F_HOST_TSO4 |\<br />+     1ULL << ZXDH_NET_F_HOST_TSO6 |\<br />+     1ULL << ZXDH_NET_F_GUEST_UFO |\<br />+     1ULL << ZXDH_NET_F_HOST_UFO)<br />+<br />+#define ZXDH_PMD_DEFAULT_GUEST_FEATURES   \<br />+    (1ULL << ZXDH_NET_F_MRG_RXBUF | \<br />+     1ULL << ZXDH_NET_F_STATUS    | \<br />+     1ULL << ZXDH_NET_F_MQ        | \<br />+     1ULL << ZXDH_F_ANY_LAYOUT    | \<br />+     1ULL << ZXDH_F_VERSION_1     | \<br />+     1ULL << ZXDH_F_RING_PACKED   | \<br />+     1ULL << ZXDH_F_IN_ORDER      | \<br />+     1ULL << ZXDH_F_NOTIFICATION_DATA | \<br />+     1ULL << ZXDH_NET_F_MAC)<br />+<br />+#define ZXDH_RX_QUEUES_MAX  128U<br />+#define ZXDH_TX_QUEUES_MAX  128U<br />+<br />+static 
unsigned int<br />+log2above(unsigned int v)<br />+{<br />+    unsigned int l;<br />+    unsigned int r;<br />+<br />+    for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)<br />+        r |= (v & 1);<br />+    return l + r;<br />+}<br />+<br />+static uint16_t zxdh_queue_desc_pre_setup(uint16_t desc)<br />+{<br />+    uint32_t nb_desc = desc;<br />+<br />+    if (desc < ZXDH_MIN_QUEUE_DEPTH) {<br />+        PMD_RX_LOG(WARNING,<br />+            "nb_desc(%u) increased number of descriptors to the min queue depth (%u)",<br />+            desc, ZXDH_MIN_QUEUE_DEPTH);<br />+        return ZXDH_MIN_QUEUE_DEPTH;<br />+    }<br />+<br />+    if (desc > ZXDH_MAX_QUEUE_DEPTH) {<br />+        PMD_RX_LOG(WARNING,<br />+            "nb_desc(%u) can't be greater than max_rxds (%d), turn to max queue depth",<br />+            desc, ZXDH_MAX_QUEUE_DEPTH);<br />+        return ZXDH_MAX_QUEUE_DEPTH;<br />+    }<br />+<br />+    if (!rte_is_power_of_2(desc)) {<br />+        nb_desc = 1 << log2above(desc);<br />+        if (nb_desc > ZXDH_MAX_QUEUE_DEPTH)<br />+            nb_desc = ZXDH_MAX_QUEUE_DEPTH;<br />+<br />+        PMD_RX_LOG(WARNING,<br />+            "nb_desc(%u) increased number of descriptors to the next power of two (%d)",<br />+            desc, nb_desc);<br />+    }<br />+<br />+    return nb_desc;<br />+}<br />+<br />+static int32_t hw_q_depth_handler(const char *key __rte_unused,<br />+                const char *value, void *ret_val)<br />+{<br />+    uint16_t val = 0;<br />+    struct zxdh_hw *hw = ret_val;<br />+<br />+    val = strtoul(value, NULL, 0);<br />+    uint16_t q_depth = zxdh_queue_desc_pre_setup(val);<br />+<br />+    hw->q_depth = q_depth;<br />+    return 0;<br />+}<br />+<br />+static int32_t zxdh_dev_devargs_parse(struct rte_devargs *devargs, struct zxdh_hw *hw)<br />+{<br />+    struct rte_kvargs *kvlist = NULL;<br />+    int32_t ret = 0;<br />+<br />+    if (devargs == NULL)<br />+        return 0;<br />+<br />+    kvlist = rte_kvargs_parse(devargs->args, NULL);<br />+    if (kvlist == NULL) {<br />+        PMD_INIT_LOG(ERR, "error when parsing param");<br />+        return 0;<br />+    }<br />+<br />+    ret = rte_kvargs_process(kvlist, "q_depth", hw_q_depth_handler, hw);<br />+    if (ret < 0) {<br />+        PMD_INIT_LOG(ERR, "Failed to parse q_depth");<br />+        goto exit;<br />+    }<br />+    if (!hw->q_depth)<br />+        hw->q_depth = ZXDH_MIN_QUEUE_DEPTH;<br />+<br />+exit:<br />+    rte_kvargs_free(kvlist);<br />+    return ret;<br />+}<br />+<br />+static int zxdh_init_shared_data(void)<br />+{<br />+    const struct rte_memzone *mz;<br />+    int ret = 0;<br />+<br />+    rte_spinlock_lock(&zxdh_shared_data_lock);<br />+    if (zxdh_shared_data == NULL) {<br />+        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {<br />+            /* Allocate shared memory. */<br />+            mz = rte_memzone_reserve(MZ_ZXDH_PMD_SHARED_DATA,<br />+                    sizeof(*zxdh_shared_data), SOCKET_ID_ANY, 0);<br />+            if (mz == NULL) {<br />+                PMD_INIT_LOG(ERR, "Cannot allocate zxdh shared data");<br />+                ret = -rte_errno;<br />+                goto error;<br />+            }<br />+            zxdh_shared_data = mz->addr;<br />+            memset(zxdh_shared_data, 0, sizeof(*zxdh_shared_data));<br />+            rte_spinlock_init(&zxdh_shared_data->lock);<br />+        } else { /* Lookup allocated shared memory. 
*/<br />+            mz = rte_memzone_lookup(MZ_ZXDH_PMD_SHARED_DATA);<br />+            if (mz == NULL) {<br />+                PMD_INIT_LOG(ERR, "Cannot attach zxdh shared data");<br />+                ret = -rte_errno;<br />+                goto error;<br />+            }<br />+            zxdh_shared_data = mz->addr;<br />+        }<br />+    }<br />+<br />+error:<br />+    rte_spinlock_unlock(&zxdh_shared_data_lock);<br />+    return ret;<br />+}<br />+<br />+static int zxdh_init_once(struct rte_eth_dev *eth_dev)<br />+{<br />+    PMD_INIT_LOG(DEBUG, "port 0x%x init...", eth_dev->data->port_id);<br />+    if (zxdh_init_shared_data())<br />+        return -rte_errno;<br />+<br />+    struct zxdh_shared_data *sd = zxdh_shared_data;<br />+    int ret = 0;<br />+<br />+    rte_spinlock_lock(&sd->lock);<br />+    if (rte_eal_process_type() == RTE_PROC_SECONDARY) {<br />+        if (!sd->init_done) {<br />+            ++sd->secondary_cnt;<br />+            sd->init_done = true;<br />+        }<br />+        goto out;<br />+    }<br />+<br />+    sd->dev_refcnt++;<br />+out:<br />+    rte_spinlock_unlock(&sd->lock);<br />+    return ret;<br />+}<br />+<br />+static int32_t zxdh_get_pci_dev_config(struct zxdh_hw *hw)<br />+{<br />+    hw->host_features = zxdh_vtpci_get_features(hw);<br />+    hw->host_features = ZXDH_PMD_DEFAULT_HOST_FEATURES;<br />+<br />+    uint64_t guest_features = (uint64_t)ZXDH_PMD_DEFAULT_GUEST_FEATURES;<br />+    uint64_t nego_features = guest_features & hw->host_features;<br />+<br />+    hw->guest_features = nego_features;<br />+<br />+    if (hw->guest_features & (1ULL << ZXDH_NET_F_MAC)) {<br />+        zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, mac),<br />+                &hw->mac_addr, RTE_ETHER_ADDR_LEN);<br />+        PMD_INIT_LOG(DEBUG, "get dev mac: %02X:%02X:%02X:%02X:%02X:%02X",<br />+                hw->mac_addr[0], hw->mac_addr[1],<br />+                hw->mac_addr[2], hw->mac_addr[3],<br />+                hw->mac_addr[4], hw->mac_addr[5]);<br />+    } else {<br />+        rte_eth_random_addr(&hw->mac_addr[0]);<br />+        PMD_INIT_LOG(DEBUG, "random dev mac: %02X:%02X:%02X:%02X:%02X:%02X",<br />+                hw->mac_addr[0], hw->mac_addr[1],<br />+                hw->mac_addr[2], hw->mac_addr[3],<br />+                hw->mac_addr[4], hw->mac_addr[5]);<br />+    }<br />+    uint32_t max_queue_pairs;<br />+<br />+    zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, max_virtqueue_pairs),<br />+            &max_queue_pairs, sizeof(max_queue_pairs));<br />+    PMD_INIT_LOG(DEBUG, "get max queue pairs %u", max_queue_pairs);<br />+    if (max_queue_pairs == 0)<br />+        hw->max_queue_pairs = ZXDH_RX_QUEUES_MAX;<br />+    else<br />+        hw->max_queue_pairs = RTE_MIN(ZXDH_RX_QUEUES_MAX, max_queue_pairs);<br />+<br />+    PMD_INIT_LOG(DEBUG, "set max queue pairs %d", hw->max_queue_pairs);<br />+<br />+    hw->weak_barriers = !vtpci_with_feature(hw, ZXDH_F_ORDER_PLATFORM);<br />+    return 0;<br />+}<br />+<br />+static void zxdh_dev_free_mbufs(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint16_t nr_vq = hw->queue_num;<br />+    uint32_t i, mbuf_num = 0;<br />+<br />+    const char *type __rte_unused;<br />+    struct virtqueue *vq = NULL;<br />+    struct rte_mbuf *buf = NULL;<br />+    int32_t queue_type = 0;<br />+<br />+    if (hw->vqs == NULL)<br />+        return;<br />+<br />+    for (i = 0; i < nr_vq; i++) {<br />+        vq = hw->vqs[i];<br />+        
if (!vq)<br />+            continue;<br />+<br />+        queue_type = get_queue_type(i);<br />+        if (queue_type == VTNET_RQ)<br />+            type = "rxq";<br />+        else if (queue_type == VTNET_TQ)<br />+            type = "txq";<br />+        else<br />+            continue;<br />+<br />+        PMD_INIT_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i);<br />+<br />+        while ((buf = zxdh_virtqueue_detach_unused(vq)) != NULL) {<br />+            rte_pktmbuf_free(buf);<br />+            mbuf_num++;<br />+        }<br />+<br />+        PMD_INIT_LOG(DEBUG, "After freeing %s[%d] used and unused buf", type, i);<br />+    }<br />+<br />+    PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);<br />+}<br />+<br />+static int32_t zxdh_init_device(struct rte_eth_dev *eth_dev)<br />+{<br />+    struct zxdh_hw *hw = eth_dev->data->dev_private;<br />+    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);<br />+    int ret = zxdh_read_pci_caps(pci_dev, hw);<br />+<br />+    if (ret) {<br />+        PMD_INIT_LOG(ERR, "port 0x%x pci caps read failed .", hw->vport.vport);<br />+        goto err;<br />+    }<br />+    zxdh_hw_internal[hw->port_id].vtpci_ops = &zxdh_modern_ops;<br />+    zxdh_vtpci_reset(hw);<br />+    zxdh_get_pci_dev_config(hw);<br />+    if (hw->vqs) { /* not reachable? */<br />+        zxdh_dev_free_mbufs(eth_dev);<br />+        ret = zxdh_free_queues(eth_dev);<br />+        if (ret < 0) {<br />+            PMD_INIT_LOG(ERR, "port 0x%x free queue failed.", hw->vport.vport);<br />+            goto err;<br />+        }<br />+    }<br />+    eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;<br />+    hw->speed = RTE_ETH_SPEED_NUM_UNKNOWN;<br />+    hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;<br />+<br />+    rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, &eth_dev->data->mac_addrs[0]);<br />+    PMD_INIT_LOG(DEBUG, "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",<br />+        eth_dev->data->mac_addrs->addr_bytes[0],<br />+        eth_dev->data->mac_addrs->addr_bytes[1],<br />+        eth_dev->data->mac_addrs->addr_bytes[2],<br />+        eth_dev->data->mac_addrs->addr_bytes[3],<br />+        eth_dev->data->mac_addrs->addr_bytes[4],<br />+        eth_dev->data->mac_addrs->addr_bytes[5]);<br />+    /* If host does not support both status and MSI-X then disable LSC */<br />+    if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS) && (hw->use_msix != ZXDH_MSIX_NONE)) {<br />+        eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;<br />+        PMD_INIT_LOG(DEBUG, "LSC enable");<br />+    } else {<br />+        eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;<br />+    }<br />+    return 0;<br />+<br />+err:<br />+    PMD_INIT_LOG(ERR, "port %d init device failed", eth_dev->data->port_id);<br />+    return ret;<br />+}<br />+<br />+<br />+static void zxdh_queues_unbind_intr(struct rte_eth_dev *dev)<br />+{<br />+    PMD_INIT_LOG(INFO, "queue/interrupt unbinding");<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    int32_t i;<br />+<br />+    for (i = 0; i < dev->data->nb_rx_queues; ++i) {<br />+        VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);<br />+        VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2 + 1], ZXDH_MSI_NO_VECTOR);<br />+    }<br />+}<br />+<br />+static int32_t zxdh_intr_unmask(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (rte_intr_ack(dev->intr_handle) < 0)<br />+        return -1;<br />+<br />+    hw->use_msix = 
zxdh_vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));<br />+<br />+    return 0;<br />+}<br />+<br />+<br />+static void zxdh_devconf_intr_handler(void *param)<br />+{<br />+    struct rte_eth_dev *dev = param;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint16_t status = 0;<br />+    /* Read interrupt status which clears interrupt */<br />+    uint8_t isr = zxdh_vtpci_isr(hw);<br />+<br />+    if (zxdh_intr_unmask(dev) < 0)<br />+        PMD_DRV_LOG(ERR, "interrupt enable failed");<br />+    if (isr & ZXDH_PCI_ISR_CONFIG) {<br />+        /** todo provided later<br />+         * if (zxdh_dev_link_update(dev, 0) == 0)<br />+         * rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);<br />+         */<br />+<br />+        if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS)) {<br />+            zxdh_vtpci_read_dev_config(hw, offsetof(struct zxdh_net_config, status),<br />+                    &status, sizeof(status));<br />+            if (status & ZXDH_NET_S_ANNOUNCE)<br />+                zxdh_notify_peers(dev);<br />+        }<br />+    }<br />+}<br />+<br />+/* Interrupt handler triggered by NIC for handling specific interrupt. */<br />+static void zxdh_frompfvf_intr_handler(void *param)<br />+{<br />+    struct rte_eth_dev *dev = param;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint64_t virt_addr = 0;<br />+<br />+    virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_MSG_CHAN_PFVFSHARE_OFFSET);<br />+    if (hw->is_pf) {<br />+        PMD_INIT_LOG(INFO, "zxdh_pf2vf_intr_handler  PF ");<br />+        zxdh_bar_irq_recv(MSG_CHAN_END_VF, MSG_CHAN_END_PF, virt_addr, dev);<br />+    } else {<br />+        PMD_INIT_LOG(INFO, "zxdh_pf2vf_intr_handler  VF ");<br />+        zxdh_bar_irq_recv(MSG_CHAN_END_PF, MSG_CHAN_END_VF, virt_addr, dev);<br />+    }<br />+}<br />+<br />+/* Interrupt handler triggered by NIC for handling specific interrupt. 
*/<br />+static void zxdh_fromriscv_intr_handler(void *param)<br />+{<br />+    struct rte_eth_dev *dev = param;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint64_t virt_addr = 0;<br />+<br />+    virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);<br />+    if (hw->is_pf) {<br />+        PMD_INIT_LOG(INFO, "zxdh_risc2pf_intr_handler PF");<br />+        zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_PF, virt_addr, dev);<br />+    } else {<br />+        PMD_INIT_LOG(INFO, "zxdh_risc2vf_intr_handler VF");<br />+        zxdh_bar_irq_recv(MSG_CHAN_END_RISC, MSG_CHAN_END_VF, virt_addr, dev);<br />+    }<br />+}<br />+<br />+static void zxdh_intr_cb_unreg(struct rte_eth_dev *dev)<br />+{<br />+    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)<br />+        rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);<br />+<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    /* unregister callback to update dev config intr */<br />+    rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);<br />+    /* Unregister risc_v to pf interrupt callback */<br />+    struct rte_intr_handle *tmp = hw->risc_intr +<br />+            (MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);<br />+<br />+    rte_intr_callback_unregister(tmp, zxdh_frompfvf_intr_handler, dev);<br />+    tmp = hw->risc_intr + (MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);<br />+    rte_intr_callback_unregister(tmp, zxdh_fromriscv_intr_handler, dev);<br />+}<br />+<br />+static int32_t zxdh_intr_disable(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (!hw->intr_enabled)<br />+        return 0;<br />+<br />+    zxdh_intr_cb_unreg(dev);<br />+    if (rte_intr_disable(dev->intr_handle) < 0)<br />+        return -1;<br />+<br />+    hw->intr_enabled = 0;<br />+    return 0;<br />+}<br />+<br />+static int32_t zxdh_intr_release(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)<br />+        VTPCI_OPS(hw)->set_config_irq(hw, ZXDH_MSI_NO_VECTOR);<br />+<br />+    zxdh_queues_unbind_intr(dev);<br />+    zxdh_intr_disable(dev);<br />+<br />+    rte_intr_efd_disable(dev->intr_handle);<br />+    rte_intr_vec_list_free(dev->intr_handle);<br />+    rte_free(hw->risc_intr);<br />+    hw->risc_intr = NULL;<br />+    rte_free(hw->dtb_intr);<br />+    hw->dtb_intr = NULL;<br />+    return 0;<br />+}<br />+<br />+static int32_t zxdh_setup_risc_interrupts(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint8_t i;<br />+<br />+    if (!hw->risc_intr) {<br />+        PMD_INIT_LOG(DEBUG, "allocate risc_intr");<br />+        hw->risc_intr = rte_zmalloc("risc_intr",<br />+            ZXDH_MSIX_INTR_MSG_VEC_NUM * sizeof(struct rte_intr_handle), 0);<br />+        if (hw->risc_intr == NULL) {<br />+            PMD_INIT_LOG(ERR, "Failed to allocate risc_intr");<br />+            return -ENOMEM;<br />+        }<br />+    }<br />+<br />+    for (i = 0; i < ZXDH_MSIX_INTR_MSG_VEC_NUM; i++) {<br />+        if (dev->intr_handle->efds[i] < 0) {<br />+            PMD_INIT_LOG(ERR, "[%u]risc interrupt fd is invalid", i);<br />+            rte_free(hw->risc_intr);<br />+            hw->risc_intr = NULL;<br />+            return -1;<br />+        }<br />+<br />+        struct rte_intr_handle *intr_handle = hw->risc_intr 
+ i;<br />+<br />+        intr_handle->fd = dev->intr_handle->efds[i];<br />+        intr_handle->type = dev->intr_handle->type;<br />+    }<br />+<br />+    return 0;<br />+}<br />+<br />+static int32_t zxdh_setup_dtb_interrupts(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (!hw->dtb_intr) {<br />+        hw->dtb_intr = rte_zmalloc("dtb_intr", sizeof(struct rte_intr_handle), 0);<br />+        if (hw->dtb_intr == NULL) {<br />+            PMD_INIT_LOG(ERR, "Failed to allocate dtb_intr");<br />+            return -ENOMEM;<br />+        }<br />+    }<br />+<br />+    if (dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1] < 0) {<br />+        PMD_INIT_LOG(ERR, "[%d]dtb interrupt fd is invalid", ZXDH_MSIX_INTR_DTB_VEC - 1);<br />+        rte_free(hw->dtb_intr);<br />+        hw->dtb_intr = NULL;<br />+        return -1;<br />+    }<br />+    hw->dtb_intr->fd = dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1];<br />+    hw->dtb_intr->type = dev->intr_handle->type;<br />+    return 0;<br />+}<br />+<br />+static int32_t zxdh_queues_bind_intr(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    int32_t i;<br />+    uint16_t vec;<br />+<br />+    if (!dev->data->dev_conf.intr_conf.rxq) {<br />+        PMD_INIT_LOG(INFO, "queue/interrupt mask, nb_rx_queues %u",<br />+                dev->data->nb_rx_queues);<br />+        for (i = 0; i < dev->data->nb_rx_queues; ++i) {<br />+            vec = VTPCI_OPS(hw)->set_queue_irq(hw,<br />+                    hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);<br />+            PMD_INIT_LOG(INFO, "vq%d irq set 0x%x, get 0x%x",<br />+                    i * 2, ZXDH_MSI_NO_VECTOR, vec);<br />+        }<br />+    } else {<br />+        PMD_INIT_LOG(DEBUG, "queue/interrupt binding, nb_rx_queues %u",<br />+                dev->data->nb_rx_queues);<br />+        for (i = 0; i < dev->data->nb_rx_queues; ++i) {<br />+            vec = VTPCI_OPS(hw)->set_queue_irq(hw,<br />+                    hw->vqs[i * 2], i + ZXDH_QUE_INTR_VEC_BASE);<br />+            PMD_INIT_LOG(INFO, "vq%d irq set %d, get %d",<br />+                    i * 2, i + ZXDH_QUE_INTR_VEC_BASE, vec);<br />+        }<br />+    }<br />+    /* mask all txq intr */<br />+    for (i = 0; i < dev->data->nb_tx_queues; ++i) {<br />+        vec = VTPCI_OPS(hw)->set_queue_irq(hw,<br />+                hw->vqs[(i * 2) + 1], ZXDH_MSI_NO_VECTOR);<br />+        PMD_INIT_LOG(INFO, "vq%d irq set 0x%x, get 0x%x",<br />+                (i * 2) + 1, ZXDH_MSI_NO_VECTOR, vec);<br />+    }<br />+    return 0;<br />+}<br />+<br />+int32_t zxdh_dev_pause(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    rte_spinlock_lock(&hw->state_lock);<br />+<br />+    if (hw->started == 0) {<br />+        /* Device is just stopped. 
*/<br />+        rte_spinlock_unlock(&hw->state_lock);<br />+        return -1;<br />+    }<br />+    hw->started = 0;<br />+    hw->admin_status = 0;<br />+    /*<br />+     * Prevent the worker threads from touching queues to avoid contention,<br />+     * 1 ms should be enough for the ongoing Tx function to finish.<br />+     */<br />+    rte_delay_ms(1);<br />+    return 0;<br />+}<br />+<br />+/*<br />+ * Recover hw state to let the worker threads continue.<br />+ */<br />+void zxdh_dev_resume(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    hw->started = 1;<br />+    hw->admin_status = 1;<br />+    rte_spinlock_unlock(&hw->state_lock);<br />+}<br />+<br />+/*<br />+ * Should be called only after device is paused.<br />+ */<br />+int32_t zxdh_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts, int32_t nb_pkts)<br />+{<br />+    struct zxdh_hw    *hw   = dev->data->dev_private;<br />+    struct virtnet_tx *txvq = dev->data->tx_queues[0];<br />+    int32_t ret = 0;<br />+<br />+    hw->inject_pkts = tx_pkts;<br />+    ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);<br />+    hw->inject_pkts = NULL;<br />+<br />+    return ret;<br />+}<br />+<br />+void zxdh_notify_peers(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    struct virtnet_rx *rxvq = NULL;<br />+    struct rte_mbuf *rarp_mbuf = NULL;<br />+<br />+    if (!dev->data->rx_queues)<br />+        return;<br />+<br />+    rxvq = dev->data->rx_queues[0];<br />+    if (!rxvq)<br />+        return;<br />+<br />+    rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool, (struct rte_ether_addr *)hw->mac_addr);<br />+    if (rarp_mbuf == NULL) {<br />+        PMD_DRV_LOG(ERR, "failed to make RARP packet.");<br />+        return;<br />+    }<br />+<br />+    /* If virtio port just stopped, no need to send RARP */<br />+    if (zxdh_dev_pause(dev) < 0) {<br />+        rte_pktmbuf_free(rarp_mbuf);<br />+        return;<br />+    }<br />+<br />+    zxdh_inject_pkts(dev, &rarp_mbuf, 1);<br />+    zxdh_dev_resume(dev);<br />+}<br />+<br />+static void zxdh_intr_cb_reg(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)<br />+        rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);<br />+<br />+    /* register callback to update dev config intr */<br />+    rte_intr_callback_register(dev->intr_handle, zxdh_devconf_intr_handler, dev);<br />+    /* Register rsic_v to pf interrupt callback */<br />+    struct rte_intr_handle *tmp = hw->risc_intr +<br />+            (MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);<br />+<br />+    rte_intr_callback_register(tmp, zxdh_frompfvf_intr_handler, dev);<br />+<br />+    tmp = hw->risc_intr + (MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);<br />+    rte_intr_callback_register(tmp, zxdh_fromriscv_intr_handler, dev);<br />+}<br />+<br />+static int32_t zxdh_intr_enable(struct rte_eth_dev *dev)<br />+{<br />+    int ret = 0;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (!hw->intr_enabled) {<br />+        zxdh_intr_cb_reg(dev);<br />+        ret = rte_intr_enable(dev->intr_handle);<br />+        if (unlikely(ret))<br />+            PMD_INIT_LOG(ERR, "Failed to enable %s intr", dev->data->name);<br />+<br />+        hw->intr_enabled = 1;<br />+    }<br />+    return ret;<br />+}<br />+<br />+static int32_t zxdh_configure_intr(struct 
rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    int32_t ret = 0;<br />+<br />+    if (!rte_intr_cap_multiple(dev->intr_handle)) {<br />+        PMD_INIT_LOG(ERR, "Multiple intr vector not supported");<br />+        return -ENOTSUP;<br />+    }<br />+    zxdh_intr_release(dev);<br />+    uint8_t nb_efd = ZXDH_MSIX_INTR_DTB_VEC_NUM + ZXDH_MSIX_INTR_MSG_VEC_NUM;<br />+<br />+    if (dev->data->dev_conf.intr_conf.rxq)<br />+        nb_efd += dev->data->nb_rx_queues;<br />+<br />+    if (rte_intr_efd_enable(dev->intr_handle, nb_efd)) {<br />+        PMD_INIT_LOG(ERR, "Fail to create eventfd");<br />+        return -1;<br />+    }<br />+<br />+    if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",<br />+                    hw->max_queue_pairs + ZXDH_INTR_NONQUE_NUM)) {<br />+        PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",<br />+                    hw->max_queue_pairs + ZXDH_INTR_NONQUE_NUM);<br />+        return -ENOMEM;<br />+    }<br />+    PMD_INIT_LOG(INFO, "allocate %u rxq vectors", dev->intr_handle->vec_list_size);<br />+    if (zxdh_setup_risc_interrupts(dev) != 0) {<br />+        PMD_INIT_LOG(ERR, "Error setting up rsic_v interrupts!");<br />+        ret = -1;<br />+        goto free_intr_vec;<br />+    }<br />+    if (zxdh_setup_dtb_interrupts(dev) != 0) {<br />+        PMD_INIT_LOG(ERR, "Error setting up dtb interrupts!");<br />+        ret = -1;<br />+        goto free_intr_vec;<br />+    }<br />+<br />+    if (zxdh_queues_bind_intr(dev) < 0) {<br />+        PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");<br />+        ret = -1;<br />+        goto free_intr_vec;<br />+    }<br />+    /** DO NOT try to remove this! This function will enable msix,<br />+     * or QEMU will encounter SIGSEGV when DRIVER_OK is sent.<br />+     * And for legacy devices, this should be done before queue/vec<br />+     * binding to change the config size from 20 to 24, or<br />+     * ZXDH_MSI_QUEUE_VECTOR (22) will be ignored.<br />+     **/<br />+    if (zxdh_intr_enable(dev) < 0) {<br />+        PMD_DRV_LOG(ERR, "interrupt enable failed");<br />+        ret = -1;<br />+        goto free_intr_vec;<br />+    }<br />+    return 0;<br />+<br />+free_intr_vec:<br />+    zxdh_intr_release(dev);<br />+    return ret;<br />+}<br />+<br />+/* dev_ops for zxdh, bare necessities for basic operation */<br />+static const struct eth_dev_ops zxdh_eth_dev_ops = {<br />+    .dev_configure             = NULL,<br />+    .dev_start                 = NULL,<br />+    .dev_stop                 = NULL,<br />+    .dev_close                 = NULL,<br />+<br />+    .rx_queue_setup             = NULL,<br />+    .rx_queue_intr_enable     = NULL,<br />+    .rx_queue_intr_disable     = NULL,<br />+<br />+    .tx_queue_setup             = NULL,<br />+};<br />+<br />+<br />+static int32_t set_rxtx_funcs(struct rte_eth_dev *eth_dev)<br />+{<br />+    /** todo later<br />+     * eth_dev->tx_pkt_prepare = zxdh_xmit_pkts_prepare;<br />+     */<br />+<br />+    struct zxdh_hw *hw = eth_dev->data->dev_private;<br />+<br />+    if (!vtpci_packed_queue(hw)) {<br />+        PMD_INIT_LOG(ERR, " port %u not support packed queue", eth_dev->data->port_id);<br />+        return -1;<br />+    }<br />+    if (!vtpci_with_feature(hw, ZXDH_NET_F_MRG_RXBUF)) {<br />+        PMD_INIT_LOG(ERR, " port %u not support rx mergeable", eth_dev->data->port_id);<br />+        return -1;<br />+    }<br />+    /** todo later provided rx/tx<br />+     * eth_dev->tx_pkt_burst = 
&zxdh_xmit_pkts_packed;<br />+     * eth_dev->rx_pkt_burst = &zxdh_recv_mergeable_pkts_packed;<br />+     */<br />+<br />+    return 0;<br />+}<br />+<br />+static void zxdh_msg_cb_reg(struct zxdh_hw *hw)<br />+{<br />+    if (hw->is_pf)<br />+        zxdh_bar_chan_msg_recv_register(MODULE_BAR_MSG_TO_PF, pf_recv_bar_msg);<br />+    else<br />+        zxdh_bar_chan_msg_recv_register(MODULE_BAR_MSG_TO_VF, vf_recv_bar_msg);<br />+}<br />+<br />+static void zxdh_priv_res_init(struct zxdh_hw *hw)<br />+{<br />+    hw->vlan_fiter = (uint64_t *)rte_malloc("vlan_filter", 64 * sizeof(uint64_t), 1);<br />+    memset(hw->vlan_fiter, 0, 64 * sizeof(uint64_t));<br />+    if (hw->is_pf)<br />+        hw->vfinfo = rte_zmalloc("vfinfo", ZXDH_MAX_VF * sizeof(struct vfinfo), 4);<br />+    else<br />+        hw->vfinfo = NULL;<br />+}<br />+<br />+static void set_vfs_pcieid(struct zxdh_hw *hw)<br />+{<br />+    if (hw->pfinfo.vf_nums > ZXDH_MAX_VF) {<br />+        PMD_DRV_LOG(ERR, "vf nums %u out of range", hw->pfinfo.vf_nums);<br />+        return;<br />+    }<br />+    if (hw->vfinfo == NULL) {<br />+        PMD_DRV_LOG(ERR, " vfinfo uninited");<br />+        return;<br />+    }<br />+<br />+    PMD_DRV_LOG(INFO, "vf nums %d", hw->pfinfo.vf_nums);<br />+    int vf_idx;<br />+<br />+    for (vf_idx = 0; vf_idx < hw->pfinfo.vf_nums; vf_idx++)<br />+        hw->vfinfo[vf_idx].pcieid = VF_PCIE_ID(hw->pcie_id, vf_idx);<br />+}<br />+<br />+<br />+static void zxdh_sriovinfo_init(struct zxdh_hw *hw)<br />+{<br />+    hw->pfinfo.pcieid = PF_PCIE_ID(hw->pcie_id);<br />+<br />+    if (hw->is_pf)<br />+        set_vfs_pcieid(hw);<br />+}<br />+<br />+static int zxdh_tbl_entry_offline_destroy(struct zxdh_hw *hw)<br />+{<br />+    int ret = 0;<br />+    uint32_t sdt_no;<br />+<br />+    if (!g_dtb_data.init_done)<br />+        return ret;<br />+<br />+    if (hw->is_pf) {<br />+        sdt_no = MK_SDT_NO(L2_ENTRY, hw->hash_search_index);<br />+        ret = dpp_dtb_hash_offline_delete(0, g_dtb_data.queueid, sdt_no, 0);<br />+        PMD_DRV_LOG(DEBUG, "%d dpp_dtb_hash_offline_delete sdt_no %d",<br />+                hw->port_id, sdt_no);<br />+        if (ret)<br />+            PMD_DRV_LOG(ERR, "%d dpp_dtb_hash_offline_delete sdt_no %d failed",<br />+                    hw->port_id, sdt_no);<br />+<br />+        sdt_no = MK_SDT_NO(MC, hw->hash_search_index);<br />+        ret = dpp_dtb_hash_offline_delete(0, g_dtb_data.queueid, sdt_no, 0);<br />+        PMD_DRV_LOG(DEBUG, "%d dpp_dtb_hash_offline_delete sdt_no %d",<br />+                hw->port_id, sdt_no);<br />+        if (ret)<br />+            PMD_DRV_LOG(ERR, "%d dpp_dtb_hash_offline_delete sdt_no %d failed",<br />+                hw->port_id, sdt_no);<br />+    }<br />+    return ret;<br />+}<br />+<br />+static inline int zxdh_dtb_dump_res_init(struct zxdh_hw *hw __rte_unused,<br />+            DPP_DEV_INIT_CTRL_T *dpp_ctrl)<br />+{<br />+    int ret = 0;<br />+    int i;<br />+<br />+    struct zxdh_dtb_bulk_dump_info dtb_dump_baseres[] = {<br />+    /* eram */<br />+    {"zxdh_sdt_vport_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_VPORT_ATT_TABLE, NULL},<br />+    {"zxdh_sdt_panel_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_PANEL_ATT_TABLE, NULL},<br />+    {"zxdh_sdt_rss_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_RSS_ATT_TABLE, NULL},<br />+    {"zxdh_sdt_vlan_att_table", ZXDH_TBL_ERAM_DUMP_SIZE, ZXDH_SDT_VLAN_ATT_TABLE, NULL},<br />+    /* hash */<br />+    {"zxdh_sdt_l2_entry_table0", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE0, NULL},<br />+   
 {"zxdh_sdt_l2_entry_table1", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE1, NULL},<br />+    {"zxdh_sdt_l2_entry_table2", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE2, NULL},<br />+    {"zxdh_sdt_l2_entry_table3", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_L2_ENTRY_TABLE3, NULL},<br />+    {"zxdh_sdt_mc_table0", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE0, NULL},<br />+    {"zxdh_sdt_mc_table1", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE1, NULL},<br />+    {"zxdh_sdt_mc_table2", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE2, NULL},<br />+    {"zxdh_sdt_mc_table3", ZXDH_TBL_ZCAM_DUMP_SIZE, ZXDH_SDT_MC_TABLE3, NULL},<br />+    };<br />+    for (i = 0; i < (int)RTE_DIM(dtb_dump_baseres); i++) {<br />+        struct zxdh_dtb_bulk_dump_info *p = dtb_dump_baseres + i;<br />+        const struct rte_memzone *generic_dump_mz = rte_memzone_reserve_aligned(p->mz_name,<br />+                    p->mz_size, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);<br />+<br />+        if (generic_dump_mz == NULL) {<br />+            PMD_DRV_LOG(ERR,<br />+                "Cannot alloc mem for dtb tbl bulk dump, mz_name is %s, mz_size is %u",<br />+                p->mz_name, p->mz_size);<br />+            ret = -ENOMEM;<br />+            return ret;<br />+        }<br />+        p->mz = generic_dump_mz;<br />+        dpp_ctrl->dump_addr_info[i].vir_addr = generic_dump_mz->addr_64;<br />+        dpp_ctrl->dump_addr_info[i].phy_addr = generic_dump_mz->iova;<br />+        dpp_ctrl->dump_addr_info[i].sdt_no   = p->sdt_no;<br />+        dpp_ctrl->dump_addr_info[i].size     = p->mz_size;<br />+<br />+        g_dtb_data.dtb_table_bulk_dump_mz[dpp_ctrl->dump_sdt_num] = generic_dump_mz;<br />+        dpp_ctrl->dump_sdt_num++;<br />+    }<br />+    return ret;<br />+}<br />+<br />+static void dtb_data_res_free(struct zxdh_hw *hw)<br />+{<br />+    struct rte_eth_dev *dev = hw->eth_dev;<br />+<br />+    if ((g_dtb_data.init_done) && (g_dtb_data.bind_device == dev))  {<br />+        PMD_DRV_LOG(INFO, "%s g_dtb_data free queue %d",<br />+                dev->data->name, g_dtb_data.queueid);<br />+<br />+        int ret = 0;<br />+<br />+        ret = dpp_np_online_uninstall(0, dev->data->name, g_dtb_data.queueid);<br />+        if (ret)<br />+            PMD_DRV_LOG(ERR, "%s dpp_np_online_uninstall failed", dev->data->name);<br />+<br />+        if (g_dtb_data.dtb_table_conf_mz) {<br />+            rte_memzone_free(g_dtb_data.dtb_table_conf_mz);<br />+            PMD_DRV_LOG(INFO, "%s free  dtb_table_conf_mz  ", dev->data->name);<br />+            g_dtb_data.dtb_table_conf_mz = NULL;<br />+        }<br />+        if (g_dtb_data.dtb_table_dump_mz) {<br />+            PMD_DRV_LOG(INFO, "%s free  dtb_table_dump_mz  ", dev->data->name);<br />+            rte_memzone_free(g_dtb_data.dtb_table_dump_mz);<br />+            g_dtb_data.dtb_table_dump_mz = NULL;<br />+        }<br />+        int i;<br />+<br />+        for (i = 0; i < ZXDH_MAX_BASE_DTB_TABLE_COUNT; i++) {<br />+            if (g_dtb_data.dtb_table_bulk_dump_mz[i]) {<br />+                rte_memzone_free(g_dtb_data.dtb_table_bulk_dump_mz[i]);<br />+                PMD_DRV_LOG(INFO, "%s free dtb_table_bulk_dump_mz[%d]",<br />+                        dev->data->name, i);<br />+                g_dtb_data.dtb_table_bulk_dump_mz[i] = NULL;<br />+            }<br />+        }<br />+        g_dtb_data.init_done = 0;<br />+        g_dtb_data.bind_device = NULL;<br />+    }<br />+    if (zxdh_shared_data != NULL)<br />+        zxdh_shared_data->npsdk_init_done = 0;<br />+}<br />+<br 
/>+static inline int npsdk_dtb_res_init(struct rte_eth_dev *dev)<br />+{<br />+    int ret = 0;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (g_dtb_data.init_done) {<br />+        PMD_INIT_LOG(DEBUG, "DTB res already init done, dev %s no need init",<br />+            dev->device->name);<br />+        return 0;<br />+    }<br />+    g_dtb_data.queueid = INVALID_DTBQUE;<br />+    g_dtb_data.bind_device = dev;<br />+    g_dtb_data.dev_refcnt++;<br />+    g_dtb_data.init_done = 1;<br />+<br />+    DPP_DEV_INIT_CTRL_T *dpp_ctrl = malloc(sizeof(*dpp_ctrl) +<br />+            sizeof(DPP_DTB_ADDR_INFO_T) * 256);<br />+<br />+    if (dpp_ctrl == NULL) {<br />+        PMD_INIT_LOG(ERR, "dev %s cannot allocate memory for dpp_ctrl", dev->device->name);<br />+        ret = -ENOMEM;<br />+        goto free_res;<br />+    }<br />+    memset(dpp_ctrl, 0, sizeof(*dpp_ctrl) + sizeof(DPP_DTB_ADDR_INFO_T) * 256);<br />+<br />+    dpp_ctrl->queue_id = 0xff;<br />+    dpp_ctrl->vport     = hw->vport.vport;<br />+    dpp_ctrl->vector = ZXDH_MSIX_INTR_DTB_VEC;<br />+    strcpy((char *)dpp_ctrl->port_name, dev->device->name);<br />+    dpp_ctrl->pcie_vir_addr = (uint32_t)hw->bar_addr[0];<br />+<br />+    struct bar_offset_params param = {0};<br />+    struct bar_offset_res  res = {0};<br />+<br />+    param.pcie_id = hw->pcie_id;<br />+    param.virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET;<br />+    param.type = URI_NP;<br />+<br />+    ret = zxdh_get_bar_offset(&param, &res);<br />+    if (ret) {<br />+        PMD_INIT_LOG(ERR, "dev %s get npbar offset failed", dev->device->name);<br />+        goto free_res;<br />+    }<br />+    dpp_ctrl->np_bar_len = res.bar_length;<br />+    dpp_ctrl->np_bar_offset = res.bar_offset;<br />+    if (!g_dtb_data.dtb_table_conf_mz) {<br />+        const struct rte_memzone *conf_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_conf_mz",<br />+                ZXDH_DTB_TABLE_CONF_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);<br />+<br />+        if (conf_mz == NULL) {<br />+            PMD_INIT_LOG(ERR,<br />+                "dev %s cannot allocate memory for dtb table conf",<br />+                dev->device->name);<br />+            ret = -ENOMEM;<br />+            goto free_res;<br />+        }<br />+        dpp_ctrl->down_vir_addr = conf_mz->addr_64;<br />+        dpp_ctrl->down_phy_addr = conf_mz->iova;<br />+        g_dtb_data.dtb_table_conf_mz = conf_mz;<br />+    }<br />+    if (!g_dtb_data.dtb_table_dump_mz) {<br />+        const struct rte_memzone *dump_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_dump_mz",<br />+                ZXDH_DTB_TABLE_DUMP_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);<br />+<br />+        if (dump_mz == NULL) {<br />+            PMD_INIT_LOG(ERR,<br />+                "dev %s cannot allocate memory for dtb table dump",<br />+                dev->device->name);<br />+            ret = -ENOMEM;<br />+            goto free_res;<br />+        }<br />+        dpp_ctrl->dump_vir_addr = dump_mz->addr_64;<br />+        dpp_ctrl->dump_phy_addr = dump_mz->iova;<br />+        g_dtb_data.dtb_table_dump_mz = dump_mz;<br />+    }<br />+    /* init bulk dump */<br />+    zxdh_dtb_dump_res_init(hw, dpp_ctrl);<br />+<br />+    ret = dpp_host_np_init(0, dpp_ctrl);<br />+    if (ret) {<br />+        PMD_INIT_LOG(ERR, "dev %s dpp host np init failed, ret %d", dev->device->name, ret);<br />+        goto free_res;<br />+    }<br />+<br />+    PMD_INIT_LOG(INFO, "dev %s dpp host np init ok, dtb queue %d",<br 
/>+        dev->device->name, dpp_ctrl->queue_id);<br />+    g_dtb_data.queueid = dpp_ctrl->queue_id;<br />+    free(dpp_ctrl);<br />+    return 0;<br />+<br />+free_res:<br />+    dtb_data_res_free(hw);<br />+    free(dpp_ctrl);<br />+    return -ret;<br />+}<br />+<br />+static int32_t dpp_res_uni_init(uint32_t type)<br />+{<br />+    uint32_t ret = 0;<br />+    uint32_t dev_id = 0;<br />+    DPP_APT_HASH_RES_INIT_T HashResInit = {0};<br />+    DPP_APT_ERAM_RES_INIT_T EramResInit = {0};<br />+    DPP_APT_STAT_RES_INIT_T StatResInit = {0};<br />+<br />+    memset(&HashResInit, 0x0, sizeof(DPP_APT_HASH_RES_INIT_T));<br />+    memset(&EramResInit, 0x0, sizeof(DPP_APT_ERAM_RES_INIT_T));<br />+    memset(&StatResInit, 0x0, sizeof(DPP_APT_STAT_RES_INIT_T));<br />+<br />+    ret = dpp_apt_hash_res_get(type, &HashResInit);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s hash_res_get failed!", __func__);<br />+        return -1;<br />+    }<br />+    ret = dpp_apt_eram_res_get(type, &EramResInit);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s eram_res_get failed!", __func__);<br />+        return -1;<br />+    }<br />+    ret = dpp_apt_stat_res_get(type, &StatResInit);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s stat_res_get failed!", __func__);<br />+        return -1;<br />+    }<br />+    ret = dpp_apt_hash_global_res_init(dev_id);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s hash_global_res_init failed!", __func__);<br />+        return -1;<br />+    }<br />+<br />+    ret = dpp_apt_hash_func_res_init(dev_id, HashResInit.func_num, HashResInit.func_res);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s hash_func_res_init failed!", __func__);<br />+        return -1;<br />+    }<br />+<br />+    ret = dpp_apt_hash_bulk_res_init(dev_id, HashResInit.bulk_num, HashResInit.bulk_res);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s hash_bulk_res_init failed!", __func__);<br />+        return -1;<br />+    }<br />+    ret = dpp_apt_hash_tbl_res_init(dev_id, HashResInit.tbl_num, HashResInit.tbl_res);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s hash_tbl_res_init failed!", __func__);<br />+        return -1;<br />+    }<br />+    ret = dpp_apt_eram_res_init(dev_id, EramResInit.tbl_num, EramResInit.eram_res);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s eram_res_init failed!", __func__);<br />+        return -1;<br />+    }<br />+    ret = dpp_stat_ppu_eram_baddr_set(dev_id, StatResInit.eram_baddr);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s stat_ppu_eram_baddr_set failed!", __func__);<br />+        return -1;<br />+    }<br />+    ret = dpp_stat_ppu_eram_depth_set(dev_id, StatResInit.eram_depth); /* unit: 128bits */<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s stat_ppu_eram_depth_set failed!", __func__);<br />+        return -1;<br />+    }<br />+    ret = dpp_se_cmmu_smmu1_cfg_set(dev_id, StatResInit.ddr_baddr);<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s dpp_se_cmmu_smmu1_cfg_set failed!", __func__);<br />+        return -1;<br />+    }<br />+    ret = dpp_stat_ppu_ddr_baddr_set(dev_id, StatResInit.ppu_ddr_offset); /* unit: 128bits */<br />+    if (ret) {<br />+        PMD_DRV_LOG(ERR, "%s stat_ppu_ddr_baddr_set failed!", __func__);<br />+        return -1;<br />+    }<br />+<br />+    return 0;<br />+}<br />+<br />+static inline int npsdk_apt_res_init(struct rte_eth_dev *dev __rte_unused)<br />+{<br />+    int32_t ret = 0;<br />+<br />+    ret = dpp_res_uni_init(SE_NIC_RES_TYPE);<br />+  
  if (ret) {<br />+        PMD_INIT_LOG(ERR, "init standard dpp res failed");<br />+        return -1;<br />+    }<br />+<br />+    return ret;<br />+}<br />+<br />+static int zxdh_np_init(struct rte_eth_dev *eth_dev)<br />+{<br />+    uint32_t ret = 0;<br />+    struct zxdh_hw *hw = eth_dev->data->dev_private;<br />+<br />+    if ((zxdh_shared_data != NULL) && zxdh_shared_data->npsdk_init_done) {<br />+        g_dtb_data.dev_refcnt++;<br />+        zxdh_tbl_entry_offline_destroy(hw);<br />+        PMD_DRV_LOG(DEBUG, "no need to init dtb, dtb channel %d devref %d",<br />+                g_dtb_data.queueid, g_dtb_data.dev_refcnt);<br />+        return 0;<br />+    }<br />+<br />+    if (hw->is_pf) {<br />+        ret = npsdk_dtb_res_init(eth_dev);<br />+        if (ret) {<br />+            PMD_DRV_LOG(ERR, "dpp dtb res init failed, ret:%d ", ret);<br />+            return -ret;<br />+        }<br />+<br />+        ret = npsdk_apt_res_init(eth_dev);<br />+        if (ret) {<br />+            PMD_DRV_LOG(ERR, "dpp apt init failed, ret:%d ", ret);<br />+            return -ret;<br />+        }<br />+    }<br />+    if (zxdh_shared_data != NULL)<br />+        zxdh_shared_data->npsdk_init_done = 1;<br />+<br />+    return 0;<br />+}<br />+<br />+static void zxdh_priv_res_free(struct zxdh_hw *priv)<br />+{<br />+    rte_free(priv->vlan_fiter);<br />+    priv->vlan_fiter = NULL;<br />+    rte_free(priv->vfinfo);<br />+    priv->vfinfo = NULL;<br />+}<br />+<br />+static int zxdh_tbl_entry_destroy(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint32_t sdt_no;<br />+    int ret = 0;<br />+<br />+    if (!g_dtb_data.init_done)<br />+        return ret;<br />+<br />+    if (hw->is_pf) {<br />+        sdt_no = MK_SDT_NO(L2_ENTRY, hw->hash_search_index);<br />+        ret = dpp_dtb_hash_online_delete(0, g_dtb_data.queueid, sdt_no);<br />+        if (ret) {<br />+            PMD_DRV_LOG(ERR, "%s dpp_dtb_hash_online_delete sdt_no %d failed ",<br />+                dev->data->name, sdt_no);<br />+            return -1;<br />+        }<br />+<br />+        sdt_no = MK_SDT_NO(MC, hw->hash_search_index);<br />+        ret = dpp_dtb_hash_online_delete(0, g_dtb_data.queueid, sdt_no);<br />+        if (ret) {<br />+            PMD_DRV_LOG(ERR, "%s dpp_dtb_hash_online_delete sdt_no %d failed ",<br />+                dev->data->name, sdt_no);<br />+            return -1;<br />+        }<br />+    }<br />+    return ret;<br />+}<br />+<br />+static void zxdh_np_destroy(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    int ret;<br />+<br />+    ret = zxdh_tbl_entry_destroy(dev);<br />+    if (ret)<br />+        return;<br />+<br />+    if ((!g_dtb_data.init_done) && (!g_dtb_data.dev_refcnt))<br />+        return;<br />+<br />+    if (--g_dtb_data.dev_refcnt == 0)<br />+        dtb_data_res_free(hw);<br />+<br />+    PMD_DRV_LOG(DEBUG, "g_dtb_data dev_refcnt %d", g_dtb_data.dev_refcnt);<br />+}<br />+<br />+static int32_t zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)<br />+{<br />+    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);<br />+    int32_t ret;<br />+<br />+    eth_dev->dev_ops = &zxdh_eth_dev_ops;<br />+<br />+    /**<br />+     * Primary process does the whole initialization,<br />+     * for secondary processes, we just select the same Rx and Tx function as primary.<br />+     */<br />+    struct zxdh_hw *hw = eth_dev->data->dev_private;<br />+<br />+    if (rte_eal_process_type() == RTE_PROC_SECONDARY) 
{<br />+        VTPCI_OPS(hw) = &zxdh_modern_ops;<br />+        set_rxtx_funcs(eth_dev);<br />+        return 0;<br />+    }<br />+    /* Allocate memory for storing MAC addresses */<br />+    eth_dev->data->mac_addrs = rte_zmalloc("zxdh_mac",<br />+            ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);<br />+    if (eth_dev->data->mac_addrs == NULL) {<br />+        PMD_INIT_LOG(ERR, "Failed to allocate %d bytes store MAC addresses",<br />+                ZXDH_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);<br />+        return -ENOMEM;<br />+    }<br />+    memset(hw, 0, sizeof(*hw));<br />+    ret = zxdh_dev_devargs_parse(eth_dev->device->devargs, hw);<br />+    if (ret < 0) {<br />+        PMD_INIT_LOG(ERR, "dev args parse failed");<br />+        return -EINVAL;<br />+    }<br />+<br />+    hw->bar_addr[0] = (uint64_t)pci_dev->mem_resource[0].addr;<br />+    if (hw->bar_addr[0] == 0) {<br />+        PMD_INIT_LOG(ERR, "Bad mem resource.");<br />+        return -EIO;<br />+    }<br />+    hw->device_id = pci_dev->id.device_id;<br />+    hw->port_id = eth_dev->data->port_id;<br />+    hw->eth_dev = eth_dev;<br />+    hw->speed = RTE_ETH_SPEED_NUM_UNKNOWN;<br />+    hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;<br />+    hw->is_pf = 0;<br />+<br />+    rte_spinlock_init(&hw->state_lock);<br />+    if (pci_dev->id.device_id == ZXDH_E310_PF_DEVICEID ||<br />+        pci_dev->id.device_id == ZXDH_E312_PF_DEVICEID) {<br />+        hw->is_pf = 1;<br />+        hw->pfinfo.vf_nums = pci_dev->max_vfs;<br />+    }<br />+<br />+    /* reset device and get dev config*/<br />+    ret = zxdh_init_once(eth_dev);<br />+    if (ret != 0)<br />+        goto err_zxdh_init;<br />+<br />+    ret = zxdh_init_device(eth_dev);<br />+    if (ret < 0)<br />+        goto err_zxdh_init;<br />+<br />+    ret = zxdh_np_init(eth_dev);<br />+    if (ret)<br />+        goto err_zxdh_init;<br />+<br />+    zxdh_priv_res_init(hw);<br />+    zxdh_sriovinfo_init(hw);<br />+    zxdh_msg_cb_reg(hw);<br />+    zxdh_configure_intr(eth_dev);<br />+    return 0;<br />+<br />+err_zxdh_init:<br />+    zxdh_intr_release(eth_dev);<br />+    zxdh_np_destroy(eth_dev);<br />+    zxdh_bar_msg_chan_exit();<br />+    zxdh_priv_res_free(hw);<br />+    rte_free(eth_dev->data->mac_addrs);<br />+    eth_dev->data->mac_addrs = NULL;<br />+    return ret;<br />+}<br />+<br />+int32_t zxdh_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,<br />+            struct rte_pci_device *pci_dev)<br />+{<br />+    return rte_eth_dev_pci_generic_probe(pci_dev,<br />+                        sizeof(struct zxdh_hw),<br />+                        zxdh_eth_dev_init);<br />+}<br />+<br />+<br />+static int32_t zxdh_eth_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)<br />+{<br />+    if (rte_eal_process_type() == RTE_PROC_SECONDARY)<br />+        return 0;<br />+    /** todo later<br />+     * zxdh_dev_close(eth_dev);<br />+     */<br />+    return 0;<br />+}<br />+<br />+int32_t zxdh_eth_pci_remove(struct rte_pci_device *pci_dev)<br />+{<br />+    int32_t ret = rte_eth_dev_pci_generic_remove(pci_dev, zxdh_eth_dev_uninit);<br />+<br />+    if (ret == -ENODEV) { /* Port has already been released by close. 
*/<br />+        ret = 0;<br />+    }<br />+    return ret;<br />+}<br />+<br />+static const struct rte_pci_id pci_id_zxdh_map[] = {<br />+    {RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_E310_PF_DEVICEID)},<br />+    {RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_E310_VF_DEVICEID)},<br />+    {RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_E312_PF_DEVICEID)},<br />+    {RTE_PCI_DEVICE(PCI_VENDOR_ID_ZTE, ZXDH_E312_VF_DEVICEID)},<br />+    {.vendor_id = 0, /* sentinel */ },<br />+};<br />+static struct rte_pci_driver zxdh_pmd = {<br />+    .driver = {.name = "net_zxdh", },<br />+    .id_table = pci_id_zxdh_map,<br />+    .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,<br />+    .probe = zxdh_eth_pci_probe,<br />+    .remove = zxdh_eth_pci_remove,<br />+};<br />+<br />+RTE_PMD_REGISTER_PCI(net_zxdh, zxdh_pmd);<br />+RTE_PMD_REGISTER_PCI_TABLE(net_zxdh, pci_id_zxdh_map);<br /> RTE_PMD_REGISTER_KMOD_DEP(net_zxdh, "* vfio-pci");<br /> RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_init, init, NOTICE);<br /> RTE_LOG_REGISTER_SUFFIX(zxdh_logtype_driver, driver, NOTICE);<br />diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h<br />new file mode 100644<br />index 0000000000..c139d0aa5e<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_ethdev.h<br />@@ -0,0 +1,203 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2024 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZXDH_ETHDEV_H_<br />+#define _ZXDH_ETHDEV_H_<br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+#include <stdint.h> <br />+#include "ethdev_pci.h" <br />+<br />+extern struct zxdh_dtb_shared_data g_dtb_data;<br />+#define PF_PCIE_ID(pcie_id)         ((pcie_id & 0xff00) | 1 << 11)<br />+#define VF_PCIE_ID(pcie_id, vf_idx) ((pcie_id & 0xff00) | (1 << 11) | (vf_idx & 0xff))<br />+<br />+#define ZXDH_QUEUES_NUM_MAX          256<br />+<br />+/* ZXDH PCI vendor/device ID. */<br />+#define PCI_VENDOR_ID_ZTE        0x1cf2<br />+<br />+#define ZXDH_E310_PF_DEVICEID     0x8061<br />+#define ZXDH_E310_VF_DEVICEID     0x8062<br />+#define ZXDH_E312_PF_DEVICEID     0x8049<br />+#define ZXDH_E312_VF_DEVICEID     0x8060<br />+<br />+#define ZXDH_MAX_UC_MAC_ADDRS  32<br />+#define ZXDH_MAX_MC_MAC_ADDRS  32<br />+#define ZXDH_MAX_MAC_ADDRS     (ZXDH_MAX_UC_MAC_ADDRS + ZXDH_MAX_MC_MAC_ADDRS)<br />+<br />+/* BAR definitions */<br />+#define ZXDH_NUM_BARS    2<br />+#define ZXDH_BAR0_INDEX  0<br />+<br />+#define ZXDH_MIN_QUEUE_DEPTH 1024<br />+#define ZXDH_MAX_QUEUE_DEPTH 32768<br />+<br />+#define ZXDH_MAX_VF 256<br />+<br />+#define ZXDH_TBL_ERAM_DUMP_SIZE  (4 * 1024 * 1024)<br />+#define ZXDH_TBL_ZCAM_DUMP_SIZE  (5 * 1024 * 1024)<br />+<br />+#define INVALID_DTBQUE  0xFFFF<br />+#define ZXDH_MAX_BASE_DTB_TABLE_COUNT 30<br />+#define ZXDH_DTB_TABLE_CONF_SIZE  (32 * (16 + 16 * 1024))<br />+#define ZXDH_DTB_TABLE_DUMP_SIZE  (32 * (16 + 16 * 1024))<br />+<br />+/*<br />+ * Process  dev config changed interrupt. 
Call the callback<br />+ * if link state changed, generate gratuitous RARP packet if<br />+ * the status indicates an ANNOUNCE.<br />+ */<br />+#define ZXDH_NET_S_LINK_UP   1 /* Link is up */<br />+#define ZXDH_NET_S_ANNOUNCE  2 /* Announcement is needed */<br />+<br />+struct pfinfo {<br />+    uint16_t pcieid;<br />+    uint16_t vf_nums;<br />+};<br />+struct vfinfo {<br />+    uint16_t vf_idx;<br />+    uint16_t pcieid;<br />+    uint16_t vport;<br />+    uint8_t flag;<br />+    uint8_t state;<br />+    uint8_t rsv;<br />+    struct rte_ether_addr mac_addr;<br />+    struct rte_ether_addr vf_mac[ZXDH_MAX_MAC_ADDRS];<br />+};<br />+<br />+union VPORT {<br />+    uint16_t vport;<br />+<br />+    __extension__<br />+    struct {<br />+        uint16_t vfid:8;<br />+        uint16_t pfid:3;<br />+        uint16_t vf_flag:1;<br />+        uint16_t epid:3;<br />+        uint16_t direct_flag:1;<br />+    };<br />+};<br />+<br />+struct chnl_context {<br />+    uint16_t valid;<br />+    uint16_t ph_chno;<br />+}; /* 4B */<br />+<br />+struct zxdh_hw {<br />+    uint64_t host_features;<br />+    uint64_t guest_features;<br />+    uint32_t max_queue_pairs;<br />+    uint16_t max_mtu;<br />+    uint8_t  vtnet_hdr_size;<br />+    uint8_t  vlan_strip;<br />+    uint8_t  use_msix;<br />+    uint8_t  intr_enabled;<br />+    uint8_t  started;<br />+    uint8_t  weak_barriers;<br />+<br />+    bool has_tx_offload;<br />+    bool has_rx_offload;<br />+<br />+    uint8_t  mac_addr[RTE_ETHER_ADDR_LEN];<br />+    uint16_t port_id;<br />+<br />+    uint32_t  notify_off_multiplier;<br />+    uint32_t  speed;  /* link speed in MB */<br />+    uint32_t  speed_mode;  /* link speed in 1x 2x 3x */<br />+    uint8_t   duplex;<br />+    uint8_t  *isr;<br />+    uint16_t *notify_base;<br />+<br />+    struct zxdh_pci_common_cfg *common_cfg;<br />+    struct zxdh_net_config     *dev_cfg;<br />+<br />+    uint16_t queue_num;<br />+    uint16_t device_id;<br />+<br />+    uint16_t pcie_id;<br />+    uint8_t  phyport;<br />+    bool     msg_chan_init;<br />+<br />+    uint8_t panel_id;<br />+    uint8_t rsv[1];<br />+<br />+    /**<br />+     * App management thread and virtio interrupt handler<br />+     * thread both can change device state,<br />+     * this lock is meant to avoid such a contention.<br />+     */<br />+    rte_spinlock_t     state_lock;<br />+    struct rte_mbuf  **inject_pkts;<br />+    struct virtqueue **vqs;<br />+<br />+    uint64_t bar_addr[ZXDH_NUM_BARS];<br />+    struct rte_intr_handle *risc_intr;  /* Interrupt handle of rsic_v to host */<br />+    struct rte_intr_handle *dtb_intr;  /* Interrupt handle of rsic_v to host */<br />+<br />+    struct chnl_context channel_context[ZXDH_QUEUES_NUM_MAX];<br />+    union VPORT vport;<br />+<br />+    uint8_t is_pf         : 1,<br />+            switchoffload : 1;<br />+    uint8_t hash_search_index;<br />+    uint8_t admin_status;<br />+<br />+    uint16_t vfid;<br />+    uint16_t q_depth;<br />+    uint64_t *vlan_fiter;<br />+    struct pfinfo pfinfo;<br />+    struct vfinfo *vfinfo;<br />+    struct rte_eth_dev *eth_dev;<br />+};<br />+<br />+/* Shared data between primary and secondary processes. */<br />+struct zxdh_shared_data {<br />+    rte_spinlock_t lock; /* Global spinlock for primary and secondary processes. */<br />+    int init_done;       /* Whether primary has done initialization. */<br />+    unsigned int secondary_cnt; /* Number of secondary processes init'd. 
*/<br />+<br />+    int npsdk_init_done;<br />+    uint32_t  dev_refcnt;<br />+    struct zxdh_dtb_shared_data *dtb_data;<br />+};<br />+<br />+struct zxdh_dtb_shared_data {<br />+    int init_done;<br />+    char name[32];<br />+    uint16_t queueid;<br />+    uint16_t vport;<br />+    uint32_t vector;<br />+    const struct rte_memzone *dtb_table_conf_mz;<br />+    const struct rte_memzone *dtb_table_dump_mz;<br />+    const struct rte_memzone *dtb_table_bulk_dump_mz[ZXDH_MAX_BASE_DTB_TABLE_COUNT];<br />+    struct rte_eth_dev *bind_device;<br />+    uint32_t dev_refcnt;<br />+};<br />+<br />+struct zxdh_dtb_bulk_dump_info {<br />+    const char *mz_name;<br />+    uint32_t mz_size;<br />+    uint32_t sdt_no;        /** <@brief sdt no 0~255 */<br />+    const struct rte_memzone *mz;<br />+};<br />+<br />+void zxdh_interrupt_handler(void *param);<br />+int32_t zxdh_dev_pause(struct rte_eth_dev *dev);<br />+void zxdh_dev_resume(struct rte_eth_dev *dev);<br />+int32_t zxdh_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts, int32_t nb_pkts);<br />+void zxdh_notify_peers(struct rte_eth_dev *dev);<br />+<br />+int32_t zxdh_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,<br />+            struct rte_pci_device *pci_dev);<br />+int32_t zxdh_eth_pci_remove(struct rte_pci_device *pci_dev);<br />+<br />+#ifdef __cplusplus<br />+}<br />+#endif<br />+<br />+#endif /* _ZXDH_ETHDEV_H_ */<br />diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c<br />new file mode 100644<br />index 0000000000..b32c2e7955<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_pci.c<br />@@ -0,0 +1,462 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#include <stdint.h> <br />+#include <unistd.h> <br />+<br />+#ifdef RTE_EXEC_ENV_LINUX<br />+ #include <dirent.h> <br />+ #include <fcntl.h> <br />+#endif<br />+<br />+#include <rte_io.h> <br />+#include <rte_bus.h> <br />+#include <rte_common.h> <br />+<br />+#include "zxdh_pci.h" <br />+#include "zxdh_logs.h" <br />+#include "zxdh_queue.h" <br />+<br />+/*<br />+ * Following macros are derived from linux/pci_regs.h, however,<br />+ * we can't simply include that header here, as there is no such<br />+ * file for non-Linux platform.<br />+ */<br />+#define PCI_CAPABILITY_LIST             0x34<br />+#define PCI_CAP_ID_VNDR                 0x09<br />+#define PCI_CAP_ID_MSIX                 0x11<br />+<br />+/*<br />+ * The remaining space is defined by each driver as the per-driver<br />+ * configuration space.<br />+ */<br />+#define ZXDH_PCI_CONFIG(hw)  (((hw)->use_msix == ZXDH_MSIX_ENABLED) ? 
24 : 20)<br />+#define PCI_MSIX_ENABLE 0x8000<br />+<br />+static inline int32_t check_vq_phys_addr_ok(struct virtqueue *vq)<br />+{<br />+    /**<br />+     * Virtio PCI device ZXDH_PCI_QUEUE_PF register is 32bit,<br />+     * and only accepts 32 bit page frame number.<br />+     * Check if the allocated physical memory exceeds 16TB.<br />+     */<br />+    if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> (ZXDH_PCI_QUEUE_ADDR_SHIFT + 32)) {<br />+        PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");<br />+        return 0;<br />+    }<br />+    return 1;<br />+}<br />+static inline void io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)<br />+{<br />+    rte_write32(val & ((1ULL << 32) - 1), lo);<br />+    rte_write32(val >> 32, hi);<br />+}<br />+<br />+static void modern_read_dev_config(struct zxdh_hw *hw,<br />+                                   size_t offset,<br />+                                   void *dst,<br />+                                   int32_t length)<br />+{<br />+    int32_t i       = 0;<br />+    uint8_t *p      = NULL;<br />+    uint8_t old_gen = 0;<br />+    uint8_t new_gen = 0;<br />+<br />+    do {<br />+        old_gen = rte_read8(&hw->common_cfg->config_generation);<br />+<br />+        p = dst;<br />+        for (i = 0;  i < length; i++)<br />+            *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);<br />+<br />+        new_gen = rte_read8(&hw->common_cfg->config_generation);<br />+    } while (old_gen != new_gen);<br />+}<br />+<br />+static void modern_write_dev_config(struct zxdh_hw *hw,<br />+                                    size_t offset,<br />+                                    const void *src,<br />+                                    int32_t length)<br />+{<br />+    int32_t i = 0;<br />+    const uint8_t *p = src;<br />+<br />+    for (i = 0;  i < length; i++)<br />+        rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));<br />+}<br />+<br />+static uint64_t modern_get_features(struct zxdh_hw *hw)<br />+{<br />+    uint32_t features_lo = 0;<br />+    uint32_t features_hi = 0;<br />+<br />+    rte_write32(0, &hw->common_cfg->device_feature_select);<br />+    features_lo = rte_read32(&hw->common_cfg->device_feature);<br />+<br />+    rte_write32(1, &hw->common_cfg->device_feature_select);<br />+    features_hi = rte_read32(&hw->common_cfg->device_feature);<br />+<br />+    return ((uint64_t)features_hi << 32) | features_lo;<br />+}<br />+<br />+static void modern_set_features(struct zxdh_hw *hw, uint64_t features)<br />+{<br />+    rte_write32(0, &hw->common_cfg->guest_feature_select);<br />+    rte_write32(features & ((1ULL << 32) - 1), &hw->common_cfg->guest_feature);<br />+    rte_write32(1, &hw->common_cfg->guest_feature_select);<br />+    rte_write32(features >> 32, &hw->common_cfg->guest_feature);<br />+}<br />+<br />+static uint8_t modern_get_status(struct zxdh_hw *hw)<br />+{<br />+    return rte_read8(&hw->common_cfg->device_status);<br />+}<br />+<br />+static void modern_set_status(struct zxdh_hw *hw, uint8_t status)<br />+{<br />+    rte_write8(status, &hw->common_cfg->device_status);<br />+}<br />+<br />+static uint8_t modern_get_isr(struct zxdh_hw *hw)<br />+{<br />+    return rte_read8(hw->isr);<br />+}<br />+<br />+static uint16_t modern_set_config_irq(struct zxdh_hw *hw, uint16_t vec)<br />+{<br />+    rte_write16(vec, &hw->common_cfg->msix_config);<br />+    return rte_read16(&hw->common_cfg->msix_config);<br />+}<br />+<br />+static uint16_t modern_set_queue_irq(struct zxdh_hw *hw, 
struct virtqueue *vq, uint16_t vec)<br />+{<br />+    rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);<br />+    rte_write16(vec, &hw->common_cfg->queue_msix_vector);<br />+    return rte_read16(&hw->common_cfg->queue_msix_vector);<br />+}<br />+<br />+static uint16_t modern_get_queue_num(struct zxdh_hw *hw, uint16_t queue_id)<br />+{<br />+    rte_write16(queue_id, &hw->common_cfg->queue_select);<br />+    return rte_read16(&hw->common_cfg->queue_size);<br />+}<br />+<br />+static void modern_set_queue_num(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size)<br />+{<br />+    rte_write16(queue_id, &hw->common_cfg->queue_select);<br />+    rte_write16(vq_size, &hw->common_cfg->queue_size);<br />+}<br />+<br />+static int32_t modern_setup_queue(struct zxdh_hw *hw, struct virtqueue *vq)<br />+{<br />+    uint64_t desc_addr  = 0;<br />+    uint64_t avail_addr = 0;<br />+    uint64_t used_addr  = 0;<br />+    uint16_t notify_off = 0;<br />+<br />+    if (!check_vq_phys_addr_ok(vq))<br />+        return -1;<br />+<br />+    desc_addr = vq->vq_ring_mem;<br />+    avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);<br />+    if (vtpci_packed_queue(vq->hw)) {<br />+        used_addr = RTE_ALIGN_CEIL((avail_addr + sizeof(struct vring_packed_desc_event)),<br />+                            ZXDH_PCI_VRING_ALIGN);<br />+    } else {<br />+        used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,<br />+                        ring[vq->vq_nentries]), ZXDH_PCI_VRING_ALIGN);<br />+    }<br />+<br />+    rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);<br />+<br />+    io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,<br />+                       &hw->common_cfg->queue_desc_hi);<br />+    io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,<br />+                       &hw->common_cfg->queue_avail_hi);<br />+    io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,<br />+                       &hw->common_cfg->queue_used_hi);<br />+<br />+    notify_off = rte_read16(&hw->common_cfg->queue_notify_off); /* default 0 */<br />+    notify_off = 0;<br />+    vq->notify_addr = (void *)((uint8_t *)hw->notify_base +<br />+            notify_off * hw->notify_off_multiplier);<br />+<br />+    rte_write16(1, &hw->common_cfg->queue_enable);<br />+<br />+    return 0;<br />+}<br />+<br />+static void modern_del_queue(struct zxdh_hw *hw, struct virtqueue *vq)<br />+{<br />+    rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);<br />+<br />+    io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,<br />+                       &hw->common_cfg->queue_desc_hi);<br />+    io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,<br />+                       &hw->common_cfg->queue_avail_hi);<br />+    io_write64_twopart(0, &hw->common_cfg->queue_used_lo,<br />+                       &hw->common_cfg->queue_used_hi);<br />+<br />+    rte_write16(0, &hw->common_cfg->queue_enable);<br />+}<br />+<br />+static void modern_notify_queue(struct zxdh_hw *hw, struct virtqueue *vq)<br />+{<br />+    uint32_t notify_data = 0;<br />+<br />+    if (!vtpci_with_feature(hw, ZXDH_F_NOTIFICATION_DATA)) {<br />+        rte_write16(vq->vq_queue_index, vq->notify_addr);<br />+        return;<br />+    }<br />+<br />+    if (vtpci_with_feature(hw, ZXDH_F_RING_PACKED)) {<br />+        /*<br />+         * Bit[0:15]: vq queue index<br />+         * Bit[16:30]: avail index<br />+         * Bit[31]: avail wrap counter<br />+         */<br />+   
     notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags & <br />+                        VRING_PACKED_DESC_F_AVAIL)) << 31) |<br />+                        ((uint32_t)vq->vq_avail_idx << 16) |<br />+                        vq->vq_queue_index;<br />+    } else {<br />+        /*<br />+         * Bit[0:15]: vq queue index<br />+         * Bit[16:31]: avail index<br />+         */<br />+        notify_data = ((uint32_t)vq->vq_avail_idx << 16) | vq->vq_queue_index;<br />+    }<br />+    PMD_DRV_LOG(DEBUG, "queue:%d notify_data 0x%x notify_addr %p",<br />+                 vq->vq_queue_index, notify_data, vq->notify_addr);<br />+    rte_write32(notify_data, vq->notify_addr);<br />+}<br />+<br />+const struct zxdh_pci_ops zxdh_modern_ops = {<br />+    .read_dev_cfg   = modern_read_dev_config,<br />+    .write_dev_cfg  = modern_write_dev_config,<br />+    .get_status     = modern_get_status,<br />+    .set_status     = modern_set_status,<br />+    .get_features   = modern_get_features,<br />+    .set_features   = modern_set_features,<br />+    .get_isr        = modern_get_isr,<br />+    .set_config_irq = modern_set_config_irq,<br />+    .set_queue_irq  = modern_set_queue_irq,<br />+    .get_queue_num  = modern_get_queue_num,<br />+    .set_queue_num  = modern_set_queue_num,<br />+    .setup_queue    = modern_setup_queue,<br />+    .del_queue      = modern_del_queue,<br />+    .notify_queue   = modern_notify_queue,<br />+};<br />+<br />+void zxdh_vtpci_read_dev_config(struct zxdh_hw *hw, size_t offset, void *dst, int32_t length)<br />+{<br />+    VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);<br />+}<br />+void zxdh_vtpci_write_dev_config(struct zxdh_hw *hw, size_t offset, const void *src, int32_t length)<br />+{<br />+    VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);<br />+}<br />+<br />+uint64_t zxdh_vtpci_get_features(struct zxdh_hw *hw)<br />+{<br />+    return VTPCI_OPS(hw)->get_features(hw);<br />+}<br />+<br />+void zxdh_vtpci_reset(struct zxdh_hw *hw)<br />+{<br />+    PMD_INIT_LOG(INFO, "port %u device reset started, waiting...", hw->port_id);<br />+    uint32_t retry = 0;<br />+<br />+    VTPCI_OPS(hw)->set_status(hw, ZXDH_CONFIG_STATUS_RESET);<br />+    /* Flush status write and wait until the device is ready. 
*/<br />+    while (VTPCI_OPS(hw)->get_status(hw) != ZXDH_CONFIG_STATUS_RESET) {<br />+        ++retry;<br />+        usleep(1000L);<br />+    }<br />+    PMD_INIT_LOG(INFO, "port %u device reset %u ms done", hw->port_id, retry);<br />+}<br />+<br />+void zxdh_vtpci_reinit_complete(struct zxdh_hw *hw)<br />+{<br />+    zxdh_vtpci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER_OK);<br />+}<br />+<br />+void zxdh_vtpci_set_status(struct zxdh_hw *hw, uint8_t status)<br />+{<br />+    if (status != ZXDH_CONFIG_STATUS_RESET)<br />+        status |= VTPCI_OPS(hw)->get_status(hw);<br />+<br />+    VTPCI_OPS(hw)->set_status(hw, status);<br />+}<br />+<br />+uint8_t zxdh_vtpci_get_status(struct zxdh_hw *hw)<br />+{<br />+    return VTPCI_OPS(hw)->get_status(hw);<br />+}<br />+<br />+uint8_t zxdh_vtpci_isr(struct zxdh_hw *hw)<br />+{<br />+    return VTPCI_OPS(hw)->get_isr(hw);<br />+}<br />+<br />+static void *get_cfg_addr(struct rte_pci_device *dev, struct zxdh_pci_cap *cap)<br />+{<br />+    uint8_t  bar    = cap->bar;<br />+    uint32_t length = cap->length;<br />+    uint32_t offset = cap->offset;<br />+<br />+    if (bar >= PCI_MAX_RESOURCE) {<br />+        PMD_INIT_LOG(ERR, "invalid bar: %u", bar);<br />+        return NULL;<br />+    }<br />+    if (offset + length < offset) {<br />+        PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows", offset, length);<br />+        return NULL;<br />+    }<br />+    if (offset + length > dev->mem_resource[bar].len) {<br />+        PMD_INIT_LOG(ERR, "invalid cap: overflows bar space: %u > %" PRIu64,<br />+            offset + length, dev->mem_resource[bar].len);<br />+        return NULL;<br />+    }<br />+    uint8_t *base = dev->mem_resource[bar].addr;<br />+<br />+    if (base == NULL) {<br />+        PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);<br />+        return NULL;<br />+    }<br />+    return base + offset;<br />+}<br />+<br />+int32_t zxdh_read_pci_caps(struct rte_pci_device *dev, struct zxdh_hw *hw)<br />+{<br />+    if (dev->mem_resource[0].addr == NULL) {<br />+        PMD_INIT_LOG(ERR, "bar0 base addr is NULL");<br />+        return -1;<br />+    }<br />+    uint8_t pos = 0;<br />+    int32_t ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);<br />+<br />+    if (ret != 1) {<br />+        PMD_INIT_LOG(DEBUG, "failed to read pci capability list, ret %d", ret);<br />+        return -1;<br />+    }<br />+    while (pos) {<br />+        struct zxdh_pci_cap cap;<br />+<br />+        ret = rte_pci_read_config(dev, &cap, 2, pos);<br />+        if (ret != 2) {<br />+            PMD_INIT_LOG(DEBUG, "failed to read pci cap at pos: %x ret %d", pos, ret);<br />+            break;<br />+        }<br />+        if (cap.cap_vndr == PCI_CAP_ID_MSIX) {<br />+            /**<br />+             * Transitional devices would also have this capability,<br />+             * that's why we also check if msix is enabled.<br />+             * 1st byte is cap ID; 2nd byte is the position of next cap;<br />+             * next two bytes are the flags.<br />+             */<br />+            uint16_t flags = 0;<br />+<br />+            ret = rte_pci_read_config(dev, &flags, sizeof(flags), pos + 2);<br />+            if (ret != sizeof(flags)) {<br />+                PMD_INIT_LOG(ERR, "failed to read pci cap at pos: %x ret %d",<br />+                    pos + 2, ret);<br />+                break;<br />+            }<br />+            hw->use_msix = (flags & PCI_MSIX_ENABLE) ?<br />+                    ZXDH_MSIX_ENABLED : ZXDH_MSIX_DISABLED;<br />+       
 }<br />+        if (cap.cap_vndr != PCI_CAP_ID_VNDR) {<br />+            PMD_INIT_LOG(DEBUG, "[%2x] skipping non VNDR cap id: %02x",<br />+                pos, cap.cap_vndr);<br />+            goto next;<br />+        }<br />+        ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);<br />+        if (ret != sizeof(cap)) {<br />+            PMD_INIT_LOG(ERR, "failed to read pci cap at pos: %x ret %d", pos, ret);<br />+            break;<br />+        }<br />+        PMD_INIT_LOG(DEBUG, "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",<br />+            pos, cap.cfg_type, cap.bar, cap.offset, cap.length);<br />+        switch (cap.cfg_type) {<br />+        case ZXDH_PCI_CAP_COMMON_CFG:<br />+            hw->common_cfg = get_cfg_addr(dev, &cap);<br />+            break;<br />+        case ZXDH_PCI_CAP_NOTIFY_CFG: {<br />+            ret = rte_pci_read_config(dev, &hw->notify_off_multiplier,<br />+                        4, pos + sizeof(cap));<br />+            if (ret != 4)<br />+                PMD_INIT_LOG(ERR,<br />+                    "failed to read notify_off_multiplier, ret %d", ret);<br />+            else<br />+                hw->notify_base = get_cfg_addr(dev, &cap);<br />+            break;<br />+        }<br />+        case ZXDH_PCI_CAP_DEVICE_CFG:<br />+            hw->dev_cfg = get_cfg_addr(dev, &cap);<br />+            break;<br />+        case ZXDH_PCI_CAP_ISR_CFG:<br />+            hw->isr = get_cfg_addr(dev, &cap);<br />+            break;<br />+        case ZXDH_PCI_CAP_PCI_CFG: {<br />+            hw->pcie_id = *(uint16_t *)&cap.padding[1];<br />+            PMD_INIT_LOG(DEBUG, "get pcie id 0x%x", hw->pcie_id);<br />+            uint16_t pcie_id = hw->pcie_id;<br />+<br />+            if ((pcie_id >> 11) & 0x1) /* PF */ {<br />+                PMD_INIT_LOG(DEBUG, "EP %u PF %u",<br />+                    pcie_id >> 12, (pcie_id >> 8) & 0x7);<br />+            } else { /* VF */<br />+                PMD_INIT_LOG(DEBUG, "EP %u PF %u VF %u",<br />+                    pcie_id >> 12, (pcie_id >> 8) & 0x7, pcie_id & 0xff);<br />+            }<br />+            break;<br />+        }<br />+        }<br />+next:<br />+    pos = cap.cap_next;<br />+    }<br />+    if (hw->common_cfg == NULL || hw->notify_base == NULL ||<br />+        hw->dev_cfg == NULL || hw->isr == NULL) {<br />+        PMD_INIT_LOG(ERR, "no modern pci device found.");<br />+        return -1;<br />+    }<br />+    return 0;<br />+}<br />+<br />+enum zxdh_msix_status zxdh_vtpci_msix_detect(struct rte_pci_device *dev)<br />+{<br />+    uint8_t pos = 0;<br />+    int32_t ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);<br />+<br />+    if (ret != 1) {<br />+        PMD_INIT_LOG(ERR, "failed to read pci capability list, ret %d", ret);<br />+        return ZXDH_MSIX_NONE;<br />+    }<br />+    while (pos) {<br />+        uint8_t cap[2] = {0};<br />+<br />+        ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);<br />+        if (ret != sizeof(cap)) {<br />+            PMD_INIT_LOG(ERR, "failed to read pci cap at pos: %x ret %d", pos, ret);<br />+            break;<br />+        }<br />+        if (cap[0] == PCI_CAP_ID_MSIX) {<br />+            uint16_t flags = 0;<br />+<br />+            ret = rte_pci_read_config(dev, &flags, sizeof(flags), pos + sizeof(cap));<br />+            if (ret != sizeof(flags)) {<br />+                PMD_INIT_LOG(ERR,<br />+                    "failed to read pci cap at pos: %x ret %d", pos + 2, ret);<br />+                break;<br />+            }<br />+      
      if (flags & PCI_MSIX_ENABLE)<br />+                return ZXDH_MSIX_ENABLED;<br />+            else<br />+                return ZXDH_MSIX_DISABLED;<br />+        }<br />+        pos = cap[1];<br />+    }<br />+    return ZXDH_MSIX_NONE;<br />+    }<br />diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h<br />new file mode 100644<br />index 0000000000..d6f3c552ad<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_pci.h<br />@@ -0,0 +1,259 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2024 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZXDH_PCI_H_<br />+#define _ZXDH_PCI_H_<br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+#include <stdint.h> <br />+#include <stdbool.h> <br />+#include <rte_pci.h> <br />+#include <rte_bus_pci.h> <br />+#include <bus_pci_driver.h> <br />+#include <ethdev_driver.h> <br />+<br />+#include "zxdh_ethdev.h" <br />+<br />+/* The bit of the ISR which indicates a device has an interrupt. */<br />+#define ZXDH_PCI_ISR_INTR    0x1<br />+/* The bit of the ISR which indicates a device configuration change. */<br />+#define ZXDH_PCI_ISR_CONFIG  0x2<br />+/* Vector value used to disable MSI for queue. */<br />+#define ZXDH_MSI_NO_VECTOR   0x7F<br />+<br />+/* Status byte for guest to report progress. */<br />+#define ZXDH_CONFIG_STATUS_RESET           0x00<br />+#define ZXDH_CONFIG_STATUS_ACK             0x01<br />+#define ZXDH_CONFIG_STATUS_DRIVER          0x02<br />+#define ZXDH_CONFIG_STATUS_DRIVER_OK       0x04<br />+#define ZXDH_CONFIG_STATUS_FEATURES_OK     0x08<br />+#define ZXDH_CONFIG_STATUS_DEV_NEED_RESET  0x40<br />+#define ZXDH_CONFIG_STATUS_FAILED          0x80<br />+<br />+/* The feature bitmap for net */<br />+#define ZXDH_NET_F_CSUM              0   /* Host handles pkts w/ partial csum */<br />+#define ZXDH_NET_F_GUEST_CSUM        1   /* Guest handles pkts w/ partial csum */<br />+#define ZXDH_NET_F_MTU               3   /* Initial MTU advice. */<br />+#define ZXDH_NET_F_MAC               5   /* Host has given MAC address. */<br />+#define ZXDH_NET_F_GUEST_TSO4        7   /* Guest can handle TSOv4 in. */<br />+#define ZXDH_NET_F_GUEST_TSO6        8   /* Guest can handle TSOv6 in. */<br />+#define ZXDH_NET_F_GUEST_ECN         9   /* Guest can handle TSO[6] w/ ECN in. */<br />+#define ZXDH_NET_F_GUEST_UFO         10  /* Guest can handle UFO in. */<br />+#define ZXDH_NET_F_HOST_TSO4         11  /* Host can handle TSOv4 in. */<br />+#define ZXDH_NET_F_HOST_TSO6         12  /* Host can handle TSOv6 in. */<br />+#define ZXDH_NET_F_HOST_ECN          13  /* Host can handle TSO[6] w/ ECN in. */<br />+#define ZXDH_NET_F_HOST_UFO          14  /* Host can handle UFO in. */<br />+#define ZXDH_NET_F_MRG_RXBUF         15  /* Host can merge receive buffers. 
*/<br />+#define ZXDH_NET_F_STATUS            16  /* zxdh_net_config.status available */<br />+#define ZXDH_NET_F_CTRL_VQ           17  /* Control channel available */<br />+#define ZXDH_NET_F_CTRL_RX           18  /* Control channel RX mode support */<br />+#define ZXDH_NET_F_CTRL_VLAN         19  /* Control channel VLAN filtering */<br />+#define ZXDH_NET_F_CTRL_RX_EXTRA     20  /* Extra RX mode control support */<br />+#define ZXDH_NET_F_GUEST_ANNOUNCE    21  /* Guest can announce device on the network */<br />+#define ZXDH_NET_F_MQ                22  /* Device supports Receive Flow Steering */<br />+#define ZXDH_NET_F_CTRL_MAC_ADDR     23  /* Set MAC address */<br />+/* Do we get callbacks when the ring is completely used, even if we've suppressed them? */<br />+#define ZXDH_F_NOTIFY_ON_EMPTY       24<br />+#define ZXDH_F_ANY_LAYOUT            27 /* Can the device handle any descriptor layout? */<br />+#define VIRTIO_RING_F_INDIRECT_DESC  28 /* We support indirect buffer descriptors */<br />+#define ZXDH_F_VERSION_1             32<br />+#define ZXDH_F_IOMMU_PLATFORM        33<br />+#define ZXDH_F_RING_PACKED           34<br />+/* Inorder feature indicates that all buffers are used by the device<br />+ * in the same order in which they have been made available.<br />+ */<br />+#define ZXDH_F_IN_ORDER              35<br />+/** This feature indicates that memory accesses by the driver<br />+ * and the device are ordered in a way described by the platform.<br />+ */<br />+#define ZXDH_F_ORDER_PLATFORM        36<br />+/**<br />+ * This feature indicates that the driver passes extra data<br />+ * (besides identifying the virtqueue) in its device notifications.<br />+ */<br />+#define ZXDH_F_NOTIFICATION_DATA     38<br />+#define ZXDH_NET_F_SPEED_DUPLEX      63 /* Device set linkspeed and duplex */<br />+<br />+/* The Guest publishes the used index for which it expects an interrupt<br />+ * at the end of the avail ring. Host should ignore the avail->flags field.<br />+ */<br />+/* The Host publishes the avail index for which it expects a kick<br />+ * at the end of the used ring. Guest should ignore the used->flags field.<br />+ */<br />+#define ZXDH_RING_F_EVENT_IDX                       29<br />+<br />+/* Maximum number of virtqueues per device. */<br />+#define ZXDH_MAX_VIRTQUEUE_PAIRS  8<br />+#define ZXDH_MAX_VIRTQUEUES       (ZXDH_MAX_VIRTQUEUE_PAIRS * 2 + 1)<br />+<br />+<br />+#define ZXDH_PCI_CAP_COMMON_CFG  1 /* Common configuration */<br />+#define ZXDH_PCI_CAP_NOTIFY_CFG  2 /* Notifications */<br />+#define ZXDH_PCI_CAP_ISR_CFG     3 /* ISR Status */<br />+#define ZXDH_PCI_CAP_DEVICE_CFG  4 /* Device specific configuration */<br />+#define ZXDH_PCI_CAP_PCI_CFG     5 /* PCI configuration access */<br />+<br />+#define VTPCI_OPS(hw)  (zxdh_hw_internal[(hw)->port_id].vtpci_ops)<br />+#define VTPCI_IO(hw)   (&zxdh_hw_internal[(hw)->port_id].io)<br />+<br />+/*<br />+ * How many bits to shift physical queue address written to QUEUE_PFN.<br />+ * 12 is historical, and due to x86 page size.<br />+ */<br />+#define ZXDH_PCI_QUEUE_ADDR_SHIFT                   12<br />+<br />+/* The alignment to use between consumer and producer parts of vring. 
*/<br />+#define ZXDH_PCI_VRING_ALIGN                        4096<br />+<br />+/* BAR0 space layout */<br />+#define ZXDH_VQMREG_OFFSET    0x0000<br />+#define ZXDH_FWCAP_OFFSET     0x1000<br />+#define ZXDH_CTRLCH_OFFSET    0x2000<br />+#define ZXDH_MAC_OFFSET       0x24000<br />+#define ZXDH_SPINLOCK_OFFSET  0x4000<br />+#define ZXDH_FWSHRD_OFFSET    0x5000<br />+#define ZXDH_QUERES_SHARE_BASE   (ZXDH_FWSHRD_OFFSET)<br />+#define ZXDH_QUERES_SHARE_SIZE   512<br />+<br />+enum zxdh_msix_status {<br />+    ZXDH_MSIX_NONE     = 0,<br />+    ZXDH_MSIX_DISABLED = 1,<br />+    ZXDH_MSIX_ENABLED  = 2<br />+};<br />+<br />+static inline int32_t vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit)<br />+{<br />+    return (hw->guest_features & (1ULL << bit)) != 0;<br />+}<br />+<br />+static inline int32_t vtpci_packed_queue(struct zxdh_hw *hw)<br />+{<br />+    return vtpci_with_feature(hw, ZXDH_F_RING_PACKED);<br />+}<br />+<br />+/*<br />+ * While zxdh_hw is stored in shared memory, this structure stores<br />+ * per-process information that may vary in the multi-process model,<br />+ * for example the vtpci_ops pointer.<br />+ */<br />+struct zxdh_hw_internal {<br />+    const struct zxdh_pci_ops *vtpci_ops;<br />+    struct rte_pci_ioport io;<br />+};<br />+<br />+/* Fields in ZXDH_PCI_CAP_COMMON_CFG: */<br />+struct zxdh_pci_common_cfg {<br />+    /* About the whole device. */<br />+    uint32_t device_feature_select; /* read-write */<br />+    uint32_t device_feature;    /* read-only */<br />+    uint32_t guest_feature_select;  /* read-write */<br />+    uint32_t guest_feature;     /* read-write */<br />+    uint16_t msix_config;       /* read-write */<br />+    uint16_t num_queues;        /* read-only */<br />+    uint8_t  device_status;     /* read-write */<br />+    uint8_t  config_generation; /* read-only */<br />+<br />+    /* About a specific virtqueue. */<br />+    uint16_t queue_select;      /* read-write */<br />+    uint16_t queue_size;        /* read-write, power of 2. */<br />+    uint16_t queue_msix_vector; /* read-write */<br />+    uint16_t queue_enable;      /* read-write */<br />+    uint16_t queue_notify_off;  /* read-only */<br />+    uint32_t queue_desc_lo;     /* read-write */<br />+    uint32_t queue_desc_hi;     /* read-write */<br />+    uint32_t queue_avail_lo;    /* read-write */<br />+    uint32_t queue_avail_hi;    /* read-write */<br />+    uint32_t queue_used_lo;     /* read-write */<br />+    uint32_t queue_used_hi;     /* read-write */<br />+};<br />+<br />+/*<br />+ * This structure is just a reference used to read the<br />+ * net device specific config space; it mirrors the layout of<br />+ * the device configuration registers.<br />+ */<br />+struct zxdh_net_config {<br />+    /* The config defining mac address (if ZXDH_NET_F_MAC) */<br />+    uint8_t    mac[RTE_ETHER_ADDR_LEN];<br />+    /* See ZXDH_NET_F_STATUS and ZXDH_NET_S_* above */<br />+    uint16_t   status;<br />+    uint16_t   max_virtqueue_pairs;<br />+    uint16_t   mtu;<br />+    /*<br />+     * speed, in units of 1Mb. 
All values 0 to INT_MAX are legal.<br />+     * Any other value stands for unknown.<br />+     */<br />+    uint32_t   speed;<br />+    /* 0x00 - half duplex<br />+     * 0x01 - full duplex<br />+     * Any other value stands for unknown.<br />+     */<br />+    uint8_t    duplex;<br />+} __rte_packed;<br />+<br />+/* This is the PCI capability header: */<br />+struct zxdh_pci_cap {<br />+    uint8_t  cap_vndr;   /* Generic PCI field: PCI_CAP_ID_VNDR */<br />+    uint8_t  cap_next;   /* Generic PCI field: next ptr. */<br />+    uint8_t  cap_len;    /* Generic PCI field: capability length */<br />+    uint8_t  cfg_type;   /* Identifies the structure. */<br />+    uint8_t  bar;        /* Where to find it. */<br />+    uint8_t  padding[3]; /* Pad to full dword. */<br />+    uint32_t offset;     /* Offset within bar. */<br />+    uint32_t length;     /* Length of the structure, in bytes. */<br />+};<br />+struct zxdh_pci_notify_cap {<br />+    struct zxdh_pci_cap cap;<br />+    uint32_t notify_off_multiplier;  /* Multiplier for queue_notify_off. */<br />+};<br />+<br />+struct zxdh_pci_ops {<br />+    void     (*read_dev_cfg)(struct zxdh_hw *hw, size_t offset, void *dst, int32_t len);<br />+    void     (*write_dev_cfg)(struct zxdh_hw *hw, size_t offset, const void *src, int32_t len);<br />+<br />+    uint8_t  (*get_status)(struct zxdh_hw *hw);<br />+    void     (*set_status)(struct zxdh_hw *hw, uint8_t status);<br />+<br />+    uint64_t (*get_features)(struct zxdh_hw *hw);<br />+    void     (*set_features)(struct zxdh_hw *hw, uint64_t features);<br />+<br />+    uint8_t  (*get_isr)(struct zxdh_hw *hw);<br />+<br />+    uint16_t (*set_config_irq)(struct zxdh_hw *hw, uint16_t vec);<br />+<br />+    uint16_t (*set_queue_irq)(struct zxdh_hw *hw, struct virtqueue *vq, uint16_t vec);<br />+<br />+    uint16_t (*get_queue_num)(struct zxdh_hw *hw, uint16_t queue_id);<br />+    void     (*set_queue_num)(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size);<br />+<br />+    int32_t  (*setup_queue)(struct zxdh_hw *hw, struct virtqueue *vq);<br />+    void     (*del_queue)(struct zxdh_hw *hw, struct virtqueue *vq);<br />+    void     (*notify_queue)(struct zxdh_hw *hw, struct virtqueue *vq);<br />+};<br />+<br />+extern struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];<br />+extern const struct zxdh_pci_ops zxdh_modern_ops;<br />+<br />+void zxdh_vtpci_reset(struct zxdh_hw *hw);<br />+void zxdh_vtpci_reinit_complete(struct zxdh_hw *hw);<br />+uint8_t zxdh_vtpci_get_status(struct zxdh_hw *hw);<br />+void zxdh_vtpci_set_status(struct zxdh_hw *hw, uint8_t status);<br />+uint64_t zxdh_vtpci_get_features(struct zxdh_hw *hw);<br />+void zxdh_vtpci_write_dev_config(struct zxdh_hw *hw, size_t offset,<br />+        const void *src, int32_t length);<br />+void zxdh_vtpci_read_dev_config(struct zxdh_hw *hw, size_t offset,<br />+        void *dst, int32_t length);<br />+uint8_t zxdh_vtpci_isr(struct zxdh_hw *hw);<br />+enum zxdh_msix_status zxdh_vtpci_msix_detect(struct rte_pci_device *dev);<br />+<br />+int32_t zxdh_read_pci_caps(struct rte_pci_device *dev, struct zxdh_hw *hw);<br />+<br />+#ifdef __cplusplus<br />+}<br />+#endif<br />+<br />+#endif /* _ZXDH_PCI_H_ */<br />diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c<br />new file mode 100644<br />index 0000000000..b6dd487a9d<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_queue.c<br />@@ -0,0 +1,138 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#include <stdint.h> <br />+<br />+#include <rte_mbuf.h> <br />+<br />+#include "zxdh_queue.h" <br />+#include "zxdh_logs.h" <br />+#include "zxdh_pci.h" <br />+#include "zxdh_common.h" <br />+<br />+/**<br />+ * Two types of mbuf to be cleaned:<br />+ * 1) mbuf that has been consumed by backend but not used by virtio.<br />+ * 2) mbuf that hasn't been consumed by backend.<br />+ */<br />+struct rte_mbuf *zxdh_virtqueue_detach_unused(struct virtqueue *vq)<br />+{<br />+    struct rte_mbuf *cookie = NULL;<br />+    int32_t          idx    = 0;<br />+<br />+    if (vq == NULL)<br />+        return NULL;<br />+<br />+    for (idx = 0; idx < vq->vq_nentries; idx++) {<br />+        cookie = vq->vq_descx[idx].cookie;<br />+        if (cookie != NULL) {<br />+            vq->vq_descx[idx].cookie = NULL;<br />+            return cookie;<br />+        }<br />+    }<br />+<br />+    return NULL;<br />+}<br />+<br />+static int32_t zxdh_release_channel(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint16_t nr_vq = hw->queue_num;<br />+    uint32_t var  = 0;<br />+    uint32_t addr = 0;<br />+    uint32_t widx = 0;<br />+    uint32_t bidx = 0;<br />+    uint16_t pch  = 0;<br />+    uint16_t lch  = 0;<br />+    uint16_t timeout = 0;<br />+<br />+    while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {<br />+        if (zxdh_acquire_lock(hw) != 0) {<br />+            PMD_INIT_LOG(ERR,<br />+                "Could not acquire lock to release channel, timeout %d", timeout);<br />+            continue;<br />+        }<br />+        break;<br />+    }<br />+<br />+    if (timeout >= ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {<br />+        PMD_INIT_LOG(ERR, "Acquire lock timeout");<br />+        return -1;<br />+    }<br />+<br />+    for (lch = 0; lch < nr_vq; lch++) {<br />+        if (hw->channel_context[lch].valid == 0) {<br />+            PMD_INIT_LOG(DEBUG, "Logic channel %d does not need to be released", lch);<br />+            continue;<br />+        }<br />+<br />+        /* get coi table offset and index */<br />+        pch  = hw->channel_context[lch].ph_chno;<br />+        widx = pch / 32;<br />+        bidx = pch % 32;<br />+<br />+        addr = ZXDH_QUERES_SHARE_BASE + (widx * sizeof(uint32_t));<br />+        var  = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);<br />+        var &= ~(1 << bidx);<br />+        zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);<br />+<br />+        hw->channel_context[lch].valid = 0;<br />+        hw->channel_context[lch].ph_chno = 0;<br />+    }<br />+<br />+    zxdh_release_lock(hw);<br />+<br />+    return 0;<br />+}<br />+<br />+int32_t zxdh_free_queues(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint16_t nr_vq = hw->queue_num;<br />+    struct virtqueue *vq = NULL;<br />+    int32_t queue_type = 0;<br />+    uint16_t i = 0;<br />+<br />+    if (hw->vqs == NULL)<br />+        return 0;<br />+<br />+    /* Clear COI table */<br />+    if (zxdh_release_channel(dev) < 0) {<br />+        PMD_INIT_LOG(ERR, "Failed to clear coi table");<br />+        return -1;<br />+    }<br />+<br />+    for (i = 0; i < nr_vq; i++) {<br />+        vq = hw->vqs[i];<br />+        if (vq == NULL)<br />+            continue;<br />+<br />+        VTPCI_OPS(hw)->del_queue(hw, vq);<br />+        queue_type = get_queue_type(i);<br />+        if (queue_type == VTNET_RQ) {<br />+            rte_free(vq->sw_ring);<br />+            rte_memzone_free(vq->rxq.mz);<br />+        } else if (queue_type 
== VTNET_TQ) {<br />+            rte_memzone_free(vq->txq.mz);<br />+            rte_memzone_free(vq->txq.virtio_net_hdr_mz);<br />+        }<br />+<br />+        rte_free(vq);<br />+        hw->vqs[i] = NULL;<br />+        PMD_INIT_LOG(DEBUG, "Release to queue %d success!", i);<br />+    }<br />+<br />+    rte_free(hw->vqs);<br />+    hw->vqs = NULL;<br />+<br />+    return 0;<br />+}<br />+<br />+int32_t get_queue_type(uint16_t vtpci_queue_idx)<br />+{<br />+    if (vtpci_queue_idx % 2 == 0)<br />+        return VTNET_RQ;<br />+    else<br />+        return VTNET_TQ;<br />+}<br />diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h<br />new file mode 100644<br />index 0000000000..c2d7bbe889<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_queue.h<br />@@ -0,0 +1,85 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZXDH_QUEUE_H_<br />+#define _ZXDH_QUEUE_H_<br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+#include <stdint.h> <br />+<br />+#include <rte_atomic.h> <br />+#include <rte_memory.h> <br />+#include <rte_mempool.h> <br />+#include <rte_net.h> <br />+#include <ethdev_driver.h> <br />+<br />+#include "zxdh_pci.h" <br />+#include "zxdh_ring.h" <br />+#include "zxdh_rxtx.h" <br />+<br />+<br />+enum {<br />+    VTNET_RQ = 0,<br />+    VTNET_TQ = 1<br />+};<br />+<br />+struct vq_desc_extra {<br />+    void *cookie;<br />+    uint16_t ndescs;<br />+    uint16_t next;<br />+};<br />+<br />+struct virtqueue {<br />+    struct zxdh_hw  *hw; /**< zxdh_hw structure pointer. */<br />+    struct {<br />+        /**< vring keeping descs and events */<br />+        struct vring_packed ring;<br />+        bool used_wrap_counter;<br />+        uint8_t rsv;<br />+        uint16_t cached_flags; /**< cached flags for descs */<br />+        uint16_t event_flags_shadow;<br />+        uint16_t rsv1;<br />+    } __rte_packed vq_packed;<br />+    uint16_t vq_used_cons_idx; /**< last consumed descriptor */<br />+    uint16_t vq_nentries;  /**< vring desc numbers */<br />+    uint16_t vq_free_cnt;  /**< num of desc available */<br />+    uint16_t vq_avail_idx; /**< sync until needed */<br />+    uint16_t vq_free_thresh; /**< free threshold */<br />+    uint16_t rsv2;<br />+<br />+    void *vq_ring_virt_mem;  /**< linear address of vring*/<br />+    uint32_t vq_ring_size;<br />+<br />+    union {<br />+        struct virtnet_rx rxq;<br />+        struct virtnet_tx txq;<br />+    };<br />+<br />+    /** < physical address of vring,<br />+     * or virtual address for virtio_user.<br />+     **/<br />+    rte_iova_t vq_ring_mem;<br />+<br />+    /**<br />+     * Head of the free chain in the descriptor table. If<br />+     * there are no free descriptors, this will be set to<br />+     * VQ_RING_DESC_CHAIN_END.<br />+     **/<br />+    uint16_t  vq_desc_head_idx;<br />+    uint16_t  vq_desc_tail_idx;<br />+    uint16_t  vq_queue_index;   /**< PCI queue index */<br />+    uint16_t  offset; /**< relative offset to obtain addr in mbuf */<br />+    uint16_t *notify_addr;<br />+    struct rte_mbuf **sw_ring;  /**< RX software ring. 
*/<br />+    struct vq_desc_extra vq_descx[0];<br />+};<br />+<br />+struct rte_mbuf *zxdh_virtqueue_detach_unused(struct virtqueue *vq);<br />+int32_t zxdh_free_queues(struct rte_eth_dev *dev);<br />+int32_t get_queue_type(uint16_t vtpci_queue_idx);<br />+<br />+#endif<br />diff --git a/drivers/net/zxdh/zxdh_ring.h b/drivers/net/zxdh/zxdh_ring.h<br />new file mode 100644<br />index 0000000000..bd7c997993<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_ring.h<br />@@ -0,0 +1,87 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZXDH_RING_H_<br />+#define _ZXDH_RING_H_<br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+#include <stdint.h> <br />+#include <rte_common.h> <br />+<br />+/* This marks a buffer as continuing via the next field. */<br />+#define VRING_DESC_F_NEXT                                   1<br />+<br />+/* This marks a buffer as write-only (otherwise read-only). */<br />+#define VRING_DESC_F_WRITE                                  2<br />+<br />+/* This means the buffer contains a list of buffer descriptors. */<br />+#define VRING_DESC_F_INDIRECT                               4<br />+<br />+/* This flag means the descriptor was made available by the driver */<br />+#define VRING_PACKED_DESC_F_AVAIL                           (1 << (7))<br />+/* This flag means the descriptor was used by the device */<br />+#define VRING_PACKED_DESC_F_USED                            (1 << (15))<br />+<br />+/* Frequently used combinations */<br />+#define VRING_PACKED_DESC_F_AVAIL_USED \<br />+            (VRING_PACKED_DESC_F_AVAIL | VRING_PACKED_DESC_F_USED)<br />+<br />+/* The Host uses this in used->flags to advise the Guest: don't kick me<br />+ * when you add a buffer.  It's unreliable, so it's simply an<br />+ * optimization.  Guest will still kick if it's out of buffers.<br />+ **/<br />+#define VRING_USED_F_NO_NOTIFY                              1<br />+<br />+/** The Guest uses this in avail->flags to advise the Host: don't<br />+ * interrupt me when you consume a buffer.  It's unreliable, so it's<br />+ * simply an optimization.<br />+ **/<br />+#define VRING_AVAIL_F_NO_INTERRUPT                          1<br />+<br />+#define RING_EVENT_FLAGS_ENABLE                             0x0<br />+#define RING_EVENT_FLAGS_DISABLE                            0x1<br />+#define RING_EVENT_FLAGS_DESC                               0x2<br />+<br />+/** VirtIO ring descriptors: 16 bytes.<br />+ * These can chain together via "next".<br />+ **/<br />+struct vring_desc {<br />+    uint64_t addr;  /*  Address (guest-physical). */<br />+    uint32_t len;   /* Length. */<br />+    uint16_t flags; /* The flags as indicated above. */<br />+    uint16_t next;  /* We chain unused descriptors via this. 
*/<br />+};<br />+<br />+struct vring_avail {<br />+    uint16_t flags;<br />+    uint16_t idx;<br />+    uint16_t ring[0];<br />+};<br />+<br />+/** For support of packed virtqueues in Virtio 1.1 the format of descriptors<br />+ * looks like this.<br />+ **/<br />+struct vring_packed_desc {<br />+    uint64_t addr;<br />+    uint32_t len;<br />+    uint16_t id;<br />+    uint16_t flags;<br />+};<br />+<br />+struct vring_packed_desc_event {<br />+    uint16_t desc_event_off_wrap;<br />+    uint16_t desc_event_flags;<br />+};<br />+<br />+struct vring_packed {<br />+    uint32_t num;<br />+    struct vring_packed_desc *desc;<br />+    struct vring_packed_desc_event *driver;<br />+    struct vring_packed_desc_event *device;<br />+};<br />+<br />+#endif<br />diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h<br />new file mode 100644<br />index 0000000000..7aedf568fe<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_rxtx.h<br />@@ -0,0 +1,48 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2023 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZXDH_RXTX_H_<br />+#define _ZXDH_RXTX_H_<br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+#include <stdint.h> <br />+<br />+struct virtnet_stats {<br />+    uint64_t packets;<br />+    uint64_t bytes;<br />+    uint64_t errors;<br />+    uint64_t multicast;<br />+    uint64_t broadcast;<br />+    uint64_t truncated_err;<br />+    uint64_t size_bins[8]; /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */<br />+};<br />+<br />+struct virtnet_rx {<br />+    struct virtqueue         *vq;<br />+<br />+    /* dummy mbuf, for wraparound when processing RX ring. */<br />+    struct rte_mbuf           fake_mbuf;<br />+<br />+    uint64_t                  mbuf_initializer; /* value to init mbufs. */<br />+    struct rte_mempool       *mpool;            /* mempool for mbuf allocation */<br />+    uint16_t                  queue_id;         /* DPDK queue index. */<br />+    uint16_t                  port_id;          /* Device port identifier. */<br />+    struct virtnet_stats      stats;<br />+    const struct rte_memzone *mz;               /* mem zone to populate RX ring. */<br />+};<br />+<br />+struct virtnet_tx {<br />+    struct virtqueue         *vq;<br />+    const struct rte_memzone *virtio_net_hdr_mz;  /* memzone to populate hdr. */<br />+    rte_iova_t                virtio_net_hdr_mem; /* hdr for each xmit packet */<br />+    uint16_t                  queue_id;           /* DPDK queue index. */<br />+    uint16_t                  port_id;            /* Device port identifier. */<br />+    struct virtnet_stats      stats;<br />+    const struct rte_memzone *mz;                 /* mem zone to populate TX ring. */<br />+};<br />+<br />+#endif<br />--  <br />2.43.0<br />
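<br />Two of the bit layouts used in this patch are easy to restate on their own: the packed-ring notification word composed in modern_notify_queue() (Bit[0:15] queue index, Bit[16:30] avail index, Bit[31] avail wrap counter) and the pcie_id encoding parsed in zxdh_read_pci_caps() (bit 11 set for PF, bits 15:12 EP, bits 10:8 PF, bits 7:0 VF index). The standalone sketch below only mirrors those documented layouts for illustration; the helper names and the sample values are not part of the driver.<br /><br />#include <stdint.h> <br />#include <stdio.h> <br /><br />/* Packed-ring notification word, as documented in modern_notify_queue():<br /> * Bit[0:15] queue index, Bit[16:30] avail index (masked to 15 bits here),<br /> * Bit[31] avail wrap counter.<br /> */<br />static uint32_t notify_word(uint16_t queue_idx, uint16_t avail_idx, int wrap)<br />{<br />    return ((uint32_t)(!!wrap) << 31) |<br />           ((uint32_t)(avail_idx & 0x7fff) << 16) |<br />           queue_idx;<br />}<br /><br />/* pcie_id layout logged by zxdh_read_pci_caps():<br /> * bit 11 set means PF; bits 15:12 EP, bits 10:8 PF, bits 7:0 VF index.<br /> */<br />static void decode_pcie_id(uint16_t pcie_id)<br />{<br />    if ((pcie_id >> 11) & 0x1)<br />        printf("EP %u PF %u\n", pcie_id >> 12, (pcie_id >> 8) & 0x7);<br />    else<br />        printf("EP %u PF %u VF %u\n",<br />               pcie_id >> 12, (pcie_id >> 8) & 0x7, pcie_id & 0xff);<br />}<br /><br />int main(void)<br />{<br />    printf("notify word: 0x%08x\n", notify_word(3, 0x1234, 1));<br />    decode_pcie_id(0x0802); /* made-up example value, not from real hardware */<br />    return 0;<br />}<br />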