(np) network processor: initialize resources on the host side,
and set up a channel for table insert/get/delete operations.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/meson.build   |   1 +
 drivers/net/zxdh/zxdh_ethdev.c | 234 +++++++++++++++++++++--
 drivers/net/zxdh/zxdh_ethdev.h |  30 +++
 drivers/net/zxdh/zxdh_msg.c    |  44 +++++
 drivers/net/zxdh/zxdh_msg.h    |  37 ++++
 drivers/net/zxdh/zxdh_np.c     | 340 +++++++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_np.h     | 198 +++++++++++++++++++
 drivers/net/zxdh/zxdh_pci.c    |   2 +-
 drivers/net/zxdh/zxdh_pci.h    |   6 +-
 drivers/net/zxdh/zxdh_queue.c  |   2 +-
 drivers/net/zxdh/zxdh_queue.h  |  14 +-
 11 files changed, 875 insertions(+), 33 deletions(-)
 create mode 100644 drivers/net/zxdh/zxdh_np.c
 create mode 100644 drivers/net/zxdh/zxdh_np.h

diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index c9960f4c73..ab24a3145c 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -19,4 +19,5 @@ sources = files(
         'zxdh_msg.c',
         'zxdh_pci.c',
         'zxdh_queue.c',
+        'zxdh_np.c',
 )
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index c786198535..b8f4415e00 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -5,6 +5,7 @@
 #include <ethdev_pci.h>
 #include <bus_pci_driver.h>
 #include <rte_ethdev.h>
+#include <rte_malloc.h>
 
 #include "zxdh_ethdev.h"
 #include "zxdh_logs.h"
@@ -12,8 +13,15 @@
 #include "zxdh_msg.h"
 #include "zxdh_common.h"
 #include "zxdh_queue.h"
+#include "zxdh_np.h"
 
 struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
+struct zxdh_shared_data *zxdh_shared_data;
+const char *ZXDH_PMD_SHARED_DATA_MZ = "zxdh_pmd_shared_data";
+rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+struct zxdh_dtb_shared_data g_dtb_data;
+
+#define ZXDH_INVALID_DTBQUE  0xFFFF
 
 uint16_t
 zxdh_vport_to_vfid(union zxdh_virport_num v)
@@ -406,14 +414,14 @@ zxdh_features_update(struct zxdh_hw *hw,
     ZXDH_VTPCI_OPS(hw)->set_features(hw, req_features);
 
     if ((rx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) &&
-         !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) {
+         !zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) {
         PMD_DRV_LOG(ERR, "rx checksum not available on this host");
         return -ENOTSUP;
     }
 
     if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
-        (!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
-         !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) {
+        (!zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+         !zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) {
         PMD_DRV_LOG(ERR, "Large Receive Offload not available on this host");
         return -ENOTSUP;
     }
@@ -421,20 +429,20 @@ zxdh_features_update(struct zxdh_hw *hw,
 }
 
 static bool
-rx_offload_enabled(struct zxdh_hw *hw)
+zxdh_rx_offload_enabled(struct zxdh_hw *hw)
 {
-    return vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||
-           vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
-           vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6);
+    return zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||
+           zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+           zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6);
 }
 
 static bool
-tx_offload_enabled(struct zxdh_hw *hw)
+zxdh_tx_offload_enabled(struct zxdh_hw *hw)
 {
-    return vtpci_with_feature(hw, ZXDH_NET_F_CSUM) ||
-           vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) ||
-           vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) ||
-           vtpci_with_feature(hw, ZXDH_NET_F_HOST_UFO);
+    return zxdh_pci_with_feature(hw, ZXDH_NET_F_CSUM) ||
+           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) ||
+           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) ||
+           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_UFO);
 }
 
 static void
@@ -466,7 +474,7 @@ zxdh_dev_free_mbufs(struct rte_eth_dev *dev)
             continue;
         PMD_DRV_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i);
 
-        while ((buf = zxdh_virtqueue_detach_unused(vq)) != NULL)
+        while ((buf = zxdh_queue_detach_unused(vq)) != NULL)
             rte_pktmbuf_free(buf);
     }
 }
@@ -550,9 +558,9 @@ zxdh_init_vring(struct zxdh_virtqueue *vq)
     vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
     vq->vq_free_cnt = vq->vq_nentries;
     memset(vq->vq_descx, 0, sizeof(struct zxdh_vq_desc_extra) * vq->vq_nentries);
-    vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size);
-    vring_desc_init_packed(vq, size);
-    virtqueue_disable_intr(vq);
+    zxdh_vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size);
+    zxdh_vring_desc_init_packed(vq, size);
+    zxdh_queue_disable_intr(vq);
 }
 
 static int32_t
@@ -621,7 +629,7 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
     /*
      * Reserve a memzone for vring elements
      */
-    size = vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN);
+    size = zxdh_vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN);
     vq->vq_ring_size = RTE_ALIGN_CEIL(size, ZXDH_PCI_VRING_ALIGN);
     PMD_DRV_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
 
@@ -694,7 +702,8 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
             /* first indirect descriptor is always the tx header */
             struct zxdh_vring_packed_desc *start_dp = txr[i].tx_packed_indir;
 
-            vring_desc_init_indirect_packed(start_dp, RTE_DIM(txr[i].tx_packed_indir));
+            zxdh_vring_desc_init_indirect_packed(start_dp,
+                    RTE_DIM(txr[i].tx_packed_indir));
             start_dp->addr = txvq->zxdh_net_hdr_mem + i * sizeof(*txr) +
                     offsetof(struct zxdh_tx_region, tx_hdr);
             /* length will be updated to actual pi hdr size when xmit pkt */
@@ -792,8 +801,8 @@ zxdh_dev_configure(struct rte_eth_dev *dev)
         }
     }
 
-    hw->has_tx_offload = tx_offload_enabled(hw);
-    hw->has_rx_offload = rx_offload_enabled(hw);
+    hw->has_tx_offload = zxdh_tx_offload_enabled(hw);
+    hw->has_rx_offload = zxdh_rx_offload_enabled(hw);
 
     nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues;
     if (nr_vq == hw->queue_num)
@@ -881,7 +890,7 @@ zxdh_init_device(struct rte_eth_dev *eth_dev)
     rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, &eth_dev->data->mac_addrs[0]);
 
     /* If host does not support both status and MSI-X then disable LSC */
-    if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS) && hw->use_msix != ZXDH_MSIX_NONE)
+    if (zxdh_pci_with_feature(hw, ZXDH_NET_F_STATUS) && hw->use_msix != ZXDH_MSIX_NONE)
         eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
     else
         eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
@@ -913,6 +922,181 @@ zxdh_agent_comm(struct rte_eth_dev *eth_dev, struct zxdh_hw *hw)
     return 0;
 }
 
+static int
+zxdh_np_dtb_res_init(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_bar_offset_params param = {0};
+    struct zxdh_bar_offset_res res = {0};
+    int ret = 0;
+
+    if (g_dtb_data.init_done) {
+        PMD_DRV_LOG(DEBUG, "DTB res already init done, dev %s no need init",
+            dev->device->name);
+        return 0;
+    }
+    g_dtb_data.queueid = ZXDH_INVALID_DTBQUE;
+    g_dtb_data.bind_device = dev;
+    g_dtb_data.dev_refcnt++;
+    g_dtb_data.init_done = 1;
+
+    ZXDH_DEV_INIT_CTRL_T *dpp_ctrl = rte_zmalloc(NULL, sizeof(*dpp_ctrl) +
+            sizeof(ZXDH_DTB_ADDR_INFO_T) * 256, 0);
+    if (dpp_ctrl == NULL) {
+        PMD_DRV_LOG(ERR, "dev %s cannot allocate memory for dpp_ctrl", dev->device->name);
+        ret = -ENOMEM;
+        goto free_res;
+    }
+    dpp_ctrl->queue_id = 0xff;
+    dpp_ctrl->vport = hw->vport.vport;
+    dpp_ctrl->vector = ZXDH_MSIX_INTR_DTB_VEC;
+    strlcpy(dpp_ctrl->port_name, dev->device->name, sizeof(dpp_ctrl->port_name));
+    dpp_ctrl->pcie_vir_addr = (uint32_t)hw->bar_addr[0];
+
+    param.pcie_id = hw->pcie_id;
+    param.virt_addr = hw->bar_addr[0] + ZXDH_CTRLCH_OFFSET;
+    param.type = ZXDH_URI_NP;
+
+    ret = zxdh_get_bar_offset(&param, &res);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "dev %s get npbar offset failed", dev->device->name);
+        goto free_res;
+    }
+    dpp_ctrl->np_bar_len = res.bar_length;
+    dpp_ctrl->np_bar_offset = res.bar_offset;
+
+    if (!g_dtb_data.dtb_table_conf_mz) {
+        const struct rte_memzone *conf_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_conf_mz",
+                ZXDH_DTB_TABLE_CONF_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+
+        if (conf_mz == NULL) {
+            PMD_DRV_LOG(ERR,
+                "dev %s cannot allocate memory for dtb table conf",
+                dev->device->name);
+            ret = -ENOMEM;
+            goto free_res;
+        }
+        dpp_ctrl->down_vir_addr = conf_mz->addr_64;
+        dpp_ctrl->down_phy_addr = conf_mz->iova;
+        g_dtb_data.dtb_table_conf_mz = conf_mz;
+    }
+
+    if (!g_dtb_data.dtb_table_dump_mz) {
+        const struct rte_memzone *dump_mz = rte_memzone_reserve_aligned("zxdh_dtb_table_dump_mz",
+                ZXDH_DTB_TABLE_DUMP_SIZE, SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+
+        if (dump_mz == NULL) {
+            PMD_DRV_LOG(ERR,
+                "dev %s Cannot allocate memory for dtb table dump",
+                dev->device->name);
+            ret = -ENOMEM;
+            goto free_res;
+        }
+        dpp_ctrl->dump_vir_addr = dump_mz->addr_64;
+        dpp_ctrl->dump_phy_addr = dump_mz->iova;
+        g_dtb_data.dtb_table_dump_mz = dump_mz;
+    }
+
+    ret = zxdh_np_host_init(0, dpp_ctrl);
+    if (ret) {
+        PMD_DRV_LOG(ERR, "dev %s dpp host np init failed, ret %d", dev->device->name, ret);
+        goto free_res;
+    }
+
+    PMD_DRV_LOG(DEBUG, "dev %s dpp host np init ok, dtb queue %d",
+        dev->device->name, dpp_ctrl->queue_id);
+    g_dtb_data.queueid = dpp_ctrl->queue_id;
+    rte_free(dpp_ctrl);
+    return 0;
+
+free_res:
+    rte_free(dpp_ctrl);
+    return ret;
+}
+
+static int
+zxdh_init_shared_data(void)
+{
+    const struct rte_memzone *mz;
+    int ret = 0;
+
+    rte_spinlock_lock(&zxdh_shared_data_lock);
+    if (zxdh_shared_data == NULL) {
+        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+            /* Allocate shared memory. */
+            mz = rte_memzone_reserve(ZXDH_PMD_SHARED_DATA_MZ,
+                    sizeof(*zxdh_shared_data), SOCKET_ID_ANY, 0);
+            if (mz == NULL) {
+                PMD_DRV_LOG(ERR, "Cannot allocate zxdh shared data");
+                ret = -rte_errno;
+                goto error;
+            }
+            zxdh_shared_data = mz->addr;
+            memset(zxdh_shared_data, 0, sizeof(*zxdh_shared_data));
+            rte_spinlock_init(&zxdh_shared_data->lock);
+        } else { /* Lookup allocated shared memory. */
+            mz = rte_memzone_lookup(ZXDH_PMD_SHARED_DATA_MZ);
+            if (mz == NULL) {
+                PMD_DRV_LOG(ERR, "Cannot attach zxdh shared data");
+                ret = -rte_errno;
+                goto error;
+            }
+            zxdh_shared_data = mz->addr;
+        }
+    }
+
+error:
+    rte_spinlock_unlock(&zxdh_shared_data_lock);
+    return ret;
+}
+
+static int
+zxdh_init_once(void)
+{
+    int ret = 0;
+
+    if (zxdh_init_shared_data())
+        return -1;
+
+    struct zxdh_shared_data *sd = zxdh_shared_data;
+    rte_spinlock_lock(&sd->lock);
+    if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+        if (!sd->init_done) {
+            ++sd->secondary_cnt;
+            sd->init_done = true;
+        }
+        goto out;
+    }
+    /* RTE_PROC_PRIMARY */
+    if (!sd->init_done)
+        sd->init_done = true;
+    sd->dev_refcnt++;
+
+out:
+    rte_spinlock_unlock(&sd->lock);
+    return ret;
+}
+
+static int
+zxdh_np_init(struct rte_eth_dev *eth_dev)
+{
+    struct zxdh_hw *hw = eth_dev->data->dev_private;
+    int ret = 0;
+
+    if (hw->is_pf) {
+        ret = zxdh_np_dtb_res_init(eth_dev);
+        if (ret) {
+            PMD_DRV_LOG(ERR, "np dtb init failed, ret:%d ", ret);
+            return ret;
+        }
+    }
+    if (zxdh_shared_data != NULL)
+        zxdh_shared_data->np_init_done = 1;
+
+    PMD_DRV_LOG(DEBUG, "np init ok ");
+    return 0;
+}
+
 static int
 zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
@@ -950,6 +1134,10 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
         hw->is_pf = 1;
     }
 
+    ret = zxdh_init_once();
+    if (ret != 0)
+        goto err_zxdh_init;
+
     ret = zxdh_init_device(eth_dev);
     if (ret < 0)
         goto err_zxdh_init;
@@ -977,6 +1165,10 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
     if (ret != 0)
         goto err_zxdh_init;
 
+    ret = zxdh_np_init(eth_dev);
+    if (ret)
+        goto err_zxdh_init;
+
     ret = zxdh_configure_intr(eth_dev);
     if (ret != 0)
         goto err_zxdh_init;
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 7658cbb461..b1f398b28e 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -35,6 +35,12 @@
 
 #define ZXDH_MBUF_BURST_SZ        64
 
+#define ZXDH_MAX_BASE_DTB_TABLE_COUNT   30
+#define ZXDH_DTB_TABLE_DUMP_SIZE        (32 * (16 + 16 * 1024))
+#define ZXDH_DTB_TABLE_CONF_SIZE        (32 * (16 + 16 * 1024))
+
+#define ZXDH_MAX_NAME_LEN               32
+
 union zxdh_virport_num {
     uint16_t vport;
     struct {
@@ -89,6 +95,30 @@ struct zxdh_hw {
     uint8_t has_rx_offload;
 };
 
+struct zxdh_dtb_shared_data {
+    uint8_t init_done;
+    char name[ZXDH_MAX_NAME_LEN];
+    uint16_t queueid;
+    uint16_t vport;
+    uint32_t vector;
+    const struct rte_memzone *dtb_table_conf_mz;
+    const struct rte_memzone *dtb_table_dump_mz;
+    const struct rte_memzone *dtb_table_bulk_dump_mz[ZXDH_MAX_BASE_DTB_TABLE_COUNT];
+    struct rte_eth_dev *bind_device;
+    uint32_t dev_refcnt;
+};
+
+/* Shared data between primary and secondary processes. */
+struct zxdh_shared_data {
+    rte_spinlock_t lock; /* Global spinlock for primary and secondary processes. */
+    int32_t init_done;       /* Whether primary has done initialization. */
+    unsigned int secondary_cnt; /* Number of secondary processes init'd. */
+
+    int32_t np_init_done;
+    uint32_t dev_refcnt;
+    struct zxdh_dtb_shared_data *dtb_data;
+};
+
 uint16_t zxdh_vport_to_vfid(union zxdh_virport_num v);
 
 #endif /* ZXDH_ETHDEV_H */
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index 53cf972f86..dd7a518a51 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -1035,3 +1035,47 @@ zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev)
     rte_free(recved_msg);
     return ZXDH_BAR_MSG_OK;
 }
+
+int zxdh_get_bar_offset(struct zxdh_bar_offset_params *paras,
+        struct zxdh_bar_offset_res *res)
+{
+    uint16_t check_token;
+    uint16_t sum_res;
+    int ret;
+
+    if (!paras)
+        return ZXDH_BAR_MSG_ERR_NULL;
+
+    struct zxdh_offset_get_msg send_msg = {
+        .pcie_id = paras->pcie_id,
+        .type = paras->type,
+    };
+    struct zxdh_pci_bar_msg in = {
+        .payload_addr = &send_msg,
+        .payload_len = sizeof(send_msg),
+        .virt_addr = paras->virt_addr,
+        .src = ZXDH_MSG_CHAN_END_PF,
+        .dst = ZXDH_MSG_CHAN_END_RISC,
+        .module_id = ZXDH_BAR_MODULE_OFFSET_GET,
+        .src_pcieid = paras->pcie_id,
+    };
+    struct zxdh_bar_recv_msg recv_msg = {0};
+    struct zxdh_msg_recviver_mem result = {
+        .recv_buffer = &recv_msg,
+        .buffer_len = sizeof(recv_msg),
+    };
+    ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+    if (ret != ZXDH_BAR_MSG_OK)
+        return -ret;
+
+    check_token = recv_msg.offset_reps.check;
+    sum_res = zxdh_bar_get_sum((uint8_t *)&send_msg, sizeof(send_msg));
+
+    if (check_token != sum_res) {
+        PMD_MSG_LOG(ERR, "expect token: 0x%x, get token: 0x%x", sum_res, check_token);
+        return ZXDH_BAR_MSG_ERR_REPLY;
+    }
+    res->bar_offset = recv_msg.offset_reps.offset;
+    res->bar_length = recv_msg.offset_reps.length;
+    return ZXDH_BAR_MSG_OK;
+}
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index 5f57acd055..025bce78ca 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -131,6 +131,26 @@ enum ZXDH_TBL_MSG_TYPE {
     ZXDH_TBL_TYPE_NON,
 };
 
+enum pciebar_layout_type {
+    ZXDH_URI_VQM      = 0,
+    ZXDH_URI_SPINLOCK = 1,
+    ZXDH_URI_FWCAP    = 2,
+    ZXDH_URI_FWSHR    = 3,
+    ZXDH_URI_DRS_SEC  = 4,
+    ZXDH_URI_RSV      = 5,
+    ZXDH_URI_CTRLCH   = 6,
+    ZXDH_URI_1588     = 7,
+    ZXDH_URI_QBV      = 8,
+    ZXDH_URI_MACPCS   = 9,
+    ZXDH_URI_RDMA     = 10,
+    ZXDH_URI_MNP      = 11,
+    ZXDH_URI_MSPM     = 12,
+    ZXDH_URI_MVQM     = 13,
+    ZXDH_URI_MDPI     = 14,
+    ZXDH_URI_NP       = 15,
+    ZXDH_URI_MAX,
+};
+
 struct zxdh_msix_para {
     uint16_t pcie_id;
     uint16_t vector_risc;
@@ -204,9 +224,26 @@ struct zxdh_bar_msg_header {
     uint16_t dst_pcieid; /* used in PF-->VF */
 };
 
+struct zxdh_bar_offset_params {
+    uint64_t virt_addr;  /* Bar space control space virtual address */
+    uint16_t pcie_id;
+    uint16_t type;  /* Module types corresponding to PCIBAR planning */
+};
+
+struct zxdh_bar_offset_res {
+    uint32_t bar_offset;
+    uint32_t bar_length;
+};
+
+struct zxdh_offset_get_msg {
+    uint16_t pcie_id;
+    uint16_t type;
+};
+
 typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,
         void *reps_buffer, uint16_t *reps_len, void *dev);
 
+int zxdh_get_bar_offset(struct zxdh_bar_offset_params *paras, struct zxdh_bar_offset_res *res);
 int zxdh_msg_chan_init(void);
 int zxdh_bar_msg_chan_exit(void);
 int zxdh_msg_chan_hwlock_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/zxdh/zxdh_np.c b/drivers/net/zxdh/zxdh_np.c
new file mode 100644
index 0000000000..e44d7ff501
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_np.c
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_malloc.h>
+
+#include "zxdh_np.h"
+#include "zxdh_logs.h"
+
+static uint64_t g_np_bar_offset;
+static ZXDH_DEV_MGR_T g_dev_mgr;
+static ZXDH_SDT_MGR_T g_sdt_mgr;
+ZXDH_PPU_CLS_BITMAP_T g_ppu_cls_bit_map[ZXDH_DEV_CHANNEL_MAX];
+ZXDH_DTB_MGR_T *p_dpp_dtb_mgr[ZXDH_DEV_CHANNEL_MAX];
+
+#define ZXDH_SDT_MGR_PTR_GET()    (&g_sdt_mgr)
+#define ZXDH_SDT_SOFT_TBL_GET(id) (g_sdt_mgr.sdt_tbl_array[id])
+
+#define ZXDH_COMM_CHECK_DEV_POINT(dev_id, point)\
+do {\
+    if (NULL == (point)) {\
+        PMD_DRV_LOG(ERR, "dev: %d ZXIC %s:%d[Error:POINT NULL] !"\
+            "FUNCTION : %s!", (dev_id), __FILE__, __LINE__, __func__);\
+        RTE_ASSERT(0);\
+    } \
+} while (0)
+
+#define ZXDH_COMM_CHECK_DEV_RC(dev_id, rc, becall)\
+do {\
+    if ((rc) != 0) {\
+        PMD_DRV_LOG(ERR, "dev: %d ZXIC  %s:%d !"\
+        "-- %s Call %s Fail!", (dev_id), __FILE__, __LINE__, __func__, becall);\
+        RTE_ASSERT(0);\
+    } \
+} while (0)
+
+#define ZXDH_COMM_CHECK_POINT_NO_ASSERT(point)\
+do {\
+    if ((point) == NULL) {\
+        PMD_DRV_LOG(ERR, "ZXIC %s:%d[Error:POINT NULL] ! FUNCTION : %s!",\
+        __FILE__, __LINE__, __func__);\
+    } \
+} while (0)
+
+#define ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, becall)\
+do {\
+    if ((rc) != 0) {\
+        PMD_DRV_LOG(ERR, "ZXIC  %s:%d !-- %s Call %s"\
+        " Fail!", __FILE__, __LINE__, __func__, becall);\
+    } \
+} while (0)
+
+#define ZXDH_COMM_CHECK_RC(rc, becall)\
+do {\
+    if ((rc) != 0) {\
+        PMD_DRV_LOG(ERR, "ZXIC  %s:%d!-- %s Call %s "\
+        "Fail!", __FILE__, __LINE__, __func__, becall);\
+        RTE_ASSERT(0);\
+    } \
+} while (0)
+
+static uint32_t
+zxdh_np_dev_init(void)
+{
+    if (g_dev_mgr.is_init) {
+        PMD_DRV_LOG(ERR, "Dev is already initialized.");
+        return 0;
+    }
+
+    g_dev_mgr.device_num = 0;
+    g_dev_mgr.is_init    = 1;
+
+    return 0;
+}
+
+static uint32_t
+zxdh_np_dev_add(uint32_t  dev_id, ZXDH_DEV_TYPE_E dev_type,
+        ZXDH_DEV_ACCESS_TYPE_E  access_type, uint64_t  pcie_addr,
+        uint64_t  riscv_addr, uint64_t  dma_vir_addr,
+        uint64_t  dma_phy_addr)
+{
+    ZXDH_DEV_CFG_T *p_dev_info = NULL;
+    ZXDH_DEV_MGR_T *p_dev_mgr  = NULL;
+
+    p_dev_mgr = &g_dev_mgr;
+    if (!p_dev_mgr->is_init) {
+        PMD_DRV_LOG(ERR, "ErrorCode[ 0x%x]: Device Manager is not init!!!",
+                                 ZXDH_RC_DEV_MGR_NOT_INIT);
+        return ZXDH_RC_DEV_MGR_NOT_INIT;
+    }
+
+    if (p_dev_mgr->p_dev_array[dev_id] != NULL) {
+        /* device already exists. */
+        PMD_DRV_LOG(ERR, "Device is added again!!!");
+        p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+    } else {
+        /* device is new. */
+        p_dev_info = rte_malloc(NULL, sizeof(ZXDH_DEV_CFG_T), 0);
+        ZXDH_COMM_CHECK_DEV_POINT(dev_id, p_dev_info);
+        p_dev_mgr->p_dev_array[dev_id] = p_dev_info;
+        p_dev_mgr->device_num++;
+    }
+
+    p_dev_info->device_id   = dev_id;
+    p_dev_info->dev_type    = dev_type;
+    p_dev_info->access_type = access_type;
+    p_dev_info->pcie_addr   = pcie_addr;
+    p_dev_info->riscv_addr   = riscv_addr;
+    p_dev_info->dma_vir_addr = dma_vir_addr;
+    p_dev_info->dma_phy_addr = dma_phy_addr;
+
+    return 0;
+}
+
+static uint32_t
+zxdh_np_dev_agent_status_set(uint32_t dev_id, uint32_t agent_flag)
+{
+    ZXDH_DEV_MGR_T *p_dev_mgr = NULL;
+    ZXDH_DEV_CFG_T *p_dev_info = NULL;
+
+    p_dev_mgr = &g_dev_mgr;
+    p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+
+    if (p_dev_info == NULL)
+        return ZXDH_DEV_TYPE_INVALID;
+    p_dev_info->agent_flag = agent_flag;
+
+    return 0;
+}
+
+static void
+zxdh_np_sdt_mgr_init(void)
+{
+    if (!g_sdt_mgr.is_init) {
+        g_sdt_mgr.channel_num = 0;
+        g_sdt_mgr.is_init = 1;
+        memset(g_sdt_mgr.sdt_tbl_array, 0, ZXDH_DEV_CHANNEL_MAX *
+            sizeof(ZXDH_SDT_SOFT_TABLE_T *));
+    }
+}
+
+static uint32_t
+zxdh_np_sdt_mgr_create(uint32_t dev_id)
+{
+    ZXDH_SDT_SOFT_TABLE_T *p_sdt_tbl_temp = NULL;
+    ZXDH_SDT_MGR_T *p_sdt_mgr = NULL;
+
+    p_sdt_mgr = ZXDH_SDT_MGR_PTR_GET();
+
+    if (ZXDH_SDT_SOFT_TBL_GET(dev_id) == NULL) {
+        p_sdt_tbl_temp = rte_malloc(NULL, sizeof(ZXDH_SDT_SOFT_TABLE_T), 0);
+
+        p_sdt_tbl_temp->device_id = dev_id;
+        memset(p_sdt_tbl_temp->sdt_array, 0, ZXDH_DEV_SDT_ID_MAX * sizeof(ZXDH_SDT_ITEM_T));
+
+        ZXDH_SDT_SOFT_TBL_GET(dev_id) = p_sdt_tbl_temp;
+
+        p_sdt_mgr->channel_num++;
+    } else {
+        PMD_DRV_LOG(ERR, "Error: %s for dev[%d] "
+            "is called repeatedly!", __func__, dev_id);
+        return 1;
+    }
+
+    return 0;
+}
+
+static uint32_t
+zxdh_np_sdt_init(uint32_t dev_num, uint32_t *dev_id_array)
+{
+    uint32_t rc;
+    uint32_t i;
+
+    zxdh_np_sdt_mgr_init();
+
+    for (i = 0; i < dev_num; i++) {
+        rc = zxdh_np_sdt_mgr_create(dev_id_array[i]);
+        ZXDH_COMM_CHECK_RC(rc, "zxdh_sdt_mgr_create");
+    }
+
+    return rc;
+}
+
+static void
+zxdh_np_ppu_parse_cls_bitmap(uint32_t dev_id,
+                                uint32_t bitmap)
+{
+    uint32_t cls_id;
+    uint32_t mem_id;
+    uint32_t cls_use;
+    uint32_t instr_mem;
+
+    for (cls_id = 0; cls_id < ZXDH_PPU_CLUSTER_NUM; cls_id++) {
+        cls_use = (bitmap >> cls_id) & 0x1;
+        g_ppu_cls_bit_map[dev_id].cls_use[cls_id] = cls_use;
+    }
+
+    for (mem_id = 0; mem_id < ZXDH_PPU_INSTR_MEM_NUM; mem_id++) {
+        instr_mem = (bitmap >> (mem_id * 2)) & 0x3;
+        g_ppu_cls_bit_map[dev_id].instr_mem[mem_id] = ((instr_mem > 0) ? 1 : 0);
+    }
+}
+
+static ZXDH_DTB_MGR_T *
+zxdh_np_dtb_mgr_get(uint32_t dev_id)
+{
+    if (dev_id >= ZXDH_DEV_CHANNEL_MAX)
+        return NULL;
+    else
+        return p_dpp_dtb_mgr[dev_id];
+}
+
+static uint32_t
+zxdh_np_dtb_soft_init(uint32_t dev_id)
+{
+    ZXDH_DTB_MGR_T *p_dtb_mgr = NULL;
+
+    if (dev_id >= ZXDH_DEV_CHANNEL_MAX)
+        return 1;
+
+    p_dtb_mgr = zxdh_np_dtb_mgr_get(dev_id);
+    if (p_dtb_mgr == NULL) {
+        p_dpp_dtb_mgr[dev_id] = rte_zmalloc(NULL, sizeof(ZXDH_DTB_MGR_T), 0);
+        p_dtb_mgr = zxdh_np_dtb_mgr_get(dev_id);
+        if (p_dtb_mgr == NULL)
+            return 1;
+    }
+
+    return 0;
+}
+
+static uint32_t
+zxdh_np_base_soft_init(uint32_t dev_id, ZXDH_SYS_INIT_CTRL_T *p_init_ctrl)
+{
+    uint32_t dev_id_array[ZXDH_DEV_CHANNEL_MAX] = {0};
+    uint32_t rt;
+    uint32_t access_type;
+    uint32_t agent_flag;
+
+    rt = zxdh_np_dev_init();
+    ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dev_init");
+
+    if (p_init_ctrl->flags & ZXDH_INIT_FLAG_ACCESS_TYPE)
+        access_type = ZXDH_DEV_ACCESS_TYPE_RISCV;
+    else
+        access_type = ZXDH_DEV_ACCESS_TYPE_PCIE;
+
+    if (p_init_ctrl->flags & ZXDH_INIT_FLAG_AGENT_FLAG)
+        agent_flag = ZXDH_DEV_AGENT_ENABLE;
+    else
+        agent_flag = ZXDH_DEV_AGENT_DISABLE;
+
+    rt = zxdh_np_dev_add(dev_id,
+                     p_init_ctrl->device_type,
+                     access_type,
+                     p_init_ctrl->pcie_vir_baddr,
+                     p_init_ctrl->riscv_vir_baddr,
+                     p_init_ctrl->dma_vir_baddr,
+                     p_init_ctrl->dma_phy_baddr);
+    ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dev_add");
+
+    rt = zxdh_np_dev_agent_status_set(dev_id, agent_flag);
+    ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dev_agent_status_set");
+
+    dev_id_array[0] = dev_id;
+    rt = zxdh_np_sdt_init(1, dev_id_array);
+    ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_sdt_init");
+
+    zxdh_np_ppu_parse_cls_bitmap(dev_id, ZXDH_PPU_CLS_ALL_START);
+
+    rt = zxdh_np_dtb_soft_init(dev_id);
+    ZXDH_COMM_CHECK_DEV_RC(dev_id, rt, "zxdh_dtb_soft_init");
+
+    return rt;
+}
+
+static void
+zxdh_np_dev_vport_set(uint32_t dev_id, uint32_t vport)
+{
+    ZXDH_DEV_MGR_T *p_dev_mgr = NULL;
+    ZXDH_DEV_CFG_T *p_dev_info = NULL;
+
+    p_dev_mgr =  &g_dev_mgr;
+    p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+    p_dev_info->vport = vport;
+}
+
+static void
+zxdh_np_dev_agent_addr_set(uint32_t dev_id, uint64_t agent_addr)
+{
+    ZXDH_DEV_MGR_T *p_dev_mgr = NULL;
+    ZXDH_DEV_CFG_T *p_dev_info = NULL;
+
+    p_dev_mgr =  &g_dev_mgr;
+    p_dev_info = p_dev_mgr->p_dev_array[dev_id];
+    p_dev_info->agent_addr = agent_addr;
+}
+
+static uint64_t
+zxdh_np_addr_calc(uint64_t pcie_vir_baddr, uint32_t bar_offset)
+{
+    uint64_t np_addr;
+
+    np_addr = ((pcie_vir_baddr + bar_offset) > ZXDH_PCIE_NP_MEM_SIZE)
+                ? (pcie_vir_baddr + bar_offset - ZXDH_PCIE_NP_MEM_SIZE) : 0;
+    g_np_bar_offset = bar_offset;
+
+    return np_addr;
+}
+
+int
+zxdh_np_host_init(uint32_t dev_id,
+        ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl)
+{
+    ZXDH_SYS_INIT_CTRL_T sys_init_ctrl = {0};
+    uint32_t rc;
+    uint64_t agent_addr;
+
+    ZXDH_COMM_CHECK_POINT_NO_ASSERT(p_dev_init_ctrl);
+
+    sys_init_ctrl.flags = (ZXDH_DEV_ACCESS_TYPE_PCIE << 0) | (ZXDH_DEV_AGENT_ENABLE << 10);
+    sys_init_ctrl.pcie_vir_baddr = zxdh_np_addr_calc(p_dev_init_ctrl->pcie_vir_addr,
+        p_dev_init_ctrl->np_bar_offset);
+    sys_init_ctrl.device_type = ZXDH_DEV_TYPE_CHIP;
+    rc = zxdh_np_base_soft_init(dev_id, &sys_init_ctrl);
+    ZXDH_COMM_CHECK_RC_NO_ASSERT(rc, "zxdh_base_soft_init");
+
+    zxdh_np_dev_vport_set(dev_id, p_dev_init_ctrl->vport);
+
+    agent_addr = ZXDH_PCIE_AGENT_ADDR_OFFSET + p_dev_init_ctrl->pcie_vir_addr;
+    zxdh_np_dev_agent_addr_set(dev_id, agent_addr);
+
+    return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_np.h b/drivers/net/zxdh/zxdh_np.h
new file mode 100644
index 0000000000..573eafe796
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_np.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 ZTE Corporation
+ */
+
+#ifndef ZXDH_NP_H
+#define ZXDH_NP_H
+
+#include <stdint.h>
+
+#define ZXDH_PORT_NAME_MAX                    (32)
+#define ZXDH_DEV_CHANNEL_MAX                  (2)
+#define ZXDH_DEV_SDT_ID_MAX                   (256U)
+/*DTB*/
+#define ZXDH_DTB_QUEUE_ITEM_NUM_MAX           (32)
+#define ZXDH_DTB_QUEUE_NUM_MAX                (128)
+
+#define ZXDH_PPU_CLS_ALL_START                (0x3F)
+#define ZXDH_PPU_CLUSTER_NUM                  (6)
+#define ZXDH_PPU_INSTR_MEM_NUM                (3)
+#define ZXDH_SDT_CFG_LEN                      (2)
+
+#define ZXDH_RC_DEV_BASE                      (0x600)
+#define ZXDH_RC_DEV_PARA_INVALID              (ZXDH_RC_DEV_BASE | 0x0)
+#define ZXDH_RC_DEV_RANGE_INVALID             (ZXDH_RC_DEV_BASE | 0x1)
+#define ZXDH_RC_DEV_CALL_FUNC_FAIL            (ZXDH_RC_DEV_BASE | 0x2)
+#define ZXDH_RC_DEV_TYPE_INVALID              (ZXDH_RC_DEV_BASE | 0x3)
+#define ZXDH_RC_DEV_CONNECT_FAIL              (ZXDH_RC_DEV_BASE | 0x4)
+#define ZXDH_RC_DEV_MSG_INVALID               (ZXDH_RC_DEV_BASE | 0x5)
+#define ZXDH_RC_DEV_NOT_EXIST                 (ZXDH_RC_DEV_BASE | 0x6)
+#define ZXDH_RC_DEV_MGR_NOT_INIT              (ZXDH_RC_DEV_BASE | 0x7)
+#define ZXDH_RC_DEV_CFG_NOT_INIT              (ZXDH_RC_DEV_BASE | 0x8)
+
+#define ZXDH_SYS_VF_NP_BASE_OFFSET      0
+#define ZXDH_PCIE_DTB4K_ADDR_OFFSET     (0x6000)
+#define ZXDH_PCIE_NP_MEM_SIZE           (0x2000000)
+#define ZXDH_PCIE_AGENT_ADDR_OFFSET     (0x2000)
+
+#define ZXDH_INIT_FLAG_ACCESS_TYPE      (1 << 0)
+#define ZXDH_INIT_FLAG_SERDES_DOWN_TP   (1 << 1)
+#define ZXDH_INIT_FLAG_DDR_BACKDOOR     (1 << 2)
+#define ZXDH_INIT_FLAG_SA_MODE          (1 << 3)
+#define ZXDH_INIT_FLAG_SA_MESH          (1 << 4)
+#define ZXDH_INIT_FLAG_SA_SERDES_MODE   (1 << 5)
+#define ZXDH_INIT_FLAG_INT_DEST_MODE    (1 << 6)
+#define ZXDH_INIT_FLAG_LIF0_MODE        (1 << 7)
+#define ZXDH_INIT_FLAG_DMA_ENABLE       (1 << 8)
+#define ZXDH_INIT_FLAG_TM_IMEM_FLAG     (1 << 9)
+#define ZXDH_INIT_FLAG_AGENT_FLAG       (1 << 10)
+
+typedef enum zxdh_module_init_e {
+    ZXDH_MODULE_INIT_NPPU = 0,
+    ZXDH_MODULE_INIT_PPU,
+    ZXDH_MODULE_INIT_SE,
+    ZXDH_MODULE_INIT_ETM,
+    ZXDH_MODULE_INIT_DLB,
+    ZXDH_MODULE_INIT_TRPG,
+    ZXDH_MODULE_INIT_TSN,
+    ZXDH_MODULE_INIT_MAX
+} ZXDH_MODULE_INIT_E;
+
+typedef enum zxdh_dev_type_e {
+    ZXDH_DEV_TYPE_SIM  = 0,
+    ZXDH_DEV_TYPE_VCS  = 1,
+    ZXDH_DEV_TYPE_CHIP = 2,
+    ZXDH_DEV_TYPE_FPGA = 3,
+    ZXDH_DEV_TYPE_PCIE_ACC = 4,
+    ZXDH_DEV_TYPE_INVALID,
+} ZXDH_DEV_TYPE_E;
+
+typedef enum zxdh_dev_access_type_e {
+    ZXDH_DEV_ACCESS_TYPE_PCIE = 0,
+    ZXDH_DEV_ACCESS_TYPE_RISCV = 1,
+    ZXDH_DEV_ACCESS_TYPE_INVALID,
+} ZXDH_DEV_ACCESS_TYPE_E;
+
+typedef enum zxdh_dev_agent_flag_e {
+    ZXDH_DEV_AGENT_DISABLE = 0,
+    ZXDH_DEV_AGENT_ENABLE = 1,
+    ZXDH_DEV_AGENT_INVALID,
+} ZXDH_DEV_AGENT_FLAG_E;
+
+typedef struct zxdh_dtb_tab_up_user_addr_t {
+    uint32_t user_flag;
+    uint64_t phy_addr;
+    uint64_t vir_addr;
+} ZXDH_DTB_TAB_UP_USER_ADDR_T;
+
+typedef struct zxdh_dtb_tab_up_info_t {
+    uint64_t start_phy_addr;
+    uint64_t start_vir_addr;
+    uint32_t item_size;
+    uint32_t wr_index;
+    uint32_t rd_index;
+    uint32_t data_len[ZXDH_DTB_QUEUE_ITEM_NUM_MAX];
+    ZXDH_DTB_TAB_UP_USER_ADDR_T user_addr[ZXDH_DTB_QUEUE_ITEM_NUM_MAX];
+} ZXDH_DTB_TAB_UP_INFO_T;
+
+typedef struct zxdh_dtb_tab_down_info_t {
+    uint64_t start_phy_addr;
+    uint64_t start_vir_addr;
+    uint32_t item_size;
+    uint32_t wr_index;
+    uint32_t rd_index;
+} ZXDH_DTB_TAB_DOWN_INFO_T;
+
+typedef struct zxdh_dtb_queue_info_t {
+    uint32_t init_flag;
+    uint32_t vport;
+    uint32_t vector;
+    ZXDH_DTB_TAB_UP_INFO_T tab_up;
+    ZXDH_DTB_TAB_DOWN_INFO_T tab_down;
+} ZXDH_DTB_QUEUE_INFO_T;
+
+typedef struct zxdh_dtb_mgr_t {
+    ZXDH_DTB_QUEUE_INFO_T queue_info[ZXDH_DTB_QUEUE_NUM_MAX];
+} ZXDH_DTB_MGR_T;
+
+typedef struct zxdh_ppu_cls_bitmap_t {
+    uint32_t cls_use[ZXDH_PPU_CLUSTER_NUM];
+    uint32_t instr_mem[ZXDH_PPU_INSTR_MEM_NUM];
+} ZXDH_PPU_CLS_BITMAP_T;
+
+typedef struct dpp_sdt_item_t {
+    uint32_t     valid;
+    uint32_t     table_cfg[ZXDH_SDT_CFG_LEN];
+} ZXDH_SDT_ITEM_T;
+
+typedef struct dpp_sdt_soft_table_t {
+    uint32_t          device_id;
+    ZXDH_SDT_ITEM_T  sdt_array[ZXDH_DEV_SDT_ID_MAX];
+} ZXDH_SDT_SOFT_TABLE_T;
+
+typedef struct zxdh_sys_init_ctrl_t {
+    ZXDH_DEV_TYPE_E device_type;
+    uint32_t flags;
+    uint32_t sa_id;
+    uint32_t case_num;
+    uint32_t lif0_port_type;
+    uint32_t lif1_port_type;
+    uint64_t pcie_vir_baddr;
+    uint64_t riscv_vir_baddr;
+    uint64_t dma_vir_baddr;
+    uint64_t dma_phy_baddr;
+} ZXDH_SYS_INIT_CTRL_T;
+
+typedef struct dpp_dev_cfg_t {
+    uint32_t device_id;
+    ZXDH_DEV_TYPE_E dev_type;
+    uint32_t chip_ver;
+    uint32_t access_type;
+    uint32_t agent_flag;
+    uint32_t vport;
+    uint64_t pcie_addr;
+    uint64_t riscv_addr;
+    uint64_t dma_vir_addr;
+    uint64_t dma_phy_addr;
+    uint64_t agent_addr;
+    uint32_t init_flags[ZXDH_MODULE_INIT_MAX];
+} ZXDH_DEV_CFG_T;
+
+typedef struct zxdh_dev_mngr_t {
+    uint32_t         device_num;
+    uint32_t         is_init;
+    ZXDH_DEV_CFG_T       *p_dev_array[ZXDH_DEV_CHANNEL_MAX];
+} ZXDH_DEV_MGR_T;
+
+typedef struct zxdh_dtb_addr_info_t {
+    uint32_t sdt_no;
+    uint32_t size;
+    uint32_t phy_addr;
+    uint32_t vir_addr;
+} ZXDH_DTB_ADDR_INFO_T;
+
+typedef struct zxdh_dev_init_ctrl_t {
+    uint32_t vport;
+    char  port_name[ZXDH_PORT_NAME_MAX];
+    uint32_t vector;
+    uint32_t queue_id;
+    uint32_t np_bar_offset;
+    uint32_t np_bar_len;
+    uint32_t pcie_vir_addr;
+    uint32_t down_phy_addr;
+    uint32_t down_vir_addr;
+    uint32_t dump_phy_addr;
+    uint32_t dump_vir_addr;
+    uint32_t dump_sdt_num;
+    ZXDH_DTB_ADDR_INFO_T dump_addr_info[];
+} ZXDH_DEV_INIT_CTRL_T;
+
+typedef struct zxdh_sdt_mgr_t {
+    uint32_t          channel_num;
+    uint32_t          is_init;
+    ZXDH_SDT_SOFT_TABLE_T *sdt_tbl_array[ZXDH_DEV_CHANNEL_MAX];
+} ZXDH_SDT_MGR_T;
+
+int zxdh_np_host_init(uint32_t dev_id, ZXDH_DEV_INIT_CTRL_T *p_dev_init_ctrl);
+
+#endif /* ZXDH_NP_H */
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
index 06d3f92b20..250e67d560 100644
--- a/drivers/net/zxdh/zxdh_pci.c
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -159,7 +159,7 @@ zxdh_setup_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq)
 
     desc_addr = vq->vq_ring_mem;
     avail_addr = desc_addr + vq->vq_nentries * sizeof(struct zxdh_vring_desc);
-    if (vtpci_packed_queue(vq->hw)) {
+    if (zxdh_pci_packed_queue(vq->hw)) {
         used_addr = RTE_ALIGN_CEIL((avail_addr +
                 sizeof(struct zxdh_vring_packed_desc_event)),
                 ZXDH_PCI_VRING_ALIGN);
diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h
index fe41312cc0..c635b19161 100644
--- a/drivers/net/zxdh/zxdh_pci.h
+++ b/drivers/net/zxdh/zxdh_pci.h
@@ -114,15 +114,15 @@ struct zxdh_pci_common_cfg {
 };
 
 static inline int32_t
-vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit)
+zxdh_pci_with_feature(struct zxdh_hw *hw, uint64_t bit)
 {
     return (hw->guest_features & (1ULL << bit)) != 0;
 }
 
 static inline int32_t
-vtpci_packed_queue(struct zxdh_hw *hw)
+zxdh_pci_packed_queue(struct zxdh_hw *hw)
 {
-    return vtpci_with_feature(hw, ZXDH_F_RING_PACKED);
+    return zxdh_pci_with_feature(hw, ZXDH_F_RING_PACKED);
 }
 
 struct zxdh_pci_ops {
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
index 462a88b23c..b4ef90ea36 100644
--- a/drivers/net/zxdh/zxdh_queue.c
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -13,7 +13,7 @@
 #include "zxdh_msg.h"
 
 struct rte_mbuf *
-zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq)
+zxdh_queue_detach_unused(struct zxdh_virtqueue *vq)
 {
     struct rte_mbuf *cookie = NULL;
     int32_t          idx    = 0;
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 7f8b32f3b8..f4b4c4cd02 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -206,11 +206,11 @@ struct zxdh_tx_region {
 };
 
 static inline size_t
-vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align)
+zxdh_vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align)
 {
     size_t size;
 
-    if (vtpci_packed_queue(hw)) {
+    if (zxdh_pci_packed_queue(hw)) {
         size = num * sizeof(struct zxdh_vring_packed_desc);
         size += sizeof(struct zxdh_vring_packed_desc_event);
         size = RTE_ALIGN_CEIL(size, align);
@@ -226,7 +226,7 @@ vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align)
 }
 
 static inline void
-vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p,
+zxdh_vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p,
         unsigned long align, uint32_t num)
 {
     vr->num    = num;
@@ -238,7 +238,7 @@ vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p,
 }
 
 static inline void
-vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n)
+zxdh_vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n)
 {
     int32_t i = 0;
 
@@ -251,7 +251,7 @@ vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n)
 }
 
 static inline void
-vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n)
+zxdh_vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n)
 {
     int32_t i = 0;
 
@@ -262,7 +262,7 @@ vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n)
 }
 
 static inline void
-virtqueue_disable_intr(struct zxdh_virtqueue *vq)
+zxdh_queue_disable_intr(struct zxdh_virtqueue *vq)
 {
     if (vq->vq_packed.event_flags_shadow != ZXDH_RING_EVENT_FLAGS_DISABLE) {
         vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_DISABLE;
@@ -270,7 +270,7 @@ virtqueue_disable_intr(struct zxdh_virtqueue *vq)
     }
 }
 
-struct rte_mbuf *zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq);
+struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq);
 int32_t zxdh_free_queues(struct rte_eth_dev *dev);
 int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx);
 
-- 
2.27.0