Configure zxdh interrupts, including RISC and DTB interrupts, and release them on teardown.<br /> <br />Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn> <br />---<br /> drivers/net/zxdh/zxdh_ethdev.c | 315 +++++++++++++++++++++++++++++++++<br /> drivers/net/zxdh/zxdh_ethdev.h |   6 +<br /> drivers/net/zxdh/zxdh_msg.c    | 210 ++++++++++++++++++++++<br /> drivers/net/zxdh/zxdh_msg.h    |  16 +-<br /> drivers/net/zxdh/zxdh_pci.c    |  31 ++++<br /> drivers/net/zxdh/zxdh_pci.h    |  11 ++<br /> drivers/net/zxdh/zxdh_queue.h  | 110 ++++++++++++<br /> drivers/net/zxdh/zxdh_rxtx.h   |  55 ++++++<br /> 8 files changed, 752 insertions(+), 2 deletions(-)<br /> create mode 100644 drivers/net/zxdh/zxdh_queue.h<br /> create mode 100644 drivers/net/zxdh/zxdh_rxtx.h<br /> <br />diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c<br />index da5ac3ccd1..1a3658e74b 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev.c<br />+++ b/drivers/net/zxdh/zxdh_ethdev.c<br />@@ -11,6 +11,7 @@<br /> #include "zxdh_pci.h" <br /> #include "zxdh_msg.h" <br /> #include "zxdh_common.h" <br />+#include "zxdh_queue.h" <br />  <br /> struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];<br />  <br />@@ -26,6 +27,315 @@ zxdh_vport_to_vfid(union zxdh_virport_num v)<br />         return (v.epid * 8 + v.pfid) + 1152;<br /> }<br />  <br />+static void<br />+zxdh_queues_unbind_intr(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    int32_t i;<br />+<br />+    for (i = 0; i < dev->data->nb_rx_queues; ++i) {<br />+        ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);<br />+        ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2 + 1], ZXDH_MSI_NO_VECTOR);<br />+    }<br />+}<br />+<br />+<br />+static int32_t<br />+zxdh_intr_unmask(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (rte_intr_ack(dev->intr_handle) < 0)<br />+        return -1;<br />+<br />+    hw->use_msix = zxdh_pci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));<br />+<br />+    return 0;<br />+}<br />+<br />+static void<br />+zxdh_devconf_intr_handler(void *param)<br />+{<br />+    struct rte_eth_dev *dev = param;<br />+<br />+    if (zxdh_intr_unmask(dev) < 0)<br />+        PMD_DRV_LOG(ERR, "interrupt enable failed");<br />+}<br />+<br />+<br />+/* Interrupt handler triggered by NIC for handling messages from the RISC side. */<br />+static void<br />+zxdh_fromriscv_intr_handler(void *param)<br />+{<br />+    struct rte_eth_dev *dev = param;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);<br />+<br />+    if (hw->is_pf) {<br />+        PMD_DRV_LOG(DEBUG, "zxdh_risc2pf_intr_handler");<br />+        zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_RISC, ZXDH_MSG_CHAN_END_PF, virt_addr, dev);<br />+    } else {<br />+        PMD_DRV_LOG(DEBUG, "zxdh_riscvf_intr_handler");<br />+        zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_RISC, ZXDH_MSG_CHAN_END_VF, virt_addr, dev);<br />+    }<br />+}<br />+<br />+/* Interrupt handler triggered by NIC for handling PF/VF channel messages. 
*/<br />+static void<br />+zxdh_frompfvf_intr_handler(void *param)<br />+{<br />+    struct rte_eth_dev *dev = param;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] +<br />+                ZXDH_MSG_CHAN_PFVFSHARE_OFFSET);<br />+<br />+    if (hw->is_pf) {<br />+        PMD_DRV_LOG(DEBUG, "zxdh_vf2pf_intr_handler");<br />+        zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_VF, ZXDH_MSG_CHAN_END_PF, virt_addr, dev);<br />+    } else {<br />+        PMD_DRV_LOG(DEBUG, "zxdh_pf2vf_intr_handler");<br />+        zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_PF, ZXDH_MSG_CHAN_END_VF, virt_addr, dev);<br />+    }<br />+}<br />+<br />+static void<br />+zxdh_intr_cb_reg(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)<br />+        rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);<br />+<br />+    /* register callback to update dev config intr */<br />+    rte_intr_callback_register(dev->intr_handle, zxdh_devconf_intr_handler, dev);<br />+    /* Register risc_v to pf/vf interrupt callback */<br />+    struct rte_intr_handle *tmp = hw->risc_intr +<br />+            (ZXDH_MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);<br />+<br />+    rte_intr_callback_register(tmp, zxdh_frompfvf_intr_handler, dev);<br />+<br />+    tmp = hw->risc_intr + (ZXDH_MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);<br />+    rte_intr_callback_register(tmp, zxdh_fromriscv_intr_handler, dev);<br />+}<br />+<br />+static void<br />+zxdh_intr_cb_unreg(struct rte_eth_dev *dev)<br />+{<br />+    if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)<br />+        rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);<br />+<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    /* unregister callback to update dev config intr */<br />+    rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);<br />+    /* Unregister risc_v to pf/vf interrupt callback */<br />+    struct rte_intr_handle *tmp = hw->risc_intr +<br />+            (ZXDH_MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);<br />+<br />+    rte_intr_callback_unregister(tmp, zxdh_frompfvf_intr_handler, dev);<br />+    tmp = hw->risc_intr + (ZXDH_MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);<br />+    rte_intr_callback_unregister(tmp, zxdh_fromriscv_intr_handler, dev);<br />+}<br />+<br />+static int32_t<br />+zxdh_intr_disable(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (!hw->intr_enabled)<br />+        return 0;<br />+<br />+    zxdh_intr_cb_unreg(dev);<br />+    if (rte_intr_disable(dev->intr_handle) < 0)<br />+        return -1;<br />+<br />+    hw->intr_enabled = 0;<br />+    return 0;<br />+}<br />+<br />+static int32_t<br />+zxdh_intr_enable(struct rte_eth_dev *dev)<br />+{<br />+    int ret = 0;<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (!hw->intr_enabled) {<br />+        zxdh_intr_cb_reg(dev);<br />+        ret = rte_intr_enable(dev->intr_handle);<br />+        if (unlikely(ret))<br />+            PMD_DRV_LOG(ERR, "Failed to enable %s intr", dev->data->name);<br />+<br />+        hw->intr_enabled = 1;<br />+    }<br />+    return ret;<br />+}<br />+<br />+static int32_t<br />+zxdh_intr_release(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (dev->data->dev_flags & 
RTE_ETH_DEV_INTR_LSC)<br />+        ZXDH_VTPCI_OPS(hw)->set_config_irq(hw, ZXDH_MSI_NO_VECTOR);<br />+<br />+    zxdh_queues_unbind_intr(dev);<br />+    zxdh_intr_disable(dev);<br />+<br />+    rte_intr_efd_disable(dev->intr_handle);<br />+    rte_intr_vec_list_free(dev->intr_handle);<br />+    rte_free(hw->risc_intr);<br />+    hw->risc_intr = NULL;<br />+    rte_free(hw->dtb_intr);<br />+    hw->dtb_intr = NULL;<br />+    return 0;<br />+}<br />+<br />+static int32_t<br />+zxdh_setup_risc_interrupts(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint8_t i;<br />+<br />+    if (!hw->risc_intr) {<br />+        PMD_DRV_LOG(DEBUG, "allocating risc_intr");<br />+        hw->risc_intr = rte_zmalloc("risc_intr",<br />+            ZXDH_MSIX_INTR_MSG_VEC_NUM * sizeof(struct rte_intr_handle), 0);<br />+        if (hw->risc_intr == NULL) {<br />+            PMD_DRV_LOG(ERR, "Failed to allocate risc_intr");<br />+            return -ENOMEM;<br />+        }<br />+    }<br />+<br />+    for (i = 0; i < ZXDH_MSIX_INTR_MSG_VEC_NUM; i++) {<br />+        if (dev->intr_handle->efds[i] < 0) {<br />+            PMD_DRV_LOG(ERR, "[%u]risc interrupt fd is invalid", i);<br />+            rte_free(hw->risc_intr);<br />+            hw->risc_intr = NULL;<br />+            return -1;<br />+        }<br />+<br />+        struct rte_intr_handle *intr_handle = hw->risc_intr + i;<br />+<br />+        intr_handle->fd = dev->intr_handle->efds[i];<br />+        intr_handle->type = dev->intr_handle->type;<br />+    }<br />+<br />+    return 0;<br />+}<br />+<br />+static int32_t<br />+zxdh_setup_dtb_interrupts(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+<br />+    if (!hw->dtb_intr) {<br />+        hw->dtb_intr = rte_zmalloc("dtb_intr", sizeof(struct rte_intr_handle), 0);<br />+        if (hw->dtb_intr == NULL) {<br />+            PMD_DRV_LOG(ERR, "Failed to allocate dtb_intr");<br />+            return -ENOMEM;<br />+        }<br />+    }<br />+<br />+    if (dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1] < 0) {<br />+        PMD_DRV_LOG(ERR, "[%d]dtb interrupt fd is invalid", ZXDH_MSIX_INTR_DTB_VEC - 1);<br />+        rte_free(hw->dtb_intr);<br />+        hw->dtb_intr = NULL;<br />+        return -1;<br />+    }<br />+    hw->dtb_intr->fd = dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1];<br />+    hw->dtb_intr->type = dev->intr_handle->type;<br />+    return 0;<br />+}<br />+<br />+static int32_t<br />+zxdh_queues_bind_intr(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    int32_t i;<br />+    uint16_t vec;<br />+<br />+    if (!dev->data->dev_conf.intr_conf.rxq) {<br />+        for (i = 0; i < dev->data->nb_rx_queues; ++i) {<br />+            vec = ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw,<br />+                    hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);<br />+            PMD_DRV_LOG(DEBUG, "vq%d irq set 0x%x, get 0x%x",<br />+                    i * 2, ZXDH_MSI_NO_VECTOR, vec);<br />+        }<br />+    } else {<br />+        for (i = 0; i < dev->data->nb_rx_queues; ++i) {<br />+            vec = ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw,<br />+                    hw->vqs[i * 2], i + ZXDH_QUEUE_INTR_VEC_BASE);<br />+            PMD_DRV_LOG(DEBUG, "vq%d irq set %d, get %d",<br />+                    i * 2, i + ZXDH_QUEUE_INTR_VEC_BASE, vec);<br />+        }<br />+    }<br />+    /* mask all txq intr */<br />+    for (i = 0; i < dev->data->nb_tx_queues; ++i) {<br />+        vec 
= ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw,<br />+                hw->vqs[(i * 2) + 1], ZXDH_MSI_NO_VECTOR);<br />+        PMD_DRV_LOG(DEBUG, "vq%d irq set 0x%x, get 0x%x",<br />+                (i * 2) + 1, ZXDH_MSI_NO_VECTOR, vec);<br />+    }<br />+    return 0;<br />+}<br />+<br />+static int32_t<br />+zxdh_configure_intr(struct rte_eth_dev *dev)<br />+{<br />+    struct zxdh_hw *hw = dev->data->dev_private;<br />+    int32_t ret = 0;<br />+<br />+    if (!rte_intr_cap_multiple(dev->intr_handle)) {<br />+        PMD_DRV_LOG(ERR, "Multiple intr vectors not supported");<br />+        return -ENOTSUP;<br />+    }<br />+    zxdh_intr_release(dev);<br />+    uint8_t nb_efd = ZXDH_MSIX_INTR_DTB_VEC_NUM + ZXDH_MSIX_INTR_MSG_VEC_NUM;<br />+<br />+    if (dev->data->dev_conf.intr_conf.rxq)<br />+        nb_efd += dev->data->nb_rx_queues;<br />+<br />+    if (rte_intr_efd_enable(dev->intr_handle, nb_efd)) {<br />+        PMD_DRV_LOG(ERR, "Failed to create eventfd");<br />+        return -1;<br />+    }<br />+<br />+    if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",<br />+                    hw->max_queue_pairs + ZXDH_INTR_NONQUE_NUM)) {<br />+        PMD_DRV_LOG(ERR, "Failed to allocate %u rxq vectors",<br />+                    hw->max_queue_pairs + ZXDH_INTR_NONQUE_NUM);<br />+        return -ENOMEM;<br />+    }<br />+    PMD_DRV_LOG(DEBUG, "allocated %u rxq vectors", dev->intr_handle->vec_list_size);<br />+    if (zxdh_setup_risc_interrupts(dev) != 0) {<br />+        PMD_DRV_LOG(ERR, "Error setting up risc_v interrupts!");<br />+        ret = -1;<br />+        goto free_intr_vec;<br />+    }<br />+    if (zxdh_setup_dtb_interrupts(dev) != 0) {<br />+        PMD_DRV_LOG(ERR, "Error setting up dtb interrupts!");<br />+        ret = -1;<br />+        goto free_intr_vec;<br />+    }<br />+<br />+    if (zxdh_queues_bind_intr(dev) < 0) {<br />+        PMD_DRV_LOG(ERR, "Failed to bind queue/interrupt");<br />+        ret = -1;<br />+        goto free_intr_vec;<br />+    }<br />+<br />+    if (zxdh_intr_enable(dev) < 0) {<br />+        PMD_DRV_LOG(ERR, "interrupt enable failed");<br />+        ret = -1;<br />+        goto free_intr_vec;<br />+    }<br />+    return 0;<br />+<br />+free_intr_vec:<br />+    zxdh_intr_release(dev);<br />+    return ret;<br />+}<br />+<br /> static int32_t<br /> zxdh_init_device(struct rte_eth_dev *eth_dev)<br /> {<br />@@ -142,9 +452,14 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)<br />     if (ret != 0)<br />         goto err_zxdh_init;<br />  <br />+    ret = zxdh_configure_intr(eth_dev);<br />+    if (ret != 0)<br />+        goto err_zxdh_init;<br />+<br />     return ret;<br />  <br /> err_zxdh_init:<br />+    zxdh_intr_release(eth_dev);<br />     zxdh_bar_msg_chan_exit();<br />     rte_free(eth_dev->data->mac_addrs);<br />     eth_dev->data->mac_addrs = NULL;<br />diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h<br />index 7b7bb16be8..65726f3a20 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev.h<br />+++ b/drivers/net/zxdh/zxdh_ethdev.h<br />@@ -7,6 +7,8 @@<br />  <br /> #include <rte_ether.h> <br /> #include "ethdev_driver.h" <br />+#include <rte_interrupts.h> <br />+#include <eal_interrupts.h> <br />  <br /> #ifdef __cplusplus<br /> extern "C" {<br />@@ -43,6 +45,9 @@ struct zxdh_hw {<br />     struct rte_eth_dev *eth_dev;<br />     struct zxdh_pci_common_cfg *common_cfg;<br />     struct zxdh_net_config *dev_cfg;<br />+    struct rte_intr_handle *risc_intr;<br />+    struct rte_intr_handle *dtb_intr;<br />+    struct 
zxdh_virtqueue **vqs;<br />     union zxdh_virport_num vport;<br />  <br />     uint64_t bar_addr[ZXDH_NUM_BARS];<br />@@ -59,6 +64,7 @@ struct zxdh_hw {<br />  <br />     uint8_t *isr;<br />     uint8_t weak_barriers;<br />+    uint8_t intr_enabled;<br />     uint8_t use_msix;<br />     uint8_t mac_addr[RTE_ETHER_ADDR_LEN];<br />  <br />diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c<br />index 336ba217d3..53cf972f86 100644<br />--- a/drivers/net/zxdh/zxdh_msg.c<br />+++ b/drivers/net/zxdh/zxdh_msg.c<br />@@ -95,6 +95,12 @@<br /> #define ZXDH_BAR_CHAN_INDEX_SEND  0<br /> #define ZXDH_BAR_CHAN_INDEX_RECV  1<br />  <br />+#define ZXDH_BAR_CHAN_MSG_SYNC     0<br />+#define ZXDH_BAR_CHAN_MSG_NO_EMEC  0<br />+#define ZXDH_BAR_CHAN_MSG_EMEC     1<br />+#define ZXDH_BAR_CHAN_MSG_NO_ACK   0<br />+#define ZXDH_BAR_CHAN_MSG_ACK      1<br />+<br /> uint8_t subchan_id_tbl[ZXDH_BAR_MSG_SRC_NUM][ZXDH_BAR_MSG_DST_NUM] = {<br />     {ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND},<br />     {ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_RECV},<br />@@ -137,6 +143,39 @@ static struct zxdh_seqid_ring g_seqid_ring;<br /> static uint8_t tmp_msg_header[ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL];<br /> static rte_spinlock_t chan_lock;<br />  <br />+zxdh_bar_chan_msg_recv_callback msg_recv_func_tbl[ZXDH_BAR_MSG_MODULE_NUM];<br />+<br />+static inline const char<br />+*zxdh_module_id_name(int val)<br />+{<br />+    switch (val) {<br />+    case ZXDH_BAR_MODULE_DBG:        return "ZXDH_BAR_MODULE_DBG";<br />+    case ZXDH_BAR_MODULE_TBL:        return "ZXDH_BAR_MODULE_TBL";<br />+    case ZXDH_BAR_MODULE_MISX:       return "ZXDH_BAR_MODULE_MISX";<br />+    case ZXDH_BAR_MODULE_SDA:        return "ZXDH_BAR_MODULE_SDA";<br />+    case ZXDH_BAR_MODULE_RDMA:       return "ZXDH_BAR_MODULE_RDMA";<br />+    case ZXDH_BAR_MODULE_DEMO:       return "ZXDH_BAR_MODULE_DEMO";<br />+    case ZXDH_BAR_MODULE_SMMU:       return "ZXDH_BAR_MODULE_SMMU";<br />+    case ZXDH_BAR_MODULE_MAC:        return "ZXDH_BAR_MODULE_MAC";<br />+    case ZXDH_BAR_MODULE_VDPA:       return "ZXDH_BAR_MODULE_VDPA";<br />+    case ZXDH_BAR_MODULE_VQM:        return "ZXDH_BAR_MODULE_VQM";<br />+    case ZXDH_BAR_MODULE_NP:         return "ZXDH_BAR_MODULE_NP";<br />+    case ZXDH_BAR_MODULE_VPORT:      return "ZXDH_BAR_MODULE_VPORT";<br />+    case ZXDH_BAR_MODULE_BDF:        return "ZXDH_BAR_MODULE_BDF";<br />+    case ZXDH_BAR_MODULE_RISC_READY: return "ZXDH_BAR_MODULE_RISC_READY";<br />+    case ZXDH_BAR_MODULE_REVERSE:    return "ZXDH_BAR_MODULE_REVERSE";<br />+    case ZXDH_BAR_MDOULE_NVME:       return "ZXDH_BAR_MDOULE_NVME";<br />+    case ZXDH_BAR_MDOULE_NPSDK:      return "ZXDH_BAR_MDOULE_NPSDK";<br />+    case ZXDH_BAR_MODULE_NP_TODO:    return "ZXDH_BAR_MODULE_NP_TODO";<br />+    case ZXDH_MODULE_BAR_MSG_TO_PF:  return "ZXDH_MODULE_BAR_MSG_TO_PF";<br />+    case ZXDH_MODULE_BAR_MSG_TO_VF:  return "ZXDH_MODULE_BAR_MSG_TO_VF";<br />+    case ZXDH_MODULE_FLASH:          return "ZXDH_MODULE_FLASH";<br />+    case ZXDH_BAR_MODULE_OFFSET_GET: return "ZXDH_BAR_MODULE_OFFSET_GET";<br />+    case ZXDH_BAR_EVENT_OVS_WITH_VCB: return "ZXDH_BAR_EVENT_OVS_WITH_VCB";<br />+    default: return "NA";<br />+    }<br />+}<br />+<br /> static uint16_t<br /> zxdh_pcie_id_to_hard_lock(uint16_t src_pcieid, uint8_t dst)<br /> {<br />@@ -825,3 +864,174 @@ zxdh_msg_chan_enable(struct rte_eth_dev *dev)<br />  <br />     return zxdh_bar_chan_enable(&misx_info, &hw->vport.vport);<br /> }<br />+<br 
/>+static uint64_t<br />+zxdh_recv_addr_get(uint8_t src_type, uint8_t dst_type, uint64_t virt_addr)<br />+{<br />+    uint8_t chan_id = 0;<br />+    uint8_t subchan_id = 0;<br />+    uint8_t src = 0;<br />+    uint8_t dst = 0;<br />+<br />+    src = zxdh_bar_msg_dst_index_trans(src_type);<br />+    dst = zxdh_bar_msg_src_index_trans(dst_type);<br />+    if (src == ZXDH_BAR_MSG_SRC_ERR || dst == ZXDH_BAR_MSG_DST_ERR)<br />+        return 0;<br />+<br />+    chan_id = chan_id_tbl[dst][src];<br />+    subchan_id = 1 - subchan_id_tbl[dst][src];<br />+<br />+    return zxdh_subchan_addr_cal(virt_addr, chan_id, subchan_id);<br />+}<br />+<br />+static void<br />+zxdh_bar_msg_ack_async_msg_proc(struct zxdh_bar_msg_header *msg_header,<br />+        uint8_t *receiver_buff)<br />+{<br />+    struct zxdh_seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[msg_header->msg_id];<br />+<br />+    if (reps_info->flag != ZXDH_REPS_INFO_FLAG_USED) {<br />+        PMD_MSG_LOG(ERR, "msg_id: %u is released", msg_header->msg_id);<br />+        return;<br />+    }<br />+    if (msg_header->len > reps_info->buffer_len - 4) {<br />+        PMD_MSG_LOG(ERR, "reps_buf_len is %u, but reps_msg_len is %u",<br />+                reps_info->buffer_len, msg_header->len + 4);<br />+        goto free_id;<br />+    }<br />+    uint8_t *reps_buffer = (uint8_t *)reps_info->reps_addr;<br />+<br />+    rte_memcpy(reps_buffer + 4, receiver_buff, msg_header->len);<br />+    *(uint16_t *)(reps_buffer + 1) = msg_header->len;<br />+    *(uint8_t *)(reps_info->reps_addr) = ZXDH_REPS_HEADER_REPLYED;<br />+<br />+free_id:<br />+    zxdh_bar_chan_msgid_free(msg_header->msg_id);<br />+}<br />+<br />+static void<br />+zxdh_bar_msg_sync_msg_proc(uint64_t reply_addr,<br />+        struct zxdh_bar_msg_header *msg_header,<br />+        uint8_t *receiver_buff, void *dev)<br />+{<br />+    uint16_t reps_len = 0;<br />+    uint8_t *reps_buffer = NULL;<br />+<br />+    reps_buffer = rte_malloc(NULL, ZXDH_BAR_MSG_PAYLOAD_MAX_LEN, 0);<br />+    if (reps_buffer == NULL)<br />+        return;<br />+<br />+    zxdh_bar_chan_msg_recv_callback recv_func = msg_recv_func_tbl[msg_header->module_id];<br />+<br />+    recv_func(receiver_buff, msg_header->len, reps_buffer, &reps_len, dev);<br />+    msg_header->ack = ZXDH_BAR_CHAN_MSG_ACK;<br />+    msg_header->len = reps_len;<br />+    zxdh_bar_chan_msg_header_set(reply_addr, msg_header);<br />+    zxdh_bar_chan_msg_payload_set(reply_addr, reps_buffer, reps_len);<br />+    zxdh_bar_chan_msg_valid_set(reply_addr, ZXDH_BAR_MSG_CHAN_USABLE);<br />+    rte_free(reps_buffer);<br />+}<br />+<br />+static uint64_t<br />+zxdh_reply_addr_get(uint8_t sync, uint8_t src_type,<br />+        uint8_t dst_type, uint64_t virt_addr)<br />+{<br />+    uint64_t recv_rep_addr = 0;<br />+    uint8_t chan_id = 0;<br />+    uint8_t subchan_id = 0;<br />+    uint8_t src = 0;<br />+    uint8_t dst = 0;<br />+<br />+    src = zxdh_bar_msg_dst_index_trans(src_type);<br />+    dst = zxdh_bar_msg_src_index_trans(dst_type);<br />+    if (src == ZXDH_BAR_MSG_SRC_ERR || dst == ZXDH_BAR_MSG_DST_ERR)<br />+        return 0;<br />+<br />+    chan_id = chan_id_tbl[dst][src];<br />+    subchan_id = 1 - subchan_id_tbl[dst][src];<br />+<br />+    if (sync == ZXDH_BAR_CHAN_MSG_SYNC)<br />+        recv_rep_addr = zxdh_subchan_addr_cal(virt_addr, chan_id, subchan_id);<br />+    else<br />+        recv_rep_addr = zxdh_subchan_addr_cal(virt_addr, chan_id, 1 - subchan_id);<br />+<br />+    return recv_rep_addr;<br />+}<br />+<br />+static uint16_t<br 
/>+zxdh_bar_chan_msg_header_check(struct zxdh_bar_msg_header *msg_header)<br />+{<br />+    uint16_t len = 0;<br />+    uint8_t module_id = 0;<br />+<br />+    if (msg_header->valid != ZXDH_BAR_MSG_CHAN_USED) {<br />+        PMD_MSG_LOG(ERR, "recv header ERR: valid label is not used.");<br />+        return ZXDH_BAR_MSG_ERR_MODULE;<br />+    }<br />+    module_id = msg_header->module_id;<br />+<br />+    if (module_id >= (uint8_t)ZXDH_BAR_MSG_MODULE_NUM) {<br />+        PMD_MSG_LOG(ERR, "recv header ERR: invalid module_id: %u.", module_id);<br />+        return ZXDH_BAR_MSG_ERR_MODULE;<br />+    }<br />+    len = msg_header->len;<br />+<br />+    if (len > ZXDH_BAR_MSG_PAYLOAD_MAX_LEN) {<br />+        PMD_MSG_LOG(ERR, "recv header ERR: invalid msg len: %u.", len);<br />+        return ZXDH_BAR_MSG_ERR_LEN;<br />+    }<br />+    if (msg_recv_func_tbl[msg_header->module_id] == NULL) {<br />+        PMD_MSG_LOG(ERR, "recv header ERR: module %s(%u) is not registered",<br />+                zxdh_module_id_name(module_id), module_id);<br />+        return ZXDH_BAR_MSG_ERR_MODULE_NOEXIST;<br />+    }<br />+    return ZXDH_BAR_MSG_OK;<br />+}<br />+<br />+int<br />+zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev)<br />+{<br />+    struct zxdh_bar_msg_header msg_header = {0};<br />+    uint64_t recv_addr = 0;<br />+    uint64_t reps_addr = 0;<br />+    uint16_t ret = 0;<br />+    uint8_t *recved_msg = NULL;<br />+<br />+    recv_addr = zxdh_recv_addr_get(src, dst, virt_addr);<br />+    if (recv_addr == 0) {<br />+        PMD_MSG_LOG(ERR, "invalid driver type(src:%u, dst:%u).", src, dst);<br />+        return -1;<br />+    }<br />+<br />+    zxdh_bar_chan_msg_header_get(recv_addr, &msg_header);<br />+    ret = zxdh_bar_chan_msg_header_check(&msg_header);<br />+<br />+    if (ret != ZXDH_BAR_MSG_OK) {<br />+        PMD_MSG_LOG(ERR, "recv msg_head err, ret: %u.", ret);<br />+        return -1;<br />+    }<br />+<br />+    recved_msg = rte_malloc(NULL, msg_header.len, 0);<br />+    if (recved_msg == NULL) {<br />+        PMD_MSG_LOG(ERR, "malloc temp buff failed.");<br />+        return -1;<br />+    }<br />+    zxdh_bar_chan_msg_payload_get(recv_addr, recved_msg, msg_header.len);<br />+<br />+    reps_addr = zxdh_reply_addr_get(msg_header.sync, src, dst, virt_addr);<br />+<br />+    if (msg_header.sync == ZXDH_BAR_CHAN_MSG_SYNC) {<br />+        zxdh_bar_msg_sync_msg_proc(reps_addr, &msg_header, recved_msg, dev);<br />+        goto exit;<br />+    }<br />+    zxdh_bar_chan_msg_valid_set(recv_addr, ZXDH_BAR_MSG_CHAN_USABLE);<br />+    if (msg_header.ack == ZXDH_BAR_CHAN_MSG_ACK) {<br />+        zxdh_bar_msg_ack_async_msg_proc(&msg_header, recved_msg);<br />+        goto exit;<br />+    }<br />+    goto exit;<br />+<br />+exit:<br />+    rte_free(recved_msg);<br />+    return ZXDH_BAR_MSG_OK;<br />+}<br />diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h<br />index b2beedec64..5742000a3b 100644<br />--- a/drivers/net/zxdh/zxdh_msg.h<br />+++ b/drivers/net/zxdh/zxdh_msg.h<br />@@ -13,10 +13,17 @@<br /> extern "C" {<br /> #endif<br />  <br />-#define ZXDH_BAR0_INDEX     0<br />-#define ZXDH_CTRLCH_OFFSET            (0x2000)<br />+#define ZXDH_BAR0_INDEX                 0<br />+#define ZXDH_CTRLCH_OFFSET              (0x2000)<br />+#define ZXDH_MSG_CHAN_PFVFSHARE_OFFSET  (ZXDH_CTRLCH_OFFSET + 0x1000)<br />  <br /> #define ZXDH_MSIX_INTR_MSG_VEC_BASE   1<br />+#define ZXDH_MSIX_INTR_MSG_VEC_NUM    3<br />+#define ZXDH_MSIX_INTR_DTB_VEC        
(ZXDH_MSIX_INTR_MSG_VEC_BASE + ZXDH_MSIX_INTR_MSG_VEC_NUM)<br />+#define ZXDH_MSIX_INTR_DTB_VEC_NUM    1<br />+#define ZXDH_INTR_NONQUE_NUM          (ZXDH_MSIX_INTR_MSG_VEC_NUM + ZXDH_MSIX_INTR_DTB_VEC_NUM + 1)<br />+#define ZXDH_QUEUE_INTR_VEC_BASE      (ZXDH_MSIX_INTR_DTB_VEC + ZXDH_MSIX_INTR_DTB_VEC_NUM)<br />+#define ZXDH_QUEUE_INTR_VEC_NUM       256<br />  <br /> #define ZXDH_BAR_MSG_POLLING_SPAN     100<br /> #define ZXDH_BAR_MSG_POLL_CNT_PER_MS  (1 * 1000 / ZXDH_BAR_MSG_POLLING_SPAN)<br />@@ -201,6 +208,9 @@ struct zxdh_bar_msg_header {<br />     uint16_t dst_pcieid; /* used in PF-->VF */<br /> };<br />  <br />+typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,<br />+        void *reps_buffer, uint16_t *reps_len, void *dev);<br />+<br /> int zxdh_msg_chan_init(void);<br /> int zxdh_bar_msg_chan_exit(void);<br /> int zxdh_msg_chan_hwlock_init(struct rte_eth_dev *dev);<br />@@ -209,6 +219,8 @@ int zxdh_msg_chan_enable(struct rte_eth_dev *dev);<br /> int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in,<br />         struct zxdh_msg_recviver_mem *result);<br />  <br />+int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev);<br />+<br /> #ifdef __cplusplus<br /> }<br /> #endif<br />diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c<br />index 68785aa03e..8e7a9c1213 100644<br />--- a/drivers/net/zxdh/zxdh_pci.c<br />+++ b/drivers/net/zxdh/zxdh_pci.c<br />@@ -14,6 +14,7 @@<br /> #include "zxdh_ethdev.h" <br /> #include "zxdh_pci.h" <br /> #include "zxdh_logs.h" <br />+#include "zxdh_queue.h" <br />  <br /> #define ZXDH_PMD_DEFAULT_GUEST_FEATURES   \<br />         (1ULL << ZXDH_NET_F_MRG_RXBUF | \<br />@@ -93,6 +94,27 @@ zxdh_set_features(struct zxdh_hw *hw, uint64_t features)<br />     rte_write32(features >> 32, &hw->common_cfg->guest_feature);<br /> }<br />  <br />+static uint16_t<br />+zxdh_set_config_irq(struct zxdh_hw *hw, uint16_t vec)<br />+{<br />+    rte_write16(vec, &hw->common_cfg->msix_config);<br />+    return rte_read16(&hw->common_cfg->msix_config);<br />+}<br />+<br />+static uint16_t<br />+zxdh_set_queue_irq(struct zxdh_hw *hw, struct zxdh_virtqueue *vq, uint16_t vec)<br />+{<br />+    rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);<br />+    rte_write16(vec, &hw->common_cfg->queue_msix_vector);<br />+    return rte_read16(&hw->common_cfg->queue_msix_vector);<br />+}<br />+<br />+static uint8_t<br />+zxdh_get_isr(struct zxdh_hw *hw)<br />+{<br />+    return rte_read8(hw->isr);<br />+}<br />+<br /> const struct zxdh_pci_ops zxdh_dev_pci_ops = {<br />     .read_dev_cfg   = zxdh_read_dev_config,<br />     .write_dev_cfg  = zxdh_write_dev_config,<br />@@ -100,8 +122,17 @@ const struct zxdh_pci_ops zxdh_dev_pci_ops = {<br />     .set_status     = zxdh_set_status,<br />     .get_features   = zxdh_get_features,<br />     .set_features   = zxdh_set_features,<br />+    .set_queue_irq  = zxdh_set_queue_irq,<br />+    .set_config_irq = zxdh_set_config_irq,<br />+    .get_isr        = zxdh_get_isr,<br /> };<br />  <br />+uint8_t<br />+zxdh_pci_isr(struct zxdh_hw *hw)<br />+{<br />+    return ZXDH_VTPCI_OPS(hw)->get_isr(hw);<br />+}<br />+<br /> uint16_t<br /> zxdh_pci_get_features(struct zxdh_hw *hw)<br /> {<br />diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h<br />index 7905911a34..41e47d5d3b 100644<br />--- a/drivers/net/zxdh/zxdh_pci.h<br />+++ b/drivers/net/zxdh/zxdh_pci.h<br />@@ -22,6 +22,13 @@ enum zxdh_msix_status {<br />     ZXDH_MSIX_ENABLED  = 2<br /> };<br 
/>  <br />+/* The bit of the ISR which indicates a device has an interrupt. */<br />+#define ZXDH_PCI_ISR_INTR    0x1<br />+/* The bit of the ISR which indicates a device configuration change. */<br />+#define ZXDH_PCI_ISR_CONFIG  0x2<br />+/* Vector value used to disable MSI for queue. */<br />+#define ZXDH_MSI_NO_VECTOR   0x7F<br />+<br /> #define ZXDH_NET_F_MAC               5   /* Host has given MAC address. */<br /> #define ZXDH_NET_F_MRG_RXBUF         15  /* Host can merge receive buffers. */<br /> #define ZXDH_NET_F_STATUS            16  /* zxdh_net_config.status available */<br />@@ -110,6 +117,9 @@ struct zxdh_pci_ops {<br />  <br />     uint64_t (*get_features)(struct zxdh_hw *hw);<br />     void     (*set_features)(struct zxdh_hw *hw, uint64_t features);<br />+    uint16_t (*set_queue_irq)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq, uint16_t vec);<br />+    uint16_t (*set_config_irq)(struct zxdh_hw *hw, uint16_t vec);<br />+    uint8_t  (*get_isr)(struct zxdh_hw *hw);<br /> };<br />  <br /> struct zxdh_hw_internal {<br />@@ -130,6 +140,7 @@ void zxdh_get_pci_dev_config(struct zxdh_hw *hw);<br />  <br /> uint16_t zxdh_pci_get_features(struct zxdh_hw *hw);<br /> enum zxdh_msix_status zxdh_pci_msix_detect(struct rte_pci_device *dev);<br />+uint8_t zxdh_pci_isr(struct zxdh_hw *hw);<br />  <br /> #ifdef __cplusplus<br /> }<br />diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h<br />new file mode 100644<br />index 0000000000..9c790cd9d3<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_queue.h<br />@@ -0,0 +1,110 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2024 ZTE Corporation<br />+ */<br />+<br />+#ifndef ZXDH_QUEUE_H<br />+#define ZXDH_QUEUE_H<br />+<br />+#include <stdint.h> <br />+<br />+#include <rte_common.h> <br />+<br />+#include "zxdh_ethdev.h" <br />+#include "zxdh_rxtx.h" <br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+/*<br />+ * ring descriptors: 16 bytes.<br />+ * These can chain together via "next".<br />+ */<br />+struct zxdh_vring_desc {<br />+    uint64_t addr;  /*  Address (guest-physical). */<br />+    uint32_t len;   /* Length. */<br />+    uint16_t flags; /* The flags as indicated above. */<br />+    uint16_t next;  /* We chain unused descriptors via this. */<br />+} __rte_packed;<br />+<br />+struct zxdh_vring_avail {<br />+    uint16_t flags;<br />+    uint16_t idx;<br />+    uint16_t ring[];<br />+} __rte_packed;<br />+<br />+struct zxdh_vring_packed_desc {<br />+    uint64_t addr;<br />+    uint32_t len;<br />+    uint16_t id;<br />+    uint16_t flags;<br />+} __rte_packed;<br />+<br />+struct zxdh_vring_packed_desc_event {<br />+    uint16_t desc_event_off_wrap;<br />+    uint16_t desc_event_flags;<br />+} __rte_packed;<br />+<br />+struct zxdh_vring_packed {<br />+    uint32_t num;<br />+    struct zxdh_vring_packed_desc *desc;<br />+    struct zxdh_vring_packed_desc_event *driver;<br />+    struct zxdh_vring_packed_desc_event *device;<br />+} __rte_packed;<br />+<br />+struct zxdh_vq_desc_extra {<br />+    void *cookie;<br />+    uint16_t ndescs;<br />+    uint16_t next;<br />+} __rte_packed;<br />+<br />+struct zxdh_virtqueue {<br />+    struct zxdh_hw  *hw; /* < zxdh_hw structure pointer. 
*/<br />+    struct {<br />+        /* vring keeping descs and events */<br />+        struct zxdh_vring_packed ring;<br />+        uint8_t used_wrap_counter;<br />+        uint8_t rsv;<br />+        uint16_t cached_flags; /* < cached flags for descs */<br />+        uint16_t event_flags_shadow;<br />+        uint16_t rsv1;<br />+    } __rte_packed vq_packed;<br />+    uint16_t vq_used_cons_idx; /* < last consumed descriptor */<br />+    uint16_t vq_nentries;  /* < vring desc numbers */<br />+    uint16_t vq_free_cnt;  /* < num of desc available */<br />+    uint16_t vq_avail_idx; /* < sync until needed */<br />+    uint16_t vq_free_thresh; /* < free threshold */<br />+    uint16_t rsv2;<br />+<br />+    void *vq_ring_virt_mem;  /* < linear address of vring */<br />+    uint32_t vq_ring_size;<br />+<br />+    union {<br />+        struct zxdh_virtnet_rx rxq;<br />+        struct zxdh_virtnet_tx txq;<br />+    };<br />+<br />+    /*<br />+     * physical address of vring, or virtual address<br />+     */<br />+    rte_iova_t vq_ring_mem;<br />+<br />+    /*<br />+     * Head of the free chain in the descriptor table. If<br />+     * there are no free descriptors, this will be set to<br />+     * VQ_RING_DESC_CHAIN_END.<br />+     */<br />+    uint16_t  vq_desc_head_idx;<br />+    uint16_t  vq_desc_tail_idx;<br />+    uint16_t  vq_queue_index;   /* < PCI queue index */<br />+    uint16_t  offset; /* < relative offset to obtain addr in mbuf */<br />+    uint16_t *notify_addr;<br />+    struct rte_mbuf **sw_ring;  /* < RX software ring. */<br />+    struct zxdh_vq_desc_extra vq_descx[];<br />+} __rte_packed;<br />+<br />+#ifdef __cplusplus<br />+}<br />+#endif<br />+<br />+#endif /* ZXDH_QUEUE_H */<br />diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h<br />new file mode 100644<br />index 0000000000..7d4b5481ec<br />--- /dev/null<br />+++ b/drivers/net/zxdh/zxdh_rxtx.h<br />@@ -0,0 +1,55 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2024 ZTE Corporation<br />+ */<br />+<br />+#ifndef ZXDH_RXTX_H<br />+#define ZXDH_RXTX_H<br />+<br />+#include <stdint.h> <br />+<br />+#include <rte_common.h> <br />+#include <rte_mbuf_core.h> <br />+<br />+#ifdef __cplusplus<br />+extern "C" {<br />+#endif<br />+<br />+struct zxdh_virtnet_stats {<br />+    uint64_t packets;<br />+    uint64_t bytes;<br />+    uint64_t errors;<br />+    uint64_t multicast;<br />+    uint64_t broadcast;<br />+    uint64_t truncated_err;<br />+    uint64_t size_bins[8];<br />+};<br />+<br />+struct zxdh_virtnet_rx {<br />+    struct zxdh_virtqueue         *vq;<br />+<br />+    /* dummy mbuf, for wraparound when processing RX ring. */<br />+    struct rte_mbuf           fake_mbuf;<br />+<br />+    uint64_t                  mbuf_initializer; /* value to init mbufs. */<br />+    struct rte_mempool       *mpool;            /* mempool for mbuf allocation */<br />+    uint16_t                  queue_id;         /* DPDK queue index. */<br />+    uint16_t                  port_id;          /* Device port identifier. */<br />+    struct zxdh_virtnet_stats      stats;<br />+    const struct rte_memzone *mz;               /* mem zone to populate RX ring. */<br />+} __rte_packed;<br />+<br />+struct zxdh_virtnet_tx {<br />+    struct zxdh_virtqueue         *vq;<br />+    const struct rte_memzone *zxdh_net_hdr_mz;  /* memzone to populate hdr. 
*/<br />+    rte_iova_t                zxdh_net_hdr_mem; /* hdr for each xmit packet */<br />+    uint16_t                  queue_id;           /* DPDK queue index. */<br />+    uint16_t                  port_id;            /* Device port identifier. */<br />+    struct zxdh_virtnet_stats      stats;<br />+    const struct rte_memzone *mz;                 /* mem zone to populate TX ring. */<br />+} __rte_packed;<br />+<br />+#ifdef __cplusplus<br />+}<br />+#endif<br />+<br />+#endif  /* ZXDH_RXTX_H */<br />--  <br />2.27.0<br />