Provide zxdh dev configure ops for queue check, reset, resource allocation, etc.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/meson.build   |   1 +
 drivers/net/zxdh/zxdh_common.c | 135 ++++++++++
 drivers/net/zxdh/zxdh_common.h |  12 +
 drivers/net/zxdh/zxdh_ethdev.c | 449 +++++++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_ethdev.h |  16 ++
 drivers/net/zxdh/zxdh_pci.c    |  98 +++++++
 drivers/net/zxdh/zxdh_pci.h    |  26 ++
 drivers/net/zxdh/zxdh_queue.c  | 123 +++++++++
 drivers/net/zxdh/zxdh_queue.h  | 172 +++++++++++++
 9 files changed, 1032 insertions(+)
 create mode 100644 drivers/net/zxdh/zxdh_queue.c

diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index a16db47f89..b96aa5a27e 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -18,4 +18,5 @@ sources = files(
         'zxdh_pci.c',
         'zxdh_msg.c',
         'zxdh_common.c',
+        'zxdh_queue.c',
 )
diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c
index 34749588d5..9535791c94 100644
--- a/drivers/net/zxdh/zxdh_common.c
+++ b/drivers/net/zxdh/zxdh_common.c
@@ -20,6 +20,7 @@
 #define ZXDH_COMMON_TABLE_WRITE       1
 
 #define ZXDH_COMMON_FIELD_PHYPORT     6
+#define ZXDH_COMMON_FIELD_DATACH      3
 
 #define ZXDH_RSC_TBL_CONTENT_LEN_MAX  (257 * 2)
 
@@ -248,3 +249,137 @@ int32_t zxdh_panelid_get(struct rte_eth_dev *dev, uint8_t *panelid)
     int32_t ret = zxdh_get_res_panel_id(&param, panelid);
     return ret;
 }
+
+uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);
+    uint32_t val      = *((volatile uint32_t *)(baseaddr + reg));
+    return val;
+}
+
+void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);
+    *((volatile uint32_t *)(baseaddr + reg)) = val;
+}
+
+static bool zxdh_try_lock(struct zxdh_hw *hw)
+{
+    uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG);
+
+    /* check whether the hw lock is available */
+    if (!(var & ZXDH_VF_LOCK_ENABLE_MASK))
+        return false;
+
+    return true;
+}
+
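+/*
+ * Poll the VF lock register until the hw lock is acquired or the retry
+ * budget (ZXDH_ACQUIRE_CHANNEL_NUM_MAX attempts, 'us' microseconds apart)
+ * is exhausted.
+ */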
+int32_t zxdh_timedlock(struct zxdh_hw *hw, uint32_t us)
+{
+    uint16_t timeout = 0;
+
+    while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
+        rte_delay_us_block(us);
+        /* acquire hw lock */
+        if (!zxdh_try_lock(hw)) {
+            PMD_INIT_LOG(ERR, "Failed to acquire hw lock, attempt: %d", timeout);
+            continue;
+        }
+        break;
+    }
+    if (timeout >= ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
+        PMD_INIT_LOG(ERR, "Failed to acquire channel");
+        return -1;
+    }
+    return 0;
+}
+
+void zxdh_release_lock(struct zxdh_hw *hw)
+{
+    uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG);
+
+    if (var & ZXDH_VF_LOCK_ENABLE_MASK) {
+        var &= ~ZXDH_VF_LOCK_ENABLE_MASK;
+        zxdh_write_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG, var);
+    }
+}
+
+uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg)
+{
+    uint32_t val = *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg));
+    return val;
+}
+
+void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val)
+{
+    *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg)) = val;
+}
+
+static int32_t zxdh_common_table_write(struct zxdh_hw *hw, uint8_t field,
+            void *buff, uint16_t buff_size)
+{
+    struct zxdh_pci_bar_msg desc;
+    struct zxdh_msg_recviver_mem msg_rsp;
+    int32_t ret = 0;
+
+    if (!hw->msg_chan_init) {
+        PMD_DRV_LOG(ERR, "Bar messages channel not initialized");
+        return -1;
+    }
+    if (buff_size != 0 && buff == NULL) {
+        PMD_DRV_LOG(ERR, "Buff is invalid");
+        return -1;
+    }
+
+    ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_WRITE,
+                    field, buff, buff_size);
+
+    if (ret != 0) {
+        PMD_DRV_LOG(ERR, "Failed to fill common msg");
+        return ret;
+    }
+
+    ret = zxdh_send_command(hw, &desc, ZXDH_BAR_MODULE_TBL, &msg_rsp);
+    if (ret != 0)
+        goto free_msg_data;
+
+    ret = zxdh_common_rsp_check(&msg_rsp, NULL, 0);
+    if (ret != 0)
+        goto free_rsp_data;
+
+free_rsp_data:
+    rte_free(msg_rsp.recv_buffer);
+free_msg_data:
+    rte_free(desc.payload_addr);
+    return ret;
+}
+
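+/*
+ * Layout of the data channel table written below:
+ *   u16[0]            - number of logic queues
+ *   u16[1..queue_num] - physical channel number of each logic queue
+ */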
+int32_t zxdh_datach_set(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint16_t buff_size = (hw->queue_num + 1) * 2;
+    void *buff = rte_zmalloc(NULL, buff_size, 0);
+
+    if (unlikely(buff == NULL)) {
+        PMD_DRV_LOG(ERR, "Failed to allocate buff");
+        return -ENOMEM;
+    }
+    uint16_t *pdata = (uint16_t *)buff;
+    *pdata++ = hw->queue_num;
+    uint16_t i;
+
+    for (i = 0; i < hw->queue_num; i++)
+        *(pdata + i) = hw->channel_context[i].ph_chno;
+
+    int32_t ret = zxdh_common_table_write(hw, ZXDH_COMMON_FIELD_DATACH,
+                        (void *)buff, buff_size);
+
+    if (ret != 0)
+        PMD_DRV_LOG(ERR, "Failed to setup data channel of common table");
+
+    rte_free(buff);
+    return ret;
+}
diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h
index ba29ca1dad..a26f0d8d6f 100644
--- a/drivers/net/zxdh/zxdh_common.h
+++ b/drivers/net/zxdh/zxdh_common.h
@@ -14,6 +14,10 @@
 extern "C" {
 #endif
 
+#define ZXDH_VF_LOCK_REG               0x90
+#define ZXDH_VF_LOCK_ENABLE_MASK       0x1
+#define ZXDH_ACQUIRE_CHANNEL_NUM_MAX   10
+
 struct zxdh_res_para {
     uint64_t virt_addr;
     uint16_t pcie_id;
@@ -23,6 +27,14 @@ struct zxdh_res_para {
 int32_t zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport);
 int32_t zxdh_panelid_get(struct rte_eth_dev *dev, uint8_t *pannelid);
 
+uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg);
+void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val);
+void zxdh_release_lock(struct zxdh_hw *hw);
+int32_t zxdh_timedlock(struct zxdh_hw *hw, uint32_t us);
+uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg);
+void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val);
+int32_t zxdh_datach_set(struct rte_eth_dev *dev);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index bbdbda3457..41d2fdfead 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -358,8 +358,457 @@ static int32_t zxdh_configure_intr(struct rte_eth_dev *dev)
     return ret;
 }
 
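+/*
+ * Translate the requested rx/tx offloads into guest feature bits, negotiate
+ * them against the host features, and fail if a requested offload cannot be
+ * satisfied by this host.
+ */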
+static int32_t zxdh_features_update(struct zxdh_hw *hw,
+                const struct rte_eth_rxmode *rxmode,
+                const struct rte_eth_txmode *txmode)
+{
+    uint64_t rx_offloads = rxmode->offloads;
+    uint64_t tx_offloads = txmode->offloads;
+    uint64_t req_features = hw->guest_features;
+
+    if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
+        req_features |= (1ULL << ZXDH_NET_F_GUEST_CSUM);
+
+    if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
+        req_features |= (1ULL << ZXDH_NET_F_GUEST_TSO4) |
+                        (1ULL << ZXDH_NET_F_GUEST_TSO6);
+
+    if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
+        req_features |= (1ULL << ZXDH_NET_F_CSUM);
+
+    if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
+        req_features |= (1ULL << ZXDH_NET_F_HOST_TSO4) |
+                        (1ULL << ZXDH_NET_F_HOST_TSO6);
+
+    if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_TSO)
+        req_features |= (1ULL << ZXDH_NET_F_HOST_UFO);
+
+    req_features = req_features & hw->host_features;
+    hw->guest_features = req_features;
+
+    ZXDH_VTPCI_OPS(hw)->set_features(hw, req_features);
+
+    if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
+         !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) {
+        PMD_DRV_LOG(ERR, "rx checksum not available on this host");
+        return -ENOTSUP;
+    }
+
+    if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
+        (!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+         !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) {
+        PMD_DRV_LOG(ERR, "Large Receive Offload not available on this host");
+        return -ENOTSUP;
+    }
+    return 0;
+}
+
+static bool rx_offload_enabled(struct zxdh_hw *hw)
+{
+    return vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||
+           vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+           vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6);
+}
+
+static bool tx_offload_enabled(struct zxdh_hw *hw)
+{
+    return vtpci_with_feature(hw, ZXDH_NET_F_CSUM) ||
+           vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) ||
+           vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) ||
+           vtpci_with_feature(hw, ZXDH_NET_F_HOST_UFO);
+}
+
+static void zxdh_dev_free_mbufs(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint16_t nr_vq = hw->queue_num;
+    uint32_t i = 0;
+
+    const char *type = NULL;
+    struct zxdh_virtqueue *vq = NULL;
+    struct rte_mbuf *buf = NULL;
+    int32_t queue_type = 0;
+
+    if (hw->vqs == NULL)
+        return;
+
+    for (i = 0; i < nr_vq; i++) {
+        vq = hw->vqs[i];
+        if (!vq)
+            continue;
+
+        queue_type = zxdh_get_queue_type(i);
+        if (queue_type == ZXDH_VTNET_RQ)
+            type = "rxq";
+        else if (queue_type == ZXDH_VTNET_TQ)
+            type = "txq";
+        else
+            continue;
+        PMD_INIT_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i);
+
+        while ((buf = zxdh_virtqueue_detach_unused(vq)) != NULL)
+            rte_pktmbuf_free(buf);
+    }
+}
+
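+/*
+ * Each 32-bit word of the COI table tracks 16 rx/tx channel pairs: even
+ * bits map to rx channels and odd bits to tx channels, so the scan below
+ * starts at bit 0 (RQ) or bit 1 (TQ) and steps by two.
+ */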
+static int32_t zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint16_t base    = (queue_type == ZXDH_VTNET_RQ) ? 0 : 1;
+    uint16_t i       = 0;
+    uint16_t j       = 0;
+    uint16_t done    = 0;
+    int32_t ret = 0;
+
+    ret = zxdh_timedlock(hw, 1000);
+    if (ret) {
+        PMD_INIT_LOG(ERR, "Failed to acquire hw lock, timeout");
+        return -1;
+    }
+
+    /* Iterate COI table and find a free channel */
+    for (i = ZXDH_QUEUES_BASE / 32; i < ZXDH_TOTAL_QUEUES_NUM / 32; i++) {
+        uint32_t addr = ZXDH_QUERES_SHARE_BASE + (i * sizeof(uint32_t));
+        uint32_t var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);
+
+        for (j = base; j < 32; j += 2) {
+            /* Got an available channel & update COI table */
+            if ((var & (1 << j)) == 0) {
+                var |= (1 << j);
+                zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+                done = 1;
+                break;
+            }
+        }
+        if (done)
+            break;
+    }
+    zxdh_release_lock(hw);
+    /* check for no channel condition */
+    if (done != 1) {
+        PMD_INIT_LOG(ERR, "No available queues");
+        return -1;
+    }
+    /* return the available channel ID */
+    return (i * 32) + j;
+}
+
+static int32_t zxdh_acquire_channel(struct rte_eth_dev *dev, uint16_t lch)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    if (hw->channel_context[lch].valid == 1) {
+        PMD_INIT_LOG(DEBUG, "Logic channel:%u already acquired Physics channel:%u",
+                lch, hw->channel_context[lch].ph_chno);
+        return hw->channel_context[lch].ph_chno;
+    }
+    int32_t pch = zxdh_get_available_channel(dev, zxdh_get_queue_type(lch));
+
+    if (pch < 0) {
+        PMD_INIT_LOG(ERR, "Failed to acquire channel");
+        return -1;
+    }
+    hw->channel_context[lch].ph_chno = (uint16_t)pch;
+    hw->channel_context[lch].valid = 1;
+    PMD_INIT_LOG(DEBUG, "Acquire channel success lch:%u --> pch:%d", lch, pch);
+    return 0;
+}
+
+static void zxdh_init_vring(struct zxdh_virtqueue *vq)
+{
+    int32_t  size     = vq->vq_nentries;
+    uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+    memset(ring_mem, 0, vq->vq_ring_size);
+
+    vq->vq_used_cons_idx = 0;
+    vq->vq_desc_head_idx = 0;
+    vq->vq_avail_idx     = 0;
+    vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+    vq->vq_free_cnt = vq->vq_nentries;
+    memset(vq->vq_descx, 0, sizeof(struct zxdh_vq_desc_extra) * vq->vq_nentries);
+    vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size);
+    vring_desc_init_packed(vq, size);
+    virtqueue_disable_intr(vq);
+}
+
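+/*
+ * Set up one virtqueue: map the logic queue to its physical channel,
+ * reserve IOVA-contiguous memzones for the ring (and, for tx queues, the
+ * per-packet net headers plus indirect descriptor tables), then hand the
+ * ring addresses to hardware via setup_queue().
+ */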
+static int32_t zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
+{
+    char vq_name[ZXDH_VIRTQUEUE_MAX_NAME_SZ] = {0};
+    char vq_hdr_name[ZXDH_VIRTQUEUE_MAX_NAME_SZ] = {0};
+    const struct rte_memzone *mz = NULL;
+    const struct rte_memzone *hdr_mz = NULL;
+    uint32_t size = 0;
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_virtnet_rx *rxvq = NULL;
+    struct zxdh_virtnet_tx *txvq = NULL;
+    struct zxdh_virtqueue *vq = NULL;
+    size_t sz_hdr_mz = 0;
+    void *sw_ring = NULL;
+    int32_t queue_type = zxdh_get_queue_type(vtpci_logic_qidx);
+    int32_t numa_node = dev->device->numa_node;
+    uint16_t vtpci_phy_qidx = 0;
+    uint32_t vq_size = 0;
+    int32_t ret = 0;
+
+    if (hw->channel_context[vtpci_logic_qidx].valid == 0) {
+        PMD_INIT_LOG(ERR, "lch %d is invalid", vtpci_logic_qidx);
+        return -EINVAL;
+    }
+    vtpci_phy_qidx = hw->channel_context[vtpci_logic_qidx].ph_chno;
+
+    PMD_INIT_LOG(DEBUG, "vtpci_logic_qidx :%d setting up physical queue: %u on NUMA node %d",
+            vtpci_logic_qidx, vtpci_phy_qidx, numa_node);
+
+    vq_size = ZXDH_QUEUE_DEPTH;
+
+    if (ZXDH_VTPCI_OPS(hw)->set_queue_num != NULL)
+        ZXDH_VTPCI_OPS(hw)->set_queue_num(hw, vtpci_phy_qidx, vq_size);
+
+    snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, vtpci_phy_qidx);
+
+    size = RTE_ALIGN_CEIL(sizeof(*vq) + vq_size * sizeof(struct zxdh_vq_desc_extra),
+                RTE_CACHE_LINE_SIZE);
+    if (queue_type == ZXDH_VTNET_TQ) {
+        /*
+         * For each xmit packet, allocate a zxdh_net_hdr
+         * and indirect ring elements
+         */
+        sz_hdr_mz = vq_size * sizeof(struct zxdh_tx_region);
+    }
+
+    vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE, numa_node);
+    if (vq == NULL) {
+        PMD_INIT_LOG(ERR, "cannot allocate vq");
+        return -ENOMEM;
+    }
+    hw->vqs[vtpci_logic_qidx] = vq;
+
+    vq->hw = hw;
+    vq->vq_queue_index = vtpci_phy_qidx;
+    vq->vq_nentries = vq_size;
+
+    vq->vq_packed.used_wrap_counter = 1;
+    vq->vq_packed.cached_flags = ZXDH_VRING_PACKED_DESC_F_AVAIL;
+    vq->vq_packed.event_flags_shadow = 0;
+    if (queue_type == ZXDH_VTNET_RQ)
+        vq->vq_packed.cached_flags |= ZXDH_VRING_DESC_F_WRITE;
+
+    /*
+     * Reserve a memzone for vring elements
+     */
+    size = vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN);
+    vq->vq_ring_size = RTE_ALIGN_CEIL(size, ZXDH_PCI_VRING_ALIGN);
+    PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
+
+    mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+                numa_node, RTE_MEMZONE_IOVA_CONTIG,
+                ZXDH_PCI_VRING_ALIGN);
+    if (mz == NULL) {
+        if (rte_errno == EEXIST)
+            mz = rte_memzone_lookup(vq_name);
+        if (mz == NULL) {
+            ret = -ENOMEM;
+            goto fail_q_alloc;
+        }
+    }
+
+    memset(mz->addr, 0, mz->len);
+
+    vq->vq_ring_mem = mz->iova;
+    vq->vq_ring_virt_mem = mz->addr;
+
+    zxdh_init_vring(vq);
+
+    if (sz_hdr_mz) {
+        snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
+                    dev->data->port_id, vtpci_phy_qidx);
+        hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
+                    numa_node, RTE_MEMZONE_IOVA_CONTIG,
+                    RTE_CACHE_LINE_SIZE);
+        if (hdr_mz == NULL) {
+            if (rte_errno == EEXIST)
+                hdr_mz = rte_memzone_lookup(vq_hdr_name);
+            if (hdr_mz == NULL) {
+                ret = -ENOMEM;
+                goto fail_q_alloc;
+            }
+        }
+    }
+
+    if (queue_type == ZXDH_VTNET_RQ) {
+        size_t sz_sw = (ZXDH_MBUF_BURST_SZ + vq_size) * sizeof(vq->sw_ring[0]);
+
+        sw_ring = rte_zmalloc_socket("sw_ring", sz_sw, RTE_CACHE_LINE_SIZE, numa_node);
+        if (!sw_ring) {
+            PMD_INIT_LOG(ERR, "cannot allocate RX soft ring");
+            ret = -ENOMEM;
+            goto fail_q_alloc;
+        }
+
+        vq->sw_ring = sw_ring;
+        rxvq = &vq->rxq;
+        rxvq->vq = vq;
+        rxvq->port_id = dev->data->port_id;
+        rxvq->mz = mz;
+    } else {             /* queue_type == ZXDH_VTNET_TQ */
+        txvq = &vq->txq;
+        txvq->vq = vq;
+        txvq->port_id = dev->data->port_id;
+        txvq->mz = mz;
+        txvq->zxdh_net_hdr_mz = hdr_mz;
+        txvq->zxdh_net_hdr_mem = hdr_mz->iova;
+    }
+
+    vq->offset = offsetof(struct rte_mbuf, buf_iova);
+    if (queue_type == ZXDH_VTNET_TQ) {
+        struct zxdh_tx_region *txr = hdr_mz->addr;
+        uint32_t i;
+
+        memset(txr, 0, vq_size * sizeof(*txr));
+        for (i = 0; i < vq_size; i++) {
+            /* first indirect descriptor is always the tx header */
+            struct zxdh_vring_packed_desc *start_dp = txr[i].tx_packed_indir;
+
+            vring_desc_init_indirect_packed(start_dp, RTE_DIM(txr[i].tx_packed_indir));
+            start_dp->addr = txvq->zxdh_net_hdr_mem + i * sizeof(*txr) +
+                    offsetof(struct zxdh_tx_region, tx_hdr);
+            /* length will be updated to actual pi hdr size when xmit pkt */
+            start_dp->len = 0;
+        }
+    }
+    if (ZXDH_VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+        PMD_INIT_LOG(ERR, "setup_queue failed");
+        return -EINVAL;
+    }
+    return 0;
+fail_q_alloc:
+    rte_free(sw_ring);
+    rte_memzone_free(hdr_mz);
+    rte_memzone_free(mz);
+    rte_free(vq);
+    return ret;
+}
+
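+/*
+ * Allocate the vq pointer array and bring up every logic queue; on any
+ * failure, all previously acquired channels and queues are released.
+ */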
+static int32_t zxdh_alloc_queues(struct rte_eth_dev *dev, uint16_t nr_vq)
+{
+    uint16_t lch;
+    struct zxdh_hw *hw = dev->data->dev_private;
+
+    hw->vqs = rte_zmalloc(NULL, sizeof(struct zxdh_virtqueue *) * nr_vq, 0);
+    if (!hw->vqs) {
+        PMD_INIT_LOG(ERR, "Failed to allocate vqs");
+        return -ENOMEM;
+    }
+    for (lch = 0; lch < nr_vq; lch++) {
+        if (zxdh_acquire_channel(dev, lch) < 0) {
+            PMD_INIT_LOG(ERR, "Failed to acquire the channels");
+            zxdh_free_queues(dev);
+            return -1;
+        }
+        if (zxdh_init_queue(dev, lch) < 0) {
+            PMD_INIT_LOG(ERR, "Failed to allocate virtqueue");
+            zxdh_free_queues(dev);
+            return -1;
+        }
+    }
+    return 0;
+}
+
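+/*
+ * Validate the requested queue counts and multi-queue modes, negotiate
+ * offload features, and rebuild the virtqueues when the queue number
+ * changes, which requires a device reset.
+ */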
+static int32_t zxdh_dev_configure(struct rte_eth_dev *dev)
+{
+    const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+    const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint32_t nr_vq = 0;
+    int32_t  ret = 0;
+
+    if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
+        PMD_INIT_LOG(ERR, "nb_rx_queues=%d and nb_tx_queues=%d not equal!",
+                     dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+        return -EINVAL;
+    }
+    if ((dev->data->nb_rx_queues + dev->data->nb_tx_queues) >= ZXDH_QUEUES_NUM_MAX) {
+        PMD_INIT_LOG(ERR, "nb_rx_queues=%d + nb_tx_queues=%d must < (%d)!",
+                     dev->data->nb_rx_queues, dev->data->nb_tx_queues,
+                     ZXDH_QUEUES_NUM_MAX);
+        return -EINVAL;
+    }
+    if (rxmode->mq_mode != RTE_ETH_MQ_RX_RSS && rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
+        PMD_DRV_LOG(ERR, "Unsupported Rx multi queue mode %d", rxmode->mq_mode);
+        return -EINVAL;
+    }
+
+    if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
+        PMD_DRV_LOG(ERR, "Unsupported Tx multi queue mode %d", txmode->mq_mode);
+        return -EINVAL;
+    }
+
+    ret = zxdh_features_update(hw, rxmode, txmode);
+    if (ret < 0)
+        return ret;
+
+    /* check if lsc interrupt feature is enabled */
+    if (dev->data->dev_conf.intr_conf.lsc) {
+        if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
+            PMD_DRV_LOG(ERR, "link status not supported by host");
+            return -ENOTSUP;
+        }
+    }
+
+    hw->has_tx_offload = tx_offload_enabled(hw);
+    hw->has_rx_offload = rx_offload_enabled(hw);
+
+    nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues;
+    if (nr_vq == hw->queue_num)
+        return 0;
+
+    PMD_DRV_LOG(DEBUG, "queue number changed, device reset is needed");
+    /* Reset the device although not necessary at startup */
+    zxdh_pci_reset(hw);
+
+    /* Tell the host we've noticed this device. */
+    zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_ACK);
+
+    /* Tell the host we've known how to drive the device. */
+    zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER);
+    /* The queues need to be released when reconfiguring */
+    if (hw->vqs != NULL) {
+        zxdh_dev_free_mbufs(dev);
+        zxdh_free_queues(dev);
+    }
+
+    hw->queue_num = nr_vq;
+    ret = zxdh_alloc_queues(dev, nr_vq);
+    if (ret < 0)
+        return ret;
+
+    zxdh_datach_set(dev);
+
+    if (zxdh_configure_intr(dev) < 0) {
+        PMD_INIT_LOG(ERR, "Failed to configure interrupt");
+        zxdh_free_queues(dev);
+        return -1;
+    }
+
+    zxdh_pci_reinit_complete(hw);
+
+    return ret;
+}
+
 /* dev_ops for zxdh, bare necessities for basic operation */
 static const struct eth_dev_ops zxdh_eth_dev_ops = {
+    .dev_configure             = zxdh_dev_configure,
     .dev_infos_get             = zxdh_dev_infos_get,
 };
 
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 89c5a9bb5f..28e78b0086 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -31,6 +31,13 @@ extern "C" {
 #define ZXDH_TX_QUEUES_MAX        128U
 #define ZXDH_MIN_RX_BUFSIZE       64
 #define ZXDH_MAX_RX_PKTLEN        14000U
+#define ZXDH_QUEUE_DEPTH          1024
+#define ZXDH_QUEUES_BASE          0
+#define ZXDH_TOTAL_QUEUES_NUM     4096
+#define ZXDH_QUEUES_NUM_MAX       256
+#define ZXDH_QUERES_SHARE_BASE    (0x5000)
+
+#define ZXDH_MBUF_BURST_SZ        64
 
 union zxdh_virport_num {
     uint16_t vport;
@@ -43,6 +50,11 @@ union zxdh_virport_num {
     };
 };
 
+struct zxdh_chnl_context {
+    uint16_t valid;
+    uint16_t ph_chno;
+};
+
 struct zxdh_hw {
     struct rte_eth_dev *eth_dev;
     struct zxdh_pci_common_cfg *common_cfg;
@@ -50,6 +62,7 @@ struct zxdh_hw {
     struct rte_intr_handle *risc_intr;
     struct rte_intr_handle *dtb_intr;
     struct zxdh_virtqueue **vqs;
+    struct zxdh_chnl_context channel_context[ZXDH_QUEUES_NUM_MAX];
     union zxdh_virport_num vport;
 
     uint64_t bar_addr[ZXDH_NUM_BARS];
@@ -63,6 +76,7 @@ struct zxdh_hw {
     uint16_t device_id;
     uint16_t port_id;
     uint16_t vfid;
+    uint16_t queue_num;
 
     uint8_t *isr;
     uint8_t weak_barriers;
@@ -75,6 +89,8 @@ struct zxdh_hw {
     uint8_t msg_chan_init;
     uint8_t phyport;
     uint8_t panel_id;
+    uint8_t has_tx_offload;
+    uint8_t has_rx_offload;
 };
 
 uint16_t zxdh_vport_to_vfid(union zxdh_virport_num v);
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
index 65164c86b7..165d9f49a3 100644
--- a/drivers/net/zxdh/zxdh_pci.c
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -110,6 +110,87 @@ static uint8_t zxdh_get_isr(struct zxdh_hw *hw)
     return rte_read8(hw->isr);
 }
 
+static uint16_t zxdh_get_queue_num(struct zxdh_hw *hw, uint16_t queue_id)
+{
+    rte_write16(queue_id, &hw->common_cfg->queue_select);
+    return rte_read16(&hw->common_cfg->queue_size);
+}
+
+static void zxdh_set_queue_num(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size)
+{
+    rte_write16(queue_id, &hw->common_cfg->queue_select);
+    rte_write16(vq_size, &hw->common_cfg->queue_size);
+}
+
+static int32_t check_vq_phys_addr_ok(struct zxdh_virtqueue *vq)
+{
+    if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> (ZXDH_PCI_QUEUE_ADDR_SHIFT + 32)) {
+        PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
+        return 0;
+    }
+    return 1;
+}
+
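+/* Split a 64-bit value across a pair of 32-bit registers (low word first). */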
+static inline void io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
+{
+    rte_write32(val & ((1ULL << 32) - 1), lo);
+    rte_write32(val >> 32, hi);
+}
+
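+/*
+ * Program the descriptor, driver and device ring addresses for the selected
+ * queue, resolve its notify address, and enable the queue. For packed rings
+ * the device event area follows the driver event area, aligned to
+ * ZXDH_PCI_VRING_ALIGN.
+ */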
+static int32_t zxdh_setup_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq)
+{
+    uint64_t desc_addr  = 0;
+    uint64_t avail_addr = 0;
+    uint64_t used_addr  = 0;
+    uint16_t notify_off = 0;
+
+    if (!check_vq_phys_addr_ok(vq))
+        return -1;
+
+    desc_addr = vq->vq_ring_mem;
+    avail_addr = desc_addr + vq->vq_nentries * sizeof(struct zxdh_vring_desc);
+    if (vtpci_packed_queue(vq->hw)) {
+        used_addr = RTE_ALIGN_CEIL((avail_addr +
+                sizeof(struct zxdh_vring_packed_desc_event)),
+                ZXDH_PCI_VRING_ALIGN);
+    } else {
+        used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct zxdh_vring_avail,
+                        ring[vq->vq_nentries]), ZXDH_PCI_VRING_ALIGN);
+    }
+
+    rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+    io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
+                       &hw->common_cfg->queue_desc_hi);
+    io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
+                       &hw->common_cfg->queue_avail_hi);
+    io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
+                       &hw->common_cfg->queue_used_hi);
+
+    notify_off = rte_read16(&hw->common_cfg->queue_notify_off); /* default 0 */
+    notify_off = 0;
+    vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
+            notify_off * hw->notify_off_multiplier);
+
+    rte_write16(1, &hw->common_cfg->queue_enable);
+
+    return 0;
+}
+
+static void zxdh_del_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq)
+{
+    rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+    io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
+                       &hw->common_cfg->queue_desc_hi);
+    io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
+                       &hw->common_cfg->queue_avail_hi);
+    io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
+                       &hw->common_cfg->queue_used_hi);
+
+    rte_write16(0, &hw->common_cfg->queue_enable);
+}
+
 const struct zxdh_pci_ops zxdh_dev_pci_ops = {
     .read_dev_cfg   = zxdh_read_dev_config,
     .write_dev_cfg  = zxdh_write_dev_config,
@@ -120,6 +201,10 @@ const struct zxdh_pci_ops zxdh_dev_pci_ops = {
     .set_queue_irq  = zxdh_set_queue_irq,
     .set_config_irq = zxdh_set_config_irq,
     .get_isr        = zxdh_get_isr,
+    .get_queue_num  = zxdh_get_queue_num,
+    .set_queue_num  = zxdh_set_queue_num,
+    .setup_queue    = zxdh_setup_queue,
+    .del_queue      = zxdh_del_queue,
 };
 
 uint8_t zxdh_pci_isr(struct zxdh_hw *hw)
@@ -146,6 +231,19 @@ void zxdh_pci_reset(struct zxdh_hw *hw)
     PMD_INIT_LOG(INFO, "port %u device reset %u ms done", hw->port_id, retry);
 }
 
+void zxdh_pci_reinit_complete(struct zxdh_hw *hw)
+{
+    zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER_OK);
+}
+
+void zxdh_pci_set_status(struct zxdh_hw *hw, uint8_t status)
+{
+    if (status != ZXDH_CONFIG_STATUS_RESET)
+        status |= ZXDH_VTPCI_OPS(hw)->get_status(hw);
+
+    ZXDH_VTPCI_OPS(hw)->set_status(hw, status);
+}
+
 static void *get_cfg_addr(struct rte_pci_device *dev, struct zxdh_pci_cap *cap)
 {
     uint8_t  bar    = cap->bar;
diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h
index f362658ba6..e86667357b 100644
--- a/drivers/net/zxdh/zxdh_pci.h
+++ b/drivers/net/zxdh/zxdh_pci.h
@@ -29,7 +29,20 @@ enum zxdh_msix_status {
 /* Vector value used to disable MSI for queue. */
 #define ZXDH_MSI_NO_VECTOR   0x7F
 
+#define ZXDH_PCI_VRING_ALIGN         4096
+
+#define ZXDH_NET_F_CSUM              0   /* Host handles pkts w/ partial csum */
+#define ZXDH_NET_F_GUEST_CSUM        1   /* Guest handles pkts w/ partial csum */
+#define ZXDH_NET_F_MTU               3   /* Initial MTU advice. */
 #define ZXDH_NET_F_MAC               5   /* Host has given MAC address. */
+#define ZXDH_NET_F_GUEST_TSO4        7   /* Guest can handle TSOv4 in. */
+#define ZXDH_NET_F_GUEST_TSO6        8   /* Guest can handle TSOv6 in. */
+#define ZXDH_NET_F_GUEST_ECN         9   /* Guest can handle TSO[6] w/ ECN in. */
+#define ZXDH_NET_F_GUEST_UFO         10  /* Guest can handle UFO in. */
+
+#define ZXDH_NET_F_HOST_TSO4         11  /* Host can handle TSOv4 in. */
+#define ZXDH_NET_F_HOST_TSO6         12  /* Host can handle TSOv6 in. */
+#define ZXDH_NET_F_HOST_UFO          14  /* Host can handle UFO in. */
 #define ZXDH_NET_F_MRG_RXBUF         15  /* Host can merge receive buffers. */
 #define ZXDH_NET_F_STATUS            16  /* zxdh_net_config.status available */
 #define ZXDH_NET_F_MQ                22  /* Device supports Receive Flow Steering */
@@ -53,6 +66,7 @@ enum zxdh_msix_status {
 #define ZXDH_CONFIG_STATUS_FEATURES_OK     0x08
 #define ZXDH_CONFIG_STATUS_DEV_NEED_RESET  0x40
 #define ZXDH_CONFIG_STATUS_FAILED          0x80
+#define ZXDH_PCI_QUEUE_ADDR_SHIFT          12
 
 struct zxdh_net_config {
     /* The config defining mac address (if ZXDH_NET_F_MAC) */
@@ -108,6 +122,11 @@ static inline int32_t vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit)
     return (hw->guest_features & (1ULL << bit)) != 0;
 }
 
+static inline int32_t vtpci_packed_queue(struct zxdh_hw *hw)
+{
+    return vtpci_with_feature(hw, ZXDH_F_RING_PACKED);
+}
+
 struct zxdh_pci_ops {
     void     (*read_dev_cfg)(struct zxdh_hw *hw, size_t offset, void *dst, int32_t len);
     void     (*write_dev_cfg)(struct zxdh_hw *hw, size_t offset, const void *src, int32_t len);
@@ -120,6 +139,11 @@ struct zxdh_pci_ops {
     uint16_t (*set_queue_irq)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq, uint16_t vec);
     uint16_t (*set_config_irq)(struct zxdh_hw *hw, uint16_t vec);
     uint8_t  (*get_isr)(struct zxdh_hw *hw);
+    uint16_t (*get_queue_num)(struct zxdh_hw *hw, uint16_t queue_id);
+    void     (*set_queue_num)(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size);
+
+    int32_t  (*setup_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq);
+    void     (*del_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq);
 };
 
 struct zxdh_hw_internal {
@@ -141,6 +165,8 @@ int32_t zxdh_get_pci_dev_config(struct zxdh_hw *hw);
 uint16_t zxdh_pci_get_features(struct zxdh_hw *hw);
 enum zxdh_msix_status zxdh_pci_msix_detect(struct rte_pci_device *dev);
 uint8_t zxdh_pci_isr(struct zxdh_hw *hw);
+void zxdh_pci_reinit_complete(struct zxdh_hw *hw);
+void zxdh_pci_set_status(struct zxdh_hw *hw, uint8_t status);
 
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
new file mode 100644
index 0000000000..2978a9f272
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdint.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "zxdh_queue.h"
+#include "zxdh_logs.h"
+#include "zxdh_pci.h"
+#include "zxdh_common.h"
+#include "zxdh_msg.h"
+
+struct rte_mbuf *zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq)
+{
+    struct rte_mbuf *cookie = NULL;
+    int32_t          idx    = 0;
+
+    if (vq == NULL)
+        return NULL;
+
+    for (idx = 0; idx < vq->vq_nentries; idx++) {
+        cookie = vq->vq_descx[idx].cookie;
+        if (cookie != NULL) {
+            vq->vq_descx[idx].cookie = NULL;
+            return cookie;
+        }
+    }
+    return NULL;
+}
+
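+/*
+ * Walk the channel context table and clear each allocated channel's bit in
+ * the shared COI table, serialized by the hw lock.
+ */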
+static int32_t zxdh_release_channel(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint16_t nr_vq = hw->queue_num;
+    uint32_t var  = 0;
+    uint32_t addr = 0;
+    uint32_t widx = 0;
+    uint32_t bidx = 0;
+    uint16_t pch  = 0;
+    uint16_t lch  = 0;
+    int32_t ret = 0;
+
+    ret = zxdh_timedlock(hw, 1000);
+    if (ret) {
+        PMD_INIT_LOG(ERR, "Failed to acquire hw lock, timeout");
+        return -1;
+    }
+
+    for (lch = 0; lch < nr_vq; lch++) {
+        if (hw->channel_context[lch].valid == 0) {
+            PMD_INIT_LOG(DEBUG, "Logic channel %d does not need to be released", lch);
+            continue;
+        }
+
+        pch  = hw->channel_context[lch].ph_chno;
+        widx = pch / 32;
+        bidx = pch % 32;
+
+        addr = ZXDH_QUERES_SHARE_BASE + (widx * sizeof(uint32_t));
+        var  = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);
+        var &= ~(1 << bidx);
+        zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+
+        hw->channel_context[lch].valid = 0;
+        hw->channel_context[lch].ph_chno = 0;
+    }
+
+    zxdh_release_lock(hw);
+
+    return 0;
+}
+
+int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx)
+{
+    if (vtpci_queue_idx % 2 == 0)
+        return ZXDH_VTNET_RQ;
+    else
+        return ZXDH_VTNET_TQ;
+}
+
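+/*
+ * Tear down every virtqueue: release the COI channels, deprogram each queue
+ * in hardware, then free the rings, soft rings, header memzones and the vq
+ * array itself.
+ */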
+int32_t zxdh_free_queues(struct rte_eth_dev *dev)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint16_t nr_vq = hw->queue_num;
+    struct zxdh_virtqueue *vq = NULL;
+    int32_t queue_type = 0;
+    uint16_t i = 0;
+
+    if (hw->vqs == NULL)
+        return 0;
+
+    if (zxdh_release_channel(dev) < 0) {
+        PMD_INIT_LOG(ERR, "Failed to clear coi table");
+        return -1;
+    }
+
+    for (i = 0; i < nr_vq; i++) {
+        vq = hw->vqs[i];
+        if (vq == NULL)
+            continue;
+
+        ZXDH_VTPCI_OPS(hw)->del_queue(hw, vq);
+        queue_type = zxdh_get_queue_type(i);
+        if (queue_type == ZXDH_VTNET_RQ) {
+            rte_free(vq->sw_ring);
+            rte_memzone_free(vq->rxq.mz);
+        } else if (queue_type == ZXDH_VTNET_TQ) {
+            rte_memzone_free(vq->txq.mz);
+            rte_memzone_free(vq->txq.zxdh_net_hdr_mz);
+        }
+
+        rte_free(vq);
+        hw->vqs[i] = NULL;
+        PMD_INIT_LOG(DEBUG, "Released queue %d successfully!", i);
+    }
+
+    rte_free(hw->vqs);
+    hw->vqs = NULL;
+
+    return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 0b6f48adf9..683c4e7980 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -11,11 +11,30 @@
 
 #include "zxdh_ethdev.h"
 #include "zxdh_rxtx.h"
+#include "zxdh_pci.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
+enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
+
+#define ZXDH_VIRTQUEUE_MAX_NAME_SZ        32
+#define ZXDH_RQ_QUEUE_IDX                 0
+#define ZXDH_TQ_QUEUE_IDX                 1
+#define ZXDH_MAX_TX_INDIRECT              8
+
+/* This marks a buffer as write-only (otherwise read-only). */
+#define ZXDH_VRING_DESC_F_WRITE           2
+/* This flag means the descriptor was made available by the driver */
+#define ZXDH_VRING_PACKED_DESC_F_AVAIL   (1 << (7))
+
+#define ZXDH_RING_EVENT_FLAGS_ENABLE      0x0
+#define ZXDH_RING_EVENT_FLAGS_DISABLE     0x1
+#define ZXDH_RING_EVENT_FLAGS_DESC        0x2
+
+#define ZXDH_VQ_RING_DESC_CHAIN_END       32768
+
 /** ring descriptors: 16 bytes.
  * These can chain together via "next".
  **/
@@ -26,6 +45,19 @@ struct zxdh_vring_desc {
     uint16_t next;  /* We chain unused descriptors via this. */
 } __rte_packed;
 
+struct zxdh_vring_used_elem {
+    /* Index of start of used descriptor chain. */
+    uint32_t id;
+    /* Total length of the descriptor chain which was written to. */
+    uint32_t len;
+};
+
+struct zxdh_vring_used {
+    uint16_t flags;
+    uint16_t idx;
+    struct zxdh_vring_used_elem ring[];
+} __rte_packed;
+
 struct zxdh_vring_avail {
     uint16_t flags;
     uint16_t idx;
@@ -102,6 +134,146 @@ struct zxdh_virtqueue {
     struct zxdh_vq_desc_extra vq_descx[];
 } __rte_packed;
 
+struct zxdh_type_hdr {
+    uint8_t port;  /* bit[0:1] 00-np 01-DRS 10-DTP */
+    uint8_t pd_len;
+    uint8_t num_buffers;
+    uint8_t reserved;
+} __rte_packed; /* 4B */
+
+struct zxdh_pi_hdr {
+    uint8_t  pi_len;
+    uint8_t  pkt_type;
+    uint16_t vlan_id;
+    uint32_t ipv6_extend;
+    uint16_t l3_offset;
+    uint16_t l4_offset;
+    uint8_t  phy_port;
+    uint8_t  pkt_flag_hi8;
+    uint16_t pkt_flag_lw16;
+    union {
+        struct {
+            uint64_t sa_idx;
+            uint8_t  reserved_8[8];
+        } dl;
+        struct {
+            uint32_t lro_flag;
+            uint32_t lro_mss;
+            uint16_t err_code;
+            uint16_t pm_id;
+            uint16_t pkt_len;
+            uint8_t  reserved[2];
+        } ul;
+    };
+} __rte_packed; /* 32B */
+
+struct zxdh_pd_hdr_dl {
+    uint32_t ol_flag;
+    uint8_t tag_idx;
+    uint8_t tag_data;
+    uint16_t dst_vfid;
+    uint32_t svlan_insert;
+    uint32_t cvlan_insert;
+} __rte_packed; /* 16B */
+
+struct zxdh_net_hdr_dl {
+    struct zxdh_type_hdr  type_hdr; /* 4B */
+    struct zxdh_pi_hdr    pi_hdr; /* 32B */
+    struct zxdh_pd_hdr_dl pd_hdr; /* 16B */
+} __rte_packed;
+
+struct zxdh_pd_hdr_ul {
+    uint32_t pkt_flag;
+    uint32_t rss_hash;
+    uint32_t fd;
+    uint32_t striped_vlan_tci;
+    /* ovs */
+    uint8_t tag_idx;
+    uint8_t tag_data;
+    uint16_t src_vfid;
+    uint16_t pkt_type_out;
+    uint16_t pkt_type_in;
+} __rte_packed; /* 24B */
+
+struct zxdh_net_hdr_ul {
+    struct zxdh_type_hdr  type_hdr; /* 4B */
+    struct zxdh_pi_hdr    pi_hdr; /* 32B */
+    struct zxdh_pd_hdr_ul pd_hdr; /* 24B */
+} __rte_packed; /* 60B */
+
+struct zxdh_tx_region {
+    struct zxdh_net_hdr_dl tx_hdr;
+    union {
+        struct zxdh_vring_desc tx_indir[ZXDH_MAX_TX_INDIRECT];
+        struct zxdh_vring_packed_desc tx_packed_indir[ZXDH_MAX_TX_INDIRECT];
+    } __rte_packed;
+};
+
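+/*
+ * Total vring footprint in bytes: for packed rings, the descriptor array
+ * plus the driver event structure, aligned, plus the device event
+ * structure; for split rings, desc + avail + aligned used sections.
+ */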
+static inline size_t vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align)
+{
+    size_t size;
+
+    if (vtpci_packed_queue(hw)) {
+        size = num * sizeof(struct zxdh_vring_packed_desc);
+        size += sizeof(struct zxdh_vring_packed_desc_event);
+        size = RTE_ALIGN_CEIL(size, align);
+        size += sizeof(struct zxdh_vring_packed_desc_event);
+        return size;
+    }
+
+    size = num * sizeof(struct zxdh_vring_desc);
+    size += sizeof(struct zxdh_vring_avail) + (num * sizeof(uint16_t));
+    size = RTE_ALIGN_CEIL(size, align);
+    size += sizeof(struct zxdh_vring_used) + (num * sizeof(struct zxdh_vring_used_elem));
+    return size;
+}
+
+static inline void vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p,
+                        unsigned long align, uint32_t num)
+{
+    vr->num    = num;
+    vr->desc   = (struct zxdh_vring_packed_desc *)p;
+    vr->driver = (struct zxdh_vring_packed_desc_event *)(p +
+                 vr->num * sizeof(struct zxdh_vring_packed_desc));
+    vr->device = (struct zxdh_vring_packed_desc_event *)RTE_ALIGN_CEIL(((uintptr_t)vr->driver +
+                 sizeof(struct zxdh_vring_packed_desc_event)), align);
+}
+
+static inline void vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n)
+{
+    int32_t i = 0;
+
+    for (i = 0; i < n - 1; i++) {
+        vq->vq_packed.ring.desc[i].id = i;
+        vq->vq_descx[i].next = i + 1;
+    }
+    vq->vq_packed.ring.desc[i].id = i;
+    vq->vq_descx[i].next = ZXDH_VQ_RING_DESC_CHAIN_END;
+}
+
+static inline void vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n)
+{
+    int32_t i = 0;
+
+    for (i = 0; i < n; i++) {
+        dp[i].id = (uint16_t)i;
+        dp[i].flags = ZXDH_VRING_DESC_F_WRITE;
+    }
+}
+
+static inline void virtqueue_disable_intr(struct zxdh_virtqueue *vq)
+{
+    if (vq->vq_packed.event_flags_shadow != ZXDH_RING_EVENT_FLAGS_DISABLE) {
+        vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_DISABLE;
+        vq->vq_packed.ring.driver->desc_event_flags = vq->vq_packed.event_flags_shadow;
+    }
+}
+
+struct rte_mbuf *zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq);
+int32_t zxdh_free_queues(struct rte_eth_dev *dev);
+int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.27.0