Reorganize structure fields for better cache locality.
Remove RX software ring (sw_ring) to reduce memory allocation and
copy.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_ethdev.c |  33 +--------
 drivers/net/zxdh/zxdh_pci.c    |   2 +-
 drivers/net/zxdh/zxdh_queue.c  |  11 ++-
 drivers/net/zxdh/zxdh_queue.h  | 120 ++++++++++++++++-----------------
 drivers/net/zxdh/zxdh_rxtx.c   |  22 +++---
 5 files changed, 77 insertions(+), 111 deletions(-)

diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index aeb01f4652..08119e28c7 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -644,7 +644,6 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
     struct zxdh_virtnet_tx *txvq = NULL;
     struct zxdh_virtqueue *vq = NULL;
     size_t sz_hdr_mz = 0;
-    void *sw_ring = NULL;
     int32_t queue_type = zxdh_get_queue_type(vtpci_logic_qidx);
     int32_t numa_node = dev->device->numa_node;
     uint16_t vtpci_phy_qidx = 0;
@@ -692,11 +691,10 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
     vq->vq_queue_index = vtpci_phy_qidx;
     vq->vq_nentries = vq_size;
 
-    vq->vq_packed.used_wrap_counter = 1;
-    vq->vq_packed.cached_flags = ZXDH_VRING_PACKED_DESC_F_AVAIL;
-    vq->vq_packed.event_flags_shadow = 0;
+    vq->used_wrap_counter = 1;
+    vq->cached_flags = ZXDH_VRING_PACKED_DESC_F_AVAIL;
     if (queue_type == ZXDH_VTNET_RQ)
-        vq->vq_packed.cached_flags |= ZXDH_VRING_DESC_F_WRITE;
+        vq->cached_flags |= ZXDH_VRING_DESC_F_WRITE;
 
     /*
      * Reserve a memzone for vring elements
@@ -741,16 +739,6 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
     }
 
     if (queue_type == ZXDH_VTNET_RQ) {
-        size_t sz_sw = (ZXDH_MBUF_BURST_SZ + vq_size) * sizeof(vq->sw_ring[0]);
-
-        sw_ring = rte_zmalloc_socket("sw_ring", sz_sw, RTE_CACHE_LINE_SIZE, numa_node);
-        if (!sw_ring) {
-            PMD_DRV_LOG(ERR, "can not allocate RX soft ring");
-            ret = -ENOMEM;
-            goto fail_q_alloc;
-        }
-
-        vq->sw_ring = sw_ring;
         rxvq = &vq->rxq;
         rxvq->vq = vq;
         rxvq->port_id = dev->data->port_id;
@@ -764,23 +752,9 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
         txvq->zxdh_net_hdr_mem = hdr_mz->iova;
     }
 
-    vq->offset = offsetof(struct rte_mbuf, buf_iova);
     if (queue_type == ZXDH_VTNET_TQ) {
         struct zxdh_tx_region *txr = hdr_mz->addr;
-        uint32_t i;
-
         memset(txr, 0, vq_size * sizeof(*txr));
-        for (i = 0; i < vq_size; i++) {
-            /* first indirect descriptor is always the tx header */
-            struct zxdh_vring_packed_desc *start_dp = txr[i].tx_packed_indir;
-
-            zxdh_vring_desc_init_indirect_packed(start_dp,
-                    RTE_DIM(txr[i].tx_packed_indir));
-            start_dp->addr = txvq->zxdh_net_hdr_mem + i * sizeof(*txr) +
-                    offsetof(struct zxdh_tx_region, tx_hdr);
-            /* length will be updated to actual pi hdr size when xmit pkt */
-            start_dp->len = 0;
-        }
     }
     if (ZXDH_VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
         PMD_DRV_LOG(ERR, "setup_queue failed");
@@ -788,7 +762,6 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
     }
     return 0;
 fail_q_alloc:
-    rte_free(sw_ring);
    rte_memzone_free(hdr_mz);
    rte_memzone_free(mz);
    rte_free(vq);
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
index 4ba31905fc..0bc27ed111 100644
--- a/drivers/net/zxdh/zxdh_pci.c
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -231,7 +231,7 @@ zxdh_notify_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq)
 
     notify_data = ((uint32_t)vq->vq_avail_idx << 16) | vq->vq_queue_index;
     if (zxdh_pci_with_feature(hw, ZXDH_F_RING_PACKED) &&
-            (vq->vq_packed.cached_flags & ZXDH_VRING_PACKED_DESC_F_AVAIL))
+            (vq->cached_flags & ZXDH_VRING_PACKED_DESC_F_AVAIL))
         notify_data |= RTE_BIT32(31);
 
     PMD_DRV_LOG(DEBUG, "queue:%d notify_data 0x%x notify_addr 0x%p",
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
index 7162593b16..4668cb5d13 100644
--- a/drivers/net/zxdh/zxdh_queue.c
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -407,7 +407,7 @@ int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq,
 {
     struct zxdh_vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
     struct zxdh_vq_desc_extra *dxp;
-    uint16_t flags = vq->vq_packed.cached_flags;
+    uint16_t flags = vq->cached_flags;
     int32_t i;
     uint16_t idx;
 
@@ -415,7 +415,6 @@ int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq,
         idx = vq->vq_avail_idx;
         dxp = &vq->vq_descx[idx];
         dxp->cookie = (void *)cookie[i];
-        dxp->ndescs = 1;
         /* rx pkt fill in data_off */
         start_dp[idx].addr = rte_mbuf_iova_get(cookie[i]) + RTE_PKTMBUF_HEADROOM;
         start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM;
@@ -423,8 +422,8 @@ int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq,
         zxdh_queue_store_flags_packed(&start_dp[idx], flags);
         if (++vq->vq_avail_idx >= vq->vq_nentries) {
             vq->vq_avail_idx -= vq->vq_nentries;
-            vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
-            flags = vq->vq_packed.cached_flags;
+            vq->cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+            flags = vq->cached_flags;
         }
     }
     vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
@@ -467,7 +466,7 @@ void zxdh_queue_rxvq_flush(struct zxdh_virtqueue *vq)
     int32_t cnt = 0;
 
     i = vq->vq_used_cons_idx;
-    while (zxdh_desc_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
+    while (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
         dxp = &vq->vq_descx[descs[i].id];
         if (dxp->cookie != NULL) {
             rte_pktmbuf_free(dxp->cookie);
@@ -477,7 +476,7 @@ void zxdh_queue_rxvq_flush(struct zxdh_virtqueue *vq)
         vq->vq_used_cons_idx++;
         if (vq->vq_used_cons_idx >= vq->vq_nentries) {
             vq->vq_used_cons_idx -= vq->vq_nentries;
-            vq->vq_packed.used_wrap_counter ^= 1;
+            vq->used_wrap_counter ^= 1;
         }
         i = vq->vq_used_cons_idx;
     }
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 1a0c8a0d90..b079272162 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -9,6 +9,7 @@
 
 #include <rte_common.h>
 #include <rte_atomic.h>
+#include <rte_io.h>
 
 #include "zxdh_ethdev.h"
 #include "zxdh_rxtx.h"
@@ -117,7 +118,6 @@ struct zxdh_vring_packed_desc_event {
 };
 
 struct zxdh_vring_packed {
-    uint32_t num;
     struct zxdh_vring_packed_desc *desc;
     struct zxdh_vring_packed_desc_event *driver;
     struct zxdh_vring_packed_desc_event *device;
@@ -129,50 +129,59 @@ struct zxdh_vq_desc_extra {
     uint16_t next;
 };
 
+struct zxdh_vring {
+    uint32_t num;
+    struct zxdh_vring_desc  *desc;
+    struct zxdh_vring_avail *avail;
+    struct zxdh_vring_used  *used;
+};
+
 struct zxdh_virtqueue {
+    union {
+        struct {
+            struct zxdh_vring ring; /**< vring keeping desc, used and avail */
+        } vq_split;
+        struct __rte_packed_begin {
+            struct zxdh_vring_packed ring;
+        } __rte_packed_end vq_packed;
+    };
     struct zxdh_hw  *hw; /* < zxdh_hw structure pointer. */
 
-    struct {
-        /* vring keeping descs and events */
-        struct zxdh_vring_packed ring;
-        uint8_t used_wrap_counter;
-        uint8_t rsv;
-        uint16_t cached_flags; /* < cached flags for descs */
-        uint16_t event_flags_shadow;
-        uint16_t rsv1;
-    } vq_packed;
-
-    uint16_t vq_used_cons_idx; /* < last consumed descriptor */
-    uint16_t vq_nentries;  /* < vring desc numbers */
-    uint16_t vq_free_cnt;  /* < num of desc available */
-    uint16_t vq_avail_idx; /* < sync until needed */
-    uint16_t vq_free_thresh; /* < free threshold */
-    uint16_t rsv2;
-
-    void *vq_ring_virt_mem;  /* < linear address of vring */
-    uint32_t vq_ring_size;
+    uint16_t vq_used_cons_idx; /**< last consumed descriptor */
+    uint16_t vq_avail_idx; /**< sync until needed */
+    uint16_t vq_nentries;  /**< vring desc numbers */
+    uint16_t vq_free_cnt;  /**< num of desc available */
+
+    uint16_t cached_flags; /**< cached flags for descs */
+    uint8_t used_wrap_counter;
+    uint8_t rsv;
+    uint16_t vq_free_thresh; /**< free threshold */
+    uint16_t next_qidx;
+
+    void *notify_addr;
 
     union {
         struct zxdh_virtnet_rx rxq;
         struct zxdh_virtnet_tx txq;
    };
 
-    /*
-     * physical address of vring, or virtual address
-     */
-    rte_iova_t vq_ring_mem;
+    uint16_t vq_queue_index; /* PACKED: phy_idx, SPLIT: logic_idx */
+    uint16_t event_flags_shadow;
+    uint32_t vq_ring_size;
 
-    /*
+    /**
      * Head of the free chain in the descriptor table. If
      * there are no free descriptors, this will be set to
      * VQ_RING_DESC_CHAIN_END.
-     */
+     **/
     uint16_t  vq_desc_head_idx;
     uint16_t  vq_desc_tail_idx;
-    uint16_t  vq_queue_index;   /* < PCI queue index */
-    uint16_t  offset; /* < relative offset to obtain addr in mbuf */
-    uint16_t *notify_addr;
-    struct rte_mbuf **sw_ring;  /* < RX software ring. */
+    uint32_t rsv_8B;
+
+    void *vq_ring_virt_mem;  /**< linear address of vring*/
+    /* physical address of vring, or virtual address for virtio_user. */
+    rte_iova_t vq_ring_mem;
+
     struct zxdh_vq_desc_extra vq_descx[];
 };
 
@@ -296,10 +305,9 @@ static inline void
 zxdh_vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p,
         unsigned long align, uint32_t num)
 {
-    vr->num    = num;
     vr->desc   = (struct zxdh_vring_packed_desc *)p;
     vr->driver = (struct zxdh_vring_packed_desc_event *)(p +
-                 vr->num * sizeof(struct zxdh_vring_packed_desc));
+                 num * sizeof(struct zxdh_vring_packed_desc));
     vr->device = (struct zxdh_vring_packed_desc_event *)RTE_ALIGN_CEIL(((uintptr_t)vr->driver +
                  sizeof(struct zxdh_vring_packed_desc_event)), align);
 }
@@ -331,30 +339,21 @@ zxdh_vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t
 static inline void
 zxdh_queue_disable_intr(struct zxdh_virtqueue *vq)
 {
-    if (vq->vq_packed.event_flags_shadow != ZXDH_RING_EVENT_FLAGS_DISABLE) {
-        vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_DISABLE;
-        vq->vq_packed.ring.driver->desc_event_flags = vq->vq_packed.event_flags_shadow;
+    if (vq->event_flags_shadow != ZXDH_RING_EVENT_FLAGS_DISABLE) {
+        vq->event_flags_shadow = ZXDH_RING_EVENT_FLAGS_DISABLE;
+        vq->vq_packed.ring.driver->desc_event_flags = vq->event_flags_shadow;
     }
 }
 
 static inline void
 zxdh_queue_enable_intr(struct zxdh_virtqueue *vq)
 {
-    if (vq->vq_packed.event_flags_shadow == ZXDH_RING_EVENT_FLAGS_DISABLE) {
-        vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_DISABLE;
-        vq->vq_packed.ring.driver->desc_event_flags = vq->vq_packed.event_flags_shadow;
+    if (vq->event_flags_shadow != ZXDH_RING_EVENT_FLAGS_ENABLE) {
+        vq->event_flags_shadow = ZXDH_RING_EVENT_FLAGS_ENABLE;
+        vq->vq_packed.ring.driver->desc_event_flags = vq->event_flags_shadow;
     }
 }
 
-static inline void
-zxdh_mb(uint8_t weak_barriers)
-{
-    if (weak_barriers)
-        rte_atomic_thread_fence(rte_memory_order_seq_cst);
-    else
-        rte_mb();
-}
-
 static inline
 int32_t desc_is_used(struct zxdh_vring_packed_desc *desc, struct zxdh_virtqueue *vq)
 {
@@ -365,7 +364,7 @@ int32_t desc_is_used(struct zxdh_vring_packed_desc *desc, struct zxdh_virtqueue
     rte_io_rmb();
     used = !!(flags & ZXDH_VRING_PACKED_DESC_F_USED);
     avail = !!(flags & ZXDH_VRING_PACKED_DESC_F_AVAIL);
-    return avail == used && used == vq->vq_packed.used_wrap_counter;
+    return avail == used && used == vq->used_wrap_counter;
 }
 
 static inline int32_t
@@ -381,22 +380,17 @@ zxdh_queue_store_flags_packed(struct zxdh_vring_packed_desc *dp, uint16_t flags)
     dp->flags = flags;
 }
 
-static inline int32_t
-zxdh_desc_used(struct zxdh_vring_packed_desc *desc, struct zxdh_virtqueue *vq)
-{
-    uint16_t flags;
-    uint16_t used, avail;
-
-    flags = desc->flags;
-    rte_io_rmb();
-    used = !!(flags & ZXDH_VRING_PACKED_DESC_F_USED);
-    avail = !!(flags & ZXDH_VRING_PACKED_DESC_F_AVAIL);
-    return avail == used && used == vq->vq_packed.used_wrap_counter;
-}
-
 static inline void zxdh_queue_notify(struct zxdh_virtqueue *vq)
 {
-    ZXDH_VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+    /* Bit[0:15]: vq queue index
+     * Bit[16:30]: avail index
+     * Bit[31]: avail wrap counter
+     */
+    uint32_t notify_data = ((uint32_t)(!!(vq->cached_flags &
+        ZXDH_VRING_PACKED_DESC_F_AVAIL)) << 31) |
+        ((uint32_t)vq->vq_avail_idx << 16) |
+        vq->vq_queue_index;
+    rte_write32(notify_data, vq->notify_addr);
 }
 
 static inline int32_t
@@ -404,7 +398,7 @@ zxdh_queue_kick_prepare_packed(struct zxdh_virtqueue *vq)
 {
     uint16_t flags = 0;
 
-    zxdh_mb(1);
+    rte_mb();
     flags = vq->vq_packed.ring.device->desc_event_flags;
 
     return (flags != ZXDH_RING_EVENT_FLAGS_DISABLE);
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
index db86922aea..93506a4b49 100644
--- a/drivers/net/zxdh/zxdh_rxtx.c
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -216,7 +216,7 @@ zxdh_xmit_cleanup_inorder_packed(struct zxdh_virtqueue *vq, int32_t num)
     /* desc_is_used has a load-acquire or rte_io_rmb inside
      * and wait for used desc in virtqueue.
      */
-    while (num > 0 && zxdh_desc_used(&desc[used_idx], vq)) {
+    while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
         id = desc[used_idx].id;
         do {
             curr_id = used_idx;
@@ -226,7 +226,7 @@ zxdh_xmit_cleanup_inorder_packed(struct zxdh_virtqueue *vq, int32_t num)
             num -= dxp->ndescs;
             if (used_idx >= size) {
                 used_idx -= size;
-                vq->vq_packed.used_wrap_counter ^= 1;
+                vq->used_wrap_counter ^= 1;
             }
             if (dxp->cookie != NULL) {
                 rte_pktmbuf_free(dxp->cookie);
@@ -340,7 +340,7 @@ zxdh_enqueue_xmit_packed_fast(struct zxdh_virtnet_tx *txvq,
     struct zxdh_virtqueue *vq = txvq->vq;
     uint16_t id = vq->vq_avail_idx;
     struct zxdh_vq_desc_extra *dxp = &vq->vq_descx[id];
-    uint16_t flags = vq->vq_packed.cached_flags;
+    uint16_t flags = vq->cached_flags;
     struct zxdh_net_hdr_dl *hdr = NULL;
     uint8_t hdr_len = vq->hw->dl_net_hdr_len;
     struct zxdh_vring_packed_desc *dp = &vq->vq_packed.ring.desc[id];
@@ -355,7 +355,7 @@ zxdh_enqueue_xmit_packed_fast(struct zxdh_virtnet_tx *txvq,
     dp->id   = id;
     if (++vq->vq_avail_idx >= vq->vq_nentries) {
         vq->vq_avail_idx -= vq->vq_nentries;
-        vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+        vq->cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
     }
     vq->vq_free_cnt--;
     zxdh_queue_store_flags_packed(dp, flags);
@@ -381,7 +381,7 @@ zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq,
 
     dxp->ndescs = needed;
     dxp->cookie = cookie;
-    head_flags |= vq->vq_packed.cached_flags;
+    head_flags |= vq->cached_flags;
 
     start_dp[idx].addr = txvq->zxdh_net_hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
     start_dp[idx].len  = hdr_len;
@@ -392,7 +392,7 @@ zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq,
     idx++;
     if (idx >= vq->vq_nentries) {
         idx -= vq->vq_nentries;
-        vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+        vq->cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
     }
 
     zxdh_xmit_fill_net_hdr(vq, cookie, hdr);
@@ -404,14 +404,14 @@ zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq,
         if (likely(idx != head_idx)) {
             uint16_t flags = cookie->next ? ZXDH_VRING_DESC_F_NEXT : 0;
 
-            flags |= vq->vq_packed.cached_flags;
+            flags |= vq->cached_flags;
             start_dp[idx].flags = flags;
         }
 
         idx++;
         if (idx >= vq->vq_nentries) {
             idx -= vq->vq_nentries;
-            vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+            vq->cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
         }
     } while ((cookie = cookie->next) != NULL);
 
@@ -480,7 +480,7 @@ zxdh_xmit_flush(struct zxdh_virtqueue *vq)
             free_cnt += dxp->ndescs;
             if (used_idx >= size) {
                 used_idx -= size;
-                vq->vq_packed.used_wrap_counter ^= 1;
+                vq->used_wrap_counter ^= 1;
             }
             if (dxp->cookie != NULL) {
                 rte_pktmbuf_free(dxp->cookie);
@@ -619,7 +619,7 @@ zxdh_dequeue_burst_rx_packed(struct zxdh_virtqueue *vq,
          * desc_is_used has a load-acquire or rte_io_rmb inside
          * and wait for used desc in virtqueue.
          */
-        if (!zxdh_desc_used(&desc[used_idx], vq))
+        if (!desc_is_used(&desc[used_idx], vq))
             return i;
         len[i] = desc[used_idx].len;
         id = desc[used_idx].id;
@@ -637,7 +637,7 @@ zxdh_dequeue_burst_rx_packed(struct zxdh_virtqueue *vq,
         vq->vq_used_cons_idx++;
         if (vq->vq_used_cons_idx >= vq->vq_nentries) {
             vq->vq_used_cons_idx -= vq->vq_nentries;
-            vq->vq_packed.used_wrap_counter ^= 1;
+            vq->used_wrap_counter ^= 1;
         }
     }
     return i;
-- 
2.27.0