Add simple RX recv functions (zxdh_recv_single_pkts)<br />for single-segment packet recv.<br />And optimize Rx recv pkts packed ops.<br /> <br />Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn> <br />---<br /> drivers/net/zxdh/zxdh_ethdev.c     |  40 +++++--<br /> drivers/net/zxdh/zxdh_ethdev_ops.c |  24 ++--<br /> drivers/net/zxdh/zxdh_ethdev_ops.h |   4 +<br /> drivers/net/zxdh/zxdh_rxtx.c       | 179 +++++++++++++++++++++++------<br /> drivers/net/zxdh/zxdh_rxtx.h       |  16 +--<br /> 5 files changed, 200 insertions(+), 63 deletions(-)<br /> <br />diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c<br />index 625ce9d74c..7363d4c3d3 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev.c<br />+++ b/drivers/net/zxdh/zxdh_ethdev.c<br />@@ -1269,18 +1269,44 @@ zxdh_dev_close(struct rte_eth_dev *dev)<br />     return ret;<br /> }<br />  <br />-static int32_t<br />-zxdh_set_rxtx_funcs(struct rte_eth_dev *eth_dev)<br />+/*<br />+ * Determine whether the current configuration requires support for scattered<br />+ * receive; return 1 if scattered receive is required and 0 if not.<br />+ */<br />+static int zxdh_scattered_rx(struct rte_eth_dev *eth_dev)<br /> {<br />-    struct zxdh_hw *hw = eth_dev->data->dev_private;<br />+    uint16_t buf_size;<br />  <br />-    if (!zxdh_pci_with_feature(hw, ZXDH_NET_F_MRG_RXBUF)) {<br />-        PMD_DRV_LOG(ERR, "port %u not support rx mergeable", eth_dev->data->port_id);<br />-        return -1;<br />+    if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {<br />+        eth_dev->data->lro = 1;<br />+        return 1;<br />     }<br />+<br />+    if (eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER)<br />+        return 1;<br />+<br />+<br />+    PMD_DRV_LOG(DEBUG, "port %d min_rx_buf_size %d",<br />+        eth_dev->data->port_id, eth_dev->data->min_rx_buf_size);<br />+    buf_size = eth_dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;<br />+    if 
(eth_dev->data->mtu + ZXDH_ETH_OVERHEAD > buf_size)<br />+        return 1;<br />+<br />+    return 0;<br />+}<br />+<br />+static int32_t<br />+zxdh_set_rxtx_funcs(struct rte_eth_dev *eth_dev)<br />+{<br />     eth_dev->tx_pkt_prepare = zxdh_xmit_pkts_prepare;<br />+    eth_dev->data->scattered_rx = zxdh_scattered_rx(eth_dev);<br />+<br />     eth_dev->tx_pkt_burst = &zxdh_xmit_pkts_packed;<br />-    eth_dev->rx_pkt_burst = &zxdh_recv_pkts_packed;<br />+<br />+    if (eth_dev->data->scattered_rx)<br />+        eth_dev->rx_pkt_burst = &zxdh_recv_pkts_packed;<br />+    else<br />+        eth_dev->rx_pkt_burst = &zxdh_recv_single_pkts;<br />  <br />     return 0;<br /> }<br />diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c<br />index 50247116d9..e2c2885add 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev_ops.c<br />+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c<br />@@ -95,10 +95,6 @@ static const struct rte_zxdh_xstats_name_off zxdh_rxq_stat_strings[] = {<br />     {"good_bytes",             offsetof(struct zxdh_virtnet_rx, stats.bytes)},<br />     {"errors",                 offsetof(struct zxdh_virtnet_rx, stats.errors)},<br />     {"idle",                   offsetof(struct zxdh_virtnet_rx, stats.idle)},<br />-    {"full",                   offsetof(struct zxdh_virtnet_rx, stats.full)},<br />-    {"norefill",               offsetof(struct zxdh_virtnet_rx, stats.norefill)},<br />-    {"multicast_packets",      offsetof(struct zxdh_virtnet_rx, stats.multicast)},<br />-    {"broadcast_packets",      offsetof(struct zxdh_virtnet_rx, stats.broadcast)},<br />     {"truncated_err",          offsetof(struct zxdh_virtnet_rx, stats.truncated_err)},<br />     {"offload_cfg_err",        offsetof(struct zxdh_virtnet_rx, stats.offload_cfg_err)},<br />     {"invalid_hdr_len_err",    offsetof(struct zxdh_virtnet_rx, stats.invalid_hdr_len_err)},<br />@@ -117,14 +113,12 @@ static const struct rte_zxdh_xstats_name_off zxdh_txq_stat_strings[] = {<br />     
{"good_packets",           offsetof(struct zxdh_virtnet_tx, stats.packets)},<br />     {"good_bytes",             offsetof(struct zxdh_virtnet_tx, stats.bytes)},<br />     {"errors",                 offsetof(struct zxdh_virtnet_tx, stats.errors)},<br />-    {"idle",                   offsetof(struct zxdh_virtnet_tx, stats.idle)},<br />-    {"norefill",               offsetof(struct zxdh_virtnet_tx, stats.norefill)},<br />-    {"multicast_packets",      offsetof(struct zxdh_virtnet_tx, stats.multicast)},<br />-    {"broadcast_packets",      offsetof(struct zxdh_virtnet_tx, stats.broadcast)},<br />+    {"idle",                 offsetof(struct zxdh_virtnet_tx, stats.idle)},<br />     {"truncated_err",          offsetof(struct zxdh_virtnet_tx, stats.truncated_err)},<br />     {"offload_cfg_err",        offsetof(struct zxdh_virtnet_tx, stats.offload_cfg_err)},<br />     {"invalid_hdr_len_err",    offsetof(struct zxdh_virtnet_tx, stats.invalid_hdr_len_err)},<br />     {"no_segs_err",            offsetof(struct zxdh_virtnet_tx, stats.no_segs_err)},<br />+    {"no_free_tx_desc_err",    offsetof(struct zxdh_virtnet_tx, stats.no_free_tx_desc_err)},<br />     {"undersize_packets",      offsetof(struct zxdh_virtnet_tx, stats.size_bins[0])},<br />     {"size_64_packets",        offsetof(struct zxdh_virtnet_tx, stats.size_bins[1])},<br />     {"size_65_127_packets",    offsetof(struct zxdh_virtnet_tx, stats.size_bins[2])},<br />@@ -2026,6 +2020,20 @@ int zxdh_dev_mtu_set(struct rte_eth_dev *dev, uint16_t new_mtu)<br />     uint16_t vfid = zxdh_vport_to_vfid(hw->vport);<br />     int ret;<br />  <br />+    /* If device is started, refuse mtu that requires the support of<br />+     * scattered packets when this feature has not been enabled before.<br />+     */<br />+    if (dev->data->dev_started && <br />+        ((!dev->data->scattered_rx && <br />+         ((uint32_t)ZXDH_MTU_TO_PKTLEN(new_mtu) > <br />+         (dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))) ||<br />+   
      (dev->data->scattered_rx && <br />+         ((uint32_t)ZXDH_MTU_TO_PKTLEN(new_mtu) <=<br />+         (dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))))) {<br />+        PMD_DRV_LOG(ERR, "Stop port first.");<br />+        return -EINVAL;<br />+    }<br />+<br />     if (hw->is_pf) {<br />         ret = zxdh_get_panel_attr(dev, &panel);<br />         if (ret != 0) {<br />diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.h b/drivers/net/zxdh/zxdh_ethdev_ops.h<br />index 6dfe4be473..c49d79c232 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev_ops.h<br />+++ b/drivers/net/zxdh/zxdh_ethdev_ops.h<br />@@ -40,6 +40,10 @@<br /> #define ZXDH_SPM_SPEED_4X_100G         RTE_BIT32(10)<br /> #define ZXDH_SPM_SPEED_4X_200G         RTE_BIT32(11)<br />  <br />+#define ZXDH_VLAN_TAG_LEN   4<br />+#define ZXDH_ETH_OVERHEAD  (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ZXDH_VLAN_TAG_LEN * 2)<br />+#define ZXDH_MTU_TO_PKTLEN(mtu) ((mtu) + ZXDH_ETH_OVERHEAD)<br />+<br /> struct zxdh_np_stats_data {<br />     uint64_t n_pkts_dropped;<br />     uint64_t n_bytes_dropped;<br />diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c<br />index 93506a4b49..cc266e8659 100644<br />--- a/drivers/net/zxdh/zxdh_rxtx.c<br />+++ b/drivers/net/zxdh/zxdh_rxtx.c<br />@@ -114,6 +114,8 @@<br />         RTE_MBUF_F_TX_SEC_OFFLOAD |     \<br />         RTE_MBUF_F_TX_UDP_SEG)<br />  <br />+#define rxq_get_vq(q) ((q)->vq)<br />+<br /> uint32_t zxdh_outer_l2_type[16] = {<br />     0,<br />     RTE_PTYPE_L2_ETHER,<br />@@ -613,10 +615,12 @@ zxdh_dequeue_burst_rx_packed(struct zxdh_virtqueue *vq,<br />     uint16_t i, used_idx;<br />     uint16_t id;<br />  <br />+    used_idx = vq->vq_used_cons_idx;<br />+    rte_prefetch0(&desc[used_idx]);<br />+<br />     for (i = 0; i < num; i++) {<br />         used_idx = vq->vq_used_cons_idx;<br />-        /**<br />-         * desc_is_used has a load-acquire or rte_io_rmb inside<br />+        /* desc_is_used has a load-acquire or rte_io_rmb inside<br />      
    * and wait for used desc in virtqueue.<br />          */<br />         if (!desc_is_used(&desc[used_idx], vq))<br />@@ -823,17 +827,52 @@ zxdh_rx_update_mbuf(struct zxdh_hw *hw, struct rte_mbuf *m, struct zxdh_net_hdr_<br />     }<br /> }<br />  <br />-static void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m)<br />+static void refill_desc_unwrap(struct zxdh_virtqueue *vq,<br />+        struct rte_mbuf **cookie, uint16_t nb_pkts)<br /> {<br />-    int32_t error = 0;<br />-    /*<br />-     * Requeue the discarded mbuf. This should always be<br />-     * successful since it was just dequeued.<br />-     */<br />-    error = zxdh_enqueue_recv_refill_packed(vq, &m, 1);<br />-    if (unlikely(error)) {<br />-        PMD_RX_LOG(ERR, "cannot enqueue discarded mbuf");<br />-        rte_pktmbuf_free(m);<br />+    struct zxdh_vring_packed_desc *start_dp = vq->vq_packed.ring.desc;<br />+    struct zxdh_vq_desc_extra *dxp;<br />+    uint16_t flags = vq->cached_flags;<br />+    int32_t i;<br />+    uint16_t idx;<br />+<br />+    idx = vq->vq_avail_idx;<br />+    for (i = 0; i < nb_pkts; i++) {<br />+        dxp = &vq->vq_descx[idx];<br />+        dxp->cookie = (void *)cookie[i];<br />+        start_dp[idx].addr = rte_mbuf_iova_get(cookie[i]) + RTE_PKTMBUF_HEADROOM;<br />+        start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM;<br />+        zxdh_queue_store_flags_packed(&start_dp[idx], flags);<br />+        idx++;<br />+    }<br />+    vq->vq_avail_idx += nb_pkts;<br />+    vq->vq_free_cnt = vq->vq_free_cnt - nb_pkts;<br />+}<br />+<br />+static void refill_que_descs(struct zxdh_virtqueue *vq, struct rte_eth_dev *dev)<br />+{<br />+    /* free_cnt may include mrg descs */<br />+    struct rte_mbuf *new_pkts[ZXDH_MBUF_BURST_SZ];<br />+    uint16_t free_cnt = RTE_MIN(ZXDH_MBUF_BURST_SZ, vq->vq_free_cnt);<br />+    struct zxdh_virtnet_rx *rxvq = &vq->rxq;<br />+    uint16_t  unwrap_cnt, left_cnt;<br />+<br />+    if 
(!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {<br />+        left_cnt = free_cnt;<br />+        unwrap_cnt = 0;<br />+        if ((vq->vq_avail_idx + free_cnt) >= vq->vq_nentries) {<br />+            unwrap_cnt = vq->vq_nentries - vq->vq_avail_idx;<br />+            left_cnt = free_cnt - unwrap_cnt;<br />+            refill_desc_unwrap(vq, new_pkts, unwrap_cnt);<br />+            vq->vq_avail_idx = 0;<br />+            vq->cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;<br />+        }<br />+        if (left_cnt)<br />+            refill_desc_unwrap(vq, new_pkts + unwrap_cnt, left_cnt);<br />+<br />+        rte_io_wmb();<br />+    } else {<br />+        dev->data->rx_mbuf_alloc_failed += free_cnt;<br />     }<br /> }<br />  <br />@@ -842,7 +881,7 @@ zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />                 uint16_t nb_pkts)<br /> {<br />     struct zxdh_virtnet_rx *rxvq = rx_queue;<br />-    struct zxdh_virtqueue *vq = rxvq->vq;<br />+    struct zxdh_virtqueue *vq = rxq_get_vq(rxvq);<br />     struct zxdh_hw *hw = vq->hw;<br />     struct rte_mbuf *rxm = NULL;<br />     struct rte_mbuf *prev = NULL;<br />@@ -852,7 +891,6 @@ zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />     uint16_t len = 0;<br />     uint32_t seg_num = 0;<br />     uint32_t seg_res = 0;<br />-    uint32_t error = 0;<br />     uint16_t hdr_size = 0;<br />     uint16_t nb_rx = 0;<br />     uint16_t i;<br />@@ -873,7 +911,8 @@ zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />         rx_pkts[nb_rx] = rxm;<br />         prev = rxm;<br />         len = lens[i];<br />-        header = rte_pktmbuf_mtod(rxm, struct zxdh_net_hdr_ul *);<br />+        header = (struct zxdh_net_hdr_ul *)((char *)<br />+                    rxm->buf_addr + RTE_PKTMBUF_HEADROOM);<br />  <br />         seg_num  = header->type_hdr.num_buffers;<br />  <br />@@ -886,7 +925,7 @@ zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf 
**rx_pkts,<br />             rxvq->stats.invalid_hdr_len_err++;<br />             continue;<br />         }<br />-        rxm->data_off += hdr_size;<br />+        rxm->data_off = RTE_PKTMBUF_HEADROOM + hdr_size;<br />         rxm->nb_segs = seg_num;<br />         rxm->ol_flags = 0;<br />         rcvd_pkt_len = len - hdr_size;<br />@@ -902,18 +941,19 @@ zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />             len = lens[i];<br />             rxm = rcv_pkts[i];<br />             rxm->data_len = len;<br />+            rxm->data_off = RTE_PKTMBUF_HEADROOM;<br />             rcvd_pkt_len += len;<br />             prev->next = rxm;<br />             prev = rxm;<br />             rxm->next = NULL;<br />-            seg_res -= 1;<br />+            seg_res--;<br />         }<br />  <br />         if (!seg_res) {<br />             if (rcvd_pkt_len != rx_pkts[nb_rx]->pkt_len) {<br />                 PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d",<br />                     rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);<br />-                zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);<br />+                rte_pktmbuf_free(rx_pkts[nb_rx]);<br />                 rxvq->stats.errors++;<br />                 rxvq->stats.truncated_err++;<br />                 continue;<br />@@ -942,14 +982,14 @@ zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />             prev->next = rxm;<br />             prev = rxm;<br />             rxm->next = NULL;<br />-            extra_idx += 1;<br />+            extra_idx++;<br />         }<br />         seg_res -= rcv_cnt;<br />         if (!seg_res) {<br />             if (unlikely(rcvd_pkt_len != rx_pkts[nb_rx]->pkt_len)) {<br />                 PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d",<br />                     rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);<br />-                zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);<br />+                rte_pktmbuf_free(rx_pkts[nb_rx]);<br />                 
rxvq->stats.errors++;<br />                 rxvq->stats.truncated_err++;<br />                 continue;<br />@@ -961,26 +1001,91 @@ zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />     rxvq->stats.packets += nb_rx;<br />  <br /> refill:<br />-    /* Allocate new mbuf for the used descriptor */<br />-    if (likely(!zxdh_queue_full(vq))) {<br />-        struct rte_mbuf *new_pkts[ZXDH_MBUF_BURST_SZ];<br />-        /* free_cnt may include mrg descs */<br />-        uint16_t free_cnt = RTE_MIN(vq->vq_free_cnt, ZXDH_MBUF_BURST_SZ);<br />-<br />-        if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {<br />-            error = zxdh_enqueue_recv_refill_packed(vq, new_pkts, free_cnt);<br />-            if (unlikely(error)) {<br />-                for (i = 0; i < free_cnt; i++)<br />-                    rte_pktmbuf_free(new_pkts[i]);<br />-            }<br />+    if (vq->vq_free_cnt > 0) {<br />+        struct rte_eth_dev *dev = hw->eth_dev;<br />+        refill_que_descs(vq, dev);<br />+        zxdh_queue_notify(vq);<br />+    }<br />  <br />-            if (unlikely(zxdh_queue_kick_prepare_packed(vq)))<br />-                zxdh_queue_notify(vq);<br />-        } else {<br />-            struct rte_eth_dev *dev = hw->eth_dev;<br />+    return nb_rx;<br />+}<br />+<br />+static inline int zxdh_init_mbuf(struct rte_mbuf *rxm, uint16_t len,<br />+        struct zxdh_hw *hw, struct zxdh_virtnet_rx *rxvq)<br />+{<br />+    uint16_t hdr_size = 0;<br />+    struct zxdh_net_hdr_ul *header;<br />+<br />+    header = (struct zxdh_net_hdr_ul *)((char *)<br />+                    rxm->buf_addr + RTE_PKTMBUF_HEADROOM);<br />+    rxm->ol_flags = 0;<br />+    rxm->vlan_tci = 0;<br />+    rxm->vlan_tci_outer = 0;<br />+<br />+    hdr_size = header->type_hdr.pd_len << 1;<br />+    if (unlikely(header->type_hdr.num_buffers != 1)) {<br />+        PMD_RX_LOG(DEBUG, "hdr_size:%u nb_segs %d is invalid",<br />+            hdr_size, 
header->type_hdr.num_buffers);<br />+        rte_pktmbuf_free(rxm);<br />+        rxvq->stats.invalid_hdr_len_err++;<br />+        return -1;<br />+    }<br />+    zxdh_rx_update_mbuf(hw, rxm, header);<br />+<br />+    rxm->nb_segs = 1;<br />+    rxm->data_off = RTE_PKTMBUF_HEADROOM + hdr_size;<br />+    rxm->data_len = len - hdr_size;<br />+    rxm->port = hw->port_id;<br />+<br />+    if (rxm->data_len != rxm->pkt_len) {<br />+        PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d  bufaddr %p.",<br />+                    rxm->data_len, rxm->pkt_len, rxm->buf_addr);<br />+        rte_pktmbuf_free(rxm);<br />+        rxvq->stats.truncated_err++;<br />+        rxvq->stats.errors++;<br />+        return -1;<br />+    }<br />+    return 0;<br />+}<br />+<br />+uint16_t zxdh_recv_single_pkts(void *rx_queue, struct rte_mbuf **rcv_pkts, uint16_t nb_pkts)<br />+{<br />+    struct zxdh_virtnet_rx *rxvq = rx_queue;<br />+    struct zxdh_virtqueue *vq = rxq_get_vq(rxvq);<br />+    struct zxdh_hw *hw = vq->hw;<br />+    struct rte_mbuf *rxm;<br />+    uint32_t lens[ZXDH_MBUF_BURST_SZ];<br />+    uint16_t len = 0;<br />+    uint16_t nb_rx = 0;<br />+    uint16_t num;<br />+    uint16_t i = 0;<br />  <br />-            dev->data->rx_mbuf_alloc_failed += free_cnt;<br />+    num = nb_pkts;<br />+    if (unlikely(num > ZXDH_MBUF_BURST_SZ))<br />+        num = ZXDH_MBUF_BURST_SZ;<br />+    num = zxdh_dequeue_burst_rx_packed(vq, rcv_pkts, lens, num);<br />+    if (num == 0) {<br />+        rxvq->stats.idle++;<br />+        goto refill;<br />+    }<br />+<br />+    for (i = 0; i < num; i++) {<br />+        rxm = rcv_pkts[i];<br />+        len = lens[i];<br />+        if (unlikely(zxdh_init_mbuf(rxm, len, hw, &vq->rxq) < 0)) {<br />+            /* rxm was already freed inside zxdh_init_mbuf() */<br />+            continue;<br />+        }<br />+        zxdh_update_packet_stats(&rxvq->stats, rxm);<br />+        nb_rx++;<br />+    }<br />+    rxvq->stats.packets += nb_rx;<br />+<br />+refill:<br />+    if 
(vq->vq_free_cnt > 0) {<br />+        struct rte_eth_dev *dev = hw->eth_dev;<br />+        refill_que_descs(vq, dev);<br />+        zxdh_queue_notify(vq);<br />     }<br />     return nb_rx;<br /> }<br />diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h<br />index 424048607e..dba9567414 100644<br />--- a/drivers/net/zxdh/zxdh_rxtx.h<br />+++ b/drivers/net/zxdh/zxdh_rxtx.h<br />@@ -36,29 +36,22 @@ struct zxdh_virtnet_stats {<br />     uint64_t bytes;<br />     uint64_t errors;<br />     uint64_t idle;<br />-    uint64_t full;<br />-    uint64_t norefill;<br />-    uint64_t multicast;<br />-    uint64_t broadcast;<br />     uint64_t truncated_err;<br />     uint64_t offload_cfg_err;<br />     uint64_t invalid_hdr_len_err;<br />     uint64_t no_segs_err;<br />+    uint64_t no_free_tx_desc_err;<br />     uint64_t size_bins[8];<br /> };<br />  <br /> struct __rte_cache_aligned zxdh_virtnet_rx {<br />     struct zxdh_virtqueue         *vq;<br />-<br />-    uint64_t                  mbuf_initializer; /* value to init mbufs. */<br />     struct rte_mempool       *mpool;            /* mempool for mbuf allocation */<br />-    uint16_t                  queue_id;         /* DPDK queue index. */<br />-    uint16_t                  port_id;          /* Device port identifier. */<br />     struct zxdh_virtnet_stats      stats;<br />     const struct rte_memzone *mz;               /* mem zone to populate RX ring. */<br />-<br />-    /* dummy mbuf, for wraparound when processing RX ring. */<br />-    struct rte_mbuf           fake_mbuf;<br />+    uint64_t offloads;<br />+    uint16_t                  queue_id;         /* DPDK queue index. */<br />+    uint16_t                  port_id;          /* Device port identifier. 
*/<br /> };<br />  <br /> struct __rte_cache_aligned zxdh_virtnet_tx {<br />@@ -75,5 +68,6 @@ struct __rte_cache_aligned zxdh_virtnet_tx {<br /> uint16_t zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);<br /> uint16_t zxdh_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);<br /> uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);<br />+uint16_t zxdh_recv_single_pkts(void *rx_queue, struct rte_mbuf **rcv_pkts, uint16_t nb_pkts);<br />  <br /> #endif  /* ZXDH_RXTX_H */<br />--  <br />2.27.0<br />