update rx/tx process to latest version.<br /> <br />Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn> <br />---<br /> drivers/net/zxdh/zxdh_common.c |  19 +<br /> drivers/net/zxdh/zxdh_common.h |   3 +<br /> drivers/net/zxdh/zxdh_ethdev.c |  45 ++-<br /> drivers/net/zxdh/zxdh_ethdev.h |  15 +-<br /> drivers/net/zxdh/zxdh_queue.c  |   9 +-<br /> drivers/net/zxdh/zxdh_queue.h  | 118 +++---<br /> drivers/net/zxdh/zxdh_rxtx.c   | 696 +++++++++++++++++++--------------<br /> drivers/net/zxdh/zxdh_rxtx.h   |  27 ++<br /> 8 files changed, 567 insertions(+), 365 deletions(-)<br /> <br />diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c<br />index 0c9696e7ce..3d67fcc7dd 100644<br />--- a/drivers/net/zxdh/zxdh_common.c<br />+++ b/drivers/net/zxdh/zxdh_common.c<br />@@ -13,6 +13,7 @@<br /> #include "zxdh_logs.h" <br /> #include "zxdh_msg.h" <br /> #include "zxdh_common.h" <br />+#include "zxdh_pci.h" <br />  <br /> #define ZXDH_MSG_RSP_SIZE_MAX         512<br />  <br />@@ -427,3 +428,21 @@ zxdh_datach_set(struct rte_eth_dev *dev)<br />  <br />     return ret;<br /> }<br />+<br />+bool<br />+zxdh_rx_offload_enabled(struct zxdh_hw *hw)<br />+{<br />+    return zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||<br />+           zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||<br />+           zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6) ||<br />+           (hw->vlan_offload_cfg.vlan_strip == 1);<br />+}<br />+<br />+bool<br />+zxdh_tx_offload_enabled(struct zxdh_hw *hw)<br />+{<br />+    return zxdh_pci_with_feature(hw, ZXDH_NET_F_CSUM) ||<br />+           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) ||<br />+           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) ||<br />+           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_UFO);<br />+}<br />diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h<br />index 826f1fb95d..d78a822ebf 100644<br />--- a/drivers/net/zxdh/zxdh_common.h<br />+++ 
b/drivers/net/zxdh/zxdh_common.h<br />@@ -31,4 +31,7 @@ uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg);<br /> void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val);<br /> int32_t zxdh_datach_set(struct rte_eth_dev *dev);<br />  <br />+bool zxdh_rx_offload_enabled(struct zxdh_hw *hw);<br />+bool zxdh_tx_offload_enabled(struct zxdh_hw *hw);<br />+<br /> #endif /* ZXDH_COMMON_H */<br />diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c<br />index c76818d015..255d4b5b79 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev.c<br />+++ b/drivers/net/zxdh/zxdh_ethdev.c<br />@@ -23,6 +23,7 @@ struct zxdh_shared_data *zxdh_shared_data;<br /> const char *ZXDH_PMD_SHARED_DATA_MZ = "zxdh_pmd_shared_data";<br /> rte_spinlock_t zxdh_shared_data_lock = RTE_SPINLOCK_INITIALIZER;<br /> struct zxdh_dev_shared_data g_dev_sd[ZXDH_SLOT_MAX];<br />+struct zxdh_net_hdr_dl g_net_hdr_dl[RTE_MAX_ETHPORTS];<br />  <br /> #define ZXDH_INVALID_DTBQUE      0xFFFF<br /> #define ZXDH_INVALID_SLOT_IDX    0xFFFF<br />@@ -405,6 +406,28 @@ zxdh_configure_intr(struct rte_eth_dev *dev)<br />     return ret;<br /> }<br />  <br />+static void<br />+zxdh_update_net_hdr_dl(struct zxdh_hw *hw)<br />+{<br />+    struct zxdh_net_hdr_dl *net_hdr_dl = &g_net_hdr_dl[hw->port_id];<br />+    memset(net_hdr_dl, 0, ZXDH_DL_NET_HDR_SIZE);<br />+<br />+    if (zxdh_tx_offload_enabled(hw)) {<br />+        net_hdr_dl->type_hdr.port = ZXDH_PORT_DTP;<br />+        net_hdr_dl->type_hdr.pd_len = ZXDH_DL_NET_HDR_SIZE >> 1;<br />+<br />+        net_hdr_dl->pipd_hdr_dl.pi_hdr.pi_len = (ZXDH_PI_HDR_SIZE >> 4) - 1;<br />+        net_hdr_dl->pipd_hdr_dl.pi_hdr.pkt_flag_hi8 = ZXDH_PI_FLAG | ZXDH_PI_TYPE_PI;<br />+        net_hdr_dl->pipd_hdr_dl.pi_hdr.pkt_type = ZXDH_PKT_FORM_CPU;<br />+        hw->dl_net_hdr_len = ZXDH_DL_NET_HDR_SIZE;<br />+<br />+    } else {<br />+        net_hdr_dl->type_hdr.port = ZXDH_PORT_NP;<br />+        
net_hdr_dl->type_hdr.pd_len = ZXDH_DL_NET_HDR_NOPI_SIZE >> 1;<br />+        hw->dl_net_hdr_len = ZXDH_DL_NET_HDR_NOPI_SIZE;<br />+    }<br />+}<br />+<br /> static int32_t<br /> zxdh_features_update(struct zxdh_hw *hw,<br />         const struct rte_eth_rxmode *rxmode,<br />@@ -451,23 +474,6 @@ zxdh_features_update(struct zxdh_hw *hw,<br />     return 0;<br /> }<br />  <br />-static bool<br />-zxdh_rx_offload_enabled(struct zxdh_hw *hw)<br />-{<br />-    return zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||<br />-           zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||<br />-           zxdh_pci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6);<br />-}<br />-<br />-static bool<br />-zxdh_tx_offload_enabled(struct zxdh_hw *hw)<br />-{<br />-    return zxdh_pci_with_feature(hw, ZXDH_NET_F_CSUM) ||<br />-           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) ||<br />-           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) ||<br />-           zxdh_pci_with_feature(hw, ZXDH_NET_F_HOST_UFO);<br />-}<br />-<br /> static void<br /> zxdh_dev_free_mbufs(struct rte_eth_dev *dev)<br /> {<br />@@ -892,6 +898,7 @@ zxdh_dev_configure(struct rte_eth_dev *dev)<br />     const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;<br />     const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;<br />     struct zxdh_hw *hw = dev->data->dev_private;<br />+    uint64_t rx_offloads = rxmode->offloads;<br />     int32_t  ret = 0;<br />  <br />     if (dev->data->nb_rx_queues > hw->max_queue_pairs ||<br />@@ -932,6 +939,9 @@ zxdh_dev_configure(struct rte_eth_dev *dev)<br />         }<br />     }<br />  <br />+    if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)<br />+        hw->vlan_offload_cfg.vlan_strip = 1;<br />+<br />     hw->has_tx_offload = zxdh_tx_offload_enabled(hw);<br />     hw->has_rx_offload = zxdh_rx_offload_enabled(hw);<br />  <br />@@ -982,6 +992,7 @@ zxdh_dev_configure(struct rte_eth_dev *dev)<br />  <br /> end:<br />     
zxdh_dev_conf_offload(dev);<br />+    zxdh_update_net_hdr_dl(hw);<br />     return ret;<br /> }<br />  <br />diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h<br />index c42f638c8d..7fe561ae24 100644<br />--- a/drivers/net/zxdh/zxdh_ethdev.h<br />+++ b/drivers/net/zxdh/zxdh_ethdev.h<br />@@ -58,6 +58,14 @@ struct zxdh_chnl_context {<br />     uint16_t ph_chno;<br /> };<br />  <br />+struct zxdh_vlan_offload_cfg {<br />+    uint8_t vlan_strip:1;<br />+    uint8_t vlan_filter:1;<br />+    uint8_t vlan_extend:1;<br />+    uint8_t qinq_strip:1;<br />+    uint8_t resv:4;<br />+};<br />+<br /> struct zxdh_hw {<br />     struct rte_eth_dev *eth_dev;<br />     struct zxdh_pci_common_cfg *common_cfg;<br />@@ -89,11 +97,10 @@ struct zxdh_hw {<br />     uint16_t *notify_base;<br />     uint8_t *isr;<br />  <br />-    uint8_t weak_barriers;<br />     uint8_t intr_enabled;<br />     uint8_t mac_addr[RTE_ETHER_ADDR_LEN];<br />-<br />     uint8_t use_msix;<br />+<br />     uint8_t duplex;<br />     uint8_t is_pf;<br />     uint8_t msg_chan_init;<br />@@ -112,7 +119,9 @@ struct zxdh_hw {<br />     uint8_t que_set_flag;<br />     uint16_t queue_pool_count;<br />     uint16_t queue_pool_start;<br />-    uint8_t rsv[3];<br />+    struct zxdh_vlan_offload_cfg vlan_offload_cfg;<br />+    uint8_t dl_net_hdr_len;<br />+    uint8_t rsv[2];<br /> };<br />  <br /> struct zxdh_dtb_shared_data {<br />diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c<br />index 3e3bac2efc..d92d3bcab9 100644<br />--- a/drivers/net/zxdh/zxdh_queue.c<br />+++ b/drivers/net/zxdh/zxdh_queue.c<br />@@ -291,12 +291,10 @@ zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev,<br /> int32_t<br /> zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)<br /> {<br />-    struct zxdh_hw *hw = dev->data->dev_private;<br />     struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[queue_id];<br />     struct zxdh_virtqueue *vq = rxvq->vq;<br />  <br />     
zxdh_queue_enable_intr(vq);<br />-    zxdh_mb(hw->weak_barriers);<br />     return 0;<br /> }<br />  <br />@@ -314,7 +312,6 @@ int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq,<br />             struct rte_mbuf **cookie, uint16_t num)<br /> {<br />     struct zxdh_vring_packed_desc *start_dp = vq->vq_packed.ring.desc;<br />-    struct zxdh_hw *hw = vq->hw;<br />     struct zxdh_vq_desc_extra *dxp;<br />     uint16_t flags = vq->vq_packed.cached_flags;<br />     int32_t i;<br />@@ -328,10 +325,8 @@ int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq,<br />         /* rx pkt fill in data_off */<br />         start_dp[idx].addr = rte_mbuf_iova_get(cookie[i]) + RTE_PKTMBUF_HEADROOM;<br />         start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM;<br />-        vq->vq_desc_head_idx = dxp->next;<br />-        if (vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END)<br />-            vq->vq_desc_tail_idx = vq->vq_desc_head_idx;<br />-        zxdh_queue_store_flags_packed(&start_dp[idx], flags, hw->weak_barriers);<br />+<br />+        zxdh_queue_store_flags_packed(&start_dp[idx], flags);<br />         if (++vq->vq_avail_idx >= vq->vq_nentries) {<br />             vq->vq_avail_idx -= vq->vq_nentries;<br />             vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;<br />diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h<br />index ba946dee29..05f391f080 100644<br />--- a/drivers/net/zxdh/zxdh_queue.h<br />+++ b/drivers/net/zxdh/zxdh_queue.h<br />@@ -49,13 +49,23 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };<br />  <br /> #define ZXDH_RQ_QUEUE_IDX                 0<br /> #define ZXDH_TQ_QUEUE_IDX                 1<br />+#define ZXDH_UL_1588_HDR_SIZE             8<br /> #define ZXDH_TYPE_HDR_SIZE        sizeof(struct zxdh_type_hdr)<br /> #define ZXDH_PI_HDR_SIZE          sizeof(struct zxdh_pi_hdr)<br /> #define ZXDH_DL_NET_HDR_SIZE      sizeof(struct zxdh_net_hdr_dl)<br /> #define 
ZXDH_UL_NET_HDR_SIZE      sizeof(struct zxdh_net_hdr_ul)<br />+#define ZXDH_DL_PD_HDR_SIZE       sizeof(struct zxdh_pd_hdr_dl)<br />+#define ZXDH_UL_PD_HDR_SIZE       sizeof(struct zxdh_pd_hdr_ul)<br />+#define ZXDH_DL_NET_HDR_NOPI_SIZE   (ZXDH_TYPE_HDR_SIZE + \<br />+                                    ZXDH_DL_PD_HDR_SIZE)<br />+#define ZXDH_UL_NOPI_HDR_SIZE_MAX   (ZXDH_TYPE_HDR_SIZE + \<br />+                                    ZXDH_UL_PD_HDR_SIZE + \<br />+                                    ZXDH_UL_1588_HDR_SIZE)<br /> #define ZXDH_PD_HDR_SIZE_MAX              256<br /> #define ZXDH_PD_HDR_SIZE_MIN              ZXDH_TYPE_HDR_SIZE<br />  <br />+#define rte_packet_prefetch(p)      do {} while (0)<br />+<br /> /*<br />  * ring descriptors: 16 bytes.<br />  * These can chain together via "next".<br />@@ -192,18 +202,29 @@ struct __rte_packed_begin zxdh_pi_hdr {<br /> } __rte_packed_end; /* 32B */<br />  <br /> struct __rte_packed_begin zxdh_pd_hdr_dl {<br />-    uint32_t ol_flag;<br />+    uint16_t ol_flag;<br />+    uint8_t rsv;<br />+    uint8_t panel_id;<br />+<br />+    uint16_t svlan_insert;<br />+    uint16_t cvlan_insert;<br />+<br />     uint8_t tag_idx;<br />     uint8_t tag_data;<br />     uint16_t dst_vfid;<br />-    uint32_t svlan_insert;<br />-    uint32_t cvlan_insert;<br />-} __rte_packed_end; /* 16B */<br />+} __rte_packed_end; /* 12B */<br />  <br />-struct __rte_packed_begin zxdh_net_hdr_dl {<br />-    struct zxdh_type_hdr  type_hdr; /* 4B */<br />+struct __rte_packed_begin zxdh_pipd_hdr_dl {<br />     struct zxdh_pi_hdr    pi_hdr; /* 32B */<br />-    struct zxdh_pd_hdr_dl pd_hdr; /* 16B */<br />+    struct zxdh_pd_hdr_dl pd_hdr; /* 12B */<br />+} __rte_packed_end; /* 44B */<br />+<br />+struct __rte_packed_begin zxdh_net_hdr_dl {<br />+    struct zxdh_type_hdr type_hdr; /* 4B */<br />+    union {<br />+        struct zxdh_pd_hdr_dl pd_hdr; /* 12B */<br />+        struct zxdh_pipd_hdr_dl pipd_hdr_dl; /* 44B */<br />+    };<br /> } __rte_packed_end;<br />  <br /> struct 
__rte_packed_begin zxdh_pd_hdr_ul {<br />@@ -211,17 +232,27 @@ struct __rte_packed_begin zxdh_pd_hdr_ul {<br />     uint32_t rss_hash;<br />     uint32_t fd;<br />     uint32_t striped_vlan_tci;<br />+<br />+    uint16_t pkt_type_out;<br />+    uint16_t pkt_type_in;<br />+    uint16_t pkt_len;<br />+<br />     uint8_t tag_idx;<br />     uint8_t tag_data;<br />     uint16_t src_vfid;<br />-    uint16_t pkt_type_out;<br />-    uint16_t pkt_type_in;<br />-} __rte_packed_end; /* 24B */<br />+} __rte_packed_end; /* 26B */<br />  <br />-struct __rte_packed_begin zxdh_net_hdr_ul {<br />-    struct zxdh_type_hdr  type_hdr; /* 4B */<br />+struct __rte_packed_begin zxdh_pipd_hdr_ul {<br />     struct zxdh_pi_hdr    pi_hdr; /* 32B */<br />-    struct zxdh_pd_hdr_ul pd_hdr; /* 24B */<br />+    struct zxdh_pd_hdr_ul pd_hdr; /* 26B */<br />+} __rte_packed_end;<br />+<br />+struct __rte_packed_begin zxdh_net_hdr_ul {<br />+    struct zxdh_type_hdr type_hdr; /* 4B */<br />+    union {<br />+        struct zxdh_pd_hdr_ul   pd_hdr; /* 26B */<br />+        struct zxdh_pipd_hdr_ul pipd_hdr_ul; /* 58B */<br />+    };<br />-} __rte_packed_end; /* 60B */<br />+} __rte_packed_end; /* 62B */<br />  <br />  <br />@@ -316,6 +347,19 @@ zxdh_mb(uint8_t weak_barriers)<br />         rte_mb();<br /> }<br />  <br />+static inline<br />+int32_t desc_is_used(struct zxdh_vring_packed_desc *desc, struct zxdh_virtqueue *vq)<br />+{<br />+    uint16_t flags;<br />+    uint16_t used, avail;<br />+<br />+    flags = desc->flags;<br />+    rte_io_rmb();<br />+    used = !!(flags & ZXDH_VRING_PACKED_DESC_F_USED);<br />+    avail = !!(flags & ZXDH_VRING_PACKED_DESC_F_AVAIL);<br />+    return avail == used && used == vq->vq_packed.used_wrap_counter;<br />+}<br />+<br /> static inline int32_t<br /> zxdh_queue_full(const struct zxdh_virtqueue *vq)<br /> {<br />@@ -323,48 +367,22 @@ zxdh_queue_full(const struct zxdh_virtqueue *vq)<br /> }<br />  <br /> static inline void<br />-zxdh_queue_store_flags_packed(struct zxdh_vring_packed_desc *dp,<br />-        uint16_t flags, uint8_t 
weak_barriers)<br />-    {<br />-    if (weak_barriers) {<br />-    #ifdef RTE_ARCH_X86_64<br />-        rte_io_wmb();<br />-        dp->flags = flags;<br />-    #else<br />-        rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);<br />-    #endif<br />-    } else {<br />-        rte_io_wmb();<br />-        dp->flags = flags;<br />-    }<br />-}<br />-<br />-static inline uint16_t<br />-zxdh_queue_fetch_flags_packed(struct zxdh_vring_packed_desc *dp,<br />-        uint8_t weak_barriers)<br />-    {<br />-    uint16_t flags;<br />-    if (weak_barriers) {<br />-    #ifdef RTE_ARCH_X86_64<br />-        flags = dp->flags;<br />-        rte_io_rmb();<br />-    #else<br />-        flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);<br />-    #endif<br />-    } else {<br />-        flags = dp->flags;<br />-        rte_io_rmb();<br />-    }<br />-<br />-    return flags;<br />+zxdh_queue_store_flags_packed(struct zxdh_vring_packed_desc *dp, uint16_t flags)<br />+{<br />+    rte_io_wmb();<br />+    dp->flags = flags;<br /> }<br />  <br /> static inline int32_t<br /> zxdh_desc_used(struct zxdh_vring_packed_desc *desc, struct zxdh_virtqueue *vq)<br /> {<br />-    uint16_t flags = zxdh_queue_fetch_flags_packed(desc, vq->hw->weak_barriers);<br />-    uint16_t used = !!(flags & ZXDH_VRING_PACKED_DESC_F_USED);<br />-    uint16_t avail = !!(flags & ZXDH_VRING_PACKED_DESC_F_AVAIL);<br />+    uint16_t flags;<br />+    uint16_t used, avail;<br />+<br />+    flags = desc->flags;<br />+    rte_io_rmb();<br />+    used = !!(flags & ZXDH_VRING_PACKED_DESC_F_USED);<br />+    avail = !!(flags & ZXDH_VRING_PACKED_DESC_F_AVAIL);<br />     return avail == used && used == vq->vq_packed.used_wrap_counter;<br /> }<br />  <br />@@ -378,12 +396,14 @@ zxdh_queue_kick_prepare_packed(struct zxdh_virtqueue *vq)<br /> {<br />     uint16_t flags = 0;<br />  <br />-    zxdh_mb(vq->hw->weak_barriers);<br />+    zxdh_mb(1);<br />     flags = 
vq->vq_packed.ring.device->desc_event_flags;<br />  <br />     return (flags != ZXDH_RING_EVENT_FLAGS_DISABLE);<br /> }<br />  <br />+extern struct zxdh_net_hdr_dl g_net_hdr_dl[RTE_MAX_ETHPORTS];<br />+<br /> struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq);<br /> int32_t zxdh_free_queues(struct rte_eth_dev *dev);<br /> int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx);<br />diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c<br />index 6353d496f2..5c1795ce1d 100644<br />--- a/drivers/net/zxdh/zxdh_rxtx.c<br />+++ b/drivers/net/zxdh/zxdh_rxtx.c<br />@@ -9,6 +9,8 @@<br />  <br /> #include "zxdh_logs.h" <br /> #include "zxdh_pci.h" <br />+#include "zxdh_common.h" <br />+#include "zxdh_rxtx.h" <br /> #include "zxdh_queue.h" <br />  <br /> #define ZXDH_SVLAN_TPID                       0x88a8<br />@@ -24,8 +26,12 @@<br /> #define ZXDH_PI_L3TYPE_RSV                    0xC0<br /> #define ZXDH_PI_L3TYPE_MASK                   0xC0<br />  <br />+#define  ZXDH_PD_OFFLOAD_SPEC_PHYPORT         (1 << 15)<br /> #define  ZXDH_PD_OFFLOAD_SVLAN_INSERT         (1 << 14)<br /> #define  ZXDH_PD_OFFLOAD_CVLAN_INSERT         (1 << 13)<br />+#define  ZXDH_PD_OFFLOAD_OUTER_IPCSUM         (1 << 12)<br />+#define  ZXDH_PD_OFFLOAD_PRIO_MASK            (0x7 << 8)<br />+#define  ZXDH_PD_OFFLOAD_DELAY_STAT           (1 << 7)<br />  <br /> #define ZXDH_PCODE_MASK                       0x1F<br /> #define ZXDH_PCODE_IP_PKT_TYPE                0x01<br />@@ -34,9 +40,78 @@<br /> #define ZXDH_PCODE_NO_IP_PKT_TYPE             0x09<br /> #define ZXDH_PCODE_NO_REASSMBLE_TCP_PKT_TYPE  0x0C<br />  <br />+/* Uplink pd header byte0~1 */<br />+#define ZXDH_MBUF_F_RX_OUTER_L4_CKSUM_GOOD               0x00080000<br />+#define ZXDH_MBUF_F_RX_QINQ                              0x00100000<br />+#define ZXDH_MBUF_F_RX_SEC_OFFLOAD                       0x00200000<br />+#define ZXDH_MBUF_F_RX_QINQ_STRIPPED                     0x00400000<br />+#define FELX_4BYTE             
                          0x00800000<br />+#define FELX_8BYTE                                       0x01000000<br />+#define ZXDH_MBUF_F_RX_FDIR_FLX_MASK                     0x01800000<br />+#define ZXDH_MBUF_F_RX_FDIR_ID                           0x02000000<br />+#define ZXDH_MBUF_F_RX_1588_TMST                         0x04000000<br />+#define ZXDH_MBUF_F_RX_1588_PTP                          0x08000000<br />+#define ZXDH_MBUF_F_RX_VLAN_STRIPPED                     0x10000000<br />+#define ZXDH_MBUF_F_RX_OUTER_IP_CKSUM_BAD                0x20000000<br />+#define ZXDH_MBUF_F_RX_FDIR                              0x40000000<br />+#define ZXDH_MBUF_F_RX_RSS_HASH                          0x80000000<br />+<br />+/* Outer/Inner L2 type */<br />+#define ZXDH_PD_L2TYPE_MASK                              0xf000<br />+#define ZXDH_PTYPE_L2_ETHER                              0x1000<br />+#define ZXDH_PTYPE_L2_ETHER_TIMESYNC                     0x2000<br />+#define ZXDH_PTYPE_L2_ETHER_ARP                          0x3000<br />+#define ZXDH_PTYPE_L2_ETHER_LLDP                         0x4000<br />+#define ZXDH_PTYPE_L2_ETHER_NSH                          0x5000<br />+#define ZXDH_PTYPE_L2_ETHER_VLAN                         0x6000<br />+#define ZXDH_PTYPE_L2_ETHER_QINQ                         0x7000<br />+#define ZXDH_PTYPE_L2_ETHER_PPPOE                        0x8000<br />+#define ZXDH_PTYPE_L2_ETHER_FCOE                         0x9000<br />+#define ZXDH_PTYPE_L2_ETHER_MPLS                         0xa000<br />+<br />+/* Outer/Inner L3 type */<br />+#define ZXDH_PD_L3TYPE_MASK                              0x0f00<br />+#define ZXDH_PTYPE_L3_IPV4                               0x0100<br />+#define ZXDH_PTYPE_L3_IPV4_EXT                           0x0200<br />+#define ZXDH_PTYPE_L3_IPV6                               0x0300<br />+#define ZXDH_PTYPE_L3_IPV4_EXT_UNKNOWN                   0x0400<br />+#define ZXDH_PTYPE_L3_IPV6_EXT                           0x0500<br />+#define 
ZXDH_PTYPE_L3_IPV6_EXT_UNKNOWN                   0x0600<br />+<br />+/* Outer/Inner L4 type */<br />+#define ZXDH_PD_L4TYPE_MASK    0x00f0<br />+#define ZXDH_PTYPE_L4_TCP      0x0010<br />+#define ZXDH_PTYPE_L4_UDP      0x0020<br />+#define ZXDH_PTYPE_L4_FRAG     0x0030<br />+#define ZXDH_PTYPE_L4_SCTP     0x0040<br />+#define ZXDH_PTYPE_L4_ICMP     0x0050<br />+#define ZXDH_PTYPE_L4_NONFRAG  0x0060<br />+#define ZXDH_PTYPE_L4_IGMP     0x0070<br />+<br /> #define ZXDH_TX_MAX_SEGS                      31<br /> #define ZXDH_RX_MAX_SEGS                      31<br />  <br />+#define ZXDH_PI_LRO_FALG    0x00000001<br />+<br />+#define ZXDH_MIN_MSS                                     64<br />+#define ZXDH_VLAN_ID_MASK                                0xfff<br />+<br />+#define ZXDH_MTU_MSS_UNIT_SHIFTBIT                       2<br />+#define ZXDH_MTU_MSS_MASK                                0xFFF<br />+#define ZXDH_PD_HDR_SIZE_MAX                             256<br />+<br />+/* error code */<br />+#define ZXDH_UDP_CSUM_ERR  0x0020<br />+#define ZXDH_TCP_CSUM_ERR  0x0040<br />+#define ZXDH_IPV4_CSUM_ERR 0x0100<br />+<br />+#define ZXDH_DTPOFFLOAD_MASK ( \<br />+        RTE_MBUF_F_TX_IP_CKSUM |        \<br />+        RTE_MBUF_F_TX_L4_MASK |         \<br />+        RTE_MBUF_F_TX_TCP_SEG |         \<br />+        RTE_MBUF_F_TX_SEC_OFFLOAD |     \<br />+        RTE_MBUF_F_TX_UDP_SEG)<br />+<br /> uint32_t zxdh_outer_l2_type[16] = {<br />     0,<br />     RTE_PTYPE_L2_ETHER,<br />@@ -161,259 +236,196 @@ zxdh_xmit_cleanup_inorder_packed(struct zxdh_virtqueue *vq, int32_t num)<br />     vq->vq_free_cnt += free_cnt;<br /> }<br />  <br />-static void<br />-zxdh_ring_free_id_packed(struct zxdh_virtqueue *vq, uint16_t id)<br />+static inline uint16_t<br />+zxdh_get_mtu(struct zxdh_virtqueue *vq)<br /> {<br />-    struct zxdh_vq_desc_extra *dxp = NULL;<br />-<br />-    dxp = &vq->vq_descx[id];<br />-    vq->vq_free_cnt += dxp->ndescs;<br />-<br />-    if (vq->vq_desc_tail_idx == 
ZXDH_VQ_RING_DESC_CHAIN_END)<br />-        vq->vq_desc_head_idx = id;<br />-    else<br />-        vq->vq_descx[vq->vq_desc_tail_idx].next = id;<br />+    struct rte_eth_dev *eth_dev = vq->hw->eth_dev;<br />  <br />-    vq->vq_desc_tail_idx = id;<br />-    dxp->next = ZXDH_VQ_RING_DESC_CHAIN_END;<br />-}<br />-<br />-static void<br />-zxdh_xmit_cleanup_normal_packed(struct zxdh_virtqueue *vq, int32_t num)<br />-{<br />-    uint16_t used_idx = 0;<br />-    uint16_t id = 0;<br />-    uint16_t size = vq->vq_nentries;<br />-    struct zxdh_vring_packed_desc *desc = vq->vq_packed.ring.desc;<br />-    struct zxdh_vq_desc_extra *dxp = NULL;<br />-<br />-    used_idx = vq->vq_used_cons_idx;<br />-    /* desc_is_used has a load-acquire or rte_io_rmb inside<br />-     * and wait for used desc in virtqueue.<br />-     */<br />-    while (num-- && zxdh_desc_used(&desc[used_idx], vq)) {<br />-        id = desc[used_idx].id;<br />-        dxp = &vq->vq_descx[id];<br />-        vq->vq_used_cons_idx += dxp->ndescs;<br />-        if (vq->vq_used_cons_idx >= size) {<br />-            vq->vq_used_cons_idx -= size;<br />-            vq->vq_packed.used_wrap_counter ^= 1;<br />-        }<br />-        zxdh_ring_free_id_packed(vq, id);<br />-        if (dxp->cookie != NULL) {<br />-            rte_pktmbuf_free(dxp->cookie);<br />-            dxp->cookie = NULL;<br />-        }<br />-        used_idx = vq->vq_used_cons_idx;<br />-    }<br />+    return eth_dev->data->mtu;<br /> }<br />  <br /> static void<br />-zxdh_xmit_cleanup_packed(struct zxdh_virtqueue *vq, int32_t num, int32_t in_order)<br />-{<br />-    if (in_order)<br />-        zxdh_xmit_cleanup_inorder_packed(vq, num);<br />-    else<br />-        zxdh_xmit_cleanup_normal_packed(vq, num);<br />-}<br />-<br />-static uint8_t<br />-zxdh_xmit_get_ptype(struct rte_mbuf *m)<br />-{<br />-    uint8_t pcode = ZXDH_PCODE_NO_IP_PKT_TYPE;<br />-    uint8_t l3_ptype = ZXDH_PI_L3TYPE_NOIP;<br />-<br />-    if ((m->packet_type & 
RTE_PTYPE_INNER_L3_MASK) == RTE_PTYPE_INNER_L3_IPV4 ||<br />-            ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) && <br />-            (m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4)) {<br />-        l3_ptype = ZXDH_PI_L3TYPE_IP;<br />-        pcode = ZXDH_PCODE_IP_PKT_TYPE;<br />-    } else if ((m->packet_type & RTE_PTYPE_INNER_L3_MASK) == RTE_PTYPE_INNER_L3_IPV6 ||<br />-            ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) && <br />-            (m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6)) {<br />-        l3_ptype = ZXDH_PI_L3TYPE_IPV6;<br />-        pcode = ZXDH_PCODE_IP_PKT_TYPE;<br />-    } else {<br />-        goto end;<br />-    }<br />-<br />-    if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_TCP ||<br />-            ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) && <br />-            (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP))<br />-        pcode = ZXDH_PCODE_TCP_PKT_TYPE;<br />-    else if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP ||<br />-                ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) && <br />-                (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP))<br />-        pcode = ZXDH_PCODE_UDP_PKT_TYPE;<br />-<br />-end:<br />-    return  l3_ptype | ZXDH_PKT_FORM_CPU | pcode;<br />-}<br />-<br />-static void zxdh_xmit_fill_net_hdr(struct rte_mbuf *cookie,<br />+zxdh_xmit_fill_net_hdr(struct zxdh_virtqueue *vq, struct rte_mbuf *cookie,<br />                 struct zxdh_net_hdr_dl *hdr)<br /> {<br />+    uint16_t mtu_or_mss = 0;<br />     uint16_t pkt_flag_lw16 = ZXDH_NO_IPID_UPDATE;<br />     uint16_t l3_offset;<br />+    uint8_t pcode = ZXDH_PCODE_NO_IP_PKT_TYPE;<br />+    uint8_t l3_ptype = ZXDH_PI_L3TYPE_NOIP;<br />+    struct zxdh_pi_hdr *pi_hdr = NULL;<br />+    struct zxdh_pd_hdr_dl *pd_hdr = NULL;<br />+    struct zxdh_hw *hw = vq->hw;<br />+    struct zxdh_net_hdr_dl *net_hdr_dl = &g_net_hdr_dl[hw->port_id];<br />+    uint8_t 
hdr_len = hw->dl_net_hdr_len;<br />     uint32_t ol_flag = 0;<br />  <br />-    hdr->pi_hdr.pkt_flag_lw16 = rte_be_to_cpu_16(pkt_flag_lw16);<br />+    rte_memcpy(hdr, net_hdr_dl, hdr_len);<br />+    if (hw->has_tx_offload) {<br />+        pi_hdr = &hdr->pipd_hdr_dl.pi_hdr;<br />+        pd_hdr = &hdr->pipd_hdr_dl.pd_hdr;<br />  <br />-    hdr->pi_hdr.pkt_type = zxdh_xmit_get_ptype(cookie);<br />-    l3_offset = ZXDH_DL_NET_HDR_SIZE + cookie->outer_l2_len +<br />-                cookie->outer_l3_len + cookie->l2_len;<br />-    hdr->pi_hdr.l3_offset = rte_be_to_cpu_16(l3_offset);<br />-    hdr->pi_hdr.l4_offset = rte_be_to_cpu_16(l3_offset + cookie->l3_len);<br />+        pcode = ZXDH_PCODE_IP_PKT_TYPE;<br />+        if (cookie->ol_flags & RTE_MBUF_F_TX_IPV6)<br />+            l3_ptype = ZXDH_PI_L3TYPE_IPV6;<br />+        else if (cookie->ol_flags & RTE_MBUF_F_TX_IPV4)<br />+            l3_ptype = ZXDH_PI_L3TYPE_IP;<br />+        else<br />+            pcode = ZXDH_PCODE_NO_IP_PKT_TYPE;<br />+<br />+        if (cookie->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {<br />+            mtu_or_mss = (cookie->tso_segsz >= ZXDH_MIN_MSS)<br />+                ? cookie->tso_segsz<br />+                : ZXDH_MIN_MSS;<br />+            pi_hdr->pkt_flag_hi8  |= ZXDH_TX_TCPUDP_CKSUM_CAL;<br />+            pkt_flag_lw16 |= ZXDH_NO_IP_FRAGMENT | ZXDH_TX_IP_CKSUM_CAL;<br />+            pcode = ZXDH_PCODE_TCP_PKT_TYPE;<br />+        } else if (cookie->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {<br />+            mtu_or_mss = zxdh_get_mtu(vq);<br />+            mtu_or_mss = (mtu_or_mss >= ZXDH_MIN_MSS) ? 
mtu_or_mss : ZXDH_MIN_MSS;<br />+            pkt_flag_lw16 |= ZXDH_TX_IP_CKSUM_CAL;<br />+            pi_hdr->pkt_flag_hi8 |= ZXDH_NO_TCP_FRAGMENT | ZXDH_TX_TCPUDP_CKSUM_CAL;<br />+            pcode = ZXDH_PCODE_UDP_PKT_TYPE;<br />+        } else {<br />+            pkt_flag_lw16 |= ZXDH_NO_IP_FRAGMENT;<br />+            pi_hdr->pkt_flag_hi8 |= ZXDH_NO_TCP_FRAGMENT;<br />+        }<br />  <br />-    if (cookie->ol_flags & RTE_MBUF_F_TX_VLAN) {<br />-        ol_flag |= ZXDH_PD_OFFLOAD_CVLAN_INSERT;<br />-        hdr->pi_hdr.vlan_id = rte_be_to_cpu_16(cookie->vlan_tci);<br />-        hdr->pd_hdr.cvlan_insert =<br />-            rte_be_to_cpu_32((ZXDH_CVLAN_TPID << 16) | cookie->vlan_tci);<br />+        if (cookie->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)<br />+            pkt_flag_lw16 |= ZXDH_TX_IP_CKSUM_CAL;<br />+<br />+        if ((cookie->ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) ==<br />+            RTE_MBUF_F_TX_UDP_CKSUM){<br />+            pcode = ZXDH_PCODE_UDP_PKT_TYPE;<br />+            pi_hdr->pkt_flag_hi8 |= ZXDH_TX_TCPUDP_CKSUM_CAL;<br />+        } else if ((cookie->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) ==<br />+                 RTE_MBUF_F_TX_TCP_CKSUM) {<br />+            pcode = ZXDH_PCODE_TCP_PKT_TYPE;<br />+            pi_hdr->pkt_flag_hi8 |= ZXDH_TX_TCPUDP_CKSUM_CAL;<br />+        }<br />+<br />+        pkt_flag_lw16 |= (mtu_or_mss >> ZXDH_MTU_MSS_UNIT_SHIFTBIT) & ZXDH_MTU_MSS_MASK;<br />+        pi_hdr->pkt_flag_lw16 = rte_be_to_cpu_16(pkt_flag_lw16);<br />+        pi_hdr->pkt_type = l3_ptype | ZXDH_PKT_FORM_CPU | pcode;<br />+<br />+        l3_offset = hdr_len + cookie->l2_len;<br />+        l3_offset += (cookie->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?<br />+                    cookie->outer_l2_len + cookie->outer_l3_len : 0;<br />+        pi_hdr->l3_offset = rte_be_to_cpu_16(l3_offset);<br />+        pi_hdr->l4_offset = rte_be_to_cpu_16(l3_offset + cookie->l3_len);<br />+        if (cookie->ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM)<br />+            ol_flag 
|= ZXDH_PD_OFFLOAD_OUTER_IPCSUM;<br />+    } else {<br />+        pd_hdr = &hdr->pd_hdr;<br />     }<br />-    if (cookie->ol_flags & RTE_MBUF_F_TX_QINQ) {<br />-        ol_flag |= ZXDH_PD_OFFLOAD_SVLAN_INSERT;<br />-        hdr->pd_hdr.svlan_insert =<br />-            rte_be_to_cpu_32((ZXDH_SVLAN_TPID << 16) | cookie->vlan_tci_outer);<br />+<br />+    if (cookie->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {<br />+        ol_flag |= ZXDH_PD_OFFLOAD_CVLAN_INSERT;<br />+        pd_hdr->cvlan_insert = rte_be_to_cpu_16(cookie->vlan_tci);<br />+        if (unlikely(cookie->ol_flags & RTE_MBUF_F_TX_QINQ)) {<br />+            ol_flag |= ZXDH_PD_OFFLOAD_SVLAN_INSERT;<br />+            pd_hdr->svlan_insert = rte_be_to_cpu_16(cookie->vlan_tci_outer);<br />+        }<br />     }<br />  <br />-    hdr->pd_hdr.ol_flag = rte_be_to_cpu_32(ol_flag);<br />+    pd_hdr->ol_flag = rte_be_to_cpu_16(ol_flag);<br /> }<br />  <br />-static inline void zxdh_enqueue_xmit_packed_fast(struct zxdh_virtnet_tx *txvq,<br />-                        struct rte_mbuf *cookie, int32_t in_order)<br />+static inline void<br />+zxdh_enqueue_xmit_packed_fast(struct zxdh_virtnet_tx *txvq,<br />+                        struct rte_mbuf *cookie)<br /> {<br />     struct zxdh_virtqueue *vq = txvq->vq;<br />-    uint16_t id = in_order ? 
vq->vq_avail_idx : vq->vq_desc_head_idx;<br />+    uint16_t id = vq->vq_avail_idx;<br />     struct zxdh_vq_desc_extra *dxp = &vq->vq_descx[id];<br />     uint16_t flags = vq->vq_packed.cached_flags;<br />     struct zxdh_net_hdr_dl *hdr = NULL;<br />+    uint8_t hdr_len = vq->hw->dl_net_hdr_len;<br />+    struct zxdh_vring_packed_desc *dp = &vq->vq_packed.ring.desc[id];<br />  <br />     dxp->ndescs = 1;<br />     dxp->cookie = cookie;<br />-    hdr = rte_pktmbuf_mtod_offset(cookie, struct zxdh_net_hdr_dl *, -ZXDH_DL_NET_HDR_SIZE);<br />-    zxdh_xmit_fill_net_hdr(cookie, hdr);<br />-<br />-    uint16_t idx = vq->vq_avail_idx;<br />-    struct zxdh_vring_packed_desc *dp = &vq->vq_packed.ring.desc[idx];<br />+    hdr = rte_pktmbuf_mtod_offset(cookie, struct zxdh_net_hdr_dl *, -hdr_len);<br />+    zxdh_xmit_fill_net_hdr(vq, cookie, hdr);<br />  <br />-    dp->addr = rte_pktmbuf_iova(cookie) - ZXDH_DL_NET_HDR_SIZE;<br />-    dp->len  = cookie->data_len + ZXDH_DL_NET_HDR_SIZE;<br />+    dp->addr = rte_pktmbuf_iova(cookie) - hdr_len;<br />+    dp->len  = cookie->data_len + hdr_len;<br />     dp->id   = id;<br />     if (++vq->vq_avail_idx >= vq->vq_nentries) {<br />         vq->vq_avail_idx -= vq->vq_nentries;<br />         vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;<br />     }<br />     vq->vq_free_cnt--;<br />-    if (!in_order) {<br />-        vq->vq_desc_head_idx = dxp->next;<br />-        if (vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END)<br />-            vq->vq_desc_tail_idx = ZXDH_VQ_RING_DESC_CHAIN_END;<br />-        }<br />-        zxdh_queue_store_flags_packed(dp, flags, vq->hw->weak_barriers);<br />+    zxdh_queue_store_flags_packed(dp, flags);<br /> }<br />  <br />-static inline void zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq,<br />+static inline void<br />+zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq,<br />                         struct rte_mbuf *cookie,<br />-                        uint16_t needed,<br 
/>-                        int32_t use_indirect,<br />-                        int32_t in_order)<br />+                        uint16_t needed)<br /> {<br />     struct zxdh_tx_region *txr = txvq->zxdh_net_hdr_mz->addr;<br />     struct zxdh_virtqueue *vq = txvq->vq;<br />-    struct zxdh_vring_packed_desc *start_dp = vq->vq_packed.ring.desc;<br />-    void *hdr = NULL;<br />+    uint16_t id = vq->vq_avail_idx;<br />+    struct zxdh_vq_desc_extra *dxp = &vq->vq_descx[id];<br />     uint16_t head_idx = vq->vq_avail_idx;<br />     uint16_t idx = head_idx;<br />-    uint16_t prev = head_idx;<br />-    uint16_t head_flags = cookie->next ? ZXDH_VRING_DESC_F_NEXT : 0;<br />-    uint16_t seg_num = cookie->nb_segs;<br />-    uint16_t id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;<br />+    struct zxdh_vring_packed_desc *start_dp = vq->vq_packed.ring.desc;<br />     struct zxdh_vring_packed_desc *head_dp = &vq->vq_packed.ring.desc[idx];<br />-    struct zxdh_vq_desc_extra *dxp = &vq->vq_descx[id];<br />+    struct zxdh_net_hdr_dl *hdr = NULL;<br />+<br />+    uint16_t head_flags = cookie->next ? ZXDH_VRING_DESC_F_NEXT : 0;<br />+    uint8_t hdr_len = vq->hw->dl_net_hdr_len;<br />  <br />     dxp->ndescs = needed;<br />     dxp->cookie = cookie;<br />     head_flags |= vq->vq_packed.cached_flags;<br />-    /* if offload disabled, it is not zeroed below, do it now */<br />  <br />-    if (use_indirect) {<br />-        /**<br />-         * setup tx ring slot to point to indirect<br />-         * descriptor list stored in reserved region.<br />-         * the first slot in indirect ring is already<br />-         * preset to point to the header in reserved region<br />-         **/<br />-        start_dp[idx].addr =<br />-            txvq->zxdh_net_hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);<br />-        start_dp[idx].len  = (seg_num + 1) * sizeof(struct zxdh_vring_packed_desc);<br />-        /* Packed descriptor id needs to be restored when inorder. 
*/<br />-        if (in_order)<br />-            start_dp[idx].id = idx;<br />-<br />-        /* reset flags for indirect desc */<br />-        head_flags = ZXDH_VRING_DESC_F_INDIRECT;<br />-        head_flags |= vq->vq_packed.cached_flags;<br />-        hdr = (void *)&txr[idx].tx_hdr;<br />-        /* loop below will fill in rest of the indirect elements */<br />-        start_dp = txr[idx].tx_packed_indir;<br />-        start_dp->len = ZXDH_DL_NET_HDR_SIZE; /* update actual net or type hdr size */<br />-        idx = 1;<br />-    } else {<br />-        /* setup first tx ring slot to point to header stored in reserved region. */<br />-        start_dp[idx].addr = txvq->zxdh_net_hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);<br />-        start_dp[idx].len  = ZXDH_DL_NET_HDR_SIZE;<br />-        head_flags |= ZXDH_VRING_DESC_F_NEXT;<br />-        hdr = (void *)&txr[idx].tx_hdr;<br />-        idx++;<br />-        if (idx >= vq->vq_nentries) {<br />-            idx -= vq->vq_nentries;<br />-            vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;<br />-        }<br />+    start_dp[idx].addr = txvq->zxdh_net_hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);<br />+    start_dp[idx].len  = hdr_len;<br />+    head_flags |= ZXDH_VRING_DESC_F_NEXT;<br />+    hdr = (void *)&txr[idx].tx_hdr;<br />+<br />+    rte_prefetch1(hdr);<br />+    idx++;<br />+    if (idx >= vq->vq_nentries) {<br />+        idx -= vq->vq_nentries;<br />+        vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;<br />     }<br />-    zxdh_xmit_fill_net_hdr(cookie, (struct zxdh_net_hdr_dl *)hdr);<br />+<br />+    zxdh_xmit_fill_net_hdr(vq, cookie, hdr);<br />  <br />     do {<br />         start_dp[idx].addr = rte_pktmbuf_iova(cookie);<br />         start_dp[idx].len  = cookie->data_len;<br />+        start_dp[idx].id = id;<br />         if (likely(idx != head_idx)) {<br />             uint16_t flags = cookie->next ? 
ZXDH_VRING_DESC_F_NEXT : 0;<br />+<br />             flags |= vq->vq_packed.cached_flags;<br />             start_dp[idx].flags = flags;<br />         }<br />-        prev = idx;<br />+<br />         idx++;<br />         if (idx >= vq->vq_nentries) {<br />             idx -= vq->vq_nentries;<br />             vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;<br />         }<br />     } while ((cookie = cookie->next) != NULL);<br />-    start_dp[prev].id = id;<br />-    if (use_indirect) {<br />-        idx = head_idx;<br />-        if (++idx >= vq->vq_nentries) {<br />-            idx -= vq->vq_nentries;<br />-            vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;<br />-        }<br />-    }<br />+<br />     vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);<br />     vq->vq_avail_idx = idx;<br />-    if (!in_order) {<br />-        vq->vq_desc_head_idx = dxp->next;<br />-        if (vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END)<br />-            vq->vq_desc_tail_idx = ZXDH_VQ_RING_DESC_CHAIN_END;<br />-    }<br />-    zxdh_queue_store_flags_packed(head_dp, head_flags, vq->hw->weak_barriers);<br />+<br />+    zxdh_queue_store_flags_packed(head_dp, head_flags);<br /> }<br />  <br /> static void<br /> zxdh_update_packet_stats(struct zxdh_virtnet_stats *stats, struct rte_mbuf *mbuf)<br /> {<br />     uint32_t s = mbuf->pkt_len;<br />-    struct rte_ether_addr *ea = NULL;<br />  <br />     stats->bytes += s;<br />-<br />+    #ifdef QUEUE_XSTAT<br />     if (s == 64) {<br />         stats->size_bins[1]++;<br />     } else if (s > 64 && s < 1024) {<br />@@ -438,6 +450,45 @@ zxdh_update_packet_stats(struct zxdh_virtnet_stats *stats, struct rte_mbuf *mbuf<br />         else<br />             stats->multicast++;<br />     }<br />+    #endif<br />+}<br />+<br />+static void<br />+zxdh_xmit_flush(struct zxdh_virtqueue *vq)<br />+{<br />+    uint16_t id       = 0;<br />+    uint16_t curr_id  = 0;<br />+    uint16_t free_cnt = 
0;<br />+    uint16_t size     = vq->vq_nentries;<br />+    struct zxdh_vring_packed_desc *desc = vq->vq_packed.ring.desc;<br />+    struct zxdh_vq_desc_extra     *dxp  = NULL;<br />+    uint16_t used_idx = vq->vq_used_cons_idx;<br />+<br />+    /*<br />+     * The function desc_is_used performs a load-acquire operation<br />+     * or calls rte_io_rmb to ensure memory consistency. It waits<br />+     * for a used descriptor in the virtqueue.<br />+     */<br />+    while (desc_is_used(&desc[used_idx], vq)) {<br />+        id = desc[used_idx].id;<br />+        do {<br />+            curr_id = used_idx;<br />+            dxp = &vq->vq_descx[used_idx];<br />+            used_idx += dxp->ndescs;<br />+            free_cnt += dxp->ndescs;<br />+            //num -= dxp->ndescs;<br />+            if (used_idx >= size) {<br />+                used_idx -= size;<br />+                vq->vq_packed.used_wrap_counter ^= 1;<br />+            }<br />+            if (dxp->cookie != NULL) {<br />+                rte_pktmbuf_free(dxp->cookie);<br />+                dxp->cookie = NULL;<br />+            }<br />+        } while (curr_id != id);<br />+    }<br />+    vq->vq_used_cons_idx = used_idx;<br />+    vq->vq_free_cnt += free_cnt;<br /> }<br />  <br /> uint16_t<br />@@ -445,33 +496,23 @@ zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkt<br /> {<br />     struct zxdh_virtnet_tx *txvq = tx_queue;<br />     struct zxdh_virtqueue  *vq   = txvq->vq;<br />-    struct zxdh_hw    *hw   = vq->hw;<br />     uint16_t nb_tx = 0;<br />  <br />-    bool in_order = zxdh_pci_with_feature(hw, ZXDH_F_IN_ORDER);<br />+    zxdh_xmit_flush(vq);<br />  <br />-    if (nb_pkts > vq->vq_free_cnt)<br />-        zxdh_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt, in_order);<br />     for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {<br />         struct rte_mbuf *txm = tx_pkts[nb_tx];<br />         int32_t can_push     = 0;<br />-        int32_t use_indirect = 0;<br />      
   int32_t slots        = 0;<br />         int32_t need         = 0;<br />  <br />+        rte_prefetch0(txm);<br />         /* optimize ring usage */<br />-        if ((zxdh_pci_with_feature(hw, ZXDH_F_ANY_LAYOUT) ||<br />-            zxdh_pci_with_feature(hw, ZXDH_F_VERSION_1)) && <br />-            rte_mbuf_refcnt_read(txm) == 1 && <br />+        if (rte_mbuf_refcnt_read(txm) == 1 && <br />             RTE_MBUF_DIRECT(txm) && <br />             txm->nb_segs == 1 && <br />-            rte_pktmbuf_headroom(txm) >= ZXDH_DL_NET_HDR_SIZE && <br />-            rte_is_aligned(rte_pktmbuf_mtod(txm, char *),<br />-            alignof(struct zxdh_net_hdr_dl))) {<br />+            txm->data_off >= ZXDH_DL_NET_HDR_SIZE) {<br />             can_push = 1;<br />-        } else if (zxdh_pci_with_feature(hw, ZXDH_RING_F_INDIRECT_DESC) && <br />-                    txm->nb_segs < ZXDH_MAX_TX_INDIRECT) {<br />-            use_indirect = 1;<br />         }<br />         /**<br />          * How many main ring entries are needed to this Tx?<br />@@ -479,46 +520,50 @@ zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkt<br />          * any_layout => number of segments<br />          * default    => number of segments + 1<br />          **/<br />-        slots = use_indirect ? 1 : (txm->nb_segs + !can_push);<br />+        slots = txm->nb_segs + !can_push;<br />         need = slots - vq->vq_free_cnt;<br />         /* Positive value indicates it need free vring descriptors */<br />         if (unlikely(need > 0)) {<br />-            zxdh_xmit_cleanup_packed(vq, need, in_order);<br />+            zxdh_xmit_cleanup_inorder_packed(vq, need);<br />             need = slots - vq->vq_free_cnt;<br />             if (unlikely(need > 0)) {<br />-                PMD_TX_LOG(ERR, "port[ep:%d, pf:%d, vf:%d, vfid:%d, pcieid:%d], queue:%d[pch:%d]. 
No free desc to xmit",<br />-                    hw->vport.epid, hw->vport.pfid, hw->vport.vfid,<br />-                    hw->vfid, hw->pcie_id, txvq->queue_id,<br />-                    hw->channel_context[txvq->queue_id].ph_chno);<br />+                PMD_TX_LOG(ERR,<br />+                        " No enough %d free tx descriptors to transmit." <br />+                        "freecnt %d",<br />+                        need,<br />+                        vq->vq_free_cnt);<br />                 break;<br />             }<br />         }<br />-        if (txm->nb_segs > ZXDH_TX_MAX_SEGS) {<br />-            PMD_TX_LOG(ERR, "%d segs dropped", txm->nb_segs);<br />-            txvq->stats.truncated_err += nb_pkts - nb_tx;<br />-            break;<br />-        }<br />+<br />         /* Enqueue Packet buffers */<br />         if (can_push)<br />-            zxdh_enqueue_xmit_packed_fast(txvq, txm, in_order);<br />+            zxdh_enqueue_xmit_packed_fast(txvq, txm);<br />         else<br />-            zxdh_enqueue_xmit_packed(txvq, txm, slots, use_indirect, in_order);<br />+            zxdh_enqueue_xmit_packed(txvq, txm, slots);<br />         zxdh_update_packet_stats(&txvq->stats, txm);<br />     }<br />     txvq->stats.packets += nb_tx;<br />-    if (likely(nb_tx)) {<br />-        if (unlikely(zxdh_queue_kick_prepare_packed(vq))) {<br />-            zxdh_queue_notify(vq);<br />-            PMD_TX_LOG(DEBUG, "Notified backend after xmit");<br />-        }<br />-    }<br />+    if (likely(nb_tx))<br />+        zxdh_queue_notify(vq);<br />     return nb_tx;<br /> }<br />  <br />+static inline int dl_net_hdr_check(struct rte_mbuf *m, struct zxdh_hw *hw)<br />+{<br />+    if ((m->ol_flags & ZXDH_DTPOFFLOAD_MASK) && !hw->has_tx_offload) {<br />+        PMD_TX_LOG(ERR, "port:[%d], vfid[%d]. 
" <br />+                    "not support tx_offload", hw->port_id, hw->vfid);<br />+        return -EINVAL;<br />+    }<br />+    return 0;<br />+}<br />+<br /> uint16_t zxdh_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,<br />                 uint16_t nb_pkts)<br /> {<br />     struct zxdh_virtnet_tx *txvq = tx_queue;<br />+    struct zxdh_hw *hw = txvq->vq->hw;<br />     uint16_t nb_tx;<br />  <br />     for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {<br />@@ -544,11 +589,20 @@ uint16_t zxdh_xmit_pkts_prepare(void *tx_queue, struct rte_mbuf **tx_pkts,<br />             rte_errno = ENOMEM;<br />             break;<br />         }<br />+<br />+        error = dl_net_hdr_check(m, hw);<br />+        if (unlikely(error)) {<br />+            rte_errno = ENOTSUP;<br />+            txvq->stats.errors += nb_pkts - nb_tx;<br />+            txvq->stats.offload_cfg_err += nb_pkts - nb_tx;<br />+            break;<br />+        }<br />     }<br />     return nb_tx;<br /> }<br />  <br />-static uint16_t zxdh_dequeue_burst_rx_packed(struct zxdh_virtqueue *vq,<br />+static uint16_t<br />+zxdh_dequeue_burst_rx_packed(struct zxdh_virtqueue *vq,<br />                     struct rte_mbuf **rx_pkts,<br />                     uint32_t *len,<br />                     uint16_t num)<br />@@ -575,6 +629,8 @@ static uint16_t zxdh_dequeue_burst_rx_packed(struct zxdh_virtqueue *vq,<br />                 "vring descriptor with no mbuf cookie at %u", vq->vq_used_cons_idx);<br />             break;<br />         }<br />+        rte_prefetch0(cookie);<br />+        rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));<br />         rx_pkts[i] = cookie;<br />         vq->vq_free_cnt++;<br />         vq->vq_used_cons_idx++;<br />@@ -586,15 +642,107 @@ static uint16_t zxdh_dequeue_burst_rx_packed(struct zxdh_virtqueue *vq,<br />     return i;<br /> }<br />  <br />-static int32_t zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *hdr)<br />+static inline void<br 
/>+zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *hdr)<br /> {<br />-    struct zxdh_pd_hdr_ul *pd_hdr = &hdr->pd_hdr;<br />-    struct zxdh_pi_hdr *pi_hdr = &hdr->pi_hdr;<br />+    uint8_t has_pi = (uint64_t)(hdr->type_hdr.pd_len << 1) > ZXDH_UL_NOPI_HDR_SIZE_MAX;<br />+    struct zxdh_pd_hdr_ul *pd_hdr = has_pi ? &hdr->pipd_hdr_ul.pd_hdr : &hdr->pd_hdr;<br />+    uint32_t pkt_flag = ntohl(pd_hdr->pkt_flag);<br />     uint32_t idx = 0;<br />+    uint32_t striped_vlan_tci = rte_be_to_cpu_32(pd_hdr->striped_vlan_tci);<br />+    uint16_t pkt_type_outer = rte_be_to_cpu_16(pd_hdr->pkt_type_out);<br />+    uint16_t pkt_type_inner = rte_be_to_cpu_16(pd_hdr->pkt_type_in);<br />  <br />-    m->pkt_len = rte_be_to_cpu_16(pi_hdr->ul.pkt_len);<br />+    if (unlikely(pkt_flag & (ZXDH_MBUF_F_RX_1588_PTP | ZXDH_MBUF_F_RX_1588_TMST))) {<br />+        if (pkt_flag & ZXDH_MBUF_F_RX_1588_PTP)<br />+            m->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;<br />+        if (pkt_flag & ZXDH_MBUF_F_RX_1588_TMST)<br />+            m->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;<br />+    }<br />  <br />-    uint16_t pkt_type_outer = rte_be_to_cpu_16(pd_hdr->pkt_type_out);<br />+    if (pkt_flag & ZXDH_MBUF_F_RX_VLAN_STRIPPED) {<br />+        m->ol_flags |= (RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN);<br />+        m->vlan_tci = (unlikely(pkt_flag & ZXDH_MBUF_F_RX_QINQ))<br />+                ? 
(striped_vlan_tci >> 16) & ZXDH_VLAN_ID_MASK<br />+                : striped_vlan_tci & ZXDH_VLAN_ID_MASK;<br />+    }<br />+<br />+    if (unlikely(pkt_flag & ZXDH_MBUF_F_RX_QINQ_STRIPPED)) {<br />+        /*<br />+         * When PKT_RX_QINQ_STRIPPED is set and PKT_RX_VLAN_STRIPPED is unset:<br />+         * - Only the outer VLAN is removed from the packet data.<br />+         * - Both TCI values are saved: the inner TCI in mbuf->vlan_tci and<br />+         *   the outer TCI in mbuf->vlan_tci_outer.<br />+         *<br />+         * When PKT_RX_QINQ is set, PKT_RX_VLAN must also be set, and the inner<br />+         * TCI is saved in mbuf->vlan_tci.<br />+         */<br />+        m->ol_flags |= (RTE_MBUF_F_RX_QINQ_STRIPPED | RTE_MBUF_F_RX_QINQ);<br />+        m->ol_flags |= (RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN);<br />+        m->vlan_tci = striped_vlan_tci & ZXDH_VLAN_ID_MASK;<br />+        m->vlan_tci_outer = (striped_vlan_tci >> 16) & ZXDH_VLAN_ID_MASK;<br />+    }<br />+<br />+    /* rss hash/fd handle */<br />+    if (pkt_flag & ZXDH_MBUF_F_RX_RSS_HASH) {<br />+        m->hash.rss = rte_be_to_cpu_32(pd_hdr->rss_hash);<br />+        m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;<br />+    }<br />+    if (pkt_flag & ZXDH_MBUF_F_RX_FDIR) {<br />+        m->ol_flags |= RTE_MBUF_F_RX_FDIR;<br />+        if (pkt_flag & ZXDH_MBUF_F_RX_FDIR_ID) {<br />+            m->hash.fdir.hi = rte_be_to_cpu_32(pd_hdr->fd);<br />+            m->ol_flags |= RTE_MBUF_F_RX_FDIR_ID;<br />+        } else if ((pkt_flag & ZXDH_MBUF_F_RX_FDIR_FLX_MASK) == FELX_4BYTE) {<br />+            m->hash.fdir.hi = rte_be_to_cpu_32(pd_hdr->fd);<br />+            m->ol_flags |= RTE_MBUF_F_RX_FDIR_FLX;<br />+        } else if (((pkt_flag & ZXDH_MBUF_F_RX_FDIR_FLX_MASK) == FELX_8BYTE)) {<br />+            m->hash.fdir.hi = rte_be_to_cpu_32(pd_hdr->rss_hash);<br />+            m->hash.fdir.lo = rte_be_to_cpu_32(pd_hdr->fd);<br />+            m->ol_flags |= RTE_MBUF_F_RX_FDIR_FLX;<br />+        
}<br />+    }<br />+    /* checksum handle */<br />+    if (pkt_flag & ZXDH_MBUF_F_RX_OUTER_IP_CKSUM_BAD)<br />+        m->ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;<br />+    if (pkt_flag & ZXDH_MBUF_F_RX_OUTER_L4_CKSUM_GOOD)<br />+        m->ol_flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;<br />+<br />+    if (has_pi) {<br />+        struct zxdh_pi_hdr *pi_hdr = &hdr->pipd_hdr_ul.pi_hdr;<br />+        uint16_t pkt_type_masked = pi_hdr->pkt_type & ZXDH_PCODE_MASK;<br />+        uint16_t err_code = rte_be_to_cpu_16(pi_hdr->ul.err_code);<br />+<br />+        bool is_ip_pkt =<br />+                (pi_hdr->pkt_type == ZXDH_PCODE_IP_PKT_TYPE) ||<br />+                ((pi_hdr->pkt_type & ZXDH_PI_L3TYPE_MASK) == ZXDH_PI_L3TYPE_IP);<br />+<br />+        bool is_l4_pkt =<br />+                (pkt_type_masked == ZXDH_PCODE_UDP_PKT_TYPE) ||<br />+                (pkt_type_masked == ZXDH_PCODE_NO_REASSMBLE_TCP_PKT_TYPE) ||<br />+                (pkt_type_masked == ZXDH_PCODE_TCP_PKT_TYPE);<br />+<br />+        if (is_ip_pkt && (pi_hdr->pkt_flag_hi8 & ZXDH_RX_IP_CKSUM_VERIFY)) {<br />+            if (err_code & ZXDH_IPV4_CSUM_ERR)<br />+                m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;<br />+            else<br />+                m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;<br />+        }<br />+<br />+        if (is_l4_pkt && (pi_hdr->pkt_flag_hi8 & ZXDH_RX_TCPUDP_CKSUM_VERIFY)) {<br />+            if (err_code & (ZXDH_TCP_CSUM_ERR | ZXDH_UDP_CSUM_ERR))<br />+                m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;<br />+            else<br />+                m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;<br />+        }<br />+<br />+        if (ntohl(pi_hdr->ul.lro_flag) & ZXDH_PI_LRO_FALG)<br />+            m->ol_flags |= RTE_MBUF_F_RX_LRO;<br />+<br />+        m->pkt_len = rte_be_to_cpu_16(pi_hdr->ul.pkt_len);<br />+    } else {<br />+        m->pkt_len = rte_be_to_cpu_16(pd_hdr->pkt_len);<br />+    }<br />  <br />     idx = (pkt_type_outer >> 12) & 0xF;<br />   
  m->packet_type  = zxdh_outer_l2_type[idx];<br />@@ -605,8 +753,6 @@ static int32_t zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *h<br />     idx = pkt_type_outer         & 0xF;<br />     m->packet_type |= zxdh_tunnel_type[idx];<br />  <br />-    uint16_t pkt_type_inner = rte_be_to_cpu_16(pd_hdr->pkt_type_in);<br />-<br />     if (pkt_type_inner) {<br />         idx = (pkt_type_inner >> 12) & 0xF;<br />         m->packet_type |= zxdh_inner_l2_type[idx];<br />@@ -616,7 +762,6 @@ static int32_t zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *h<br />         m->packet_type |= zxdh_inner_l4_type[idx];<br />     }<br />  <br />-    return 0;<br /> }<br />  <br /> static void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m)<br />@@ -633,92 +778,67 @@ static void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m)<br />     }<br /> }<br />  <br />-uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />+uint16_t<br />+zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />                 uint16_t nb_pkts)<br /> {<br />     struct zxdh_virtnet_rx *rxvq = rx_queue;<br />     struct zxdh_virtqueue *vq = rxvq->vq;<br />     struct zxdh_hw *hw = vq->hw;<br />-    struct rte_eth_dev *dev = hw->eth_dev;<br />     struct rte_mbuf *rxm = NULL;<br />     struct rte_mbuf *prev = NULL;<br />-    uint32_t len[ZXDH_MBUF_BURST_SZ] = {0};<br />+    struct zxdh_net_hdr_ul *header = NULL;<br />+    uint32_t lens[ZXDH_MBUF_BURST_SZ] = {0};<br />     struct rte_mbuf *rcv_pkts[ZXDH_MBUF_BURST_SZ] = {NULL};<br />-    uint32_t nb_enqueued = 0;<br />+    uint16_t len = 0;<br />     uint32_t seg_num = 0;<br />     uint32_t seg_res = 0;<br />+    uint32_t error = 0;<br />     uint16_t hdr_size = 0;<br />-    int32_t error = 0;<br />     uint16_t nb_rx = 0;<br />+    uint16_t i;<br />+    uint16_t rcvd_pkt_len = 0;<br />     uint16_t num = nb_pkts;<br />  <br />     if (unlikely(num > 
ZXDH_MBUF_BURST_SZ))<br />         num = ZXDH_MBUF_BURST_SZ;<br />  <br />-    num = zxdh_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);<br />-    uint16_t i;<br />-    uint16_t rcvd_pkt_len = 0;<br />+    num = zxdh_dequeue_burst_rx_packed(vq, rcv_pkts, lens, num);<br />+    if (num == 0) {<br />+        rxvq->stats.idle++;<br />+        goto refill;<br />+    }<br />  <br />     for (i = 0; i < num; i++) {<br />         rxm = rcv_pkts[i];<br />-        if (unlikely(len[i] < ZXDH_UL_NET_HDR_SIZE)) {<br />-            nb_enqueued++;<br />-            PMD_RX_LOG(ERR, "RX, len:%u err", len[i]);<br />-            zxdh_discard_rxbuf(vq, rxm);<br />-            rxvq->stats.errors++;<br />-            continue;<br />-        }<br />-        struct zxdh_net_hdr_ul *header =<br />-            (struct zxdh_net_hdr_ul *)((char *)rxm->buf_addr +<br />-            RTE_PKTMBUF_HEADROOM);<br />+        rx_pkts[nb_rx] = rxm;<br />+        prev = rxm;<br />+        len = lens[i];<br />+        header = rte_pktmbuf_mtod(rxm, struct zxdh_net_hdr_ul *);<br />  <br />         seg_num  = header->type_hdr.num_buffers;<br />-        if (seg_num == 0) {<br />-            PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num);<br />-            seg_num = 1;<br />-        }<br />-        if (seg_num > ZXDH_RX_MAX_SEGS) {<br />-            PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num);<br />-            nb_enqueued++;<br />-            zxdh_discard_rxbuf(vq, rxm);<br />-            rxvq->stats.errors++;<br />-            continue;<br />-        }<br />-        /* bit[0:6]-pd_len unit:2B */<br />-        uint16_t pd_len = header->type_hdr.pd_len << 1;<br />-        if (pd_len > ZXDH_PD_HDR_SIZE_MAX || pd_len < ZXDH_PD_HDR_SIZE_MIN) {<br />-            PMD_RX_LOG(ERR, "pd_len:%d is invalid", pd_len);<br />-            nb_enqueued++;<br />-            zxdh_discard_rxbuf(vq, rxm);<br />-            rxvq->stats.errors++;<br />-            
continue;<br />-        }<br />+<br />         /* Private queue only handle type hdr */<br />-        hdr_size = pd_len;<br />-        rxm->data_off = RTE_PKTMBUF_HEADROOM + hdr_size;<br />+        hdr_size = ZXDH_TYPE_HDR_SIZE;<br />+        rxm->pkt_len = ((header->type_hdr.port & 0x7f) << 8) +<br />+                            header->type_hdr.pd_len;<br />+        rxm->data_off += hdr_size;<br />         rxm->nb_segs = seg_num;<br />         rxm->ol_flags = 0;<br />-        rxm->vlan_tci = 0;<br />-        rcvd_pkt_len = (uint32_t)(len[i] - hdr_size);<br />-        rxm->data_len = (uint16_t)(len[i] - hdr_size);<br />+        rcvd_pkt_len = len - hdr_size;<br />+        rxm->data_len = rcvd_pkt_len;<br />         rxm->port = rxvq->port_id;<br />-        rx_pkts[nb_rx] = rxm;<br />-        prev = rxm;<br />+<br />         /* Update rte_mbuf according to pi/pd header */<br />-        if (zxdh_rx_update_mbuf(rxm, header) < 0) {<br />-            zxdh_discard_rxbuf(vq, rxm);<br />-            rxvq->stats.errors++;<br />-            continue;<br />-        }<br />+        zxdh_rx_update_mbuf(rxm, header);<br />         seg_res = seg_num - 1;<br />         /* Merge remaining segments */<br />         while (seg_res != 0 && i < (num - 1)) {<br />             i++;<br />+            len = lens[i];<br />             rxm = rcv_pkts[i];<br />-            rxm->data_off = RTE_PKTMBUF_HEADROOM;<br />-            rxm->data_len = (uint16_t)(len[i]);<br />-<br />-            rcvd_pkt_len += (uint32_t)(len[i]);<br />+            rxm->data_len = len;<br />+            rcvd_pkt_len += len;<br />             prev->next = rxm;<br />             prev = rxm;<br />             rxm->next = NULL;<br />@@ -743,27 +863,26 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />         uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, ZXDH_MBUF_BURST_SZ);<br />         uint16_t extra_idx = 0;<br />  <br />-        rcv_cnt = zxdh_dequeue_burst_rx_packed(vq, rcv_pkts, 
len, rcv_cnt);<br />+        rcv_cnt = zxdh_dequeue_burst_rx_packed(vq, rcv_pkts, lens, rcv_cnt);<br />         if (unlikely(rcv_cnt == 0)) {<br />             PMD_RX_LOG(ERR, "No enough segments for packet");<br />             rte_pktmbuf_free(rx_pkts[nb_rx]);<br />             rxvq->stats.errors++;<br />+            rxvq->stats.no_segs_err++;<br />             break;<br />         }<br />         while (extra_idx < rcv_cnt) {<br />             rxm = rcv_pkts[extra_idx];<br />-            rxm->data_off = RTE_PKTMBUF_HEADROOM;<br />-            rxm->pkt_len = (uint32_t)(len[extra_idx]);<br />-            rxm->data_len = (uint16_t)(len[extra_idx]);<br />+            rcvd_pkt_len += (uint16_t)(lens[extra_idx]);<br />+            rxm->data_len = lens[extra_idx];<br />             prev->next = rxm;<br />             prev = rxm;<br />             rxm->next = NULL;<br />-            rcvd_pkt_len += len[extra_idx];<br />             extra_idx += 1;<br />         }<br />         seg_res -= rcv_cnt;<br />         if (!seg_res) {<br />-            if (rcvd_pkt_len != rx_pkts[nb_rx]->pkt_len) {<br />+            if (unlikely(rcvd_pkt_len != rx_pkts[nb_rx]->pkt_len)) {<br />                 PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d",<br />                     rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);<br />                 zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);<br />@@ -777,6 +896,7 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />     }<br />     rxvq->stats.packets += nb_rx;<br />  <br />+refill:<br />     /* Allocate new mbuf for the used descriptor */<br />     if (likely(!zxdh_queue_full(vq))) {<br />         /* free_cnt may include mrg descs */<br />@@ -789,16 +909,14 @@ uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,<br />                 for (i = 0; i < free_cnt; i++)<br />                     rte_pktmbuf_free(new_pkts[i]);<br />             }<br />-            nb_enqueued += free_cnt;<br />+<br />+      
      if (unlikely(zxdh_queue_kick_prepare_packed(vq)))<br />+                zxdh_queue_notify(vq);<br />         } else {<br />+            struct rte_eth_dev *dev = hw->eth_dev;<br />+<br />             dev->data->rx_mbuf_alloc_failed += free_cnt;<br />         }<br />     }<br />-    if (likely(nb_enqueued)) {<br />-        if (unlikely(zxdh_queue_kick_prepare_packed(vq))) {<br />-            zxdh_queue_notify(vq);<br />-            PMD_RX_LOG(DEBUG, "Notified");<br />-        }<br />-    }<br />     return nb_rx;<br /> }<br />diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h<br />index 79c2a882c3..16ea506517 100644<br />--- a/drivers/net/zxdh/zxdh_rxtx.h<br />+++ b/drivers/net/zxdh/zxdh_rxtx.h<br />@@ -10,13 +10,40 @@<br /> #include <rte_common.h> <br /> #include <rte_mbuf_core.h> <br />  <br />+#define ZXDH_PORT_NP     0<br />+#define ZXDH_PORT_DRS    1<br />+#define ZXDH_PORT_DTP    2<br />+<br />+/*PI PKT FLAG */<br />+#define ZXDH_PKT_FORM_CPU                           0x20<br />+#define ZXDH_NO_IP_FRAGMENT                         0x2000<br />+#define ZXDH_NO_IPID_UPDATE                         0x4000<br />+#define ZXDH_TX_IP_CKSUM_CAL                        0x8000<br />+#define ZXDH_RX_IP_CKSUM_VERIFY                     0x01<br />+#define ZXDH_RX_PSEDUO_CKSUM_VALID                  0x02<br />+#define ZXDH_TX_TCPUDP_CKSUM_CAL                    0x04<br />+#define ZXDH_RX_TCPUDP_CKSUM_VERIFY                 0x08<br />+#define ZXDH_NO_TCP_FRAGMENT                        0x10<br />+#define ZXDH_PI_FLAG                                0x20<br />+#define ZXDH_PI_TYPE                                0x40<br />+#define ZXDH_VERSION1                               0x80<br />+#define ZXDH_PI_TYPE_PI                             0x00<br />+#define ZXDH_PI_TYPE_VIRTIO95                       0x40<br />+#define ZXDH_PI_TYPE_VIRTIO11                       0xC0<br />+<br /> struct zxdh_virtnet_stats {<br />     uint64_t packets;<br />     uint64_t 
bytes;<br />     uint64_t errors;<br />+    uint64_t idle;<br />+    uint64_t full;<br />+    uint64_t norefill;<br />     uint64_t multicast;<br />     uint64_t broadcast;<br />     uint64_t truncated_err;<br />+    uint64_t offload_cfg_err;<br />+    uint64_t invalid_hdr_len_err;<br />+    uint64_t no_segs_err;<br />     uint64_t size_bins[8];<br /> };<br />  <br />--  <br />2.27.0<br />