Provide simple Tx implementations for the device.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/meson.build   |   1 +
 drivers/net/zxdh/zxdh_ethdev.c |  22 ++
 drivers/net/zxdh/zxdh_queue.h  |  26 ++-
 drivers/net/zxdh/zxdh_rxtx.c   | 396 +++++++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_rxtx.h   |   4 +
 5 files changed, 448 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/zxdh/zxdh_rxtx.c

diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index 5b3af87c5b..20b2cf484a 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -21,4 +21,5 @@ sources = files(
         'zxdh_queue.c',
         'zxdh_np.c',
         'zxdh_tables.c',
+        'zxdh_rxtx.c',
 )
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 6e603b967e..aef77e86a0 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -15,6 +15,7 @@
 #include "zxdh_queue.h"
 #include "zxdh_np.h"
 #include "zxdh_tables.h"
+#include "zxdh_rxtx.h"
 
 struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
 struct zxdh_shared_data *zxdh_shared_data;
@@ -956,6 +957,25 @@ zxdh_dev_close(struct rte_eth_dev *dev)
     return ret;
 }
 
+static int32_t
+zxdh_set_rxtx_funcs(struct rte_eth_dev *eth_dev)
+{
+    struct zxdh_hw *hw = eth_dev->data->dev_private;
+
+    if (!zxdh_pci_packed_queue(hw)) {
+        PMD_DRV_LOG(ERR, "port %u does not support packed queue", eth_dev->data->port_id);
+        return -1;
+    }
+    if (!zxdh_pci_with_feature(hw, ZXDH_NET_F_MRG_RXBUF)) {
+        PMD_DRV_LOG(ERR, "port %u does not support rx mergeable", eth_dev->data->port_id);
+        return -1;
+    }
+    eth_dev->tx_pkt_prepare = zxdh_xmit_pkts_prepare;
+    eth_dev->tx_pkt_burst = &zxdh_xmit_pkts_packed;
+
+    return 0;
+}
+
 static int
 zxdh_dev_start(struct rte_eth_dev *dev)
 {
@@ -971,6 +991,8 @@ zxdh_dev_start(struct rte_eth_dev *dev)
         if (ret < 0)
             return ret;
     }
+
+    zxdh_set_rxtx_funcs(dev);
     ret = zxdh_intr_enable(dev);
     if (ret) {
         PMD_DRV_LOG(ERR, "interrupt enable failed");
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 698062ad62..daabb3530c 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -21,8 +21,15 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 #define ZXDH_TQ_QUEUE_IDX                 1
 #define ZXDH_MAX_TX_INDIRECT              8
 
+/* This marks a buffer as continuing via the next field. */
+#define ZXDH_VRING_DESC_F_NEXT                 1
+
 /* This marks a buffer as write-only (otherwise read-only). */
-#define ZXDH_VRING_DESC_F_WRITE           2
+#define ZXDH_VRING_DESC_F_WRITE                2
+
+/* This means the buffer contains a list of buffer descriptors. */
+#define ZXDH_VRING_DESC_F_INDIRECT             4
+
 /* This flag means the descriptor was made available by the driver */
 #define ZXDH_VRING_PACKED_DESC_F_AVAIL   (1 << (7))
 #define ZXDH_VRING_PACKED_DESC_F_USED    (1 << (15))
@@ -35,11 +42,17 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 #define ZXDH_RING_EVENT_FLAGS_DISABLE     0x1
 #define ZXDH_RING_EVENT_FLAGS_DESC        0x2
 
+#define ZXDH_RING_F_INDIRECT_DESC         28
+
 #define ZXDH_VQ_RING_DESC_CHAIN_END       32768
 #define ZXDH_QUEUE_DEPTH                  1024
 
 #define ZXDH_RQ_QUEUE_IDX                 0
 #define ZXDH_TQ_QUEUE_IDX                 1
+#define ZXDH_TYPE_HDR_SIZE        sizeof(struct zxdh_type_hdr)
+#define ZXDH_PI_HDR_SIZE          sizeof(struct zxdh_pi_hdr)
+#define ZXDH_DL_NET_HDR_SIZE      sizeof(struct zxdh_net_hdr_dl)
+#define ZXDH_UL_NET_HDR_SIZE      sizeof(struct zxdh_net_hdr_ul)
 
 /*
  * ring descriptors: 16 bytes.
@@ -355,6 +368,17 @@ static inline void zxdh_queue_notify(struct zxdh_virtqueue *vq)
     ZXDH_VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
 }
 
+static inline int32_t
+zxdh_queue_kick_prepare_packed(struct zxdh_virtqueue *vq)
+{
+    uint16_t flags = 0;
+
+    zxdh_mb(vq->hw->weak_barriers);
+    flags = vq->vq_packed.ring.device->desc_event_flags;
+
+    return (flags != ZXDH_RING_EVENT_FLAGS_DISABLE);
+}
+
 struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq);
 int32_t zxdh_free_queues(struct rte_eth_dev *dev);
 int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx);
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
new file mode 100644
index 0000000000..10034a0e98
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -0,0 +1,396 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdint.h>
+#include <stdalign.h>
+
+#include <rte_net.h>
+
+#include "zxdh_logs.h"
+#include "zxdh_pci.h"
+#include "zxdh_queue.h"
+
+#define ZXDH_PKT_FORM_CPU                     0x20    /* 1-cpu 0-np */
+#define ZXDH_NO_IP_FRAGMENT                   0x2000  /* ip fragment flag */
+#define ZXDH_NO_IPID_UPDATE                   0x4000  /* ipid update flag */
+
+#define ZXDH_PI_L3TYPE_IP                     0x00
+#define ZXDH_PI_L3TYPE_IPV6                   0x40
+#define ZXDH_PI_L3TYPE_NOIP                   0x80
+#define ZXDH_PI_L3TYPE_RSV                    0xC0
+#define ZXDH_PI_L3TYPE_MASK                   0xC0
+
+#define ZXDH_PCODE_MASK                       0x1F
+#define ZXDH_PCODE_IP_PKT_TYPE                0x01
+#define ZXDH_PCODE_TCP_PKT_TYPE               0x02
+#define ZXDH_PCODE_UDP_PKT_TYPE               0x03
+#define ZXDH_PCODE_NO_IP_PKT_TYPE             0x09
+#define ZXDH_PCODE_NO_REASSMBLE_TCP_PKT_TYPE  0x0C
+
+#define ZXDH_TX_MAX_SEGS                      31
+#define ZXDH_RX_MAX_SEGS                      31
+
+static void
+zxdh_xmit_cleanup_inorder_packed(struct zxdh_virtqueue *vq, int32_t num)
+{
+    uint16_t used_idx = 0;
+    uint16_t id       = 0;
+    uint16_t curr_id  = 0;
+    uint16_t free_cnt = 0;
+    uint16_t size     = vq->vq_nentries;
+    struct zxdh_vring_packed_desc *desc = vq->vq_packed.ring.desc;
+    struct zxdh_vq_desc_extra     *dxp  = NULL;
+
+    used_idx = vq->vq_used_cons_idx;
+    /* desc_is_used has a load-acquire or rte_io_rmb inside
+     * and wait for used desc in virtqueue.
+     */
+    while (num > 0 && zxdh_desc_used(&desc[used_idx], vq)) {
+        id = desc[used_idx].id;
+        do {
+            curr_id = used_idx;
+            dxp = &vq->vq_descx[used_idx];
+            used_idx += dxp->ndescs;
+            free_cnt += dxp->ndescs;
+            num -= dxp->ndescs;
+            if (used_idx >= size) {
+                used_idx -= size;
+                vq->vq_packed.used_wrap_counter ^= 1;
+            }
+            if (dxp->cookie != NULL) {
+                rte_pktmbuf_free(dxp->cookie);
+                dxp->cookie = NULL;
+            }
+        } while (curr_id != id);
+    }
+    vq->vq_used_cons_idx = used_idx;
+    vq->vq_free_cnt += free_cnt;
+}
+
+static void
+zxdh_ring_free_id_packed(struct zxdh_virtqueue *vq, uint16_t id)
+{
+    struct zxdh_vq_desc_extra *dxp = NULL;
+
+    dxp = &vq->vq_descx[id];
+    vq->vq_free_cnt += dxp->ndescs;
+
+    if (vq->vq_desc_tail_idx == ZXDH_VQ_RING_DESC_CHAIN_END)
+        vq->vq_desc_head_idx = id;
+    else
+        vq->vq_descx[vq->vq_desc_tail_idx].next = id;
+
+    vq->vq_desc_tail_idx = id;
+    dxp->next = ZXDH_VQ_RING_DESC_CHAIN_END;
+}
+
+static void
+zxdh_xmit_cleanup_normal_packed(struct zxdh_virtqueue *vq, int32_t num)
+{
+    uint16_t used_idx = 0;
+    uint16_t id = 0;
+    uint16_t size = vq->vq_nentries;
+    struct zxdh_vring_packed_desc *desc = vq->vq_packed.ring.desc;
+    struct zxdh_vq_desc_extra *dxp = NULL;
+
+    used_idx = vq->vq_used_cons_idx;
+    /* desc_is_used has a load-acquire or rte_io_rmb inside
+     * and wait for used desc in virtqueue.
+     */
+    while (num-- && zxdh_desc_used(&desc[used_idx], vq)) {
+        id = desc[used_idx].id;
+        dxp = &vq->vq_descx[id];
+        vq->vq_used_cons_idx += dxp->ndescs;
+        if (vq->vq_used_cons_idx >= size) {
+            vq->vq_used_cons_idx -= size;
+            vq->vq_packed.used_wrap_counter ^= 1;
+        }
+        zxdh_ring_free_id_packed(vq, id);
+        if (dxp->cookie != NULL) {
+            rte_pktmbuf_free(dxp->cookie);
+            dxp->cookie = NULL;
+        }
+        used_idx = vq->vq_used_cons_idx;
+    }
+}
+
+static void
+zxdh_xmit_cleanup_packed(struct zxdh_virtqueue *vq, int32_t num, int32_t in_order)
+{
+    if (in_order)
+        zxdh_xmit_cleanup_inorder_packed(vq, num);
+    else
+        zxdh_xmit_cleanup_normal_packed(vq, num);
+}
+
+static uint8_t
+zxdh_xmit_get_ptype(struct rte_mbuf *m)
+{
+    uint8_t pcode = ZXDH_PCODE_NO_IP_PKT_TYPE;
+    uint8_t l3_ptype = ZXDH_PI_L3TYPE_NOIP;
+
+    if ((m->packet_type & RTE_PTYPE_INNER_L3_MASK) == RTE_PTYPE_INNER_L3_IPV4 ||
+            ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) &&
+            (m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4)) {
+        l3_ptype = ZXDH_PI_L3TYPE_IP;
+        pcode = ZXDH_PCODE_IP_PKT_TYPE;
+    } else if ((m->packet_type & RTE_PTYPE_INNER_L3_MASK) == RTE_PTYPE_INNER_L3_IPV6 ||
+            ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) &&
+            (m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6)) {
+        l3_ptype = ZXDH_PI_L3TYPE_IPV6;
+        pcode = ZXDH_PCODE_IP_PKT_TYPE;
+    } else {
+        goto end;
+    }
+
+    if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_TCP ||
+            ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) &&
+            (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP))
+        pcode = ZXDH_PCODE_TCP_PKT_TYPE;
+    else if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP ||
+                ((!(m->packet_type & RTE_PTYPE_TUNNEL_MASK)) &&
+                (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP))
+        pcode = ZXDH_PCODE_UDP_PKT_TYPE;
+
+end:
+    return  l3_ptype | ZXDH_PKT_FORM_CPU | pcode;
+}
+
+static void zxdh_xmit_fill_net_hdr(struct rte_mbuf *cookie,
+                struct zxdh_net_hdr_dl *hdr)
+{
+    uint16_t pkt_flag_lw16 = ZXDH_NO_IPID_UPDATE;
+    uint16_t l3_offset;
+    uint32_t ol_flag = 0;
+
+    hdr->pi_hdr.pkt_flag_lw16 = rte_be_to_cpu_16(pkt_flag_lw16);
+
+    hdr->pi_hdr.pkt_type = zxdh_xmit_get_ptype(cookie);
+    l3_offset = ZXDH_DL_NET_HDR_SIZE + cookie->outer_l2_len +
+                cookie->outer_l3_len + cookie->l2_len;
+    hdr->pi_hdr.l3_offset = rte_be_to_cpu_16(l3_offset);
+    hdr->pi_hdr.l4_offset = rte_be_to_cpu_16(l3_offset + cookie->l3_len);
+
+    hdr->pd_hdr.ol_flag = rte_be_to_cpu_32(ol_flag);
+}
+
+static inline void zxdh_enqueue_xmit_packed_fast(struct zxdh_virtnet_tx *txvq,
+                        struct rte_mbuf *cookie, int32_t in_order)
+{
+    struct zxdh_virtqueue *vq = txvq->vq;
+    uint16_t id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
+    struct zxdh_vq_desc_extra *dxp = &vq->vq_descx[id];
+    uint16_t flags = vq->vq_packed.cached_flags;
+    struct zxdh_net_hdr_dl *hdr = NULL;
+
+    dxp->ndescs = 1;
+    dxp->cookie = cookie;
+    hdr = rte_pktmbuf_mtod_offset(cookie, struct zxdh_net_hdr_dl *, -ZXDH_DL_NET_HDR_SIZE);
+    zxdh_xmit_fill_net_hdr(cookie, hdr);
+
+    uint16_t idx = vq->vq_avail_idx;
+    struct zxdh_vring_packed_desc *dp = &vq->vq_packed.ring.desc[idx];
+
+    dp->addr = rte_pktmbuf_iova(cookie) - ZXDH_DL_NET_HDR_SIZE;
+    dp->len  = cookie->data_len + ZXDH_DL_NET_HDR_SIZE;
+    dp->id   = id;
+    if (++vq->vq_avail_idx >= vq->vq_nentries) {
+        vq->vq_avail_idx -= vq->vq_nentries;
+        vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+    }
+    vq->vq_free_cnt--;
+    if (!in_order) {
+        vq->vq_desc_head_idx = dxp->next;
+        if (vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END)
+            vq->vq_desc_tail_idx = ZXDH_VQ_RING_DESC_CHAIN_END;
+    }
+    zxdh_queue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
+}
+
+static inline void zxdh_enqueue_xmit_packed(struct zxdh_virtnet_tx *txvq,
+                        struct rte_mbuf *cookie,
+                        uint16_t needed,
+                        int32_t use_indirect,
+                        int32_t in_order)
+{
+    struct zxdh_tx_region *txr = txvq->zxdh_net_hdr_mz->addr;
+    struct zxdh_virtqueue *vq = txvq->vq;
+    struct zxdh_vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+    void *hdr = NULL;
+    uint16_t head_idx = vq->vq_avail_idx;
+    uint16_t idx = head_idx;
+    uint16_t prev = head_idx;
+    uint16_t head_flags = cookie->next ? ZXDH_VRING_DESC_F_NEXT : 0;
+    uint16_t seg_num = cookie->nb_segs;
+    uint16_t id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
+    struct zxdh_vring_packed_desc *head_dp = &vq->vq_packed.ring.desc[idx];
+    struct zxdh_vq_desc_extra *dxp = &vq->vq_descx[id];
+
+    dxp->ndescs = needed;
+    dxp->cookie = cookie;
+    head_flags |= vq->vq_packed.cached_flags;
+    /* if offload disabled, it is not zeroed below, do it now */
+
+    if (use_indirect) {
+        /**
+         * setup tx ring slot to point to indirect
+         * descriptor list stored in reserved region.
+         * the first slot in indirect ring is already
+         * preset to point to the header in reserved region
+         **/
+        start_dp[idx].addr =
+            txvq->zxdh_net_hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
+        start_dp[idx].len  = (seg_num + 1) * sizeof(struct zxdh_vring_packed_desc);
+        /* Packed descriptor id needs to be restored when in order. */
+        if (in_order)
+            start_dp[idx].id = idx;
+
+        /* reset flags for indirect desc */
+        head_flags = ZXDH_VRING_DESC_F_INDIRECT;
+        head_flags |= vq->vq_packed.cached_flags;
+        hdr = (void *)&txr[idx].tx_hdr;
+        /* loop below will fill in rest of the indirect elements */
+        start_dp = txr[idx].tx_packed_indir;
+        start_dp->len = ZXDH_DL_NET_HDR_SIZE; /* update actual net or type hdr size */
+        idx = 1;
+    } else {
+        /* setup first tx ring slot to point to header stored in reserved region. */
+        start_dp[idx].addr = txvq->zxdh_net_hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+        start_dp[idx].len  = ZXDH_DL_NET_HDR_SIZE;
+        head_flags |= ZXDH_VRING_DESC_F_NEXT;
+        hdr = (void *)&txr[idx].tx_hdr;
+        idx++;
+        if (idx >= vq->vq_nentries) {
+            idx -= vq->vq_nentries;
+            vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+        }
+    }
+    zxdh_xmit_fill_net_hdr(cookie, (struct zxdh_net_hdr_dl *)hdr);
+
+    do {
+        start_dp[idx].addr = rte_pktmbuf_iova(cookie);
+        start_dp[idx].len  = cookie->data_len;
+        if (likely(idx != head_idx)) {
+            uint16_t flags = cookie->next ? ZXDH_VRING_DESC_F_NEXT : 0;
+            flags |= vq->vq_packed.cached_flags;
+            start_dp[idx].flags = flags;
+        }
+        prev = idx;
+        idx++;
+        if (idx >= vq->vq_nentries) {
+            idx -= vq->vq_nentries;
+            vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+        }
+    } while ((cookie = cookie->next) != NULL);
+    start_dp[prev].id = id;
+    if (use_indirect) {
+        idx = head_idx;
+        if (++idx >= vq->vq_nentries) {
+            idx -= vq->vq_nentries;
+            vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+        }
+    }
+    vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+    vq->vq_avail_idx = idx;
+    if (!in_order) {
+        vq->vq_desc_head_idx = dxp->next;
+        if (vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END)
+            vq->vq_desc_tail_idx = ZXDH_VQ_RING_DESC_CHAIN_END;
+    }
+    zxdh_queue_store_flags_packed(head_dp, head_flags, vq->hw->weak_barriers);
+}
+
+uint16_t
+zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+    struct zxdh_virtnet_tx *txvq = tx_queue;
+    struct zxdh_virtqueue  *vq   = txvq->vq;
+    struct zxdh_hw    *hw   = vq->hw;
+    uint16_t nb_tx = 0;
+
+    bool in_order = zxdh_pci_with_feature(hw, ZXDH_F_IN_ORDER);
+
+    if (nb_pkts > vq->vq_free_cnt)
+        zxdh_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt, in_order);
+    for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+        struct rte_mbuf *txm = tx_pkts[nb_tx];
+        int32_t can_push     = 0;
+        int32_t use_indirect = 0;
+        int32_t slots        = 0;
+        int32_t need         = 0;
+
+        /* optimize ring usage */
+        if ((zxdh_pci_with_feature(hw, ZXDH_F_ANY_LAYOUT) ||
+            zxdh_pci_with_feature(hw, ZXDH_F_VERSION_1)) &&
+            rte_mbuf_refcnt_read(txm) == 1 &&
+            RTE_MBUF_DIRECT(txm) &&
+            txm->nb_segs == 1 &&
+            rte_pktmbuf_headroom(txm) >= ZXDH_DL_NET_HDR_SIZE &&
+            rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+            alignof(struct zxdh_net_hdr_dl))) {
+            can_push = 1;
+        } else if (zxdh_pci_with_feature(hw, ZXDH_RING_F_INDIRECT_DESC) &&
+                    txm->nb_segs < ZXDH_MAX_TX_INDIRECT) {
+            use_indirect = 1;
+        }
+        /**
+         * How many main ring entries are needed for this Tx?
+         * indirect   => 1
+         * any_layout => number of segments
+         * default    => number of segments + 1
+         **/
+        slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
+        need = slots - vq->vq_free_cnt;
+        /* A positive value indicates that vring descriptors need to be freed */
+        if (unlikely(need > 0)) {
+            zxdh_xmit_cleanup_packed(vq, need, in_order);
+            need = slots - vq->vq_free_cnt;
+            if (unlikely(need > 0)) {
+                PMD_TX_LOG(ERR, "port[ep:%d, pf:%d, vf:%d, vfid:%d, pcieid:%d], queue:%d[pch:%d]. No free desc to xmit",
+                    hw->vport.epid, hw->vport.pfid, hw->vport.vfid,
+                    hw->vfid, hw->pcie_id, txvq->queue_id,
+                    hw->channel_context[txvq->queue_id].ph_chno);
+                break;
+            }
+        }
+        /* Enqueue Packet buffers */
+        if (can_push)
+            zxdh_enqueue_xmit_packed_fast(txvq, txm, in_order);
+        else
+            zxdh_enqueue_xmit_packed(txvq, txm, slots, use_indirect, in_order);
+    }
+    if (likely(nb_tx)) {
+        if (unlikely(zxdh_queue_kick_prepare_packed(vq))) {
+            zxdh_queue_notify(vq);
+            PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+        }
+    }
+    return nb_tx;
+}
+
+uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+                uint16_t nb_pkts)
+{
+    uint16_t nb_tx;
+
+    for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+        struct rte_mbuf *m = tx_pkts[nb_tx];
+        int32_t error;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+        error = rte_validate_tx_offload(m);
+        if (unlikely(error)) {
+            rte_errno = -error;
+            break;
+        }
+#endif
+
+        error = rte_net_intel_cksum_prepare(m);
+        if (unlikely(error)) {
+            rte_errno = -error;
+            break;
+        }
+    }
+    return nb_tx;
+}
diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h
index 036eeff90a..d22af43ebe 100644
--- a/drivers/net/zxdh/zxdh_rxtx.h
+++ b/drivers/net/zxdh/zxdh_rxtx.h
@@ -45,4 +45,8 @@ struct __rte_cache_aligned zxdh_virtnet_tx {
     const struct rte_memzone *zxdh_net_hdr_mz;  /* memzone to populate hdr. */
 };
 
+uint16_t zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+                uint16_t nb_pkts);
+
 #endif  /* ZXDH_RXTX_H */
-- 
2.27.0
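
For reviewers, a minimal usage sketch (not part of the patch): once zxdh_set_rxtx_funcs() has registered the callbacks, an application reaches zxdh_xmit_pkts_prepare() through rte_eth_tx_prepare() and zxdh_xmit_pkts_packed() through rte_eth_tx_burst(). The helper name, port/queue ids, and the drop-on-full policy below are illustrative assumptions, not driver code.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Hypothetical helper: push a burst of mbufs through the zxdh Tx path. */
    static void
    tx_burst_example(uint16_t port_id, uint16_t queue_id,
                     struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
        /* Let the PMD fix up offload metadata before enqueue. */
        uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
        /* Enqueue onto the packed virtqueue; fewer packets may be accepted. */
        uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);

        /* Drop whatever the ring had no room for (illustrative policy). */
        while (nb_sent < nb_prep)
            rte_pktmbuf_free(pkts[nb_sent++]);
    }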