Provide simple Rx implementations for the device.

Used descriptors are dequeued from the packed virtqueue, the uplink
net header is parsed to fill in the mbuf packet type, scattered
packets are merged into multi-segment mbufs, and the ring is refilled
with newly allocated mbufs.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
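Illustration for reviewers (not part of the patch): zxdh_rx_update_mbuf()
treats the 16-bit outer packet-type word from the PD header as four 4-bit
indexes (L2, L3, L4, tunnel, high nibble to low) into the lookup tables added
below; unlisted index values fall through to 0 (RTE_PTYPE_UNKNOWN). A minimal
standalone sketch of that decode, using made-up name tables and a hypothetical
sample value:

    /* Sketch only: mirrors the (value >> shift) & 0xF indexing used in
     * zxdh_rx_update_mbuf(); the tables and the sample word are invented.
     */
    #include <stdint.h>
    #include <stdio.h>

    static const char *l2[16]  = { "UNKNOWN", "ETHER" };
    static const char *l3[16]  = { "UNKNOWN", "IPV4", "IPV4_EXT", "IPV6" };
    static const char *l4[16]  = { "UNKNOWN", "TCP", "UDP", "FRAG" };
    static const char *tun[16] = { "NONE", "IP", "GRE", "VXLAN" };

    int main(void)
    {
        uint16_t pkt_type_out = 0x1120; /* hypothetical: ETHER, IPV4, UDP, no tunnel */

        printf("L2=%s L3=%s L4=%s TUNNEL=%s\n",
               l2[(pkt_type_out >> 12) & 0xF],
               l3[(pkt_type_out >> 8) & 0xF],
               l4[(pkt_type_out >> 4) & 0xF],
               tun[pkt_type_out & 0xF]);
        return 0;
    }

Table lookups keep packet-type classification branch-free on the Rx hot path.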
 doc/guides/nics/features/zxdh.ini |   1 +
 doc/guides/nics/zxdh.rst          |   1 +
 drivers/net/zxdh/zxdh_ethdev.c    |   1 +
 drivers/net/zxdh/zxdh_rxtx.c      | 313 ++++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_rxtx.h      |   2 +
 5 files changed, 318 insertions(+)

diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
index 7b72be5f25..bb44e93fad 100644
--- a/doc/guides/nics/features/zxdh.ini
+++ b/doc/guides/nics/features/zxdh.ini
@@ -9,3 +9,4 @@ x86-64               = Y
 ARMv8                = Y
 SR-IOV               = Y
 Multiprocess aware   = Y
+Scattered Rx         = Y
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
index eb970a888f..f42db9c1f1 100644
--- a/doc/guides/nics/zxdh.rst
+++ b/doc/guides/nics/zxdh.rst
@@ -20,6 +20,7 @@ Features of the ZXDH PMD are:
 - Multi arch support: x86_64, ARMv8.
 - Multiple queues for TX and RX
 - SR-IOV VF
+- Scattered and gather for TX and RX
 
 
 Driver compilation and testing
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index aef77e86a0..bc4d2a937b 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -972,6 +972,7 @@ zxdh_set_rxtx_funcs(struct rte_eth_dev *eth_dev)
     }
     eth_dev->tx_pkt_prepare = zxdh_xmit_pkts_prepare;
     eth_dev->tx_pkt_burst = &zxdh_xmit_pkts_packed;
+    eth_dev->rx_pkt_burst = &zxdh_recv_pkts_packed;
 
     return 0;
 }
diff --git a/drivers/net/zxdh/zxdh_rxtx.c b/drivers/net/zxdh/zxdh_rxtx.c
index 10034a0e98..06290d48bb 100644
--- a/drivers/net/zxdh/zxdh_rxtx.c
+++ b/drivers/net/zxdh/zxdh_rxtx.c
@@ -31,6 +31,93 @@
 #define ZXDH_TX_MAX_SEGS                      31
 #define ZXDH_RX_MAX_SEGS                      31
 
+uint32_t zxdh_outer_l2_type[16] = {
+    0,
+    RTE_PTYPE_L2_ETHER,
+    RTE_PTYPE_L2_ETHER_TIMESYNC,
+    RTE_PTYPE_L2_ETHER_ARP,
+    RTE_PTYPE_L2_ETHER_LLDP,
+    RTE_PTYPE_L2_ETHER_NSH,
+    RTE_PTYPE_L2_ETHER_VLAN,
+    RTE_PTYPE_L2_ETHER_QINQ,
+    RTE_PTYPE_L2_ETHER_PPPOE,
+    RTE_PTYPE_L2_ETHER_FCOE,
+    RTE_PTYPE_L2_ETHER_MPLS,
+};
+
+uint32_t zxdh_outer_l3_type[16] = {
+    0,
+    RTE_PTYPE_L3_IPV4,
+    RTE_PTYPE_L3_IPV4_EXT,
+    RTE_PTYPE_L3_IPV6,
+    RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+    RTE_PTYPE_L3_IPV6_EXT,
+    RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+};
+
+uint32_t zxdh_outer_l4_type[16] = {
+    0,
+    RTE_PTYPE_L4_TCP,
+    RTE_PTYPE_L4_UDP,
+    RTE_PTYPE_L4_FRAG,
+    RTE_PTYPE_L4_SCTP,
+    RTE_PTYPE_L4_ICMP,
+    RTE_PTYPE_L4_NONFRAG,
+    RTE_PTYPE_L4_IGMP,
+};
+
+uint32_t zxdh_tunnel_type[16] = {
+    0,
+    RTE_PTYPE_TUNNEL_IP,
+    RTE_PTYPE_TUNNEL_GRE,
+    RTE_PTYPE_TUNNEL_VXLAN,
+    RTE_PTYPE_TUNNEL_NVGRE,
+    RTE_PTYPE_TUNNEL_GENEVE,
+    RTE_PTYPE_TUNNEL_GRENAT,
+    RTE_PTYPE_TUNNEL_GTPC,
+    RTE_PTYPE_TUNNEL_GTPU,
+    RTE_PTYPE_TUNNEL_ESP,
+    RTE_PTYPE_TUNNEL_L2TP,
+    RTE_PTYPE_TUNNEL_VXLAN_GPE,
+    RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+    RTE_PTYPE_TUNNEL_MPLS_IN_UDP,
+};
+
+uint32_t zxdh_inner_l2_type[16] = {
+    0,
+    RTE_PTYPE_INNER_L2_ETHER,
+    0,
+    0,
+    0,
+    0,
+    RTE_PTYPE_INNER_L2_ETHER_VLAN,
+    RTE_PTYPE_INNER_L2_ETHER_QINQ,
+    0,
+    0,
+    0,
+};
+
+uint32_t zxdh_inner_l3_type[16] = {
+    0,
+    RTE_PTYPE_INNER_L3_IPV4,
+    RTE_PTYPE_INNER_L3_IPV4_EXT,
+    RTE_PTYPE_INNER_L3_IPV6,
+    RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+    RTE_PTYPE_INNER_L3_IPV6_EXT,
+    RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+};
+
+uint32_t zxdh_inner_l4_type[16] = {
+    0,
+    RTE_PTYPE_INNER_L4_TCP,
+    RTE_PTYPE_INNER_L4_UDP,
+    RTE_PTYPE_INNER_L4_FRAG,
+    RTE_PTYPE_INNER_L4_SCTP,
+    RTE_PTYPE_INNER_L4_ICMP,
+    0,
+    0,
+};
+
 static void
 zxdh_xmit_cleanup_inorder_packed(struct zxdh_virtqueue *vq, int32_t num)
 {
@@ -394,3 +481,229 @@ uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **t
     }
     return nb_tx;
 }
+
+static uint16_t zxdh_dequeue_burst_rx_packed(struct zxdh_virtqueue *vq,
+                    struct rte_mbuf **rx_pkts,
+                    uint32_t *len,
+                    uint16_t num)
+{
+    struct zxdh_vring_packed_desc *desc = vq->vq_packed.ring.desc;
+    struct rte_mbuf *cookie = NULL;
+    uint16_t i, used_idx;
+    uint16_t id;
+
+    for (i = 0; i < num; i++) {
+        used_idx = vq->vq_used_cons_idx;
+        /**
+         * desc_is_used has a load-acquire or rte_io_rmb inside
+         * and wait for used desc in virtqueue.
+         */
+        if (!zxdh_desc_used(&desc[used_idx], vq))
+            return i;
+        len[i] = desc[used_idx].len;
+        id = desc[used_idx].id;
+        cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
+        vq->vq_descx[id].cookie = NULL;
+        if (unlikely(cookie == NULL)) {
+            PMD_RX_LOG(ERR,
+                "vring descriptor with no mbuf cookie at %u", vq->vq_used_cons_idx);
+            break;
+        }
+        rx_pkts[i] = cookie;
+        vq->vq_free_cnt++;
+        vq->vq_used_cons_idx++;
+        if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+            vq->vq_used_cons_idx -= vq->vq_nentries;
+            vq->vq_packed.used_wrap_counter ^= 1;
+        }
+    }
+    return i;
+}
+
+static int32_t zxdh_rx_update_mbuf(struct rte_mbuf *m, struct zxdh_net_hdr_ul *hdr)
+{
+    struct zxdh_pd_hdr_ul *pd_hdr = &hdr->pd_hdr;
+    struct zxdh_pi_hdr *pi_hdr = &hdr->pi_hdr;
+    uint32_t idx = 0;
+
+    m->pkt_len = rte_be_to_cpu_16(pi_hdr->ul.pkt_len);
+
+    uint16_t pkt_type_outer = rte_be_to_cpu_16(pd_hdr->pkt_type_out);
+
+    idx = (pkt_type_outer >> 12) & 0xF;
+    m->packet_type  = zxdh_outer_l2_type[idx];
+    idx = (pkt_type_outer >> 8)  & 0xF;
+    m->packet_type |= zxdh_outer_l3_type[idx];
+    idx = (pkt_type_outer >> 4)  & 0xF;
+    m->packet_type |= zxdh_outer_l4_type[idx];
+    idx = pkt_type_outer         & 0xF;
+    m->packet_type |= zxdh_tunnel_type[idx];
+
+    uint16_t pkt_type_inner = rte_be_to_cpu_16(pd_hdr->pkt_type_in);
+
+    if (pkt_type_inner) {
+        idx = (pkt_type_inner >> 12) & 0xF;
+        m->packet_type |= zxdh_inner_l2_type[idx];
+        idx = (pkt_type_inner >> 8)  & 0xF;
+        m->packet_type |= zxdh_inner_l3_type[idx];
+        idx = (pkt_type_inner >> 4)  & 0xF;
+        m->packet_type |= zxdh_inner_l4_type[idx];
+    }
+
+    return 0;
+}
+
+static inline void zxdh_discard_rxbuf(struct zxdh_virtqueue *vq, struct rte_mbuf *m)
+{
+    int32_t error = 0;
+    /*
+     * Requeue the discarded mbuf. This should always be
+     * successful since it was just dequeued.
+     */
+    error = zxdh_enqueue_recv_refill_packed(vq, &m, 1);
+    if (unlikely(error)) {
+        PMD_RX_LOG(ERR, "cannot enqueue discarded mbuf");
+        rte_pktmbuf_free(m);
+    }
+}
+
+uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+                uint16_t nb_pkts)
+{
+    struct zxdh_virtnet_rx *rxvq = rx_queue;
+    struct zxdh_virtqueue *vq = rxvq->vq;
+    struct zxdh_hw *hw = vq->hw;
+    struct rte_eth_dev *dev = hw->eth_dev;
+    struct rte_mbuf *rxm = NULL;
+    struct rte_mbuf *prev = NULL;
+    uint32_t len[ZXDH_MBUF_BURST_SZ] = {0};
+    struct rte_mbuf *rcv_pkts[ZXDH_MBUF_BURST_SZ] = {NULL};
+    uint32_t nb_enqueued = 0;
+    uint32_t seg_num = 0;
+    uint32_t seg_res = 0;
+    uint16_t hdr_size = 0;
+    int32_t error = 0;
+    uint16_t nb_rx = 0;
+    uint16_t num = nb_pkts;
+
+    if (unlikely(num > ZXDH_MBUF_BURST_SZ))
+        num = ZXDH_MBUF_BURST_SZ;
+
+    num = zxdh_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
+    uint16_t i;
+    uint16_t rcvd_pkt_len = 0;
+
+    for (i = 0; i < num; i++) {
+        rxm = rcv_pkts[i];
+
+        struct zxdh_net_hdr_ul *header =
+            (struct zxdh_net_hdr_ul *)((char *)rxm->buf_addr +
+            RTE_PKTMBUF_HEADROOM);
+
+        seg_num  = header->type_hdr.num_buffers;
+        if (seg_num == 0) {
+            PMD_RX_LOG(ERR, "dequeue %d pkt, No.%d pkt seg_num is %d", num, i, seg_num);
+            seg_num = 1;
+        }
+        /* bit[0:6]-pd_len unit:2B */
+        uint16_t pd_len = header->type_hdr.pd_len << 1;
+        /* Private queue only handle type hdr */
+        hdr_size = pd_len;
+        rxm->data_off = RTE_PKTMBUF_HEADROOM + hdr_size;
+        rxm->nb_segs = seg_num;
+        rxm->ol_flags = 0;
+        rxm->vlan_tci = 0;
+        rcvd_pkt_len = (uint32_t)(len[i] - hdr_size);
+        rxm->data_len = (uint16_t)(len[i] - hdr_size);
+        rxm->port = rxvq->port_id;
+        rx_pkts[nb_rx] = rxm;
+        prev = rxm;
+        /* Update rte_mbuf according to pi/pd header */
+        if (zxdh_rx_update_mbuf(rxm, header) < 0) {
+            zxdh_discard_rxbuf(vq, rxm);
+            continue;
+        }
+        seg_res = seg_num - 1;
+        /* Merge remaining segments */
+        while (seg_res != 0 && i < (num - 1)) {
+            i++;
+            rxm = rcv_pkts[i];
+            rxm->data_off = RTE_PKTMBUF_HEADROOM;
+            rxm->data_len = (uint16_t)(len[i]);
+
+            rcvd_pkt_len += (uint32_t)(len[i]);
+            prev->next = rxm;
+            prev = rxm;
+            rxm->next = NULL;
+            seg_res -= 1;
+        }
+
+        if (!seg_res) {
+            if (rcvd_pkt_len != rx_pkts[nb_rx]->pkt_len) {
+                PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.",
+                    rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);
+                zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);
+                continue;
+            }
+            nb_rx++;
+        }
+    }
+    /* Last packet still need merge segments */
+    while (seg_res != 0) {
+        uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, ZXDH_MBUF_BURST_SZ);
+        uint16_t extra_idx = 0;
+
+        rcv_cnt = zxdh_dequeue_burst_rx_packed(vq, rcv_pkts, len, rcv_cnt);
+        if (unlikely(rcv_cnt == 0)) {
+            PMD_RX_LOG(ERR, "No enough segments for packet.");
+            rte_pktmbuf_free(rx_pkts[nb_rx]);
+            break;
+        }
+        while (extra_idx < rcv_cnt) {
+            rxm = rcv_pkts[extra_idx];
+            rxm->data_off = RTE_PKTMBUF_HEADROOM;
+            rxm->pkt_len = (uint32_t)(len[extra_idx]);
+            rxm->data_len = (uint16_t)(len[extra_idx]);
+            prev->next = rxm;
+            prev = rxm;
+            rxm->next = NULL;
+            rcvd_pkt_len += len[extra_idx];
+            extra_idx += 1;
+        }
+        seg_res -= rcv_cnt;
+        if (!seg_res) {
+            if (rcvd_pkt_len != rx_pkts[nb_rx]->pkt_len) {
+                PMD_RX_LOG(ERR, "dropped rcvd_pkt_len %d pktlen %d.",
+                    rcvd_pkt_len, rx_pkts[nb_rx]->pkt_len);
+                zxdh_discard_rxbuf(vq, rx_pkts[nb_rx]);
+                continue;
+            }
+            nb_rx++;
+        }
+    }
+
+    /* Allocate new mbuf for the used descriptor */
+    if (likely(!zxdh_queue_full(vq))) {
+        /* free_cnt may include mrg descs */
+        uint16_t free_cnt = vq->vq_free_cnt;
+        struct rte_mbuf *new_pkts[free_cnt];
+
+        if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
+            error = zxdh_enqueue_recv_refill_packed(vq, new_pkts, free_cnt);
+            if (unlikely(error)) {
+                for (i = 0; i < free_cnt; i++)
+                    rte_pktmbuf_free(new_pkts[i]);
+            }
+            nb_enqueued += free_cnt;
+        } else {
+            dev->data->rx_mbuf_alloc_failed += free_cnt;
+        }
+    }
+    if (likely(nb_enqueued)) {
+        if (unlikely(zxdh_queue_kick_prepare_packed(vq))) {
+            zxdh_queue_notify(vq);
+            PMD_RX_LOG(DEBUG, "Notified");
+        }
+    }
+    return nb_rx;
+}
diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h
index d22af43ebe..79c2a882c3 100644
--- a/drivers/net/zxdh/zxdh_rxtx.h
+++ b/drivers/net/zxdh/zxdh_rxtx.h
@@ -48,5 +48,7 @@ struct __rte_cache_aligned zxdh_virtnet_tx {
 uint16_t zxdh_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 uint16_t zxdh_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
                 uint16_t nb_pkts);
+uint16_t zxdh_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+                uint16_t nb_pkts);
 
 #endif  /* ZXDH_RXTX_H */
-- 
2.27.0