Provide Rx/Tx queue setup and Rx queue interrupt enable/disable
implementations.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_ethdev.c |   4 +
 drivers/net/zxdh/zxdh_queue.c  | 149 +++++++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_queue.h  |  33 ++++++++
 3 files changed, 186 insertions(+)

diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 717a1d2b0b..521d7ed433 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -933,6 +933,10 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
     .dev_configure             = zxdh_dev_configure,
     .dev_close                 = zxdh_dev_close,
     .dev_infos_get             = zxdh_dev_infos_get,
+    .rx_queue_setup            = zxdh_dev_rx_queue_setup,
+    .tx_queue_setup            = zxdh_dev_tx_queue_setup,
+    .rx_queue_intr_enable      = zxdh_dev_rx_queue_intr_enable,
+    .rx_queue_intr_disable     = zxdh_dev_rx_queue_intr_disable,
 };
 
 static int32_t
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
index b4ef90ea36..af21f046ad 100644
--- a/drivers/net/zxdh/zxdh_queue.c
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -12,6 +12,11 @@
 #include "zxdh_common.h"
 #include "zxdh_msg.h"
 
+#define ZXDH_MBUF_MIN_SIZE       sizeof(struct zxdh_net_hdr_dl)
+#define ZXDH_MBUF_SIZE_4K        4096
+#define ZXDH_RX_FREE_THRESH      32
+#define ZXDH_TX_FREE_THRESH      32
+
 struct rte_mbuf *
 zxdh_queue_detach_unused(struct zxdh_virtqueue *vq)
 {
@@ -125,3 +130,147 @@ zxdh_free_queues(struct rte_eth_dev *dev)
 
     return 0;
 }
+
+static int
+zxdh_check_mempool(struct rte_mempool *mp, uint16_t offset, uint16_t min_length)
+{
+    uint16_t data_room_size;
+
+    if (mp == NULL)
+        return -EINVAL;
+    data_room_size = rte_pktmbuf_data_room_size(mp);
+    if (data_room_size < offset + min_length) {
+        PMD_RX_LOG(ERR,
+                   "%s mbuf_data_room_size %u < %u (%u + %u)",
+                   mp->name, data_room_size,
+                   offset + min_length, offset, min_length);
+        return -EINVAL;
+    }
+    return 0;
+}
+
+int32_t
+zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
+            uint16_t queue_idx,
+            uint16_t nb_desc,
+            uint32_t socket_id __rte_unused,
+            const struct rte_eth_rxconf *rx_conf,
+            struct rte_mempool *mp)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_RQ_QUEUE_IDX;
+    struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx];
+    int32_t ret = 0;
+
+    if (rx_conf->rx_deferred_start) {
+        PMD_RX_LOG(ERR, "Rx deferred start is not supported");
+        return -EINVAL;
+    }
+    uint16_t rx_free_thresh = rx_conf->rx_free_thresh;
+
+    if (rx_free_thresh == 0)
+        rx_free_thresh = RTE_MIN(vq->vq_nentries / 4, ZXDH_RX_FREE_THRESH);
+
+    /* rx_free_thresh must be a multiple of four. */
+    if (rx_free_thresh & 0x3) {
+        PMD_RX_LOG(ERR, "rx_free_thresh must be a multiple of 4 (rx_free_thresh=%u port=%u queue=%u)",
+            rx_free_thresh, dev->data->port_id, queue_idx);
+        return -EINVAL;
+    }
+    /* rx_free_thresh must be less than the number of RX entries */
+    if (rx_free_thresh >= vq->vq_nentries) {
+        PMD_RX_LOG(ERR, "rx_free_thresh must be less than RX entries (%u) (rx_free_thresh=%u port=%u queue=%u)",
+            vq->vq_nentries, rx_free_thresh, dev->data->port_id, queue_idx);
+        return -EINVAL;
+    }
+    vq->vq_free_thresh = rx_free_thresh;
+    nb_desc = ZXDH_QUEUE_DEPTH;
+
+    vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+    struct zxdh_virtnet_rx *rxvq = &vq->rxq;
+
+    rxvq->queue_id = vtpci_logic_qidx;
+
+    int mbuf_min_size = ZXDH_MBUF_MIN_SIZE;
+
+    if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
+        mbuf_min_size = ZXDH_MBUF_SIZE_4K;
+
+    ret = zxdh_check_mempool(mp, RTE_PKTMBUF_HEADROOM, mbuf_min_size);
+    if (ret != 0) {
+        PMD_RX_LOG(ERR,
+            "rxq setup failed: mempool data room too small (<%d)", mbuf_min_size);
+        return -EINVAL;
+    }
+    rxvq->mpool = mp;
+    if (queue_idx < dev->data->nb_rx_queues)
+        dev->data->rx_queues[queue_idx] = rxvq;
+
+    return 0;
+}
+
+int32_t
+zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev,
+            uint16_t queue_idx,
+            uint16_t nb_desc,
+            uint32_t socket_id __rte_unused,
+            const struct rte_eth_txconf *tx_conf)
+{
+    uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_TQ_QUEUE_IDX;
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx];
+    struct zxdh_virtnet_tx *txvq = NULL;
+    uint16_t tx_free_thresh = 0;
+
+    if (tx_conf->tx_deferred_start) {
+        PMD_TX_LOG(ERR, "Tx deferred start is not supported");
+        return -EINVAL;
+    }
+
+    nb_desc = ZXDH_QUEUE_DEPTH;
+
+    vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+
+    txvq = &vq->txq;
+    txvq->queue_id = vtpci_logic_qidx;
+
+    tx_free_thresh = tx_conf->tx_free_thresh;
+    if (tx_free_thresh == 0)
+        tx_free_thresh = RTE_MIN(vq->vq_nentries / 4, ZXDH_TX_FREE_THRESH);
+
+    /* tx_free_thresh must be less than the number of TX entries minus 3 */
+    if (tx_free_thresh >= (vq->vq_nentries - 3)) {
+        PMD_TX_LOG(ERR, "tx_free_thresh must be less than TX entries - 3 (%u) (tx_free_thresh=%u port=%u queue=%u)",
+                vq->vq_nentries - 3, tx_free_thresh, dev->data->port_id, queue_idx);
+        return -EINVAL;
+    }
+
+    vq->vq_free_thresh = tx_free_thresh;
+
+    if (queue_idx < dev->data->nb_tx_queues)
+        dev->data->tx_queues[queue_idx] = txvq;
+
+    return 0;
+}
+
+int32_t
+zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+    struct zxdh_virtqueue *vq = rxvq->vq;
+
+    zxdh_queue_enable_intr(vq);
+    zxdh_mb(hw->weak_barriers);
+    return 0;
+}
+
+int32_t
+zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+    struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+    struct zxdh_virtqueue *vq = rxvq->vq;
+
+    zxdh_queue_disable_intr(vq);
+    return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index f4b4c4cd02..d3fa8b1dd3 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -8,6 +8,7 @@
 #include <stdint.h>
 
 #include <rte_common.h>
+#include <rte_atomic.h>
 
 #include "zxdh_ethdev.h"
 #include "zxdh_rxtx.h"
@@ -30,6 +31,7 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 #define ZXDH_RING_EVENT_FLAGS_DESC        0x2
 
 #define ZXDH_VQ_RING_DESC_CHAIN_END       32768
+#define ZXDH_QUEUE_DEPTH                  1024
 
 /*
  * ring descriptors: 16 bytes.
@@ -270,8 +272,39 @@ zxdh_queue_disable_intr(struct zxdh_virtqueue *vq)
     }
 }
 
+static inline void
+zxdh_queue_enable_intr(struct zxdh_virtqueue *vq)
+{
+    if (vq->vq_packed.event_flags_shadow == ZXDH_RING_EVENT_FLAGS_DISABLE) {
+        /* Switch the shadow to ENABLE before writing it to the ring;
+         * writing DISABLE here would make this function a no-op. */
+        vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_ENABLE;
+        vq->vq_packed.ring.driver->desc_event_flags = vq->vq_packed.event_flags_shadow;
+    }
+}
+
+static inline void
+zxdh_mb(uint8_t weak_barriers)
+{
+    if (weak_barriers)
+        rte_atomic_thread_fence(rte_memory_order_seq_cst);
+    else
+        rte_mb();
+}
+
 struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq);
 int32_t zxdh_free_queues(struct rte_eth_dev *dev);
 int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx);
+int32_t zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev,
+            uint16_t queue_idx,
+            uint16_t nb_desc,
+            uint32_t socket_id __rte_unused,
+            const struct rte_eth_txconf *tx_conf);
+int32_t zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
+            uint16_t queue_idx,
+            uint16_t nb_desc,
+            uint32_t socket_id __rte_unused,
+            const struct rte_eth_rxconf *rx_conf,
+            struct rte_mempool *mp);
+int32_t zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+int32_t zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
 
 #endif /* ZXDH_QUEUE_H */
-- 
2.27.0
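
For reviewers: below is a minimal, hypothetical sketch (not part of the patch) of how an
application would exercise these new ops through the generic ethdev API. The port ID,
pool name and sizing, and descriptor count are illustrative assumptions.

/* Illustrative only -- not part of this patch. Assumes port_id refers to a
 * zxdh port already configured via rte_eth_dev_configure() with
 * intr_conf.rxq = 1, so Rx interrupts may be enabled later. */
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

static int
zxdh_example_queue_init(uint16_t port_id)
{
    int socket_id = rte_socket_id();

    /* Data room must cover RTE_PKTMBUF_HEADROOM plus the downlink net
     * header (enforced by zxdh_check_mempool()); the default 2 KB data
     * room is enough when LRO is off. */
    struct rte_mempool *mp = rte_pktmbuf_pool_create("zxdh_rx_pool",
            8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
    if (mp == NULL)
        return -1;

    /* nb_desc is overridden to ZXDH_QUEUE_DEPTH (1024) inside the PMD;
     * NULL confs keep deferred start disabled, as the PMD requires. */
    if (rte_eth_rx_queue_setup(port_id, 0, 1024, socket_id, NULL, mp) != 0)
        return -1;
    if (rte_eth_tx_queue_setup(port_id, 0, 1024, socket_id, NULL) != 0)
        return -1;

    /* Dispatches to zxdh_dev_rx_queue_intr_enable() via the new dev_ops. */
    return rte_eth_dev_rx_intr_enable(port_id, 0);
}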