Add rawdev dequeue operation for gdtc devices.

Signed-off-by: Yong Zhang <zhang.yong25@zte.com.cn>
---
 drivers/raw/gdtc/gdtc_rawdev.c | 168 ++++++++++++++++++++++++++++-----
 1 file changed, 147 insertions(+), 21 deletions(-)

diff --git a/drivers/raw/gdtc/gdtc_rawdev.c b/drivers/raw/gdtc/gdtc_rawdev.c
index f58b034e58..bc94bad827 100644
--- a/drivers/raw/gdtc/gdtc_rawdev.c
+++ b/drivers/raw/gdtc/gdtc_rawdev.c
@@ -88,6 +88,8 @@
 #define LOW32_MASK                              0xffffffff
 #define LOW16_MASK                              0xffff
 
+#define ZXDH_GDMA_TC_CNT_MAX                    0x10000
+
 #define IDX_TO_ADDR(addr, idx, t) \
     ((t)((uintptr_t)(addr) + (idx) * sizeof(struct zxdh_gdma_buff_desc)))
 
@@ -116,6 +118,19 @@ zxdh_gdma_get_queue(struct rte_rawdev *dev, uint16_t queue_id)
     return &(gdmadev->vqs[queue_id]);
 }
 
+static uint32_t
+zxdh_gdma_read_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset)
+{
+    struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+    uint32_t addr = 0;
+    uint32_t val = 0;
+
+    addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
+    val = *(uint32_t *)(gdmadev->base_addr + addr);
+
+    return val;
+}
+
 static void
 zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset, uint32_t val)
 {
@@ -264,11 +279,11 @@ zxdh_gdma_rawdev_queue_setup(struct rte_rawdev *dev,
 
         if (rbp->svfid != 0)
             src_user |= (ZXDH_GDMA_VF_EN |
-                         ((rbp->svfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
+                    ((rbp->svfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
 
         ZXDH_PMD_LOG(DEBUG, "rxq->qidx:%d setup src_user(ep:%d pf:%d vf:%d) success",
-                    queue_id, (uint8_t)rbp->sportid, (uint8_t)rbp->spfid,
-                    (uint8_t)rbp->svfid);
+                queue_id, (uint8_t)rbp->sportid, (uint8_t)rbp->spfid,
+                (uint8_t)rbp->svfid);
     } else if ((rbp->srbp == 0) && (rbp->drbp != 0)) {
         is_txq = 1;
         src_user = ZXDH_GDMA_ZF_USER;
@@ -277,11 +292,11 @@
 
         if (rbp->dvfid != 0)
             dst_user |= (ZXDH_GDMA_VF_EN |
-                         ((rbp->dvfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
+                    ((rbp->dvfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
 
         ZXDH_PMD_LOG(DEBUG, "txq->qidx:%d setup dst_user(ep:%d pf:%d vf:%d) success",
-                    queue_id, (uint8_t)rbp->dportid, (uint8_t)rbp->dpfid,
-                    (uint8_t)rbp->dvfid);
+                queue_id, (uint8_t)rbp->dportid, (uint8_t)rbp->dpfid,
+                (uint8_t)rbp->dvfid);
     } else {
         ZXDH_PMD_LOG(ERR, "Failed to setup queue, srbp/drbp is invalid");
         return -EINVAL;
@@ -353,7 +368,7 @@ zxdh_gdma_user_get(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
 
     if ((job->flags & ZXDH_GDMA_JOB_DIR_MASK) == 0) {
         ZXDH_PMD_LOG(DEBUG, "job flags:0x%x default user:0x%x",
-                            job->flags, queue->user);
+                job->flags, queue->user);
         return queue->user;
     } else if ((job->flags & ZXDH_GDMA_JOB_DIR_TX) != 0) {
         src_user = ZXDH_GDMA_ZF_USER;
@@ -362,7 +377,7 @@ zxdh_gdma_user_get(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
 
         if (job->vf_id != 0)
             dst_user |= (ZXDH_GDMA_VF_EN |
-                         ((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
+                    ((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
     } else {
         dst_user = ZXDH_GDMA_ZF_USER;
         src_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |
@@ -370,11 +385,11 @@ zxdh_gdma_user_get(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
 
         if (job->vf_id != 0)
             src_user |= (ZXDH_GDMA_VF_EN |
-                         ((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
+                    ((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
     }
     ZXDH_PMD_LOG(DEBUG, "job flags:0x%x ep_id:%u, pf_id:%u, vf_id:%u, user:0x%x",
-                        job->flags, job->ep_id, job->pf_id, job->vf_id,
-                        (src_user & LOW16_MASK) | (dst_user << 16));
+            job->flags, job->ep_id, job->pf_id, job->vf_id,
+            (src_user & LOW16_MASK) | (dst_user << 16));
 
     return (src_user & LOW16_MASK) | (dst_user << 16);
 }
@@ -395,8 +410,8 @@
     if (job != NULL) {
         zxdh_gdma_control_cal(&val, 1);
         next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem,
-                            (avail_idx + 1) % ZXDH_GDMA_RING_SIZE,
-                            uint64_t);
+                        (avail_idx + 1) % ZXDH_GDMA_RING_SIZE,
+                        uint64_t);
         bd->SrcAddr_L  = job->src & LOW32_MASK;
         bd->DstAddr_L  = job->dest & LOW32_MASK;
         bd->SrcAddr_H  = (job->src >> 32) & LOW32_MASK;
@@ -473,8 +488,8 @@ zxdh_gdma_rawdev_enqueue_bufs(struct rte_rawdev *dev,
     free_cnt = queue->sw_ring.free_cnt;
     if (free_cnt == 0) {
         ZXDH_PMD_LOG(ERR, "queue %u is full, enq_idx:%u deq_idx:%u used_idx:%u",
-                           queue_id, queue->sw_ring.enq_idx,
-                           queue->sw_ring.deq_idx, queue->sw_ring.used_idx);
+                queue_id, queue->sw_ring.enq_idx,
+                queue->sw_ring.deq_idx, queue->sw_ring.used_idx);
         return 0;
     } else if (free_cnt < count) {
         ZXDH_PMD_LOG(DEBUG, "job num %u > free_cnt, change to %u", count, free_cnt);
@@ -519,6 +534,116 @@ zxdh_gdma_rawdev_enqueue_bufs(struct rte_rawdev *dev,
 
     return count;
 }
+
+static inline void
+zxdh_gdma_used_idx_update(struct zxdh_gdma_queue *queue, uint16_t cnt, uint8_t data_bd_err)
+{
+    uint16_t idx = 0;
+
+    if (queue->sw_ring.used_idx + cnt < queue->queue_size)
+        queue->sw_ring.used_idx += cnt;
+    else
+        queue->sw_ring.used_idx = queue->sw_ring.used_idx + cnt - queue->queue_size;
+
+    if (data_bd_err == 1) {
+        /* Update job status, the last job status is error */
+        if (queue->sw_ring.used_idx == 0)
+            idx = queue->queue_size - 1;
+        else
+            idx = queue->sw_ring.used_idx - 1;
+
+        queue->sw_ring.job[idx]->status = 1;
+    }
+}
+
+static int
+zxdh_gdma_rawdev_dequeue_bufs(struct rte_rawdev *dev,
+                __rte_unused struct rte_rawdev_buf **buffers,
+                uint32_t count,
+                rte_rawdev_obj_t context)
+{
+    struct zxdh_gdma_queue *queue = NULL;
+    struct zxdh_gdma_enqdeq *e_context = NULL;
+    uint16_t queue_id = 0;
+    uint32_t val = 0;
+    uint16_t tc_cnt = 0;
+    uint16_t diff_cnt = 0;
+    uint16_t i = 0;
+    uint16_t bd_idx = 0;
+    uint64_t next_bd_addr = 0;
+    uint8_t data_bd_err = 0;
+
+    if ((dev == NULL) || (context == NULL))
+        return -EINVAL;
+
+    e_context = (struct zxdh_gdma_enqdeq *)context;
+    queue_id = e_context->vq_id;
+    queue = zxdh_gdma_get_queue(dev, queue_id);
+    if ((queue == NULL) || (queue->enable == 0))
+        return -EINVAL;
+
+    if (queue->sw_ring.pend_cnt == 0)
+        goto deq_job;
+
+    /* Get data transmit count */
+    val = zxdh_gdma_read_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET);
+    tc_cnt = val & LOW16_MASK;
+    if (tc_cnt >= queue->tc_cnt)
+        diff_cnt = tc_cnt - queue->tc_cnt;
+    else
+        diff_cnt = tc_cnt + ZXDH_GDMA_TC_CNT_MAX - queue->tc_cnt;
+
+    queue->tc_cnt = tc_cnt;
+
+    /* Data transmit error, channel stopped */
+    if ((val & ZXDH_GDMA_ERR_STATUS) != 0) {
+        next_bd_addr  = zxdh_gdma_read_reg(dev, queue_id, ZXDH_GDMA_LLI_L_OFFSET);
+        next_bd_addr |= ((uint64_t)zxdh_gdma_read_reg(dev, queue_id,
+                            ZXDH_GDMA_LLI_H_OFFSET) << 32);
+        next_bd_addr  = next_bd_addr << 6;
+        bd_idx = (next_bd_addr - queue->ring.ring_mem) / sizeof(struct zxdh_gdma_buff_desc);
+        if ((val & ZXDH_GDMA_SRC_DATA_ERR) || (val & ZXDH_GDMA_DST_ADDR_ERR)) {
+            diff_cnt++;
+            data_bd_err = 1;
+        }
+        ZXDH_PMD_LOG(INFO, "queue%d is err(0x%x) next_bd_idx:%u ll_addr:0x%"PRIx64" def user:0x%x",
+                queue_id, val, bd_idx, next_bd_addr, queue->user);
+
+        ZXDH_PMD_LOG(INFO, "Clean up error status");
+        val = ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_ERR_INTR_ENABLE;
+        zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET, val);
+
+        ZXDH_PMD_LOG(INFO, "Restart channel");
+        zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_XFERSIZE_OFFSET, 0);
+        zxdh_gdma_control_cal(&val, 0);
+        zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);
+    }
+
+    if (diff_cnt != 0) {
+        zxdh_gdma_used_idx_update(queue, diff_cnt, data_bd_err);
+        queue->sw_ring.deq_cnt += diff_cnt;
+        queue->sw_ring.pend_cnt -= diff_cnt;
+    }
+
+deq_job:
+    if (queue->sw_ring.deq_cnt == 0)
+        return 0;
+    else if (queue->sw_ring.deq_cnt < count)
+        count = queue->sw_ring.deq_cnt;
+
+    queue->sw_ring.deq_cnt -= count;
+
+    for (i = 0; i < count; i++) {
+        e_context->job[i] = queue->sw_ring.job[queue->sw_ring.deq_idx];
+        queue->sw_ring.job[queue->sw_ring.deq_idx] = NULL;
+        if (++queue->sw_ring.deq_idx >= queue->queue_size)
+            queue->sw_ring.deq_idx -= queue->queue_size;
+    }
+    queue->sw_ring.free_cnt += count;
+
+    return count;
+}
+
 static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
     .dev_info_get = zxdh_gdma_rawdev_info_get,
     .dev_configure = zxdh_gdma_rawdev_configure,
@@ -533,6 +658,7 @@ static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
     .attr_get = zxdh_gdma_rawdev_get_attr,
 
     .enqueue_bufs = zxdh_gdma_rawdev_enqueue_bufs,
+    .dequeue_bufs = zxdh_gdma_rawdev_dequeue_bufs,
 };
 
 static int
@@ -573,7 +699,7 @@ zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id)
     snprintf(name, RTE_MEMZONE_NAMESIZE, "gdma_vq%d_ring", queue_id);
     size = ZXDH_GDMA_RING_SIZE * sizeof(struct zxdh_gdma_buff_desc);
     mz = rte_memzone_reserve_aligned(name, size, rte_socket_id(),
-                            RTE_MEMZONE_IOVA_CONTIG, size);
+                        RTE_MEMZONE_IOVA_CONTIG, size);
     if (mz == NULL) {
         if (rte_errno == EEXIST)
             mz = rte_memzone_lookup(name);
@@ -589,7 +715,7 @@ zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id)
     queue->ring.ring_mem  = mz->iova;
     queue->ring.avail_idx = 0;
     ZXDH_PMD_LOG(INFO, "queue%u ring phy addr:0x%"PRIx64" virt addr:%p",
-                        queue_id, mz->iova, mz->addr);
+            queue_id, mz->iova, mz->addr);
 
     /* Configure the hardware channel to the initial state */
     zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET,
@@ -689,7 +815,7 @@ zxdh_gdma_unmap_resource(void *requested_addr, size_t size)
     /* Unmap the PCI memory resource of device */
    if (rte_mem_unmap(requested_addr, size))
         ZXDH_PMD_LOG(ERR, "cannot mem unmap(%p, %#zx): %s",
-            requested_addr, size, rte_strerror(rte_errno));
+                requested_addr, size, rte_strerror(rte_errno));
     else
         ZXDH_PMD_LOG(DEBUG, "PCI memory unmapped at %p", requested_addr);
 }
@@ -715,8 +841,8 @@ zxdh_gdma_rawdev_probe(struct rte_pci_driver *pci_drv __rte_unused,
         return -1;
     }
     ZXDH_PMD_LOG(INFO, "%s bar0 0x%"PRIx64" mapped at %p",
-                pci_dev->name, pci_dev->mem_resource[0].phys_addr,
-                pci_dev->mem_resource[0].addr);
+            pci_dev->name, pci_dev->mem_resource[0].phys_addr,
+            pci_dev->mem_resource[0].addr);
 
     dev = rte_rawdev_pmd_allocate(dev_name, sizeof(struct zxdh_gdma_rawdev), rte_socket_id());
     if (dev == NULL) {
@@ -747,7 +873,7 @@ zxdh_gdma_rawdev_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 err_out:
     zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
-        (size_t)pci_dev->mem_resource[0].len);
+                (size_t)pci_dev->mem_resource[0].len);
     return -1;
 }
 
-- 
2.43.0
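
Usage note (for context, not part of the patch): the new dequeue op returns
completed jobs through the job[] array of the struct zxdh_gdma_enqdeq context,
mirroring the enqueue side; the generic "buffers" argument is ignored by this
PMD. Below is a minimal application-side polling sketch, assuming the
zxdh_gdma_enqdeq/zxdh_gdma_job definitions from the driver header are visible
to the application and that dev_id/vq_id name an already configured device and
queue; gdtc_poll_completions is a hypothetical helper, not part of the driver.

#include <stdio.h>
#include <rte_common.h>
#include <rte_rawdev.h>

/* Poll up to 16 completed DMA jobs from one gdtc queue. */
static int
gdtc_poll_completions(uint16_t dev_id, uint16_t vq_id)
{
    struct zxdh_gdma_job *jobs[16] = {NULL};
    struct zxdh_gdma_enqdeq ctx = {
        .vq_id = vq_id, /* queue to poll, carried in the context */
        .job   = jobs,  /* completed jobs are returned here */
    };
    int done;
    int i;

    /* "buffers" is unused by zxdh_gdma_rawdev_dequeue_bufs, so pass NULL */
    done = rte_rawdev_dequeue_buffers(dev_id, NULL, RTE_DIM(jobs), &ctx);
    if (done < 0)
        return done; /* -EINVAL: bad device/context or disabled queue */

    for (i = 0; i < done; i++) {
        /* the PMD sets status to 1 when a BD hit a data/address error */
        if (jobs[i]->status != 0)
            printf("vq %u: job %d completed with error\n", vq_id, i);
    }
    return done;
}

One design point worth calling out: the transfer-complete counter read from
ZXDH_GDMA_TC_CNT_OFFSET is 16 bits wide, so the patch computes the per-poll
completion delta modulo ZXDH_GDMA_TC_CNT_MAX (0x10000), which keeps the count
correct across counter wraparound.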