Add rawdev enqueue operation for gdtc devices.

Signed-off-by: Yong Zhang <zhang.yong25@zte.com.cn>
---
 drivers/raw/gdtc/gdtc_rawdev.c | 220 +++++++++++++++++++++++++++++++++
 drivers/raw/gdtc/gdtc_rawdev.h |  19 +++
 2 files changed, 239 insertions(+)

diff --git a/drivers/raw/gdtc/gdtc_rawdev.c b/drivers/raw/gdtc/gdtc_rawdev.c
index 8512bd8413..f58b034e58 100644
--- a/drivers/raw/gdtc/gdtc_rawdev.c
+++ b/drivers/raw/gdtc/gdtc_rawdev.c
@@ -43,10 +43,34 @@
 /* Register offset */
 #define ZXDH_GDMA_BASE_OFFSET                   0x100000
 #define ZXDH_GDMA_EXT_ADDR_OFFSET               0x218
+#define ZXDH_GDMA_SAR_LOW_OFFSET                0x200
+#define ZXDH_GDMA_DAR_LOW_OFFSET                0x204
+#define ZXDH_GDMA_SAR_HIGH_OFFSET               0x234
+#define ZXDH_GDMA_DAR_HIGH_OFFSET               0x238
+#define ZXDH_GDMA_XFERSIZE_OFFSET               0x208
 #define ZXDH_GDMA_CONTROL_OFFSET                0x230
+#define ZXDH_GDMA_TC_STATUS_OFFSET              0x0
+#define ZXDH_GDMA_STATUS_CLEAN_OFFSET           0x80
+#define ZXDH_GDMA_LLI_L_OFFSET                  0x21c
+#define ZXDH_GDMA_LLI_H_OFFSET                  0x220
+#define ZXDH_GDMA_CHAN_CONTINUE_OFFSET          0x224
 #define ZXDH_GDMA_TC_CNT_OFFSET                 0x23c
 #define ZXDH_GDMA_LLI_USER_OFFSET               0x228
 
+/* Control register */
+#define ZXDH_GDMA_CHAN_ENABLE                   0x1
+#define ZXDH_GDMA_CHAN_DISABLE                  0
+#define ZXDH_GDMA_SOFT_CHAN                     0x2
+#define ZXDH_GDMA_TC_INTR_ENABLE                0x10
+#define ZXDH_GDMA_ALL_INTR_ENABLE               0x30
+#define ZXDH_GDMA_SBS_SHIFT                     6           /* src burst size */
+#define ZXDH_GDMA_SBL_SHIFT                     9           /* src burst length */
+#define ZXDH_GDMA_DBS_SHIFT                     13          /* dest burst size */
+#define ZXDH_GDMA_BURST_SIZE_MIN                0x1         /* 1 byte */
+#define ZXDH_GDMA_BURST_SIZE_MEDIUM             0x4         /* 4 words */
+#define ZXDH_GDMA_BURST_SIZE_MAX                0x6         /* 16 words */
+#define ZXDH_GDMA_DEFAULT_BURST_LEN             0xf         /* 16 beats */
+#define ZXDH_GDMA_TC_CNT_ENABLE                 (1 << 27)
 #define ZXDH_GDMA_CHAN_FORCE_CLOSE              (1 << 31)
 
 /* TC count & Error interrupt status register */
@@ -58,9 +82,15 @@
 #define ZXDH_GDMA_TC_CNT_CLEAN                  (1)
 
 #define ZXDH_GDMA_CHAN_SHIFT                    0x80
+#define ZXDH_GDMA_LINK_END_NODE                 (1 << 30)
+#define ZXDH_GDMA_CHAN_CONTINUE                 (1)
+
 #define LOW32_MASK                              0xffffffff
 #define LOW16_MASK                              0xffff
 
+#define IDX_TO_ADDR(addr, idx, t) \
+    ((t)((uintptr_t)(addr) + (idx) * sizeof(struct zxdh_gdma_buff_desc)))
+
 static int zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id);
 static int zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id);
 
@@ -301,6 +331,194 @@ zxdh_gdma_rawdev_get_attr(struct rte_rawdev *dev,
 
     return 0;
 }
+
+static inline void
+zxdh_gdma_control_cal(uint32_t *val, uint8_t tc_enable)
+{
+    *val = (ZXDH_GDMA_CHAN_ENABLE |
+            ZXDH_GDMA_SOFT_CHAN |
+            (ZXDH_GDMA_DEFAULT_BURST_LEN << ZXDH_GDMA_SBL_SHIFT) |
+            (ZXDH_GDMA_BURST_SIZE_MAX << ZXDH_GDMA_SBS_SHIFT) |
+            (ZXDH_GDMA_BURST_SIZE_MAX << ZXDH_GDMA_DBS_SHIFT));
+
+    if (tc_enable != 0)
+        *val |= ZXDH_GDMA_TC_CNT_ENABLE;
+}
+
+static inline uint32_t
+zxdh_gdma_user_get(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
+{
+    uint32_t src_user = 0;
+    uint32_t dst_user = 0;
+
+    if ((job->flags & ZXDH_GDMA_JOB_DIR_MASK) == 0) {
+        ZXDH_PMD_LOG(DEBUG, "job flags:0x%x default user:0x%x",
+                            job->flags, queue->user);
+        return queue->user;
+    } else if ((job->flags & ZXDH_GDMA_JOB_DIR_TX) != 0) {
+        src_user = ZXDH_GDMA_ZF_USER;
+        dst_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |
+            ((job->ep_id + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));
+
+        if (job->vf_id != 0)
+            dst_user |= (ZXDH_GDMA_VF_EN |
+                         ((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
+    } else {
+        dst_user = ZXDH_GDMA_ZF_USER;
+        src_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |
+            ((job->ep_id + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));
+
+        if (job->vf_id != 0)
+            src_user |= (ZXDH_GDMA_VF_EN |
+                         ((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
+    }
+    ZXDH_PMD_LOG(DEBUG, "job flags:0x%x ep_id:%u, pf_id:%u, vf_id:%u, user:0x%x",
+                        job->flags, job->ep_id, job->pf_id, job->vf_id,
+                        (src_user & LOW16_MASK) | (dst_user << 16));
+
+    return (src_user & LOW16_MASK) | (dst_user << 16);
+}
+
+static inline void
+zxdh_gdma_fill_bd(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)
+{
+    struct zxdh_gdma_buff_desc *bd = NULL;
+    uint32_t val = 0;
+    uint64_t next_bd_addr = 0;
+    uint16_t avail_idx = 0;
+
+    avail_idx = queue->ring.avail_idx;
+    bd = &(queue->ring.desc[avail_idx]);
+    memset(bd, 0, sizeof(struct zxdh_gdma_buff_desc));
+
+    /* data bd */
+    if (job != NULL) {
+        zxdh_gdma_control_cal(&val, 1);
+        next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem,
+                            (avail_idx + 1) % ZXDH_GDMA_RING_SIZE,
+                            uint64_t);
+        bd->SrcAddr_L  = job->src & LOW32_MASK;
+        bd->DstAddr_L  = job->dest & LOW32_MASK;
+        bd->SrcAddr_H  = (job->src >> 32) & LOW32_MASK;
+        bd->DstAddr_H  = (job->dest >> 32) & LOW32_MASK;
+        bd->Xpara      = job->len;
+        bd->ExtAddr    = zxdh_gdma_user_get(queue, job);
+        bd->LLI_Addr_L = (next_bd_addr >> 6) & LOW32_MASK;
+        bd->LLI_Addr_H = next_bd_addr >> 38;
+        bd->LLI_User   = ZXDH_GDMA_ZF_USER;
+        bd->Control    = val;
+    } else {
+        zxdh_gdma_control_cal(&val, 0);
+        next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem, avail_idx, uint64_t);
+        bd->ExtAddr    = queue->user;
+        bd->LLI_User   = ZXDH_GDMA_ZF_USER;
+        bd->Control    = val;
+        bd->LLI_Addr_L = (next_bd_addr >> 6) & LOW32_MASK;
+        bd->LLI_Addr_H = (next_bd_addr >> 38) | ZXDH_GDMA_LINK_END_NODE;
+        if (queue->flag != 0) {
+            bd = IDX_TO_ADDR(queue->ring.desc,
+                    queue->ring.last_avail_idx,
+                    struct zxdh_gdma_buff_desc*);
+            next_bd_addr = IDX_TO_ADDR(queue->ring.ring_mem,
+                    (queue->ring.last_avail_idx + 1) % ZXDH_GDMA_RING_SIZE,
+                    uint64_t);
+            bd->LLI_Addr_L  = (next_bd_addr >> 6) & LOW32_MASK;
+            bd->LLI_Addr_H  = next_bd_addr >> 38;
+            rte_wmb();
+            bd->LLI_Addr_H &= ~ZXDH_GDMA_LINK_END_NODE;
+        }
+        /* Record the index of empty bd for dynamic chaining */
+        queue->ring.last_avail_idx = avail_idx;
+    }
+
+    if (++avail_idx >= ZXDH_GDMA_RING_SIZE)
+        avail_idx -= ZXDH_GDMA_RING_SIZE;
+
+    queue->ring.avail_idx = avail_idx;
+}
+
+static int
+zxdh_gdma_rawdev_enqueue_bufs(struct rte_rawdev *dev,
+                __rte_unused struct rte_rawdev_buf **buffers,
+                uint32_t count,
+                rte_rawdev_obj_t context)
+{
+    struct zxdh_gdma_rawdev *gdmadev = NULL;
+    struct zxdh_gdma_queue *queue = NULL;
+    struct zxdh_gdma_enqdeq *e_context = NULL;
+    struct zxdh_gdma_job *job = NULL;
+    uint16_t queue_id = 0;
+    uint32_t val = 0;
+    uint16_t i = 0;
+    uint16_t free_cnt = 0;
+
+    if (dev == NULL)
+        return -EINVAL;
+
+    if (unlikely((count < 1) || (context == NULL)))
+        return -EINVAL;
+
+    gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+    if (gdmadev->device_state == ZXDH_GDMA_DEV_STOPPED) {
+        ZXDH_PMD_LOG(ERR, "gdma dev is stopped");
+        return 0;
+    }
+
+    e_context = (struct zxdh_gdma_enqdeq *)context;
+    queue_id = e_context->vq_id;
+    queue = zxdh_gdma_get_queue(dev, queue_id);
+    if ((queue == NULL) || (queue->enable == 0))
+        return -EINVAL;
+
+    free_cnt = queue->sw_ring.free_cnt;
+    if (free_cnt == 0) {
+        ZXDH_PMD_LOG(ERR, "queue %u is full, enq_idx:%u deq_idx:%u used_idx:%u",
+                           queue_id, queue->sw_ring.enq_idx,
+                           queue->sw_ring.deq_idx, queue->sw_ring.used_idx);
+        return 0;
+    } else if (free_cnt < count) {
+        ZXDH_PMD_LOG(DEBUG, "job num %u > free_cnt, change to %u", count, free_cnt);
+        count = free_cnt;
+    }
+
+    rte_spinlock_lock(&queue->enqueue_lock);
+
+    /* Build the bd list; the last bd is an empty bd */
+    for (i = 0; i < count; i++) {
+        job = e_context->job[i];
+        zxdh_gdma_fill_bd(queue, job);
+    }
+    zxdh_gdma_fill_bd(queue, NULL);
+
+    if (unlikely(queue->flag == 0)) {
+        zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_L_OFFSET,
+                            (queue->ring.ring_mem >> 6) & LOW32_MASK);
+        zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_H_OFFSET,
+                             queue->ring.ring_mem >> 38);
+        /* Start hardware handling */
+        zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_XFERSIZE_OFFSET, 0);
+        zxdh_gdma_control_cal(&val, 0);
+        zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);
+        queue->flag = 1;
+    } else {
+        val = ZXDH_GDMA_CHAN_CONTINUE;
+        zxdh_gdma_write_reg(dev, queue->vq_id, ZXDH_GDMA_CHAN_CONTINUE_OFFSET, val);
+    }
+
+    /* job enqueue */
+    for (i = 0; i < count; i++) {
+        queue->sw_ring.job[queue->sw_ring.enq_idx] = e_context->job[i];
+        if (++queue->sw_ring.enq_idx >= queue->queue_size)
+            queue->sw_ring.enq_idx -= queue->queue_size;
+
+        free_cnt--;
+    }
+    queue->sw_ring.free_cnt = free_cnt;
+    queue->sw_ring.pend_cnt += count;
+    rte_spinlock_unlock(&queue->enqueue_lock);
+
+    return count;
+}
 static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
     .dev_info_get = zxdh_gdma_rawdev_info_get,
     .dev_configure = zxdh_gdma_rawdev_configure,
@@ -313,6 +531,8 @@ static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
     .queue_release = zxdh_gdma_rawdev_queue_release,
 
     .attr_get = zxdh_gdma_rawdev_get_attr,
+
+    .enqueue_bufs = zxdh_gdma_rawdev_enqueue_bufs,
 };
 
 static int
diff --git a/drivers/raw/gdtc/gdtc_rawdev.h b/drivers/raw/gdtc/gdtc_rawdev.h
index 92b35fcf14..12ce8a04c7 100644
--- a/drivers/raw/gdtc/gdtc_rawdev.h
+++ b/drivers/raw/gdtc/gdtc_rawdev.h
@@ -24,6 +24,20 @@ extern int zxdh_gdma_rawdev_logtype;
 #define ZXDH_GDMA_QUEUE_SIZE                    16384
 #define ZXDH_GDMA_RING_SIZE                     32768
 
+/* States whether the source address is physical. */
+#define ZXDH_GDMA_JOB_SRC_PHY                   (1UL)
+
+/* States whether the destination address is physical. */
+#define ZXDH_GDMA_JOB_DEST_PHY                  (1UL << 1)
+
+/* ZF->HOST */
+#define ZXDH_GDMA_JOB_DIR_TX                    (1UL << 2)
+
+/* HOST->ZF */
+#define ZXDH_GDMA_JOB_DIR_RX                    (1UL << 3)
+
+#define ZXDH_GDMA_JOB_DIR_MASK                  (ZXDH_GDMA_JOB_DIR_TX | ZXDH_GDMA_JOB_DIR_RX)
+
 enum zxdh_gdma_device_state {
     ZXDH_GDMA_DEV_RUNNING,
     ZXDH_GDMA_DEV_STOPPED
@@ -100,6 +114,11 @@ struct zxdh_gdma_rawdev {
     struct zxdh_gdma_queue vqs[ZXDH_GDMA_TOTAL_CHAN_NUM];
 };
 
+struct zxdh_gdma_enqdeq {
+    uint16_t vq_id;
+    struct zxdh_gdma_job **job;
+};
+
 struct zxdh_gdma_config {
     uint16_t max_hw_queues_per_core;
     uint16_t max_vqs;
-- 
2.43.0
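
For reference, a minimal sketch of how an application could drive the new
enqueue op through the generic rawdev API. This is illustrative only and not
part of the patch: gdtc_enqueue_jobs is a hypothetical helper, and the jobs
array is assumed to be populated per the zxdh_gdma_job layout introduced
earlier in this series.

    #include <rte_rawdev.h>

    #include "gdtc_rawdev.h"

    /* Submit num prepared jobs to queue vq_id of rawdev dev_id.
     * The driver ignores the buffers argument; jobs are carried in the
     * context object. Returns the number of jobs accepted, which may be
     * less than num when the queue runs short of free slots. */
    static int
    gdtc_enqueue_jobs(uint16_t dev_id, uint16_t vq_id,
                      struct zxdh_gdma_job **jobs, uint32_t num)
    {
        struct zxdh_gdma_enqdeq ctx = {
            .vq_id = vq_id,
            .job   = jobs,
        };

        return rte_rawdev_enqueue_buffers(dev_id, NULL, num, &ctx);
    }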