Add queue initialization and release interface.

Signed-off-by: Yong Zhang <zhang.yong25@zte.com.cn>
---
 drivers/raw/gdtc/gdtc_rawdev.c | 244 +++++++++++++++++++++++++++++++++
 drivers/raw/gdtc/gdtc_rawdev.h |  19 +++
 2 files changed, 263 insertions(+)

diff --git a/drivers/raw/gdtc/gdtc_rawdev.c b/drivers/raw/gdtc/gdtc_rawdev.c
index 6f20ecdad6..c3e59fcdab 100644
--- a/drivers/raw/gdtc/gdtc_rawdev.c
+++ b/drivers/raw/gdtc/gdtc_rawdev.c
@@ -28,10 +28,42 @@
 
 #include "gdtc_rawdev.h"
 
+/*
+ * User define:
+ * ep_id-bit[15:12] vfunc_num-bit[11:4] func_num-bit[3:1] vfunc_active-bit0
+ * host ep_id:5~8   zf ep_id:9
+ */
+#define ZXDH_GDMA_ZF_USER                       0x9000      /* ep4 pf0 */
+#define ZXDH_GDMA_PF_NUM_SHIFT                  1
+#define ZXDH_GDMA_VF_NUM_SHIFT                  4
+#define ZXDH_GDMA_EP_ID_SHIFT                   12
+#define ZXDH_GDMA_VF_EN                         1
+#define ZXDH_GDMA_EPID_OFFSET                   5
+
 /* Register offset */
 #define ZXDH_GDMA_BASE_OFFSET                   0x100000
+#define ZXDH_GDMA_EXT_ADDR_OFFSET               0x218
+#define ZXDH_GDMA_CONTROL_OFFSET                0x230
+#define ZXDH_GDMA_TC_CNT_OFFSET                 0x23c
+#define ZXDH_GDMA_LLI_USER_OFFSET               0x228
+
+#define ZXDH_GDMA_CHAN_FORCE_CLOSE              (1 << 31)
+
+/* TC count & Error interrupt status register */
+#define ZXDH_GDMA_SRC_LLI_ERR                   (1 << 16)
+#define ZXDH_GDMA_SRC_DATA_ERR                  (1 << 17)
+#define ZXDH_GDMA_DST_ADDR_ERR                  (1 << 18)
+#define ZXDH_GDMA_ERR_STATUS                    (1 << 19)
+#define ZXDH_GDMA_ERR_INTR_ENABLE               (1 << 20)
+#define ZXDH_GDMA_TC_CNT_CLEAN                  (1)
 
 #define ZXDH_GDMA_CHAN_SHIFT                    0x80
+#define LOW32_MASK                              0xffffffff
+#define LOW16_MASK                              0xffff
+
+static int zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id);
+static int zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id);
+
 char zxdh_gdma_driver_name[] = "rawdev_zxdh_gdma";
 char dev_name[] = "zxdh_gdma";
 
@@ -41,9 +73,221 @@ zxdh_gdma_rawdev_get_priv(const struct rte_rawdev *rawdev)
     return rawdev->dev_private;
 }
 
+static inline struct zxdh_gdma_queue *
+zxdh_gdma_get_queue(struct rte_rawdev *dev, uint16_t queue_id)
+{
+    struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+
+    if (queue_id >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
+        ZXDH_PMD_LOG(ERR, "queue id %d is invalid", queue_id);
+        return NULL;
+    }
+
+    return &(gdmadev->vqs[queue_id]);
+}
+
+static void
+zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset, uint32_t val)
+{
+    struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+    uint32_t addr = 0;
+
+    addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
+    *(uint32_t *)(gdmadev->base_addr + addr) = val;
+}
+
+static int
+zxdh_gdma_rawdev_queue_setup(struct rte_rawdev *dev,
+                uint16_t queue_id,
+                rte_rawdev_obj_t queue_conf,
+                size_t conf_size)
+{
+    struct zxdh_gdma_rawdev *gdmadev = NULL;
+    struct zxdh_gdma_queue *queue = NULL;
+    struct zxdh_gdma_queue_config *qconfig = NULL;
+    struct zxdh_gdma_rbp *rbp = NULL;
+    uint16_t i = 0;
+    uint8_t is_txq = 0;
+    uint32_t src_user = 0;
+    uint32_t dst_user = 0;
+
+    if (dev == NULL)
+        return -EINVAL;
+
+    if ((queue_conf == NULL) || (conf_size != sizeof(struct zxdh_gdma_queue_config)))
+        return -EINVAL;
+
+    gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+    qconfig = (struct zxdh_gdma_queue_config *)queue_conf;
+
+    for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
+        if (gdmadev->vqs[i].enable == 0)
+            break;
+    }
+    if (i >= ZXDH_GDMA_TOTAL_CHAN_NUM) {
+        ZXDH_PMD_LOG(ERR, "Failed to setup queue, no avail queues");
+        return -1;
+    }
+    queue_id = i;
+    if (zxdh_gdma_queue_init(dev, queue_id) != 0) {
+        ZXDH_PMD_LOG(ERR, "Failed to init queue");
+        return -1;
+    }
+    queue = &(gdmadev->vqs[queue_id]);
+
+    rbp = qconfig->rbp;
+    if ((rbp->srbp != 0) && (rbp->drbp == 0)) {
+        is_txq = 0;
+        dst_user = ZXDH_GDMA_ZF_USER;
+        src_user = ((rbp->spfid << ZXDH_GDMA_PF_NUM_SHIFT) |
+            ((rbp->sportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));
+
+        if (rbp->svfid != 0)
+            src_user |= (ZXDH_GDMA_VF_EN |
+                         ((rbp->svfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
+
+        ZXDH_PMD_LOG(DEBUG, "rxq->qidx:%d setup src_user(ep:%d pf:%d vf:%d) success",
+                    queue_id, (uint8_t)rbp->sportid, (uint8_t)rbp->spfid,
+                    (uint8_t)rbp->svfid);
+    } else if ((rbp->srbp == 0) && (rbp->drbp != 0)) {
+        is_txq = 1;
+        src_user = ZXDH_GDMA_ZF_USER;
+        dst_user = ((rbp->dpfid << ZXDH_GDMA_PF_NUM_SHIFT) |
+            ((rbp->dportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));
+
+        if (rbp->dvfid != 0)
+            dst_user |= (ZXDH_GDMA_VF_EN |
+                         ((rbp->dvfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));
+
+        ZXDH_PMD_LOG(DEBUG, "txq->qidx:%d setup dst_user(ep:%d pf:%d vf:%d) success",
+                    queue_id, (uint8_t)rbp->dportid, (uint8_t)rbp->dpfid,
+                    (uint8_t)rbp->dvfid);
+    } else {
+        ZXDH_PMD_LOG(ERR, "Failed to setup queue, srbp/drbp is invalid");
+        return -EINVAL;
+    }
+    queue->is_txq = is_txq;
+
+    /* setup queue user info */
+    queue->user = (src_user & LOW16_MASK) | (dst_user << 16);
+
+    zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_EXT_ADDR_OFFSET, queue->user);
+    gdmadev->used_num++;
+
+    return queue_id;
+}
+
 static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
+    .queue_setup = zxdh_gdma_rawdev_queue_setup,
 };
 
+static int
+zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id)
+{
+    char name[RTE_MEMZONE_NAMESIZE];
+    struct zxdh_gdma_queue *queue = NULL;
+    const struct rte_memzone *mz = NULL;
+    uint32_t size = 0;
+    int ret = 0;
+
+    queue = zxdh_gdma_get_queue(dev, queue_id);
+    if (queue == NULL)
+        return -EINVAL;
+
+    queue->enable = 1;
+    queue->vq_id  = queue_id;
+    queue->flag   = 0;
+    queue->tc_cnt = 0;
+
+    /* Init sw_ring */
+    queue->sw_ring.job = rte_calloc(NULL, queue->queue_size, sizeof(struct zxdh_gdma_job *), 0);
+    if (queue->sw_ring.job == NULL) {
+        ZXDH_PMD_LOG(ERR, "can not allocate sw_ring");
+        ret = -ENOMEM;
+        goto free_queue;
+    }
+
+    /* Cache up to size-1 job in the ring to prevent overwriting hardware prefetching */
+    queue->sw_ring.free_cnt = queue->queue_size - 1;
+    queue->sw_ring.deq_cnt  = 0;
+    queue->sw_ring.pend_cnt = 0;
+    queue->sw_ring.enq_idx  = 0;
+    queue->sw_ring.deq_idx  = 0;
+    queue->sw_ring.used_idx = 0;
+
+    /* Init ring */
+    snprintf(name, RTE_MEMZONE_NAMESIZE, "gdma_vq%d_ring", queue_id);
+    size = ZXDH_GDMA_RING_SIZE * sizeof(struct zxdh_gdma_buff_desc);
+    mz = rte_memzone_reserve_aligned(name, size, rte_socket_id(),
+                            RTE_MEMZONE_IOVA_CONTIG, size);
+    if (mz == NULL) {
+        if (rte_errno == EEXIST)
+            mz = rte_memzone_lookup(name);
+        if (mz == NULL) {
+            ZXDH_PMD_LOG(ERR, "can not allocate ring %s", name);
+            ret = -ENOMEM;
+            goto free_queue;
+        }
+    }
+    memset(mz->addr, 0, size);
+    queue->ring.ring_mz   = mz;
+    queue->ring.desc      = (struct zxdh_gdma_buff_desc *)(mz->addr);
+    queue->ring.ring_mem  = mz->iova;
+    queue->ring.avail_idx = 0;
+    ZXDH_PMD_LOG(INFO, "queue%u ring phy addr:0x%"PRIx64" virt addr:%p",
+                        queue_id, mz->iova, mz->addr);
+
+    /* Initialize the hardware channel */
+    zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET,
+        ZXDH_GDMA_CHAN_FORCE_CLOSE);
+    zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET,
+        ZXDH_GDMA_ERR_INTR_ENABLE | ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_TC_CNT_CLEAN);
+    zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_USER_OFFSET,
+        ZXDH_GDMA_ZF_USER);
+
+    return 0;
+
+free_queue:
+    zxdh_gdma_queue_free(dev, queue_id);
+    return ret;
+}
+
+static int
+zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id)
+{
+    struct zxdh_gdma_rawdev *gdmadev = NULL;
+    struct zxdh_gdma_queue *queue = NULL;
+    uint32_t val = 0;
+
+    queue = zxdh_gdma_get_queue(dev, queue_id);
+    if (queue == NULL)
+        return -EINVAL;
+
+    gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+    gdmadev->used_num--;
+
+    /* disable gdma channel */
+    val = ZXDH_GDMA_CHAN_FORCE_CLOSE;
+    zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);
+
+    queue->enable           = 0;
+    queue->is_txq           = 0;
+    queue->flag             = 0;
+    queue->user             = 0;
+    queue->tc_cnt           = 0;
+    queue->ring.avail_idx   = 0;
+    queue->sw_ring.free_cnt = 0;
+    queue->sw_ring.deq_cnt  = 0;
+    queue->sw_ring.pend_cnt = 0;
+    queue->sw_ring.enq_idx  = 0;
+    queue->sw_ring.deq_idx  = 0;
+    queue->sw_ring.used_idx = 0;
+    rte_free(queue->sw_ring.job);
+    rte_memzone_free(queue->ring.ring_mz);
+
+    return 0;
+}
+
 static int
 zxdh_gdma_map_resource(struct rte_pci_device *dev)
 {
diff --git a/drivers/raw/gdtc/gdtc_rawdev.h b/drivers/raw/gdtc/gdtc_rawdev.h
index 9f943c49c6..29b169d079 100644
--- a/drivers/raw/gdtc/gdtc_rawdev.h
+++ b/drivers/raw/gdtc/gdtc_rawdev.h
@@ -100,4 +100,23 @@ struct zxdh_gdma_rawdev {
     struct zxdh_gdma_queue vqs[ZXDH_GDMA_TOTAL_CHAN_NUM];
 };
 
+struct zxdh_gdma_rbp {
+    uint32_t use_ultrashort:1;
+    uint32_t enable:1;
+    uint32_t dportid:3;
+    uint32_t dpfid:3;
+    uint32_t dvfid:8; /*using route by port for destination */
+    uint32_t drbp:1;
+    uint32_t sportid:3;
+    uint32_t spfid:3;
+    uint32_t svfid:8;
+    uint32_t srbp:1;
+};
+
+struct zxdh_gdma_queue_config {
+    uint32_t lcore_id;
+    uint32_t flags;
+    struct zxdh_gdma_rbp *rbp;
+};
+
 #endif /* __GDTC_RAWDEV_H__ */
-- 
2.43.0
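
For reviewers, a minimal usage sketch of the setup path added above (not part of the patch): it shows how an application could request a TX queue (drbp set, srbp clear) through the generic rawdev API, and how the channel id chosen by the PMD comes back as the return value. The helper name gdtc_setup_txq, the dev_id argument and the port/pf/vf values are illustrative assumptions, not code from this series.

#include <rte_lcore.h>
#include <rte_rawdev.h>

#include "gdtc_rawdev.h"   /* zxdh_gdma_queue_config / zxdh_gdma_rbp from this patch */

/* Illustrative helper; assumes the gdtc rawdev has already been probed. */
static int
gdtc_setup_txq(uint16_t dev_id)
{
    struct zxdh_gdma_rbp rbp = {
        .drbp    = 1,   /* route-by-port on the destination side -> TX queue */
        .dportid = 0,   /* EP id, biased by ZXDH_GDMA_EPID_OFFSET in the PMD */
        .dpfid   = 0,
        .dvfid   = 0,   /* 0 targets the PF; a non-zero value selects VF (dvfid - 1) */
    };
    struct zxdh_gdma_queue_config qconfig = {
        .lcore_id = rte_lcore_id(),
        .flags    = 0,
        .rbp      = &rbp,
    };

    /*
     * The requested queue_id (0 here) is only a hint: the queue_setup op
     * scans for the first free channel, programs the packed src/dst user
     * fields into ZXDH_GDMA_EXT_ADDR_OFFSET and returns the channel id it
     * actually allocated, or a negative value on failure. conf_size must be
     * exactly sizeof(struct zxdh_gdma_queue_config) or the op rejects it.
     */
    return rte_rawdev_queue_setup(dev_id, 0, &qconfig, sizeof(qconfig));
}

The RX direction is symmetric: set srbp and the sportid/spfid/svfid fields instead, and the PMD builds the source user word while ZXDH_GDMA_ZF_USER is used on the destination side.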