Add queue initialization, release, enqueue, dequeue and other interface.<br /> <br />Signed-off-by: Hanxiao Li <li.hanxiao@zte.com.cn> <br />---<br /> drivers/common/zsda/zsda_qp.c | 720 ++++++++++++++++++++++++++++++++++<br /> drivers/common/zsda/zsda_qp.h | 160 ++++++++<br /> 2 files changed, 880 insertions(+)<br /> create mode 100644 drivers/common/zsda/zsda_qp.c<br /> create mode 100644 drivers/common/zsda/zsda_qp.h<br /> <br />diff --git a/drivers/common/zsda/zsda_qp.c b/drivers/common/zsda/zsda_qp.c<br />new file mode 100644<br />index 0000000000..f2dfe43b2e<br />--- /dev/null<br />+++ b/drivers/common/zsda/zsda_qp.c<br />@@ -0,0 +1,720 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2024 ZTE Corporation<br />+ */<br />+<br />+#include <stdint.h> <br />+<br />+#include <rte_malloc.h> <br />+<br />+#include "zsda_common.h" <br />+#include "zsda_logs.h" <br />+#include "zsda_device.h" <br />+#include "zsda_qp.h" <br />+<br />+#define RING_DIR_TX 0<br />+#define RING_DIR_RX 1<br />+<br />+struct ring_size {<br />+    uint16_t tx_msg_size;<br />+    uint16_t rx_msg_size;<br />+};<br />+<br />+struct ring_size zsda_qp_hw_ring_size[ZSDA_MAX_SERVICES] = {<br />+    [ZSDA_SERVICE_COMPRESSION] = {32, 16},<br />+    [ZSDA_SERVICE_DECOMPRESSION] = {32, 16},<br />+};<br />+<br />+static void<br />+zsda_set_queue_head_tail(const struct zsda_pci_device *zsda_pci_dev,<br />+             const uint8_t qid)<br />+{<br />+    struct rte_pci_device *pci_dev =<br />+        zsda_devs[zsda_pci_dev->zsda_dev_id].pci_dev;<br />+    uint8_t *mmio_base = pci_dev->mem_resource[0].addr;<br />+<br />+    ZSDA_CSR_WRITE32(mmio_base + IO_DB_INITIAL_CONFIG + (qid * 4),<br />+             SET_HEAD_INTI);<br />+}<br />+<br />+int<br />+zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev)<br />+{<br />+    uint8_t i;<br />+    uint32_t index;<br />+    enum zsda_service_type type;<br />+    struct zsda_qp_hw *zsda_hw_qps = zsda_pci_dev->zsda_hw_qps;<br />+    
struct qinfo qcfg;<br />+    int ret = 0;<br />+<br />+    for (i = 0; i < zsda_num_used_qps; i++) {<br />+        zsda_set_queue_head_tail(zsda_pci_dev, i);<br />+        ret = zsda_get_queue_cfg_by_id(zsda_pci_dev, i, &qcfg);<br />+        type = qcfg.q_type;<br />+        if (ret) {<br />+            ZSDA_LOG(ERR, "get queue cfg!");<br />+            return ret;<br />+        }<br />+        if (type >= ZSDA_SERVICE_INVALID)<br />+            continue;<br />+<br />+        index = zsda_pci_dev->zsda_qp_hw_num[type];<br />+        zsda_hw_qps[type].data[index].used = true;<br />+        zsda_hw_qps[type].data[index].tx_ring_num = i;<br />+        zsda_hw_qps[type].data[index].rx_ring_num = i;<br />+        zsda_hw_qps[type].data[index].tx_msg_size =<br />+            zsda_qp_hw_ring_size[type].tx_msg_size;<br />+        zsda_hw_qps[type].data[index].rx_msg_size =<br />+            zsda_qp_hw_ring_size[type].rx_msg_size;<br />+<br />+        zsda_pci_dev->zsda_qp_hw_num[type]++;<br />+    }<br />+<br />+    return ret;<br />+}<br />+<br />+struct zsda_qp_hw *<br />+zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,<br />+            const enum zsda_service_type service)<br />+{<br />+    struct zsda_qp_hw *qp_hw = NULL;<br />+<br />+    if (service < ZSDA_SERVICE_INVALID)<br />+        qp_hw = &(zsda_pci_dev->zsda_hw_qps[service]);<br />+<br />+    return qp_hw;<br />+}<br />+<br />+uint16_t<br />+zsda_qps_per_service(const struct zsda_pci_device *zsda_pci_dev,<br />+             const enum zsda_service_type service)<br />+{<br />+    uint16_t qp_hw_num = 0;<br />+<br />+    if (service < ZSDA_SERVICE_INVALID)<br />+        qp_hw_num = zsda_pci_dev->zsda_qp_hw_num[service];<br />+<br />+    return qp_hw_num;<br />+}<br />+<br />+uint16_t<br />+zsda_comp_max_nb_qps(const struct zsda_pci_device *zsda_pci_dev)<br />+{<br />+    uint16_t comp =<br />+        zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_COMPRESSION);<br />+    uint16_t decomp =<br />+     
   zsda_qps_per_service(zsda_pci_dev, ZSDA_SERVICE_DECOMPRESSION);<br />+    uint16_t min = 0;<br />+<br />+    if ((comp == MAX_QPS_ON_FUNCTION) ||<br />+        (decomp == MAX_QPS_ON_FUNCTION))<br />+        min = MAX_QPS_ON_FUNCTION;<br />+    else<br />+        min = (comp < decomp) ? comp : decomp;<br />+    if (min == 0)<br />+        return MAX_QPS_ON_FUNCTION;<br />+    return min;<br />+}<br />+<br />+<br />+void<br />+zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,<br />+          struct zsda_common_stat *stats)<br />+{<br />+    enum zsda_service_type type;<br />+    uint32_t i;<br />+    struct zsda_qp *qp;<br />+<br />+    if ((stats == NULL) || (queue_pairs == NULL)) {<br />+        ZSDA_LOG(ERR, E_NULL);<br />+        return;<br />+    }<br />+<br />+    for (i = 0; i < nb_queue_pairs; i++) {<br />+        qp = (struct zsda_qp *)queue_pairs[i];<br />+<br />+        if (qp == NULL) {<br />+            ZSDA_LOG(ERR, E_NULL);<br />+            break;<br />+        }<br />+<br />+        for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {<br />+            if (qp->srv[type].used) {<br />+                stats->enqueued_count +=<br />+                    qp->srv[type].stats.enqueued_count;<br />+                stats->dequeued_count +=<br />+                    qp->srv[type].stats.dequeued_count;<br />+                stats->enqueue_err_count +=<br />+                    qp->srv[type].stats.enqueue_err_count;<br />+                stats->dequeue_err_count +=<br />+                    qp->srv[type].stats.dequeue_err_count;<br />+            }<br />+        }<br />+    }<br />+}<br />+<br />+void<br />+zsda_stats_reset(void **queue_pairs, const uint32_t nb_queue_pairs)<br />+{<br />+    enum zsda_service_type type;<br />+    uint32_t i;<br />+    struct zsda_qp *qp;<br />+<br />+    if (queue_pairs == NULL) {<br />+        ZSDA_LOG(ERR, E_NULL);<br />+        return;<br />+    }<br />+<br />+    for (i = 0; i < nb_queue_pairs; i++) {<br 
/>+        qp = (struct zsda_qp *)queue_pairs[i];<br />+<br />+        if (qp == NULL) {<br />+            ZSDA_LOG(ERR, E_NULL);<br />+            break;<br />+        }<br />+        for (type = 0; type < ZSDA_MAX_SERVICES; type++) {<br />+            if (qp->srv[type].used)<br />+                memset(&(qp->srv[type].stats), 0,<br />+                       sizeof(struct zsda_common_stat));<br />+        }<br />+    }<br />+}<br />+<br />+static const struct rte_memzone *<br />+zsda_queue_dma_zone_reserve(const char *queue_name, const unsigned int queue_size,<br />+               const unsigned int socket_id)<br />+{<br />+    const struct rte_memzone *mz;<br />+<br />+    mz = rte_memzone_lookup(queue_name);<br />+    if (mz != 0) {<br />+        if (((size_t)queue_size <= mz->len) && <br />+            ((socket_id == (SOCKET_ID_ANY & 0xffff)) ||<br />+             (socket_id == (mz->socket_id & 0xffff)))) {<br />+            ZSDA_LOG(DEBUG,<br />+                 "re-use memzone already allocated for %s",<br />+                 queue_name);<br />+            return mz;<br />+        }<br />+        ZSDA_LOG(ERR, E_MALLOC);<br />+        return NULL;<br />+    }<br />+<br />+    mz = rte_memzone_reserve_aligned(queue_name, queue_size,<br />+                       (int)(socket_id & 0xfff),<br />+                       RTE_MEMZONE_IOVA_CONTIG, queue_size);<br />+<br />+    return mz;<br />+}<br />+<br />+static int<br />+zsda_queue_create(const uint32_t dev_id, struct zsda_queue *queue,<br />+          const struct zsda_qp_config *qp_conf, const uint8_t dir)<br />+{<br />+    void *io_addr;<br />+    const struct rte_memzone *qp_mz;<br />+    struct qinfo qcfg = {0};<br />+<br />+    uint16_t desc_size = ((dir == RING_DIR_TX) ? 
qp_conf->hw->tx_msg_size<br />+                           : qp_conf->hw->rx_msg_size);<br />+    unsigned int queue_size_bytes = qp_conf->nb_descriptors * desc_size;<br />+<br />+    queue->hw_queue_number =<br />+        ((dir == RING_DIR_TX) ? qp_conf->hw->tx_ring_num<br />+                      : qp_conf->hw->rx_ring_num);<br />+<br />+    struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;<br />+    struct zsda_pci_device *zsda_dev =<br />+        (struct zsda_pci_device *)zsda_devs[dev_id].mz->addr;<br />+<br />+    zsda_get_queue_cfg_by_id(zsda_dev, queue->hw_queue_number, &qcfg);<br />+<br />+    if (dir == RING_DIR_TX)<br />+        snprintf(queue->memz_name, sizeof(queue->memz_name),<br />+             "%s_%d_%s_%s_%d", pci_dev->driver->driver.name, dev_id,<br />+             qp_conf->service_str, "qptxmem",<br />+             queue->hw_queue_number);<br />+    else<br />+        snprintf(queue->memz_name, sizeof(queue->memz_name),<br />+             "%s_%d_%s_%s_%d", pci_dev->driver->driver.name, dev_id,<br />+             qp_conf->service_str, "qprxmem",<br />+             queue->hw_queue_number);<br />+<br />+    qp_mz = zsda_queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,<br />+                       rte_socket_id());<br />+    if (qp_mz == NULL) {<br />+        ZSDA_LOG(ERR, E_MALLOC);<br />+        return -ENOMEM;<br />+    }<br />+<br />+    queue->base_addr = (uint8_t *)qp_mz->addr;<br />+    queue->base_phys_addr = qp_mz->iova;<br />+    queue->modulo_mask = MAX_NUM_OPS;<br />+    queue->msg_size = desc_size;<br />+<br />+    queue->head = (dir == RING_DIR_TX) ? qcfg.wq_head : qcfg.cq_head;<br />+    queue->tail = (dir == RING_DIR_TX) ? 
qcfg.wq_tail : qcfg.cq_tail;<br />+<br />+    if ((queue->head == 0) && (queue->tail == 0))<br />+        qcfg.cycle += 1;<br />+<br />+    queue->valid = qcfg.cycle & (ZSDA_MAX_CYCLE - 1);<br />+    queue->queue_size = ZSDA_MAX_DESC;<br />+    queue->cycle_size = ZSDA_MAX_CYCLE;<br />+    queue->io_addr = pci_dev->mem_resource[0].addr;<br />+<br />+    memset(queue->base_addr, 0x0, queue_size_bytes);<br />+    io_addr = pci_dev->mem_resource[0].addr;<br />+<br />+    if (dir == RING_DIR_TX)<br />+        ZSDA_CSR_WQ_RING_BASE(io_addr, queue->hw_queue_number,<br />+                      queue->base_phys_addr);<br />+    else<br />+        ZSDA_CSR_CQ_RING_BASE(io_addr, queue->hw_queue_number,<br />+                      queue->base_phys_addr);<br />+<br />+    return 0;<br />+}<br />+<br />+static void<br />+zsda_queue_delete(const struct zsda_queue *queue)<br />+{<br />+    const struct rte_memzone *mz;<br />+    int status;<br />+<br />+    if (queue == NULL) {<br />+        ZSDA_LOG(DEBUG, "Invalid queue");<br />+        return;<br />+    }<br />+<br />+    mz = rte_memzone_lookup(queue->memz_name);<br />+    if (mz != NULL) {<br />+        memset(queue->base_addr, 0x0,<br />+               (uint16_t)(queue->queue_size * queue->msg_size));<br />+        status = rte_memzone_free(mz);<br />+        if (status != 0)<br />+            ZSDA_LOG(ERR, E_FREE);<br />+    } else<br />+        ZSDA_LOG(DEBUG, "queue %s doesn't exist", queue->memz_name);<br />+}<br />+<br />+static int<br />+zsda_cookie_init(const uint32_t dev_id, struct zsda_qp **qp_addr,<br />+        const uint16_t queue_pair_id,<br />+        const struct zsda_qp_config *zsda_qp_conf)<br />+{<br />+    struct zsda_qp *qp = *qp_addr;<br />+    struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;<br />+    char op_cookie_pool_name[RTE_RING_NAMESIZE];<br />+    uint32_t i;<br />+    enum zsda_service_type type = zsda_qp_conf->service_type;<br />+<br />+    if (zsda_qp_conf->nb_descriptors != 
ZSDA_MAX_DESC)<br />+        ZSDA_LOG(ERR, "Can't create qp for %u descriptors",<br />+             zsda_qp_conf->nb_descriptors);<br />+<br />+    qp->srv[type].nb_descriptors = zsda_qp_conf->nb_descriptors;<br />+<br />+    qp->srv[type].op_cookies = rte_zmalloc_socket(<br />+        "zsda PMD op cookie pointer",<br />+        zsda_qp_conf->nb_descriptors *<br />+            sizeof(*qp->srv[type].op_cookies),<br />+        RTE_CACHE_LINE_SIZE, zsda_qp_conf->socket_id);<br />+<br />+    if (qp->srv[type].op_cookies == NULL) {<br />+        ZSDA_LOG(ERR, E_MALLOC);<br />+        return -ENOMEM;<br />+    }<br />+<br />+    snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s%d_cks_%s_qp%hu",<br />+         pci_dev->driver->driver.name, dev_id,<br />+         zsda_qp_conf->service_str, queue_pair_id);<br />+<br />+    qp->srv[type].op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);<br />+    if (qp->srv[type].op_cookie_pool == NULL)<br />+        qp->srv[type].op_cookie_pool = rte_mempool_create(<br />+            op_cookie_pool_name, qp->srv[type].nb_descriptors,<br />+            zsda_qp_conf->cookie_size, 64, 0, NULL, NULL, NULL,<br />+            NULL, (int)(rte_socket_id() & 0xfff), 0);<br />+    if (!qp->srv[type].op_cookie_pool) {<br />+        ZSDA_LOG(ERR, E_CREATE);<br />+        goto exit;<br />+    }<br />+<br />+    for (i = 0; i < qp->srv[type].nb_descriptors; i++) {<br />+        if (rte_mempool_get(qp->srv[type].op_cookie_pool,<br />+                    &qp->srv[type].op_cookies[i])) {<br />+            ZSDA_LOG(ERR, "ZSDA PMD Cannot get op_cookie");<br />+            goto exit;<br />+        }<br />+        memset(qp->srv[type].op_cookies[i], 0,<br />+               zsda_qp_conf->cookie_size);<br />+    }<br />+    return 0;<br />+<br />+exit:<br />+    if (qp->srv[type].op_cookie_pool)<br />+        rte_mempool_free(qp->srv[type].op_cookie_pool);<br />+    rte_free(qp->srv[type].op_cookies);<br />+<br />+    return -EFAULT;<br />+}<br />+<br 
/>+int<br />+zsda_queue_pair_setup(const uint32_t dev_id, struct zsda_qp **qp_addr,<br />+              const uint16_t queue_pair_id,<br />+              const struct zsda_qp_config *zsda_qp_conf)<br />+{<br />+    struct zsda_qp *qp = *qp_addr;<br />+    struct rte_pci_device *pci_dev = zsda_devs[dev_id].pci_dev;<br />+    int ret = 0;<br />+    enum zsda_service_type type = zsda_qp_conf->service_type;<br />+<br />+    if (type >= ZSDA_SERVICE_INVALID) {<br />+        ZSDA_LOG(ERR, "Failed! service type");<br />+        return -EINVAL;<br />+    }<br />+<br />+    if (pci_dev->mem_resource[0].addr == NULL) {<br />+        ZSDA_LOG(ERR, E_NULL);<br />+        return -EINVAL;<br />+    }<br />+<br />+    if (zsda_queue_create(dev_id, &(qp->srv[type].tx_q), zsda_qp_conf,<br />+                  RING_DIR_TX) != 0) {<br />+        ZSDA_LOG(ERR, E_CREATE);<br />+        return -EFAULT;<br />+    }<br />+<br />+    if (zsda_queue_create(dev_id, &(qp->srv[type].rx_q), zsda_qp_conf,<br />+                  RING_DIR_RX) != 0) {<br />+        ZSDA_LOG(ERR, E_CREATE);<br />+        zsda_queue_delete(&(qp->srv[type].tx_q));<br />+        return -EFAULT;<br />+    }<br />+<br />+    ret = zsda_cookie_init(dev_id, qp_addr, queue_pair_id, zsda_qp_conf);<br />+    if (ret) {<br />+        zsda_queue_delete(&(qp->srv[type].tx_q));<br />+        zsda_queue_delete(&(qp->srv[type].rx_q));<br />+        qp->srv[type].used = false;<br />+    }<br />+    qp->srv[type].used = true;<br />+    return ret;<br />+}<br />+<br />+int<br />+zsda_queue_pair_release(struct zsda_qp **qp_addr)<br />+{<br />+    struct zsda_qp *qp = *qp_addr;<br />+    uint32_t i;<br />+    enum zsda_service_type type;<br />+<br />+    if (qp == NULL) {<br />+        ZSDA_LOG(DEBUG, "qp already freed");<br />+        return 0;<br />+    }<br />+<br />+    for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {<br />+        if (!qp->srv[type].used)<br />+            continue;<br />+<br />+        
zsda_queue_delete(&(qp->srv[type].tx_q));<br />+        zsda_queue_delete(&(qp->srv[type].rx_q));<br />+        qp->srv[type].used = false;<br />+        for (i = 0; i < qp->srv[type].nb_descriptors; i++)<br />+            rte_mempool_put(qp->srv[type].op_cookie_pool,<br />+                    qp->srv[type].op_cookies[i]);<br />+<br />+        if (qp->srv[type].op_cookie_pool)<br />+            rte_mempool_free(qp->srv[type].op_cookie_pool);<br />+<br />+        rte_free(qp->srv[type].op_cookies);<br />+    }<br />+<br />+    rte_free(qp);<br />+    *qp_addr = NULL;<br />+<br />+    return 0;<br />+}<br />+<br />+int<br />+zsda_fill_sgl(const struct rte_mbuf *buf, uint32_t offset, struct zsda_sgl *sgl,<br />+          const phys_addr_t sgl_phy_addr, uint32_t remain_len,<br />+          struct comp_head_info *comp_head_info)<br />+{<br />+    uint32_t nr;<br />+    uint16_t put_in_len;<br />+    bool head_set = false;<br />+<br />+    for (nr = 0; (buf && (nr < (ZSDA_SGL_MAX_NUMBER - 1)));) {<br />+        if (offset >= rte_pktmbuf_data_len(buf)) {<br />+            offset -= rte_pktmbuf_data_len(buf);<br />+            buf = buf->next;<br />+            continue;<br />+        }<br />+        memset(&(sgl->buffers[nr]), 0, sizeof(struct zsda_buf));<br />+        if ((nr > 0) && (((nr + 1) % ZSDA_SGL_FRAGMENT_SIZE) == 0) && <br />+            (buf->next != NULL)) {<br />+            sgl->buffers[nr].len = SGL_TYPE_PHYS_ADDR;<br />+            sgl->buffers[nr].addr =<br />+                sgl_phy_addr +<br />+                ((nr + 1) * sizeof(struct zsda_buf));<br />+            sgl->buffers[nr].type = SGL_TYPE_NEXT_LIST;<br />+            ++nr;<br />+            continue;<br />+        }<br />+        if (comp_head_info && !head_set) {<br />+            sgl->buffers[nr].len = comp_head_info->head_len;<br />+            sgl->buffers[nr].addr = comp_head_info->head_phys_addr;<br />+            sgl->buffers[nr].type = SGL_TYPE_PHYS_ADDR;<br />+            ++nr;<br />+ 
           head_set = true;<br />+            remain_len -= comp_head_info->head_len;<br />+            continue;<br />+        } else {<br />+            put_in_len = rte_pktmbuf_data_len(buf) - (offset & 0xffff);<br />+            if (remain_len <= put_in_len)<br />+                put_in_len = remain_len;<br />+            remain_len -= put_in_len;<br />+<br />+            sgl->buffers[nr].len = put_in_len;<br />+            sgl->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);<br />+            sgl->buffers[nr].type = SGL_TYPE_PHYS_ADDR;<br />+        }<br />+        offset = 0;<br />+        ++nr;<br />+        buf = buf->next;<br />+<br />+        if (remain_len == 0)<br />+            break;<br />+    }<br />+<br />+    if (nr == 0) {<br />+        ZSDA_LOG(ERR, "In fill_sgl, nr == 0");<br />+        return ZSDA_FAILED;<br />+    }<br />+<br />+    sgl->buffers[nr - 1].type = SGL_TYPE_LAST_PHYS_ADDR;<br />+<br />+    if (buf) {<br />+        if (unlikely(buf->next)) {<br />+            if (nr == (ZSDA_SGL_MAX_NUMBER - 1)) {<br />+                ZSDA_LOG(ERR, "ERR! 
segs size (%u)",<br />+                     (ZSDA_SGL_MAX_NUMBER));<br />+                return -EINVAL;<br />+            }<br />+        }<br />+    }<br />+<br />+    return ZSDA_SUCCESS;<br />+}<br />+<br />+int<br />+zsda_get_sgl_num(const struct zsda_sgl *sgl)<br />+{<br />+    int sgl_num = 0;<br />+<br />+    while (sgl->buffers[sgl_num].type != 1) {<br />+        sgl_num++;<br />+        if (sgl_num >= ZSDA_SGL_MAX_NUMBER)<br />+            return ZSDA_FAILED;<br />+    }<br />+    sgl_num++;<br />+    return sgl_num;<br />+}<br />+<br />+static int<br />+zsda_find_next_free_cookie(const struct zsda_queue *queue, void **op_cookie,<br />+              uint16_t *idx)<br />+{<br />+    uint16_t old_tail = queue->tail;<br />+    uint16_t tail = queue->tail;<br />+    struct zsda_op_cookie *cookie;<br />+<br />+    do {<br />+        cookie = (struct zsda_op_cookie *)op_cookie[tail];<br />+        if (!cookie->used) {<br />+            *idx = tail & (queue->queue_size - 1);<br />+            return 0;<br />+        }<br />+        tail = zsda_modulo_16(tail++, queue->modulo_mask);<br />+    } while (old_tail != tail);<br />+<br />+    return -EINVAL;<br />+}<br />+<br />+static int<br />+zsda_enqueue(void *op, struct zsda_qp *qp)<br />+{<br />+    uint16_t new_tail;<br />+    enum zsda_service_type type;<br />+    void **op_cookie;<br />+    int ret = 0;<br />+    struct zsda_queue *queue;<br />+<br />+    for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {<br />+        if (qp->srv[type].used) {<br />+            if (!qp->srv[type].match(op))<br />+                continue;<br />+            queue = &qp->srv[type].tx_q;<br />+            op_cookie = qp->srv[type].op_cookies;<br />+<br />+            if (zsda_find_next_free_cookie(queue, op_cookie,<br />+                          &new_tail)) {<br />+                ret = -EBUSY;<br />+                break;<br />+            }<br />+            ret = qp->srv[type].tx_cb(op, queue, op_cookie,<br />+           
               new_tail);<br />+            if (ret) {<br />+                qp->srv[type].stats.enqueue_err_count++;<br />+                ZSDA_LOG(ERR, "Failed! config wqe");<br />+                break;<br />+            }<br />+            qp->srv[type].stats.enqueued_count++;<br />+<br />+            queue->tail = zsda_modulo_16(new_tail + 1,<br />+                             queue->queue_size - 1);<br />+<br />+            if (new_tail > queue->tail)<br />+                queue->valid =<br />+                    zsda_modulo_8(queue->valid + 1,<br />+                    (uint8_t)(queue->cycle_size - 1));<br />+<br />+            queue->pushed_wqe++;<br />+            break;<br />+        }<br />+    }<br />+<br />+    return ret;<br />+}<br />+<br />+static void<br />+zsda_tx_write_tail(struct zsda_queue *queue)<br />+{<br />+    if (queue->pushed_wqe)<br />+        WRITE_CSR_WQ_TAIL(queue->io_addr, queue->hw_queue_number,<br />+                  queue->tail);<br />+<br />+    queue->pushed_wqe = 0;<br />+}<br />+<br />+uint16_t<br />+zsda_enqueue_op_burst(struct zsda_qp *qp, void **ops, uint16_t nb_ops)<br />+{<br />+    int ret = 0;<br />+    enum zsda_service_type type;<br />+    uint16_t i;<br />+    uint16_t nb_send = 0;<br />+    void *op;<br />+<br />+    if (nb_ops > ZSDA_MAX_DESC) {<br />+        ZSDA_LOG(ERR, "Enqueue number bigger than %d", ZSDA_MAX_DESC);<br />+        return 0;<br />+    }<br />+<br />+    for (i = 0; i < nb_ops; i++) {<br />+        op = ops[i];<br />+        ret = zsda_enqueue(op, qp);<br />+        if (ret < 0)<br />+            break;<br />+        nb_send++;<br />+    }<br />+<br />+    for (type = 0; type < ZSDA_SERVICE_INVALID; type++)<br />+        if (qp->srv[type].used)<br />+            zsda_tx_write_tail(&qp->srv[type].tx_q);<br />+<br />+    return nb_send;<br />+}<br />+<br />+static void<br />+zsda_dequeue(struct qp_srv *srv, void **ops, const uint16_t nb_ops, uint16_t *nb)<br />+{<br />+    uint16_t head;<br />+   
 struct zsda_cqe *cqe;<br />+    struct zsda_queue *queue = &srv->rx_q;<br />+    struct zsda_op_cookie *cookie;<br />+    head = queue->head;<br />+<br />+    while (*nb < nb_ops) {<br />+        cqe = (struct zsda_cqe *)((uint8_t *)queue->base_addr + head * queue->msg_size);<br />+<br />+        if (!CQE_VALID(cqe->err1))<br />+            break;<br />+        cookie = (struct zsda_op_cookie *)srv->op_cookies[cqe->sid];<br />+<br />+        if (cookie->decomp_no_tail && CQE_ERR0_RIGHT(cqe->err0))<br />+            cqe->err0 = 0x0000;<br />+<br />+        if (CQE_ERR0(cqe->err0) || CQE_ERR1(cqe->err1)) {<br />+            ZSDA_LOG(ERR,<br />+                 "ERR! Cqe, opcode 0x%x, sid 0x%x, " <br />+                 "tx_real_length 0x%x, err0 0x%x, err1 0x%x",<br />+                 cqe->op_code, cqe->sid, cqe->tx_real_length,<br />+                 cqe->err0, cqe->err1);<br />+            srv->stats.dequeue_err_count++;<br />+        } else<br />+            srv->stats.dequeued_count++;<br />+<br />+        ops[*nb] = cookie->op;<br />+        srv->rx_cb(cookie, cqe);<br />+        (*nb)++;<br />+        cookie->used = false;<br />+<br />+        head = zsda_modulo_16(head + 1, queue->modulo_mask);<br />+        queue->head = head;<br />+        WRITE_CSR_CQ_HEAD(queue->io_addr, queue->hw_queue_number, head);<br />+        memset(cqe, 0x0, sizeof(struct zsda_cqe));<br />+    }<br />+}<br />+<br />+uint16_t<br />+zsda_dequeue_op_burst(struct zsda_qp *qp, void **ops, const uint16_t nb_ops)<br />+{<br />+    uint16_t nb = 0;<br />+    uint32_t type;<br />+    struct qp_srv *srv;<br />+<br />+    for (type = 0; type < ZSDA_SERVICE_INVALID; type++) {<br />+        if (!qp->srv[type].used)<br />+            continue;<br />+        srv = &qp->srv[type];<br />+        zsda_dequeue(srv, ops, nb_ops, &nb);<br />+        if (nb >= nb_ops)<br />+            return nb_ops;<br />+    }<br />+    return nb;<br />+}<br />+<br />+int<br />+zsda_common_setup_qp(uint32_t 
zsda_dev_id, struct zsda_qp **qp_addr,<br />+        const uint16_t queue_pair_id, const struct zsda_qp_config *conf)<br />+{<br />+    uint32_t i;<br />+    int ret = 0;<br />+    struct zsda_qp *qp;<br />+    rte_iova_t cookie_phys_addr;<br />+<br />+    ret = zsda_queue_pair_setup(zsda_dev_id, qp_addr, queue_pair_id, conf);<br />+    if (ret)<br />+        return ret;<br />+<br />+    qp = (struct zsda_qp *)*qp_addr;<br />+<br />+    for (i = 0; i < qp->srv[conf->service_type].nb_descriptors; i++) {<br />+        struct zsda_op_cookie *cookie =<br />+            qp->srv[conf->service_type].op_cookies[i];<br />+        cookie_phys_addr = rte_mempool_virt2iova(cookie);<br />+<br />+        cookie->comp_head_phys_addr = cookie_phys_addr +<br />+            offsetof(struct zsda_op_cookie, comp_head);<br />+<br />+        cookie->sgl_src_phys_addr = cookie_phys_addr +<br />+            offsetof(struct zsda_op_cookie, sgl_src);<br />+<br />+        cookie->sgl_dst_phys_addr = cookie_phys_addr +<br />+            offsetof(struct zsda_op_cookie, sgl_dst);<br />+    }<br />+    return ret;<br />+}<br />diff --git a/drivers/common/zsda/zsda_qp.h b/drivers/common/zsda/zsda_qp.h<br />new file mode 100644<br />index 0000000000..11943a9be4<br />--- /dev/null<br />+++ b/drivers/common/zsda/zsda_qp.h<br />@@ -0,0 +1,160 @@<br />+/* SPDX-License-Identifier: BSD-3-Clause<br />+ * Copyright(c) 2024 ZTE Corporation<br />+ */<br />+<br />+#ifndef _ZSDA_QP_H_<br />+#define _ZSDA_QP_H_<br />+<br />+#define WQ_CSR_LBASE 0x1000<br />+#define WQ_CSR_UBASE 0x1004<br />+#define CQ_CSR_LBASE 0x1400<br />+#define CQ_CSR_UBASE 0x1404<br />+#define WQ_TAIL         0x1800<br />+#define CQ_HEAD         0x1804<br />+<br />+/**<br />+ * Structure associated with each queue.<br />+ */<br />+struct zsda_queue {<br />+    char memz_name[RTE_MEMZONE_NAMESIZE];<br />+    uint8_t *io_addr;<br />+    uint8_t *base_addr;       /* Base address */<br />+    rte_iova_t base_phys_addr; /* Queue physical 
address */<br />+    uint16_t head;           /* Shadow copy of the head */<br />+    uint16_t tail;           /* Shadow copy of the tail */<br />+    uint16_t modulo_mask;<br />+    uint16_t msg_size;<br />+    uint16_t queue_size;<br />+    uint16_t cycle_size;<br />+    uint16_t pushed_wqe;<br />+<br />+    uint8_t hw_queue_number;<br />+    uint32_t csr_head; /* last written head value */<br />+    uint32_t csr_tail; /* last written tail value */<br />+<br />+    uint8_t valid;<br />+    uint16_t sid;<br />+};<br />+<br />+typedef void (*rx_callback)(void *cookie_in, const struct zsda_cqe *cqe);<br />+typedef int (*tx_callback)(void *op_in, const struct zsda_queue *queue,<br />+               void **op_cookies, const uint16_t new_tail);<br />+typedef int (*srv_match)(const void *op_in);<br />+<br />+struct qp_srv {<br />+    bool used;<br />+    struct zsda_queue tx_q;<br />+    struct zsda_queue rx_q;<br />+    rx_callback rx_cb;<br />+    tx_callback tx_cb;<br />+    srv_match match;<br />+    struct zsda_common_stat stats;<br />+    struct rte_mempool *op_cookie_pool;<br />+    void **op_cookies;<br />+    uint16_t nb_descriptors;<br />+};<br />+<br />+struct zsda_qp {<br />+    struct qp_srv srv[ZSDA_MAX_SERVICES];<br />+<br />+    uint16_t max_inflights;<br />+    uint16_t min_enq_burst_threshold;<br />+    void *mmap_bar_addr;<br />+};<br />+<br />+struct zsda_qp_config {<br />+    enum zsda_service_type service_type;<br />+    const struct zsda_qp_hw_data *hw;<br />+    uint16_t nb_descriptors;<br />+    uint32_t cookie_size;<br />+    int socket_id;<br />+    const char *service_str;<br />+};<br />+<br />+struct comp_head_info {<br />+    uint32_t head_len;<br />+    phys_addr_t head_phys_addr;<br />+};<br />+<br />+extern uint8_t zsda_num_used_qps;<br />+<br />+struct zsda_qp_hw *<br />+zsda_qps_hw_per_service(struct zsda_pci_device *zsda_pci_dev,<br />+            const enum zsda_service_type service);<br />+uint16_t zsda_qps_per_service(const struct 
zsda_pci_device *zsda_pci_dev,<br />+                  const enum zsda_service_type service);<br />+<br />+uint16_t zsda_comp_max_nb_qps(const struct zsda_pci_device *zsda_pci_dev);<br />+uint16_t zsda_crypto_max_nb_qps(struct zsda_pci_device *zsda_pci_dev);<br />+<br />+int zsda_get_queue_cfg(struct zsda_pci_device *zsda_pci_dev);<br />+<br />+/* CSR write macro */<br />+#define ZSDA_CSR_WR(csrAddr, csrOffset, val)                                   \<br />+    rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))<br />+#define ZSDA_CSR_WC_WR(csrAddr, csrOffset, val)                                \<br />+    rte_write32_wc(val, (((uint8_t *)csrAddr) + csrOffset))<br />+<br />+/* CSR read macro */<br />+#define ZSDA_CSR_RD(csrAddr, csrOffset)                                        \<br />+    rte_read32((((uint8_t *)csrAddr) + csrOffset))<br />+<br />+#define ZSDA_CSR_WQ_RING_BASE(csr_base_addr, ring, value)                      \<br />+    do {                                                                   \<br />+        uint32_t l_base = 0, u_base = 0;                               \<br />+        l_base = (uint32_t)(value & 0xFFFFFFFF);                       \<br />+        u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);    \<br />+        ZSDA_CSR_WR(csr_base_addr, (ring << 3) + WQ_CSR_LBASE,         \<br />+                l_base);                                           \<br />+        ZSDA_LOG(INFO, "l_base - offset:0x%x, value:0x%x",             \<br />+             ((ring << 3) + WQ_CSR_LBASE), l_base);                \<br />+        ZSDA_CSR_WR(csr_base_addr, (ring << 3) + WQ_CSR_UBASE,         \<br />+                u_base);                                           \<br />+        ZSDA_LOG(INFO, "h_base - offset:0x%x, value:0x%x",             \<br />+             ((ring << 3) + WQ_CSR_UBASE), u_base);                \<br />+    } while (0)<br />+<br />+#define ZSDA_CSR_CQ_RING_BASE(csr_base_addr, ring, value)                      \<br
/>+    do {                                                                   \<br />+        uint32_t l_base = 0, u_base = 0;                               \<br />+        l_base = (uint32_t)(value & 0xFFFFFFFF);                       \<br />+        u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32);    \<br />+        ZSDA_CSR_WR(csr_base_addr, (ring << 3) + CQ_CSR_LBASE,         \<br />+                l_base);                                           \<br />+        ZSDA_CSR_WR(csr_base_addr, (ring << 3) + CQ_CSR_UBASE,         \<br />+                u_base);                                           \<br />+    } while (0)<br />+<br />+#define READ_CSR_WQ_HEAD(csr_base_addr, ring)                                  \<br />+    ZSDA_CSR_RD(csr_base_addr, WQ_TAIL + (ring << 3))<br />+#define WRITE_CSR_WQ_TAIL(csr_base_addr, ring, value)                          \<br />+    ZSDA_CSR_WC_WR(csr_base_addr, WQ_TAIL + (ring << 3), value)<br />+#define READ_CSR_CQ_HEAD(csr_base_addr, ring)                                  \<br />+    ZSDA_CSR_RD(csr_base_addr, WQ_TAIL + (ring << 3))<br />+#define WRITE_CSR_CQ_HEAD(csr_base_addr, ring, value)                          \<br />+    ZSDA_CSR_WC_WR(csr_base_addr, CQ_HEAD + (ring << 3), value)<br />+<br />+uint16_t zsda_enqueue_op_burst(struct zsda_qp *qp, void **ops, const uint16_t nb_ops);<br />+uint16_t zsda_dequeue_op_burst(struct zsda_qp *qp, void **ops, const uint16_t nb_ops);<br />+<br />+int zsda_queue_pair_setup(uint32_t dev_id, struct zsda_qp **qp_addr,<br />+              const uint16_t queue_pair_id,<br />+              const struct zsda_qp_config *zsda_qp_conf);<br />+<br />+int zsda_queue_pair_release(struct zsda_qp **qp_addr);<br />+int zsda_fill_sgl(const struct rte_mbuf *buf, uint32_t offset,<br />+          struct zsda_sgl *sgl, const phys_addr_t sgl_phy_addr,<br />+          uint32_t remain_len, struct comp_head_info *comp_head_info);<br />+<br />+int zsda_get_sgl_num(const struct zsda_sgl 
*sgl);<br />+int zsda_sgl_opt_addr_lost(struct rte_mbuf *mbuf);<br />+<br />+int zsda_common_setup_qp(uint32_t dev_id, struct zsda_qp **qp_addr,<br />+            const uint16_t queue_pair_id,<br />+            const struct zsda_qp_config *conf);<br />+<br />+void zsda_stats_get(void **queue_pairs, const uint32_t nb_queue_pairs,<br />+            struct zsda_common_stat *stats);<br />+void zsda_stats_reset(void **queue_pairs, const uint32_t nb_queue_pairs);<br />+<br />+#endif /* _ZSDA_QP_H_ */<br />--  <br />2.27.0<br />