[dpdk-dev] [PATCH v2 04/12] crypto/octeontx2: add queue pair functions

Anoob Joseph anoobj at marvell.com
Sun Oct 13 14:39:54 CEST 2019


From: Ankur Dwivedi <adwivedi at marvell.com>

This patch adds the queue pair setup and queue pair release functions
for the OCTEON TX2 crypto PMD.

Signed-off-by: Ankur Dwivedi <adwivedi at marvell.com>
Signed-off-by: Anoob Joseph <anoobj at marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree at marvell.com>
---
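For reference, here is a minimal sketch (not part of this patch; DPDK
19.11-era API) of how an application would exercise these ops. The
dev_id and the two session mempools are assumed to be created and
configured elsewhere; all names are illustrative:

	#include <rte_cryptodev.h>
	#include <rte_lcore.h>

	/* Illustrative sketch: set up queue pair 0 on an already
	 * configured device. nb_descriptors beyond
	 * OTX2_CPT_DEFAULT_CMD_QLEN (8200) makes setup fail with
	 * -EINVAL.
	 */
	static int
	setup_qp0(uint8_t dev_id, struct rte_mempool *sess_mp,
		  struct rte_mempool *sess_mp_priv)
	{
		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = 2048,
			.mp_session = sess_mp,
			.mp_session_private = sess_mp_priv,
		};

		return rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
						      rte_socket_id());
	}

There is no public release API; the release op runs when the device is
closed or when the same queue pair id is set up again (see
otx2_cpt_queue_pair_setup() below).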
 .../crypto/octeontx2/otx2_cryptodev_hw_access.c    |  99 +++++++
 .../crypto/octeontx2/otx2_cryptodev_hw_access.h    | 126 +++++++++
 drivers/crypto/octeontx2/otx2_cryptodev_mbox.c     |  83 ++++++
 drivers/crypto/octeontx2/otx2_cryptodev_mbox.h     |   6 +
 drivers/crypto/octeontx2/otx2_cryptodev_ops.c      | 306 ++++++++++++++++++++-
 5 files changed, 617 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.c b/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.c
index 663f9ca..9e4f782 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.c
@@ -1,10 +1,14 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (C) 2019 Marvell International Ltd.
  */
+#include <rte_cryptodev.h>
 
 #include "otx2_common.h"
 #include "otx2_cryptodev.h"
 #include "otx2_cryptodev_hw_access.h"
+#include "otx2_cryptodev_mbox.h"
+#include "otx2_cryptodev_ops.h"
+#include "otx2_dev.h"
 
 #include "cpt_pmd_logs.h"
 
@@ -124,3 +128,98 @@ otx2_cpt_err_intr_register(const struct rte_cryptodev *dev)
 	 */
 	return 0;
 }
+
+int
+otx2_cpt_iq_enable(const struct rte_cryptodev *dev,
+		   const struct otx2_cpt_qp *qp, uint8_t grp_mask, uint8_t pri,
+		   uint32_t size_div40)
+{
+	union otx2_cpt_af_lf_ctl af_lf_ctl;
+	union otx2_cpt_lf_inprog inprog;
+	union otx2_cpt_lf_q_base base;
+	union otx2_cpt_lf_q_size size;
+	union otx2_cpt_lf_ctl lf_ctl;
+	int ret;
+
+	/* Set engine group mask and priority */
+
+	ret = otx2_cpt_af_reg_read(dev, OTX2_CPT_AF_LF_CTL(qp->id),
+				   &af_lf_ctl.u);
+	if (ret)
+		return ret;
+	af_lf_ctl.s.grp = grp_mask;
+	af_lf_ctl.s.pri = pri ? 1 : 0;
+	ret = otx2_cpt_af_reg_write(dev, OTX2_CPT_AF_LF_CTL(qp->id),
+				    af_lf_ctl.u);
+	if (ret)
+		return ret;
+
+	/* Set instruction queue base address */
+
+	base.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_BASE);
+	base.s.fault = 0;
+	base.s.stopped = 0;
+	base.s.addr = qp->iq_dma_addr >> 7;
+	otx2_write64(base.u, qp->base + OTX2_CPT_LF_Q_BASE);
+
+	/* Set instruction queue size */
+
+	size.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_SIZE);
+	size.s.size_div40 = size_div40;
+	otx2_write64(size.u, qp->base + OTX2_CPT_LF_Q_SIZE);
+
+	/* Enable instruction queue */
+
+	lf_ctl.u = otx2_read64(qp->base + OTX2_CPT_LF_CTL);
+	lf_ctl.s.ena = 1;
+	otx2_write64(lf_ctl.u, qp->base + OTX2_CPT_LF_CTL);
+
+	/* Start instruction execution */
+
+	inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
+	inprog.s.eena = 1;
+	otx2_write64(inprog.u, qp->base + OTX2_CPT_LF_INPROG);
+
+	return 0;
+}
+
+void
+otx2_cpt_iq_disable(struct otx2_cpt_qp *qp)
+{
+	union otx2_cpt_lf_q_grp_ptr grp_ptr;
+	union otx2_cpt_lf_inprog inprog;
+	union otx2_cpt_lf_ctl ctl;
+	int cnt;
+
+	/* Stop instruction execution */
+	inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
+	inprog.s.eena = 0x0;
+	otx2_write64(inprog.u, qp->base + OTX2_CPT_LF_INPROG);
+
+	/* Disable instruction enqueuing */
+	ctl.u = otx2_read64(qp->base + OTX2_CPT_LF_CTL);
+	ctl.s.ena = 0;
+	otx2_write64(ctl.u, qp->base + OTX2_CPT_LF_CTL);
+
+	/* Wait for instruction queue to become empty */
+	cnt = 0;
+	do {
+		inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
+		if (inprog.s.grb_partial)
+			cnt = 0;
+		else
+			cnt++;
+		grp_ptr.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_GRP_PTR);
+	} while ((cnt < 10) && (grp_ptr.s.nq_ptr != grp_ptr.s.dq_ptr));
+
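+	/* Wait for 10 consecutive polls with no in-flight instructions */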
+	cnt = 0;
+	do {
+		inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
+		if ((inprog.s.inflight == 0) &&
+		    (inprog.s.gwb_cnt < 40) &&
+		    ((inprog.s.grb_cnt == 0) || (inprog.s.grb_cnt == 40)))
+			cnt++;
+		else
+			cnt = 0;
+	} while (cnt < 10);
+}
diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h b/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h
index 2af674d..87d4e77 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h
@@ -5,23 +5,149 @@
 #ifndef _OTX2_CRYPTODEV_HW_ACCESS_H_
 #define _OTX2_CRYPTODEV_HW_ACCESS_H_
 
+#include <stdint.h>
+
 #include <rte_cryptodev.h>
+#include <rte_memory.h>
+
+#include "cpt_common.h"
+#include "cpt_hw_types.h"
 
 #include "otx2_dev.h"
 
+/* CPT instruction queue length */
+#define OTX2_CPT_IQ_LEN			8200
+
+#define OTX2_CPT_DEFAULT_CMD_QLEN	OTX2_CPT_IQ_LEN
+
+/* Mask which selects all engine groups */
+#define OTX2_CPT_ENG_GRPS_MASK		0xFF
+
 /* Register offsets */
 
+/* LMT LF registers */
+#define OTX2_LMT_LF_LMTLINE(a)		(0x0ull | (uint64_t)(a) << 3)
+
 /* CPT LF registers */
+#define OTX2_CPT_LF_CTL			0x10ull
+#define OTX2_CPT_LF_INPROG		0x40ull
 #define OTX2_CPT_LF_MISC_INT		0xb0ull
 #define OTX2_CPT_LF_MISC_INT_ENA_W1S	0xd0ull
 #define OTX2_CPT_LF_MISC_INT_ENA_W1C	0xe0ull
+#define OTX2_CPT_LF_Q_BASE		0xf0ull
+#define OTX2_CPT_LF_Q_SIZE		0x100ull
+#define OTX2_CPT_LF_Q_GRP_PTR		0x120ull
+#define OTX2_CPT_LF_NQ(a)		(0x400ull | (uint64_t)(a) << 3)
+
+#define OTX2_CPT_AF_LF_CTL(a)		(0x27000ull | (uint64_t)(a) << 3)
 
 #define OTX2_CPT_LF_BAR2(vf, q_id) \
 		((vf)->otx2_dev.bar2 + \
 		 ((RVU_BLOCK_ADDR_CPT0 << 20) | ((q_id) << 12)))
 
+#define OTX2_CPT_QUEUE_HI_PRIO 0x1
+
+union otx2_cpt_lf_ctl {
+	uint64_t u;
+	struct {
+		uint64_t ena                         : 1;
+		uint64_t fc_ena                      : 1;
+		uint64_t fc_up_crossing              : 1;
+		uint64_t reserved_3_3                : 1;
+		uint64_t fc_hyst_bits                : 4;
+		uint64_t reserved_8_63               : 56;
+	} s;
+};
+
+union otx2_cpt_lf_inprog {
+	uint64_t u;
+	struct {
+		uint64_t inflight                    : 9;
+		uint64_t reserved_9_15               : 7;
+		uint64_t eena                        : 1;
+		uint64_t grp_drp                     : 1;
+		uint64_t reserved_18_30              : 13;
+		uint64_t grb_partial                 : 1;
+		uint64_t grb_cnt                     : 8;
+		uint64_t gwb_cnt                     : 8;
+		uint64_t reserved_48_63              : 16;
+	} s;
+};
+
+union otx2_cpt_lf_q_base {
+	uint64_t u;
+	struct {
+		uint64_t fault                       : 1;
+		uint64_t stopped                     : 1;
+		uint64_t reserved_2_6                : 5;
+		uint64_t addr                        : 46;
+		uint64_t reserved_53_63              : 11;
+	} s;
+};
+
+union otx2_cpt_lf_q_size {
+	uint64_t u;
+	struct {
+		uint64_t size_div40                  : 15;
+		uint64_t reserved_15_63              : 49;
+	} s;
+};
+
+union otx2_cpt_af_lf_ctl {
+	uint64_t u;
+	struct {
+		uint64_t pri                         : 1;
+		uint64_t reserved_1_8                : 8;
+		uint64_t pf_func_inst                : 1;
+		uint64_t cont_err                    : 1;
+		uint64_t reserved_11_15              : 5;
+		uint64_t nixtx_en                    : 1;
+		uint64_t reserved_17_47              : 31;
+		uint64_t grp                         : 8;
+		uint64_t reserved_56_63              : 8;
+	} s;
+};
+
+union otx2_cpt_lf_q_grp_ptr {
+	uint64_t u;
+	struct {
+		uint64_t dq_ptr                      : 15;
+		uint64_t reserved_15_31              : 17;
+		uint64_t nq_ptr                      : 15;
+		uint64_t reserved_47_62              : 16;
+		uint64_t xq_xor                      : 1;
+	} s;
+};
+
+struct otx2_cpt_qp {
+	uint32_t id;
+	/**< Queue pair id */
+	uintptr_t base;
+	/**< Base address where BAR is mapped */
+	void *lmtline;
+	/**< Address of LMTLINE */
+	rte_iova_t lf_nq_reg;
+	/**< LF enqueue register address */
+	struct pending_queue pend_q;
+	/**< Pending queue */
+	struct rte_mempool *sess_mp;
+	/**< Session mempool */
+	struct rte_mempool *sess_mp_priv;
+	/**< Session private data mempool */
+	struct cpt_qp_meta_info meta_info;
+	/**< Metabuf info required to support operations on the queue pair */
+	rte_iova_t iq_dma_addr;
+	/**< Instruction queue address */
+};
+
 void otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev);
 
 int otx2_cpt_err_intr_register(const struct rte_cryptodev *dev);
 
+int otx2_cpt_iq_enable(const struct rte_cryptodev *dev,
+		       const struct otx2_cpt_qp *qp, uint8_t grp_mask,
+		       uint8_t pri, uint32_t size_div40);
+
+void otx2_cpt_iq_disable(struct otx2_cpt_qp *qp);
+
 #endif /* _OTX2_CRYPTODEV_HW_ACCESS_H_ */
diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c
index 9b0117a..b54e407 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c
@@ -8,6 +8,8 @@
 #include "otx2_dev.h"
 #include "otx2_mbox.h"
 
+#include "cpt_pmd_logs.h"
+
 int
 otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
 			      uint16_t *nb_queues)
@@ -90,3 +92,84 @@ otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev)
 
 	return 0;
 }
+
+static int
+otx2_cpt_send_mbox_msg(struct otx2_cpt_vf *vf)
+{
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	int ret;
+
+	otx2_mbox_msg_send(mbox, 0);
+
+	ret = otx2_mbox_wait_for_rsp(mbox, 0);
+	if (ret < 0) {
+		CPT_LOG_ERR("Could not get mailbox response");
+		return ret;
+	}
+
+	return 0;
+}
+
+int
+otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
+		     uint64_t *val)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct otx2_mbox_dev *mdev = &mbox->dev[0];
+	struct cpt_rd_wr_reg_msg *msg;
+	int ret, off;
+
+	msg = (struct cpt_rd_wr_reg_msg *)
+			otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
+						sizeof(*msg));
+	if (msg == NULL) {
+		CPT_LOG_ERR("Could not allocate mailbox message");
+		return -EFAULT;
+	}
+
+	msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+	msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+	msg->hdr.pcifunc = vf->otx2_dev.pf_func;
+	msg->is_write = 0;
+	msg->reg_offset = reg;
+	msg->ret_val = val;
+
+	ret = otx2_cpt_send_mbox_msg(vf);
+	if (ret < 0)
+		return ret;
+
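+	/* Read the response from the first message in the mbox RX region */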
+	off = mbox->rx_start +
+			RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+	msg = (struct cpt_rd_wr_reg_msg *) ((uintptr_t)mdev->mbase + off);
+
+	*val = msg->val;
+
+	return 0;
+}
+
+int
+otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
+		      uint64_t val)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+	struct cpt_rd_wr_reg_msg *msg;
+
+	msg = (struct cpt_rd_wr_reg_msg *)
+			otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
+						sizeof(*msg));
+	if (msg == NULL) {
+		CPT_LOG_ERR("Could not allocate mailbox message");
+		return -EFAULT;
+	}
+
+	msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+	msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+	msg->hdr.pcifunc = vf->otx2_dev.pf_func;
+	msg->is_write = 1;
+	msg->reg_offset = reg;
+	msg->val = val;
+
+	return otx2_cpt_send_mbox_msg(vf);
+}
diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h
index 0a43061..a298718 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h
@@ -16,4 +16,10 @@ int otx2_cpt_queues_detach(const struct rte_cryptodev *dev);
 
 int otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev);
 
+int otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
+			 uint64_t *val);
+
+int otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
+			  uint64_t val);
+
 #endif /* _OTX2_CRYPTODEV_MBOX_H_ */
diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
index b1bb9ae..de53055 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
@@ -2,7 +2,10 @@
  * Copyright (C) 2019 Marvell International Ltd.
  */
 
+#include <unistd.h>
+
 #include <rte_cryptodev_pmd.h>
+#include <rte_errno.h>
 
 #include "otx2_cryptodev.h"
 #include "otx2_cryptodev_hw_access.h"
@@ -12,6 +15,233 @@
 
 #include "cpt_hw_types.h"
 #include "cpt_pmd_logs.h"
+#include "cpt_pmd_ops_helper.h"
+
+#define METABUF_POOL_CACHE_SIZE	512
+
+/* Forward declarations */
+
+static int
+otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);
+
+static void
+qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
+{
+	snprintf(name, size, "otx2_cpt_lf_mem_%u:%u", dev_id, qp_id);
+}
+
+static int
+otx2_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
+				struct otx2_cpt_qp *qp, uint8_t qp_id,
+				int nb_elements)
+{
+	char mempool_name[RTE_MEMPOOL_NAMESIZE];
+	int sg_mlen, lb_mlen, max_mlen, ret;
+	struct cpt_qp_meta_info *meta_info;
+	struct rte_mempool *pool;
+
+	/* Get meta len for scatter-gather mode */
+	sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();
+
+	/* Reserve an extra 32B for future use */
+	sg_mlen += 4 * sizeof(uint64_t);
+
+	/* Get meta len for linear buffer (direct) mode */
+	lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();
+
+	/* Reserve an extra 32B for future use */
+	lb_mlen += 4 * sizeof(uint64_t);
+
+	/* Check max requirement for meta buffer */
+	max_mlen = RTE_MAX(lb_mlen, sg_mlen);
+
+	/* Allocate mempool */
+
+	snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx2_cpt_mb_%u:%u",
+		 dev->data->dev_id, qp_id);
+
+	pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen,
+					METABUF_POOL_CACHE_SIZE, 0,
+					rte_socket_id(), 0);
+
+	if (pool == NULL) {
+		CPT_LOG_ERR("Could not create mempool for metabuf");
+		return rte_errno;
+	}
+
+	ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
+					 NULL);
+	if (ret) {
+		CPT_LOG_ERR("Could not set mempool ops");
+		goto mempool_free;
+	}
+
+	ret = rte_mempool_populate_default(pool);
+	if (ret <= 0) {
+		CPT_LOG_ERR("Could not populate metabuf pool");
+		goto mempool_free;
+	}
+
+	meta_info = &qp->meta_info;
+
+	meta_info->pool = pool;
+	meta_info->lb_mlen = lb_mlen;
+	meta_info->sg_mlen = sg_mlen;
+
+	return 0;
+
+mempool_free:
+	rte_mempool_free(pool);
+	return ret;
+}
+
+static void
+otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
+{
+	struct cpt_qp_meta_info *meta_info = &qp->meta_info;
+
+	rte_mempool_free(meta_info->pool);
+
+	meta_info->pool = NULL;
+	meta_info->lb_mlen = 0;
+	meta_info->sg_mlen = 0;
+}
+
+static struct otx2_cpt_qp *
+otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
+		   uint8_t group)
+{
+	struct otx2_cpt_vf *vf = dev->data->dev_private;
+	uint64_t pg_sz = sysconf(_SC_PAGESIZE);
+	const struct rte_memzone *lf_mem;
+	uint32_t len, iq_len, size_div40;
+	char name[RTE_MEMZONE_NAMESIZE];
+	uint64_t used_len, iova;
+	struct otx2_cpt_qp *qp;
+	uint64_t lmtline;
+	uint8_t *va;
+	int ret;
+
+	/* Allocate queue pair */
+	qp = rte_zmalloc_socket("OCTEON TX2 Crypto PMD Queue Pair", sizeof(*qp),
+				OTX2_ALIGN, 0);
+	if (qp == NULL) {
+		CPT_LOG_ERR("Could not allocate queue pair");
+		return NULL;
+	}
+
+	iq_len = OTX2_CPT_IQ_LEN;
+
+	/*
+	 * Queue size must be a multiple of 40 and the effective queue size
+	 * available to software is (size_div40 - 1) * 40.
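+	 * For example, iq_len = 8200 gives size_div40 = 206, for an
+	 * effective size of (206 - 1) * 40 = 8200 entries.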
+	 */
+	size_div40 = (iq_len + 40 - 1) / 40 + 1;
+
+	/* For pending queue */
+	len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+
+	/* Space for instruction group memory */
+	len += size_div40 * 16;
+
+	/* Align up so the instruction queue starts at a page boundary */
+	len = RTE_ALIGN(len, pg_sz);
+
+	/* For instruction queues */
+	len += OTX2_CPT_IQ_LEN * sizeof(union cpt_inst_s);
+
+	/* Pad to page size after the instruction queue */
+	len = RTE_ALIGN(len, pg_sz);
+
+	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
+			    qp_id);
+
+	lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
+			RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
+			RTE_CACHE_LINE_SIZE);
+	if (lf_mem == NULL) {
+		CPT_LOG_ERR("Could not allocate reserved memzone");
+		goto qp_free;
+	}
+
+	va = lf_mem->addr;
+	iova = lf_mem->iova;
+
+	memset(va, 0, len);
+
+	ret = otx2_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
+	if (ret) {
+		CPT_LOG_ERR("Could not create mempool for metabuf");
+		goto lf_mem_free;
+	}
+
+	/* Initialize pending queue */
+	qp->pend_q.rid_queue = (struct rid *)va;
+	qp->pend_q.enq_tail = 0;
+	qp->pend_q.deq_head = 0;
+	qp->pend_q.pending_count = 0;
+
+	used_len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+	used_len += size_div40 * 16;
+	used_len = RTE_ALIGN(used_len, pg_sz);
+	iova += used_len;
+
+	qp->iq_dma_addr = iova;
+	qp->id = qp_id;
+	qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);
+
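+	/* LMT line for this queue: LMT block in BAR2, one 4K region per LF */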
+	lmtline = vf->otx2_dev.bar2 +
+		  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
+		  OTX2_LMT_LF_LMTLINE(0);
+
+	qp->lmtline = (void *)lmtline;
+
+	qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);
+
+	otx2_cpt_iq_disable(qp);
+
+	ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
+				 size_div40);
+	if (ret) {
+		CPT_LOG_ERR("Could not enable instruction queue");
+		goto mempool_destroy;
+	}
+
+	return qp;
+
+mempool_destroy:
+	otx2_cpt_metabuf_mempool_destroy(qp);
+lf_mem_free:
+	rte_memzone_free(lf_mem);
+qp_free:
+	rte_free(qp);
+	return NULL;
+}
+
+static int
+otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
+{
+	const struct rte_memzone *lf_mem;
+	char name[RTE_MEMZONE_NAMESIZE];
+	int ret;
+
+	otx2_cpt_iq_disable(qp);
+
+	otx2_cpt_metabuf_mempool_destroy(qp);
+
+	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
+			    qp->id);
+
+	lf_mem = rte_memzone_lookup(name);
+
+	ret = rte_memzone_free(lf_mem);
+	if (ret)
+		return ret;
+
+	rte_free(qp);
+
+	return 0;
+}
 
 /* PMD ops */
 
@@ -91,7 +321,13 @@ otx2_cpt_dev_stop(struct rte_cryptodev *dev)
 static int
 otx2_cpt_dev_close(struct rte_cryptodev *dev)
 {
-	int ret;
+	int i, ret;
+
+	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+		ret = otx2_cpt_queue_pair_release(dev, i);
+		if (ret)
+			return ret;
+	}
 
 	otx2_cpt_err_intr_unregister(dev);
 
@@ -119,6 +355,70 @@ otx2_cpt_dev_info_get(struct rte_cryptodev *dev,
 	}
 }
 
+static int
+otx2_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+			  const struct rte_cryptodev_qp_conf *conf,
+			  int socket_id __rte_unused)
+{
+	uint8_t grp_mask = OTX2_CPT_ENG_GRPS_MASK;
+	struct rte_pci_device *pci_dev;
+	struct otx2_cpt_qp *qp;
+
+	CPT_PMD_INIT_FUNC_TRACE();
+
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		otx2_cpt_queue_pair_release(dev, qp_id);
+
+	if (conf->nb_descriptors > OTX2_CPT_DEFAULT_CMD_QLEN) {
+		CPT_LOG_ERR("Could not set up queue pair for %u descriptors",
+			    conf->nb_descriptors);
+		return -EINVAL;
+	}
+
+	pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+	if (pci_dev->mem_resource[2].addr == NULL) {
+		CPT_LOG_ERR("Invalid PCI mem address");
+		return -EIO;
+	}
+
+	qp = otx2_cpt_qp_create(dev, qp_id, grp_mask);
+	if (qp == NULL) {
+		CPT_LOG_ERR("Could not create queue pair %d", qp_id);
+		return -ENOMEM;
+	}
+
+	qp->sess_mp = conf->mp_session;
+	qp->sess_mp_priv = conf->mp_session_private;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	return 0;
+}
+
+static int
+otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct otx2_cpt_qp *qp = dev->data->queue_pairs[qp_id];
+	int ret;
+
+	CPT_PMD_INIT_FUNC_TRACE();
+
+	if (qp == NULL)
+		return -EINVAL;
+
+	CPT_LOG_INFO("Releasing queue pair %d", qp_id);
+
+	ret = otx2_cpt_qp_destroy(dev, qp);
+	if (ret) {
+		CPT_LOG_ERR("Could not destroy queue pair %d", qp_id);
+		return ret;
+	}
+
+	dev->data->queue_pairs[qp_id] = NULL;
+
+	return 0;
+}
+
 struct rte_cryptodev_ops otx2_cpt_ops = {
 	/* Device control ops */
 	.dev_configure = otx2_cpt_dev_config,
@@ -129,8 +429,8 @@ struct rte_cryptodev_ops otx2_cpt_ops = {
 
 	.stats_get = NULL,
 	.stats_reset = NULL,
-	.queue_pair_setup = NULL,
-	.queue_pair_release = NULL,
+	.queue_pair_setup = otx2_cpt_queue_pair_setup,
+	.queue_pair_release = otx2_cpt_queue_pair_release,
 	.queue_pair_count = NULL,
 
 	/* Symmetric crypto ops */
-- 
2.7.4