[dpdk-dev] [PATCH 14/16] crypto/cpt: adds the enqueue and dequeue callbacks

Anoob Joseph anoob.joseph at caviumnetworks.com
Fri Jun 8 18:45:23 CEST 2018


From: Ragothaman Jayaraman <Ragothaman.Jayaraman at cavium.com>

The functions in cpt_request_mgr.c manage the work of
enqueuing/dequeuing requests/responses to/from the CPT hardware unit.

Signed-off-by: Ankur Dwivedi <ankur.dwivedi at cavium.com>
Signed-off-by: Murthy NSSR <Nidadavolu.Murthy at cavium.com>
Signed-off-by: Nithin Dabilpuram <nithin.dabilpuram at cavium.com>
Signed-off-by: Ragothaman Jayaraman <Ragothaman.Jayaraman at cavium.com>
Signed-off-by: Srisivasubramanian Srinivasan <Srisivasubramanian.Srinivasan at cavium.com>
---
 drivers/crypto/cpt/Makefile            |   1 +
 drivers/crypto/cpt/cpt_pmd_cryptodev.c |   4 +-
 drivers/crypto/cpt/cpt_pmd_ops.c       | 859 +++++++++++++++++++++++++++++++++
 drivers/crypto/cpt/cpt_pmd_ops.h       |  10 +
 4 files changed, 872 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/cpt/Makefile b/drivers/crypto/cpt/Makefile
index bf22c2b..63553e0 100644
--- a/drivers/crypto/cpt/Makefile
+++ b/drivers/crypto/cpt/Makefile
@@ -29,6 +29,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CPT) += cpt_pmd_ops.c
 
 # Base code
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CPT) += cpt_device.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CPT) += cpt_request_mgr.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CPT) += cpt_ops.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CPT) += cpt8xxx_device.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CPT) += cpt_vf_mbox.c
diff --git a/drivers/crypto/cpt/cpt_pmd_cryptodev.c b/drivers/crypto/cpt/cpt_pmd_cryptodev.c
index 939f31b..45e052f 100644
--- a/drivers/crypto/cpt/cpt_pmd_cryptodev.c
+++ b/drivers/crypto/cpt/cpt_pmd_cryptodev.c
@@ -137,8 +137,8 @@ static int init_global_resources(void)
 
 	c_dev->dev_ops = &cptvf_ops;
 
-	c_dev->enqueue_burst = NULL;
-	c_dev->dequeue_burst = NULL;
+	c_dev->enqueue_burst = cpt_pmd_pkt_enqueue;
+	c_dev->dequeue_burst = cpt_pmd_pkt_dequeue;
 
 	c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
diff --git a/drivers/crypto/cpt/cpt_pmd_ops.c b/drivers/crypto/cpt/cpt_pmd_ops.c
index 37808ce..d10caf5 100644
--- a/drivers/crypto/cpt/cpt_pmd_ops.c
+++ b/drivers/crypto/cpt/cpt_pmd_ops.c
@@ -1060,6 +1060,70 @@ int cpt_pmd_session_cfg(struct rte_cryptodev *dev,
 	return -EPERM;
 }
 
+static void *instance_session_cfg(cpt_instance_t *instance,
+			  struct rte_crypto_sym_xform *xform, void *sess)
+{
+	struct rte_crypto_sym_xform *chain;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/*
+	 * Microcode only supports the following combinations:
+	 * - encryption followed by authentication
+	 * - authentication followed by decryption
+	 */
+	if (xform->next) {
+		if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
+		    (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+		    (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)) {
+			/* Unsupported as of now by microcode */
+			PMD_DRV_LOG(ERR, "Unsupported combination by "
+					 "microcode\n");
+			goto err;
+		}
+		if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+		    (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
+		    (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)) {
+			/* For GMAC auth there is no cipher operation */
+			if (xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM ||
+			    xform->next->auth.algo !=
+			    RTE_CRYPTO_AUTH_AES_GMAC) {
+				/* Unsupported as of now by microcode */
+				PMD_DRV_LOG(ERR, "Unsupported combination by "
+					    "microcode\n");
+				goto err;
+			}
+		}
+	}
+
+	chain = xform;
+	while (chain) {
+		if (chain->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+			if (fill_sess_aead(instance, chain, sess))
+				goto err;
+		} else if (chain->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+			if (fill_sess_cipher(instance, chain, sess))
+				goto err;
+		} else if (chain->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+			if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+				if (fill_sess_gmac(NULL, chain, sess))
+					goto err;
+			} else {
+				if (fill_sess_auth(instance, chain, sess))
+					goto err;
+			}
+		}
+		chain = chain->next;
+	}
+
+	return sess;
+
+err:
+	return NULL;
+}
+
 void
 cpt_pmd_session_clear(struct rte_cryptodev *dev,
 		  struct rte_cryptodev_sym_session *sess)
@@ -1075,3 +1139,798 @@ int cpt_pmd_session_cfg(struct rte_cryptodev *dev,
 		rte_mempool_put(sess_mp, session_private);
 	}
 }
+
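+/*
+ * Allocate meta data for a request. When the source mbuf has a single
+ * segment with enough tailroom, the area is carved out of that
+ * tailroom instead of the meta mempool; such allocations are tagged
+ * by setting bit 0 of the returned pointer (buffers are assumed to be
+ * at least word aligned), so that free_op_meta() can skip the mempool
+ * put for them.
+ */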
+static inline void *
+alloc_op_meta(struct rte_mbuf *m_src,
+	      buf_ptr_t *buf,
+	      int32_t len)
+{
+	uint8_t *mdata;
+
+#ifndef CPT_ALWAYS_USE_SEPARATE_BUF
+	if (likely(m_src && (m_src->nb_segs == 1))) {
+		int32_t tailroom;
+		phys_addr_t mphys;
+
+		/* Check if tailroom is sufficient to hold meta data */
+		tailroom = rte_pktmbuf_tailroom(m_src);
+		if (likely(tailroom > len + 8)) {
+			mdata = (uint8_t *)m_src->buf_addr + m_src->buf_len;
+			mphys = m_src->buf_physaddr + m_src->buf_len;
+			mdata -= len;
+			mphys -= len;
+			buf->vaddr = mdata;
+			buf->dma_addr = mphys;
+			buf->size = len;
+			/* Indicate that this is a mbuf allocated mdata */
+			mdata = (uint8_t *)((uint64_t)mdata | 1ull);
+			return mdata;
+		}
+	}
+#else
+	(void) m_src;
+#endif
+
+	if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
+		return NULL;
+
+	buf->vaddr = mdata;
+	buf->dma_addr = rte_mempool_virt2iova(mdata);
+	buf->size = len;
+
+	return mdata;
+}
+
+/**
+ * free_op_meta - return a meta buffer to the mempool.
+ * @param mdata: pointer to the meta buffer; buffers tagged with bit 0
+ * set live in mbuf tailroom and are not freed here.
+ */
+static inline void free_op_meta(void *mdata)
+{
+	bool nofree = ((uint64_t)mdata & 1ull);
+
+	if (likely(nofree))
+		return;
+	rte_mempool_put(cpt_meta_pool, mdata);
+}
+
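+/*
+ * Build a scatter-gather list from an mbuf chain, starting at
+ * start_offset bytes into the packet. Returns 0 on success, 1 if the
+ * chain holds no data beyond start_offset.
+ */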
+static inline uint32_t
+prepare_iov_from_pkt(struct rte_mbuf *pkt,
+		     iov_ptr_t *iovec, uint32_t start_offset)
+{
+	uint16_t index = 0;
+	void *seg_data = NULL;
+	phys_addr_t seg_phys;
+	int32_t seg_size = 0;
+
+	if (!pkt) {
+		iovec->buf_cnt = 0;
+		return 0;
+	}
+
+	if (!start_offset) {
+		seg_data = rte_pktmbuf_mtod(pkt, void *);
+		seg_phys = rte_pktmbuf_mtophys(pkt);
+		seg_size = pkt->data_len;
+	} else {
+		while (start_offset >= pkt->data_len) {
+			start_offset -= pkt->data_len;
+			pkt = pkt->next;
+		}
+
+		seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
+		seg_phys = rte_pktmbuf_mtophys_offset(pkt, start_offset);
+		seg_size = pkt->data_len - start_offset;
+		if (!seg_size)
+			return 1;
+	}
+
+	/* first seg */
+	iovec->bufs[index].vaddr = seg_data;
+	iovec->bufs[index].dma_addr = seg_phys;
+	iovec->bufs[index].size = seg_size;
+	index++;
+	pkt = pkt->next;
+
+	while (unlikely(pkt != NULL)) {
+		seg_data = rte_pktmbuf_mtod(pkt, void *);
+		seg_phys = rte_pktmbuf_mtophys(pkt);
+		seg_size = pkt->data_len;
+		if (!seg_size)
+			break;
+
+		iovec->bufs[index].vaddr = seg_data;
+		iovec->bufs[index].dma_addr = seg_phys;
+		iovec->bufs[index].size = seg_size;
+
+		index++;
+
+		/* FIXME: Not depending on wqe.w0.s.bufs to break */
+		pkt = pkt->next;
+	}
+
+	iovec->buf_cnt = index;
+	return 0;
+}
+
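+/*
+ * In-place variant of prepare_iov_from_pkt(). A single-segment mbuf
+ * is described directly in param->bufs[0] and flagged
+ * SINGLE_BUF_INPLACE; with >= 24B headroom and >= 8B tailroom it is
+ * additionally flagged SINGLE_BUF_HEADTAILROOM (a prerequisite for
+ * direct mode on 83XX). Multi-segment mbufs fall back to the gather
+ * list in param->src_iov.
+ */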
+static inline uint32_t
+prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
+			     fc_params_t *param,
+			     uint32_t *flags)
+{
+	uint16_t index = 0;
+	void *seg_data = NULL;
+	phys_addr_t seg_phys;
+	uint32_t seg_size = 0;
+	iov_ptr_t *iovec;
+
+	seg_data = rte_pktmbuf_mtod(pkt, void *);
+	seg_phys = rte_pktmbuf_mtophys(pkt);
+	seg_size = pkt->data_len;
+
+	/* first seg */
+	if (likely(!pkt->next)) {
+		uint32_t headroom, tailroom;
+
+		*flags |= SINGLE_BUF_INPLACE;
+		headroom = rte_pktmbuf_headroom(pkt);
+		tailroom = rte_pktmbuf_tailroom(pkt);
+		if (likely((headroom >= 24) && (tailroom >= 8))) {
+			/* In 83XX this is a prerequisite for Direct mode */
+			*flags |= SINGLE_BUF_HEADTAILROOM;
+		}
+		param->bufs[0].vaddr = seg_data;
+		param->bufs[0].dma_addr = seg_phys;
+		param->bufs[0].size = seg_size;
+		return 0;
+	}
+	iovec = param->src_iov;
+	iovec->bufs[index].vaddr = seg_data;
+	iovec->bufs[index].dma_addr = seg_phys;
+	iovec->bufs[index].size = seg_size;
+	index++;
+	pkt = pkt->next;
+
+	while (unlikely(pkt != NULL)) {
+		seg_data = rte_pktmbuf_mtod(pkt, void *);
+		seg_phys = rte_pktmbuf_mtophys(pkt);
+		seg_size = pkt->data_len;
+
+		if (!seg_size)
+			break;
+
+		iovec->bufs[index].vaddr = seg_data;
+		iovec->bufs[index].dma_addr = seg_phys;
+		iovec->bufs[index].size = seg_size;
+
+		index++;
+
+		pkt = pkt->next;
+	}
+
+	iovec->buf_cnt = index;
+	return 0;
+}
+
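+/*
+ * A KASUMI F9 input ends with the direction bit followed by a single
+ * '1' bit and zero padding (3GPP f9 formatting). Scan backwards from
+ * the end of the buffer for that stop bit to recover the length in
+ * bits and the direction bit immediately preceding it.
+ */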
+static void
+find_kasumif9_direction_and_length(uint8_t *src,
+				   uint32_t counter_num_bytes,
+				   uint32_t *addr_length_in_bits,
+				   uint8_t *addr_direction)
+{
+	uint8_t found = 0;
+	while (!found && counter_num_bytes > 0) {
+		counter_num_bytes--;
+		if (src[counter_num_bytes] == 0x00)
+			continue;
+		if (src[counter_num_bytes] == 0x80) {
+			*addr_direction = src[counter_num_bytes - 1] & 0x1;
+			*addr_length_in_bits = counter_num_bytes * 8 - 1;
+			found = 1;
+		} else {
+			int i = 0;
+			uint8_t last_byte = src[counter_num_bytes];
+			for (i = 0; i < 8 && found == 0; i++) {
+				if (last_byte & (1 << i)) {
+					*addr_direction = (last_byte >> (i+1))
+							  & 0x1;
+					if (i != 6)
+						*addr_length_in_bits =
+							counter_num_bytes * 8
+							+ (8 - (i + 2));
+					else
+						*addr_length_in_bits =
+							counter_num_bytes * 8;
+
+					found = 1;
+				}
+			}
+		}
+	}
+}
+
+/*
+ * This handles all auth-only cases except AES_GMAC.
+ */
+static void *
+fill_digest_params(struct rte_crypto_op *cop,
+		   struct cpt_sess_misc *sess,
+		   void **mdata_ptr,
+		   int *op_ret)
+{
+	uint32_t space = 0;
+	struct rte_crypto_sym_op *sym_op = cop->sym;
+	void *mdata;
+	phys_addr_t mphys;
+	uint64_t *op;
+	uint32_t auth_range_off;
+	uint32_t flags = 0;
+	uint64_t d_offs = 0, d_lens;
+	void *prep_req = NULL;
+	struct rte_mbuf *m_src, *m_dst;
+	uint16_t auth_op = sess->cpt_op & CSP_OP_AUTH_MASK;
+	uint8_t zsk_flag = sess->zsk_flag;
+	uint16_t mac_len = sess->mac_len;
+	fc_params_t params;
+	char src[SRC_IOV_SIZE];
+	uint8_t iv_buf[16];
+
+	m_src = sym_op->m_src;
+
+	/* For digest-only ops, force mempool allocation */
+	mdata = alloc_op_meta(NULL, &params.meta_buf, cpt_op_mlen);
+	if (mdata == NULL) {
+		PMD_DRV_LOG(ERR, "Error allocating meta buffer for request\n");
+		*op_ret = -ENOMEM;
+		return NULL;
+	}
+
+	mphys = params.meta_buf.dma_addr;
+
+	op = mdata;
+	op[0] = (uint64_t)mdata;
+	op[1] = (uint64_t)cop;
+	op[2] = op[3] = 0; /* Used to indicate auth verify */
+	space += 4 * sizeof(uint64_t);
+
+	auth_range_off = sym_op->auth.data.offset;
+
+	flags = VALID_MAC_BUF;
+	params.src_iov = (void *)src;
+	if (unlikely(zsk_flag)) {
+		/*
+		 * For ZUC, KASUMI and SNOW 3G, offsets are in bits, so
+		 * pass them through even in the auth-only case and let
+		 * the microcode handle it.
+		 */
+		d_offs = auth_range_off;
+		auth_range_off = 0;
+		params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
+					uint8_t *, sess->auth_iv_offset);
+		if (zsk_flag == K_F9) {
+			uint32_t length_in_bits, num_bytes;
+			uint8_t *src, direction = 0;
+			uint32_t counter_num_bytes;
+
+			memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
+							uint8_t *), 8);
+			/*
+			 * This is KASUMI F9; take the direction bit
+			 * from the source buffer.
+			 */
+			length_in_bits = cop->sym->auth.data.length;
+			num_bytes = (length_in_bits >> 3);
+			counter_num_bytes = num_bytes;
+			src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
+			find_kasumif9_direction_and_length(src,
+						counter_num_bytes,
+						&length_in_bits,
+						&direction);
+			length_in_bits -= 64;
+			cop->sym->auth.data.offset += 64;
+			d_offs = cop->sym->auth.data.offset;
+			auth_range_off = d_offs / 8;
+			cop->sym->auth.data.length = length_in_bits;
+
+			/* Store it at end of auth iv */
+			iv_buf[8] = direction;
+			params.auth_iv_buf = iv_buf;
+		}
+	}
+
+	d_lens = sym_op->auth.data.length;
+
+	params.ctx_buf.vaddr = SESS_PRIV(sess);
+	params.ctx_buf.dma_addr = sess->ctx_dma_addr;
+
+	if (auth_op == CSP_OP_AUTH_GENERATE) {
+		if (sym_op->auth.digest.data) {
+			/* Digest to be generated in a separate buffer */
+			params.mac_buf.size = sess->mac_len;
+			params.mac_buf.vaddr = sym_op->auth.digest.data;
+			params.mac_buf.dma_addr =
+				sym_op->auth.digest.phys_addr;
+		} else {
+			uint32_t off = sym_op->auth.data.offset +
+				sym_op->auth.data.length;
+			int32_t dlen, extra_len;
+
+			m_dst = sym_op->m_dst ?
+				sym_op->m_dst : sym_op->m_src;
+			dlen = rte_pktmbuf_pkt_len(m_dst);
+
+			extra_len = off + mac_len - dlen;
+			if (extra_len > 0 &&
+			    !rte_pktmbuf_append(m_dst, extra_len)) {
+				PMD_DRV_LOG(ERR, "Failed to extend "
+					    "mbuf by %dB\n", extra_len);
+				goto err;
+			}
+
+			params.mac_buf.vaddr =
+				rte_pktmbuf_mtod_offset(m_dst,
+							void *, off);
+			params.mac_buf.dma_addr =
+				rte_pktmbuf_mtophys_offset(m_dst, off);
+			params.mac_buf.size = mac_len;
+		}
+	} else {
+		/* Need space for storing generated mac */
+		params.mac_buf.vaddr = (uint8_t *)mdata + space;
+		params.mac_buf.dma_addr = mphys + space;
+		params.mac_buf.size = mac_len;
+		space += RTE_ALIGN_CEIL(mac_len, 8);
+		op[2] = (uint64_t)params.mac_buf.vaddr;
+		op[3] = mac_len;
+	}
+
+	params.meta_buf.vaddr = (uint8_t *)mdata + space;
+	params.meta_buf.dma_addr = mphys + space;
+	params.meta_buf.size -= space;
+
+	/* Store SG I/O in the API for reuse */
+	if (prepare_iov_from_pkt(m_src, params.src_iov,
+				 auth_range_off)) {
+		PMD_DRV_LOG(ERR, "Prepare src iov failed\n");
+		*op_ret = -1;
+		goto err;
+	}
+
+	prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
+					&params, op, op_ret);
+	*mdata_ptr = mdata;
+	return prep_req;
+err:
+	free_op_meta(mdata);
+	return NULL;
+}
+
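+/*
+ * Build flexi-crypto request parameters for cipher and/or auth
+ * operations. Offsets and lengths are packed into 64-bit words as
+ * the microcode expects them: d_offs = (cipher_off << 16) | auth_off
+ * and d_lens = (cipher_len << 32) | auth_len; for GCM, contiguous
+ * AAD is folded into the auth range instead of using a separate
+ * AAD buffer.
+ */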
+static inline void *
+fill_fc_params(struct rte_crypto_op *cop,
+	       struct cpt_sess_misc *sess_misc,
+	       void **mdata_ptr,
+	       int *op_ret)
+{
+	uint32_t space = 0;
+	struct rte_crypto_sym_op *sym_op = cop->sym;
+	void *mdata;
+	uint64_t *op;
+	uint32_t mc_hash_off;
+	uint32_t flags = 0;
+	uint64_t d_offs, d_lens;
+	void *prep_req;
+	struct rte_mbuf *m_src, *m_dst;
+	uint8_t cpt_op = sess_misc->cpt_op;
+	uint8_t zsk_flag = sess_misc->zsk_flag;
+	uint8_t aes_gcm = sess_misc->aes_gcm;
+	uint16_t mac_len = sess_misc->mac_len;
+#ifdef CPT_ALWAYS_USE_SG_MODE
+	uint8_t inplace = 0;
+#else
+	uint8_t inplace = 1;
+#endif
+	fc_params_t fc_params;
+	char src[SRC_IOV_SIZE];
+	char dst[SRC_IOV_SIZE];
+	uint32_t iv_buf[4];
+
+	if (likely(sess_misc->iv_length)) {
+		flags |= VALID_IV_BUF;
+		fc_params.iv_buf = rte_crypto_op_ctod_offset(cop,
+				   uint8_t *, sess_misc->iv_offset);
+		if (sess_misc->aes_ctr &&
+		    unlikely(sess_misc->iv_length != 16)) {
+			memcpy((uint8_t *)iv_buf,
+				rte_crypto_op_ctod_offset(cop,
+				uint8_t *, sess_misc->iv_offset), 12);
+			iv_buf[3] = htobe32(0x1);
+			fc_params.iv_buf = iv_buf;
+		}
+	}
+
+	if (zsk_flag) {
+		fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
+					uint8_t *,
+					sess_misc->auth_iv_offset);
+		if (zsk_flag == K_F9) {
+			PMD_DRV_LOG(ERR, "Should not reach here for "
+					 "KASUMI F9\n");
+		}
+		if (zsk_flag != ZS_EA)
+			inplace = 0;
+	}
+	m_src = sym_op->m_src;
+	m_dst = sym_op->m_dst;
+
+	if (aes_gcm) {
+		uint8_t *salt;
+		uint8_t *aad_data;
+		uint16_t aad_len;
+
+		d_offs = sym_op->aead.data.offset;
+		d_lens = sym_op->aead.data.length;
+		mc_hash_off = sym_op->aead.data.offset +
+			      sym_op->aead.data.length;
+
+		aad_data = sym_op->aead.aad.data;
+		aad_len = sess_misc->aad_length;
+		if (likely((aad_data + aad_len) ==
+			   rte_pktmbuf_mtod_offset(m_src,
+				uint8_t *,
+				sym_op->aead.data.offset))) {
+			d_offs = (d_offs - aad_len) | (d_offs << 16);
+			d_lens = (d_lens + aad_len) | (d_lens << 32);
+		} else {
+			fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
+			fc_params.aad_buf.dma_addr = sym_op->aead.aad.phys_addr;
+			fc_params.aad_buf.size = aad_len;
+			flags |= VALID_AAD_BUF;
+			inplace = 0;
+			d_offs = d_offs << 16;
+			d_lens = d_lens << 32;
+		}
+
+		salt = fc_params.iv_buf;
+		if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
+			cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
+			sess_misc->salt = *(uint32_t *)salt;
+		}
+		fc_params.iv_buf = salt + 4;
+		if (likely(mac_len)) {
+			struct rte_mbuf *m = (cpt_op & CSP_OP_ENCODE) ? m_dst :
+					     m_src;
+
+			if (!m)
+				m = m_src;
+
+			/* hmac immediately following data is best case */
+			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
+			    mc_hash_off !=
+			    (uint8_t *)sym_op->aead.digest.data)) {
+				flags |= VALID_MAC_BUF;
+				fc_params.mac_buf.size = sess_misc->mac_len;
+				fc_params.mac_buf.vaddr =
+					sym_op->aead.digest.data;
+				fc_params.mac_buf.dma_addr =
+					sym_op->aead.digest.phys_addr;
+				inplace = 0;
+			}
+		}
+	} else {
+		d_offs = sym_op->cipher.data.offset;
+		d_lens = sym_op->cipher.data.length;
+		mc_hash_off = sym_op->cipher.data.offset +
+			      sym_op->cipher.data.length;
+		d_offs = (d_offs << 16) | sym_op->auth.data.offset;
+		d_lens = (d_lens << 32) | sym_op->auth.data.length;
+
+		if (mc_hash_off < (sym_op->auth.data.offset +
+				   sym_op->auth.data.length)){
+			mc_hash_off = (sym_op->auth.data.offset +
+				       sym_op->auth.data.length);
+		}
+		/* for gmac, salt should be updated like in gcm */
+		if (unlikely(sess_misc->is_gmac)) {
+			uint8_t *salt;
+			salt = fc_params.iv_buf;
+			if (unlikely(*(uint32_t *)salt != sess_misc->salt)) {
+				cpt_fc_salt_update(SESS_PRIV(sess_misc), salt);
+				sess_misc->salt = *(uint32_t *)salt;
+			}
+			fc_params.iv_buf = salt + 4;
+		}
+		if (likely(mac_len)) {
+			struct rte_mbuf *m =
+				(cpt_op & CSP_OP_ENCODE) ? m_dst : m_src;
+
+			if (!m)
+				m = m_src;
+
+			/* HMAC immediately following data is best case */
+			if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
+				     mc_hash_off !=
+				     (uint8_t *)sym_op->auth.digest.data)) {
+				flags |= VALID_MAC_BUF;
+				fc_params.mac_buf.size = sess_misc->mac_len;
+				fc_params.mac_buf.vaddr =
+					sym_op->auth.digest.data;
+				fc_params.mac_buf.dma_addr =
+					sym_op->auth.digest.phys_addr;
+				inplace = 0;
+			}
+		}
+	}
+	fc_params.ctx_buf.vaddr = SESS_PRIV(sess_misc);
+	fc_params.ctx_buf.dma_addr = sess_misc->ctx_dma_addr;
+
+	if (likely(!m_dst && inplace)) {
+		/*
+		 * Single buffer in-place case: no separate AAD or MAC
+		 * buffer and no wireless ("air") crypto.
+		 */
+		fc_params.dst_iov = fc_params.src_iov = (void *)src;
+
+		if (unlikely(prepare_iov_from_pkt_inplace(m_src,
+							  &fc_params,
+							  &flags))) {
+			PMD_DRV_LOG(ERR, "Prepare inplace src iov failed\n");
+			*op_ret = -1;
+			return NULL;
+		}
+
+	} else {
+		/* Out of place processing */
+		fc_params.src_iov = (void *)src;
+		fc_params.dst_iov = (void *)dst;
+
+		/* Store SG I/O in the API for reuse */
+		if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
+			PMD_DRV_LOG(ERR, "Prepare src iov failed\n");
+			*op_ret = -1;
+			return NULL;
+		}
+
+		if (unlikely(m_dst != NULL)) {
+			uint32_t pkt_len;
+
+			/* Try to make as much room as src has */
+			m_dst = sym_op->m_dst;
+			pkt_len = rte_pktmbuf_pkt_len(m_dst);
+
+			if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
+				pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
+				if (!rte_pktmbuf_append(m_dst, pkt_len)) {
+					PMD_DRV_LOG(ERR, "Not enough space in "
+					 "m_dst %p, need %u more\n",
+					 m_dst, pkt_len);
+					return NULL;
+				}
+			}
+
+			if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
+				PMD_DRV_LOG(ERR, "Prepare dst iov failed for "
+				 "m_dst %p\n", m_dst);
+				return NULL;
+			}
+		} else {
+			fc_params.dst_iov = (void *)src;
+		}
+
+	}
+
+	if (likely(flags & SINGLE_BUF_HEADTAILROOM))
+		mdata = alloc_op_meta(m_src,
+				      &fc_params.meta_buf,
+				      cpt_op_sb_mlen);
+	else
+		mdata = alloc_op_meta(NULL,
+				      &fc_params.meta_buf,
+				      cpt_op_mlen);
+
+	if (unlikely(mdata == NULL)) {
+		PMD_DRV_LOG(ERR, "Error allocating meta buffer for request\n");
+		return NULL;
+	}
+
+	op = (uint64_t *)((uint64_t)mdata & ~1ull);
+	op[0] = (uint64_t)mdata;
+	op[1] = (uint64_t)cop;
+	op[2] = op[3] = 0; /* Used to indicate auth verify */
+	space += 4 * sizeof(uint64_t);
+
+	fc_params.meta_buf.vaddr = (uint8_t *)op + space;
+	fc_params.meta_buf.dma_addr += space;
+	fc_params.meta_buf.size -= space;
+
+	/* Finally prepare the instruction */
+	if (cpt_op & CSP_OP_ENCODE)
+		prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
+						&fc_params, op, op_ret);
+	else
+		prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
+						&fc_params, op, op_ret);
+
+	if (unlikely(!prep_req))
+		free_op_meta(mdata);
+	*mdata_ptr = mdata;
+	return prep_req;
+}
+
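+/*
+ * Compare the microcode-generated MAC against the expected digest,
+ * taken either from the separate digest buffer or from the bytes
+ * following the auth range in m_src, and set the op status
+ * accordingly.
+ */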
+static inline void
+compl_auth_verify(struct rte_crypto_op *op,
+		      uint8_t *gen_mac,
+		      uint64_t mac_len)
+{
+	uint8_t *mac;
+	struct rte_crypto_sym_op *sym_op = op->sym;
+
+	if (sym_op->auth.digest.data)
+		mac = sym_op->auth.digest.data;
+	else
+		mac = rte_pktmbuf_mtod_offset(sym_op->m_src,
+					      uint8_t *,
+					      sym_op->auth.data.length +
+					      sym_op->auth.data.offset);
+	if (!mac) {
+		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return;
+	}
+
+	if (memcmp(mac, gen_mac, mac_len))
+		op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	else
+		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+}
+
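+/*
+ * Process a single crypto op: resolve the session (a temporary one is
+ * allocated for sessionless ops), prepare the request and enqueue it
+ * to the hardware. The doorbell is rung only for the last op of a
+ * burst; all earlier ops pass ENQ_FLAG_NODOORBELL.
+ */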
+static inline int __hot
+cpt_pmd_crypto_operation(cpt_instance_t *instance,
+		     struct rte_crypto_op *op,
+		     bool last_op)
+{
+	struct cpt_sess_misc *sess = NULL;
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	void *prep_req, *mdata = NULL;
+	int ret = 0;
+	uint64_t cpt_op;
+	uint8_t flags = last_op ? 0 : ENQ_FLAG_NODOORBELL;
+
+	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+		void *ptr = NULL;
+		int sess_len;
+
+		sess_len = cpt_pmd_get_session_size(NULL);
+
+		sess = rte_calloc(__func__, 1, sess_len, 8);
+		if (!sess)
+			return -ENOMEM;
+
+		sess->ctx_dma_addr = rte_malloc_virt2iova(sess) +
+			sizeof(struct cpt_sess_misc);
+
+		ptr = instance_session_cfg(instance,
+					   sym_op->xform, (void *)sess);
+		if (ptr == NULL) {
+			/* Don't leak the sessionless allocation */
+			rte_free(sess);
+			return -EINVAL;
+		}
+	} else {
+		sess = (struct cpt_sess_misc *)
+			get_session_private_data(sym_op->session,
+						 cryptodev_cpt_driver_id);
+	}
+
+	cpt_op = sess->cpt_op;
+
+	if (likely(cpt_op & CSP_OP_CIPHER_MASK))
+		prep_req = fill_fc_params(op, sess, &mdata, &ret);
+	else
+		prep_req = fill_digest_params(op, sess, &mdata, &ret);
+
+	if (unlikely(!prep_req)) {
+		PMD_DRV_LOG_RAW(ERR, "prep crypto req : op %p, cpt_op 0x%x "
+				"ret 0x%x\n", op, (unsigned int)cpt_op, ret);
+		goto req_fail;
+	}
+
+	/* Enqueue prepared instruction to HW */
+	ret = cpt_enqueue_req(instance, prep_req,
+			      flags, NULL, 0);
+
+	if (unlikely(ret)) {
+		if (unlikely(ret == -EAGAIN))
+			goto req_fail;
+		PMD_DRV_LOG(ERR, "Error enqueuing crypto request : error "
+			    "code %d\n", ret);
+		goto req_fail;
+	}
+
+	/* TODO: Stats here */
+
+	return 0;
+
+req_fail:
+	if (mdata)
+		free_op_meta(mdata);
+	return ret;
+}
+
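+/*
+ * Burst enqueue: nb_ops is first clamped to the free queue slots
+ * reported by cpt_queue_full(), then ops are submitted one by one,
+ * with the doorbell rung only on the last one.
+ */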
+uint16_t
+cpt_pmd_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	cpt_instance_t *instance = (cpt_instance_t *)qptr;
+	uint16_t count = 0;
+	int ret;
+
+	count = cpt_queue_full(instance);
+	if (nb_ops > count)
+		nb_ops = count;
+
+	count = 0;
+	while (likely(count < nb_ops)) {
+		bool last_op = (count + 1 == nb_ops);
+		ret = cpt_pmd_crypto_operation(instance, ops[count], last_op);
+		if (unlikely(ret))
+			break;
+		count++;
+	}
+	return count;
+}
+
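+/*
+ * Burst dequeue: each completion points at the 4-word meta area laid
+ * out at enqueue time (rsp[0] = meta buffer, rsp[1] = crypto op,
+ * rsp[2]/rsp[3] = generated MAC address and length for auth verify).
+ * The completion code is translated into the op status and the meta
+ * buffer is released.
+ */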
+uint16_t
+cpt_pmd_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	cpt_instance_t *instance = (cpt_instance_t *)qptr;
+	uint16_t nb_completed, i = 0;
+	uint8_t compcode[nb_ops];
+
+	nb_completed = cpt_dequeue_burst(instance, nb_ops,
+					 (void **)ops, compcode);
+	while (likely(i < nb_completed)) {
+		struct rte_crypto_op *cop;
+		void *metabuf;
+		uint64_t *rsp;
+		uint8_t status;
+
+		rsp = (void *)ops[i];
+		status = compcode[i];
+		if (likely((i + 1) < nb_completed))
+			rte_prefetch0(ops[i+1]);
+		metabuf = (void *)rsp[0];
+		cop = (void *)rsp[1];
+
+		ops[i] = cop;
+
+		if (likely(status == 0)) {
+			if (likely(!rsp[2]))
+				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			else
+				compl_auth_verify(cop, (uint8_t *)rsp[2],
+						  rsp[3]);
+		} else if (status == ERR_GC_ICV_MISCOMPARE) {
+			/* Auth data mismatch */
+			cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+		} else {
+			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		}
+		free_op_meta(metabuf);
+		i++;
+	}
+	return nb_completed;
+}
diff --git a/drivers/crypto/cpt/cpt_pmd_ops.h b/drivers/crypto/cpt/cpt_pmd_ops.h
index 314b2b1..17b3a09 100644
--- a/drivers/crypto/cpt/cpt_pmd_ops.h
+++ b/drivers/crypto/cpt/cpt_pmd_ops.h
@@ -83,4 +83,14 @@ int cpt_pmd_session_cfg(struct rte_cryptodev *dev,
 void
 cpt_pmd_session_clear(struct rte_cryptodev *dev,
 		  struct rte_cryptodev_sym_session *sess);
+
+uint16_t
+cpt_pmd_pkt_enqueue(void *qptr,
+		struct rte_crypto_op **ops,
+		uint16_t nb_ops);
+
+uint16_t
+cpt_pmd_pkt_dequeue(void *qptr,
+		struct rte_crypto_op **ops,
+		uint16_t nb_ops);
 #endif
-- 
1.9.3


