[dpdk-dev] [PATCH v1 5/6] crypto/ccp: enable IOMMU for CCP

asomalap at amd.com
Mon Oct 14 07:21:53 CEST 2019


From: Amaranath Somalapuram <asomalap at amd.com>

CCP uses the vdev framework, and the vdev framework does not support
IOMMU. Add custom IOMMU support for the AMD CCP driver.

Signed-off-by: Amaranath Somalapuram <asomalap at amd.com>
---
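Note for reviewers: every hunk in ccp_crypto.c repeats the same
address-translation pattern. A minimal sketch of that pattern follows
(the helper name ccp_virt2iova is hypothetical and not part of this
patch):

	#include <rte_memory.h>

	extern int iommu_mode;

	/* Translate a CPU virtual address into a device address: an IOVA
	 * when the device is bound to vfio-pci (iommu_mode == 2), a
	 * physical address for the UIO kernel drivers otherwise. */
	static inline phys_addr_t
	ccp_virt2iova(const void *addr)
	{
		if (iommu_mode == 2)
			return (phys_addr_t)rte_mem_virt2iova(addr);
		return (phys_addr_t)rte_mem_virt2phy(addr);
	}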
 drivers/crypto/ccp/ccp_crypto.c  | 244 ++++++++++++++++++++++++-------
 drivers/crypto/ccp/ccp_dev.c     |  56 ++-----
 drivers/crypto/ccp/ccp_dev.h     |   2 +-
 drivers/crypto/ccp/ccp_pci.c     |   1 +
 drivers/crypto/ccp/rte_ccp_pmd.c |   5 +-
 5 files changed, 210 insertions(+), 98 deletions(-)
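
The detection logic behind iommu_mode, condensed from the ccp_dev.c
hunks below (a summary sketch, not literal code from the patch):

	/* ccp_check_pci_uio_module() scans /proc/modules and returns the
	 * index of the loaded entry in uio_module_names[]. */
	iommu_mode = module_idx;	/* 0, 1 or 2 */
	if (iommu_mode == 2)
		pci->kdrv = RTE_KDRV_VFIO;
	else if (iommu_mode == 1)
		pci->kdrv = RTE_KDRV_UIO_GENERIC;
	else
		pci->kdrv = RTE_KDRV_IGB_UIO;
	rte_pci_map_device(pci);	/* replaces the hand-rolled mmap() loop */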

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 1837c8543..c0ece4e37 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -31,8 +31,11 @@
 #include <openssl/err.h>
 #include <openssl/hmac.h>
 
+extern int iommu_mode;
+
 /* SHA initial context values */
-static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
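+/* DMA-able scratch copy of the SHA init values below; allocated with
+ * rte_malloc() at probe time so that it has a valid IOVA. */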
+void *sha_ctx;
+uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
 	SHA1_H4, SHA1_H3,
 	SHA1_H2, SHA1_H1,
 	SHA1_H0, 0x0U,
@@ -744,8 +747,16 @@ ccp_configure_session_cipher(struct ccp_session *sess,
 		CCP_LOG_ERR("Invalid CCP Engine");
 		return -ENOTSUP;
 	}
-	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	if (iommu_mode == 2) {
+		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+	} else {
+		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	}
 	return 0;
 }
 
@@ -784,6 +795,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha1_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
 		if (sess->auth_opt) {
@@ -822,6 +834,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha224_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
 		if (sess->auth_opt) {
@@ -884,6 +897,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha256_init;
 		sess->auth.ctx_len = CCP_SB_BYTES;
 		sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
 		if (sess->auth_opt) {
@@ -946,6 +960,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha384_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
 		if (sess->auth_opt) {
@@ -1010,6 +1025,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
 		sess->auth.ctx = (void *)ccp_sha512_init;
 		sess->auth.ctx_len = CCP_SB_BYTES << 1;
 		sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+		rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
 		if (sess->auth_opt) {
@@ -1159,8 +1175,16 @@ ccp_configure_session_aead(struct ccp_session *sess,
 		CCP_LOG_ERR("Unsupported aead algo");
 		return -ENOTSUP;
 	}
-	sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
-	sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	if (iommu_mode == 2) {
+		sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
+	} else {
+		sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+		sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+	}
 	return 0;
 }
 
@@ -1571,15 +1595,27 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 					 ccp_cryptodev_driver_id);
 	addr = session->auth.pre_compute;
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
-					      op->sym->auth.data.offset);
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+	if (iommu_mode == 2) {
+		src_addr = (phys_addr_t)rte_mem_virt2iova(
+				rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+				op->sym->auth.data.offset));
+		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+	} else {
+		src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					op->sym->auth.data.offset);
+		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	}
 	dest_addr_t = dest_addr;
 
 	/** Load PHash1 to LSB*/
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1659,7 +1695,14 @@ ccp_perform_hmac(struct rte_crypto_op *op,
 
 	/** Load PHash2 to LSB*/
 	addr += session->auth.ctx_len;
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+	if (iommu_mode == 2)
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
+	else
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1743,17 +1786,26 @@ ccp_perform_sha(struct rte_crypto_op *op,
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
-					      op->sym->auth.data.offset);
-
 	append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
-
 	/** Passthru sha context*/
+	if (iommu_mode == 2) {
+		src_addr = (phys_addr_t)rte_mem_virt2iova(
+				rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+				op->sym->auth.data.offset));
+		dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2iova(sha_ctx);
+	} else {
+		src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					op->sym->auth.data.offset);
+		dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+						session->auth.ctx);
+	}
 
-	pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
-						     session->auth.ctx);
 	pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
 	pst.len = session->auth.ctx_len;
 	pst.dir = 1;
@@ -1832,18 +1884,32 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
-					      op->sym->auth.data.offset);
 	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
 	if (!append_ptr) {
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	if (iommu_mode == 2) {
+		src_addr = (phys_addr_t)rte_mem_virt2iova(
+				rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+				op->sym->auth.data.offset));
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2iova(
+				(void *)session->auth.pre_compute);
+	} else {
+		src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					op->sym->auth.data.offset);
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2phy(
+				(void *)session->auth.pre_compute);
+	}
+
 	dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
-	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
-						   *)session->auth.pre_compute);
+
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
 
@@ -1972,18 +2038,29 @@ ccp_perform_sha3(struct rte_crypto_op *op,
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
-					      op->sym->auth.data.offset);
 	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
 	if (!append_ptr) {
 		CCP_LOG_ERR("CCP MBUF append failed\n");
 		return -1;
 	}
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
-	ctx_addr = session->auth.sha3_ctx;
-	ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+	ctx_addr = session->auth.sha3_ctx;
+	if (iommu_mode == 2) {
+		src_addr = (phys_addr_t)rte_mem_virt2iova(
+				rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+				op->sym->auth.data.offset));
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
+	} else {
+		src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					op->sym->auth.data.offset);
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+		ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+	}
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 	memset(desc, 0, Q_DESC_SIZE);
 
@@ -2032,20 +2109,31 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 	struct ccp_passthru pst;
 	struct ccp_desc *desc;
 	uint32_t tail;
 	uint8_t *src_tb, *append_ptr, *ctx_addr;
 	phys_addr_t src_addr, dest_addr, key_addr;
 	int length, non_align_len;
 
 	session = (struct ccp_session *)get_sym_session_private_data(
 					 op->sym->session,
 					ccp_cryptodev_driver_id);
-	key_addr = rte_mem_virt2phy(session->auth.key_ccp);
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
-					      op->sym->auth.data.offset);
 	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
 						session->auth.ctx_len);
-	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	if (iommu_mode == 2) {
+		key_addr = rte_mem_virt2iova(session->auth.key_ccp);
+		src_addr = (phys_addr_t)rte_mem_virt2iova(
+				rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+				op->sym->auth.data.offset));
+		dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+	} else {
+		key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+		src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					op->sym->auth.data.offset);
+		dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+	}
 
 	function.raw = 0;
 	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
@@ -2056,7 +2144,10 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 
 		ctx_addr = session->auth.pre_compute;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+					(void *)ctx_addr);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+					(void *)ctx_addr);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2094,7 +2185,10 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
 	} else {
 		ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
 		memset(ctx_addr, 0, AES_BLOCK_SIZE);
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+					(void *)ctx_addr);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+					(void *)ctx_addr);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2221,11 +2315,23 @@ ccp_perform_aes(struct rte_crypto_op *op,
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+	if (iommu_mode == 2)
+		src_addr = (phys_addr_t)rte_mem_virt2iova(
+				rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+				op->sym->cipher.data.offset));
+	else
+		src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
 					      op->sym->cipher.data.offset);
-	if (likely(op->sym->m_dst != NULL))
-		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
-						op->sym->cipher.data.offset);
-	else
-		dest_addr = src_addr;
+	if (likely(op->sym->m_dst != NULL)) {
+		if (iommu_mode == 2)
+			dest_addr = (phys_addr_t)rte_mem_virt2iova(
+					rte_pktmbuf_mtod_offset(op->sym->m_dst,
+					void *, op->sym->cipher.data.offset));
+		else
+			dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+					op->sym->cipher.data.offset);
+	} else {
+		dest_addr = src_addr;
+	}
 	key_addr = session->cipher.key_phys;
@@ -2289,7 +2395,11 @@ ccp_perform_3des(struct rte_crypto_op *op,
 		rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
 			   iv, session->iv.length);
 
-		pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+		if (iommu_mode == 2)
+			pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
+					(void *)lsb_buf);
+		else
+			pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
+					(void *)lsb_buf);
 		pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
 		pst.len = CCP_SB_BYTES;
 		pst.dir = 1;
@@ -2303,16 +2413,30 @@ ccp_perform_3des(struct rte_crypto_op *op,
 		return -ENOTSUP;
 	}
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
-					      op->sym->cipher.data.offset);
+	if (iommu_mode == 2)
+		src_addr = (phys_addr_t)rte_mem_virt2iova(
+				rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+				op->sym->cipher.data.offset));
+	else
+		src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					op->sym->cipher.data.offset);
-	if (unlikely(op->sym->m_dst != NULL))
-		dest_addr =
-			rte_pktmbuf_mtophys_offset(op->sym->m_dst,
-						   op->sym->cipher.data.offset);
-	else
-		dest_addr = src_addr;
+	if (unlikely(op->sym->m_dst != NULL)) {
+		if (iommu_mode == 2)
+			dest_addr = (phys_addr_t)rte_mem_virt2iova(
+					rte_pktmbuf_mtod_offset(op->sym->m_dst,
+					void *, op->sym->cipher.data.offset));
+		else
+			dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+					op->sym->cipher.data.offset);
+	} else {
+		dest_addr = src_addr;
+	}
 
-	key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+	if (iommu_mode == 2)
+		key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
+	else
+		key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
 
 	desc = &cmd_q->qbase_desc[cmd_q->qidx];
 
@@ -2385,11 +2509,23 @@ ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
 	key_addr = session->cipher.key_phys;
 
-	src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
-					      op->sym->aead.data.offset);
+	if (iommu_mode == 2)
+		src_addr = (phys_addr_t)rte_mem_virt2iova(
+				rte_pktmbuf_mtod_offset(op->sym->m_src, void *,
+				op->sym->aead.data.offset));
+	else
+		src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+					op->sym->aead.data.offset);
-	if (unlikely(op->sym->m_dst != NULL))
-		dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
-						op->sym->aead.data.offset);
-	else
-		dest_addr = src_addr;
+	if (unlikely(op->sym->m_dst != NULL)) {
+		if (iommu_mode == 2)
+			dest_addr = (phys_addr_t)rte_mem_virt2iova(
+					rte_pktmbuf_mtod_offset(op->sym->m_dst,
+					void *, op->sym->aead.data.offset));
+		else
+			dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+					op->sym->aead.data.offset);
+	} else {
+		dest_addr = src_addr;
+	}
 	rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
@@ -2704,8 +2840,14 @@ process_ops_to_enqueue(struct ccp_qp *qp,
 	b_info->lsb_buf_idx = 0;
 	b_info->desccnt = 0;
 	b_info->cmd_q = cmd_q;
-	b_info->lsb_buf_phys =
-		(phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+	if (iommu_mode == 2)
+		b_info->lsb_buf_phys = (phys_addr_t)rte_mem_virt2iova(
+						(void *)b_info->lsb_buf);
+	else
+		b_info->lsb_buf_phys = (phys_addr_t)rte_mem_virt2phy(
+						(void *)b_info->lsb_buf);
 	rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
 
 	b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 80fe6a453..69ba61a3d 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -23,6 +23,8 @@
 #include "ccp_pci.h"
 #include "ccp_pmd_private.h"
 
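+/*
+ * Kernel module the CCP device is bound to, as detected by
+ * ccp_check_pci_uio_module(): 0 = igb_uio, 1 = uio_pci_generic,
+ * 2 = vfio-pci (IOMMU enabled).
+ */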
+int iommu_mode;
+
 struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
 static int ccp_dev_id;
 
@@ -512,7 +514,7 @@ ccp_add_device(struct ccp_device *dev, int type)
 
 		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
 	}
-	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x0);
 
 	/* Copy the private LSB mask to the public registers */
 	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
@@ -657,9 +659,7 @@ ccp_probe_device(const char *dirname, uint16_t domain,
 	struct rte_pci_device *pci;
 	char filename[PATH_MAX];
 	unsigned long tmp;
-	int uio_fd = -1, i, uio_num;
-	char uio_devname[PATH_MAX];
-	void *map_addr;
+	int uio_fd = -1;
 
 	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
 			      RTE_CACHE_LINE_SIZE);
@@ -710,46 +710,14 @@ ccp_probe_device(const char *dirname, uint16_t domain,
 	snprintf(filename, sizeof(filename), "%s/resource", dirname);
 	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
 		goto fail;
+	if (iommu_mode == 2)
+		pci->kdrv = RTE_KDRV_VFIO;
+	else if (iommu_mode == 0)
+		pci->kdrv = RTE_KDRV_IGB_UIO;
+	else if (iommu_mode == 1)
+		pci->kdrv = RTE_KDRV_UIO_GENERIC;
 
-	uio_num = ccp_find_uio_devname(dirname);
-	if (uio_num < 0) {
-		/*
-		 * It may take time for uio device to appear,
-		 * wait  here and try again
-		 */
-		usleep(100000);
-		uio_num = ccp_find_uio_devname(dirname);
-		if (uio_num < 0)
-			goto fail;
-	}
-	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
-
-	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
-	if (uio_fd < 0)
-		goto fail;
-	if (flock(uio_fd, LOCK_EX | LOCK_NB))
-		goto fail;
-
-	/* Map the PCI memory resource of device */
-	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
-
-		char devname[PATH_MAX];
-		int res_fd;
-
-		if (pci->mem_resource[i].phys_addr == 0)
-			continue;
-		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
-		res_fd = open(devname, O_RDWR);
-		if (res_fd < 0)
-			goto fail;
-		map_addr = mmap(NULL, pci->mem_resource[i].len,
-				PROT_READ | PROT_WRITE,
-				MAP_SHARED, res_fd, 0);
-		if (map_addr == MAP_FAILED)
-			goto fail;
-
-		pci->mem_resource[i].addr = map_addr;
-	}
+	if (rte_pci_map_device(pci))
+		goto fail;
 
 	/* device is valid, add in list */
 	if (ccp_add_device(ccp_dev, ccp_type)) {
@@ -783,7 +751,7 @@ ccp_probe_devices(const struct rte_pci_id *ccp_id)
 	module_idx = ccp_check_pci_uio_module();
 	if (module_idx < 0)
 		return -1;
 
+	iommu_mode = module_idx;
 	TAILQ_INIT(&ccp_list);
 	dir = opendir(SYSFS_PCI_DEVICES);
 	if (dir == NULL)
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index de3e4bcc6..f4ad9eafd 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -59,7 +59,7 @@
 #define CMD_Q_RUN			0x1
 #define CMD_Q_SIZE			0x1F
 #define CMD_Q_SHIFT			3
-#define COMMANDS_PER_QUEUE		2048
+#define COMMANDS_PER_QUEUE		8192
 
 #define QUEUE_SIZE_VAL                  ((ffs(COMMANDS_PER_QUEUE) - 2) & \
 					 CMD_Q_SIZE)
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
index 1702a09c4..38029a908 100644
--- a/drivers/crypto/ccp/ccp_pci.c
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -15,6 +15,7 @@
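+/* the index of the entry found in /proc/modules becomes iommu_mode */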
 static const char * const uio_module_names[] = {
 	"igb_uio",
 	"uio_pci_generic",
+	"vfio_pci"
 };
 
 int
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index 4810d799c..45221097d 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -22,6 +22,7 @@
  */
 static unsigned int ccp_pmd_init_done;
 uint8_t ccp_cryptodev_driver_id;
+extern void *sha_ctx;
 
 struct ccp_pmd_init_params {
 	struct rte_cryptodev_pmd_init_params def_p;
@@ -279,6 +280,7 @@ cryptodev_ccp_remove(struct rte_vdev_device *dev)
 
 	ccp_pmd_init_done = 0;
 	name = rte_vdev_device_name(dev);
+	rte_free(sha_ctx);
 	if (name == NULL)
 		return -EINVAL;
 
@@ -296,7 +298,6 @@ cryptodev_ccp_create(const char *name,
 {
 	struct rte_cryptodev *dev;
 	struct ccp_private *internals;
-	uint8_t cryptodev_cnt = 0;
 
 	if (init_params->def_p.name[0] == '\0')
 		strlcpy(init_params->def_p.name, name,
@@ -361,7 +362,7 @@ cryptodev_ccp_probe(struct rte_vdev_device *vdev)
 		.auth_opt = CCP_PMD_AUTH_OPT_CCP,
 	};
 	const char *input_args;
 
+	sha_ctx = rte_malloc(NULL, SHA512_DIGEST_SIZE, 64);
 	if (ccp_pmd_init_done) {
 		RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
 		return -EFAULT;
-- 
2.17.1


