patch 'crypto/ccp: fix IOVA handling' has been queued to stable release 22.11.2
Xueming Li
xuemingl at nvidia.com
Mon Feb 27 07:59:20 CET 2023
Hi,
FYI, your patch has been queued to stable release 22.11.2
Note that it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 03/01/23, so please
shout if you have any objections.
Also note that after the patch there is a diff of the upstream commit vs the
patch applied to the branch. This indicates whether any rebasing was
needed to apply the patch to the stable branch. If there were code changes
during rebasing (i.e., not only metadata diffs), please double-check that
the rebase was done correctly.
Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=22.11-staging
This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=22.11-staging&id=2d28201221df4758ab2c2124c07a3d935bd4999b
Thanks.
Xueming Li <xuemingl at nvidia.com>
---
From 2d28201221df4758ab2c2124c07a3d935bd4999b Mon Sep 17 00:00:00 2001
From: David Marchand <david.marchand at redhat.com>
Date: Tue, 4 Oct 2022 11:51:31 +0200
Subject: [PATCH] crypto/ccp: fix IOVA handling
Cc: Xueming Li <xuemingl at nvidia.com>
[ upstream commit 9631fb8f10dfafb0547fbf9127b162e62b2bef43 ]
Using IOVA or physical addresses is something that the user (via
--iova-mode=) or the bus code decides.
The crypto/ccp PCI driver should only use rte_mem_virt2iova.
It should not try to decide what to use solely based on the kmod
the PCI device is bound to.
While at it, the global variable sha_ctx looks unsafe and unneeded.
Remove it.
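
For illustration only (not part of the patch): a minimal sketch of the
pattern the fix converges on. rte_mem_virt2iova(), rte_eal_iova_mode()
and RTE_BAD_IOVA are real EAL APIs; the helper name below is hypothetical.

#include <rte_eal.h>
#include <rte_memory.h>

/* Hypothetical helper: resolve the bus address for a driver buffer.
 * The EAL already dispatches on --iova-mode=: rte_mem_virt2iova()
 * returns the virtual address itself when rte_eal_iova_mode() is
 * RTE_IOVA_VA, and performs the virt-to-phys translation in
 * RTE_IOVA_PA mode, so the driver never needs to branch on which
 * kernel module the PCI device is bound to.
 */
static rte_iova_t
ccp_dma_addr(const void *virt)
{
	/* RTE_BAD_IOVA is returned for memory the EAL does not manage. */
	return rte_mem_virt2iova(virt);
}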
Fixes: 09a0fd736a08 ("crypto/ccp: enable IOMMU")
Signed-off-by: David Marchand <david.marchand at redhat.com>
Acked-by: Sunil Uttarwar <sunilprakashrao.uttarwar at amd.com>
---
drivers/crypto/ccp/ccp_crypto.c | 105 ++++++-------------------------
drivers/crypto/ccp/ccp_dev.c | 9 +--
drivers/crypto/ccp/ccp_pci.c | 34 ----------
drivers/crypto/ccp/ccp_pci.h | 3 -
drivers/crypto/ccp/rte_ccp_pmd.c | 3 -
5 files changed, 19 insertions(+), 135 deletions(-)
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index b21b32e507..2758187d93 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -33,8 +33,6 @@
#include <openssl/err.h>
#include <openssl/hmac.h>
-extern int iommu_mode;
-void *sha_ctx;
/* SHA initial context values */
uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
SHA1_H4, SHA1_H3,
@@ -748,13 +746,8 @@ ccp_configure_session_cipher(struct ccp_session *sess,
CCP_LOG_ERR("Invalid CCP Engine");
return -ENOTSUP;
}
- if (iommu_mode == 2) {
- sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
- sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
- } else {
- sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
- sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
- }
+ sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
return 0;
}
@@ -793,7 +786,6 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.ctx = (void *)ccp_sha1_init;
sess->auth.ctx_len = CCP_SB_BYTES;
sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
- rte_memcpy(sha_ctx, sess->auth.ctx, SHA_COMMON_DIGEST_SIZE);
break;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
if (sess->auth_opt) {
@@ -832,7 +824,6 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.ctx = (void *)ccp_sha224_init;
sess->auth.ctx_len = CCP_SB_BYTES;
sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
- rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
if (sess->auth_opt) {
@@ -895,7 +886,6 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.ctx = (void *)ccp_sha256_init;
sess->auth.ctx_len = CCP_SB_BYTES;
sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
- rte_memcpy(sha_ctx, sess->auth.ctx, SHA256_DIGEST_SIZE);
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
if (sess->auth_opt) {
@@ -958,7 +948,6 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.ctx = (void *)ccp_sha384_init;
sess->auth.ctx_len = CCP_SB_BYTES << 1;
sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
- rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
if (sess->auth_opt) {
@@ -1023,7 +1012,6 @@ ccp_configure_session_auth(struct ccp_session *sess,
sess->auth.ctx = (void *)ccp_sha512_init;
sess->auth.ctx_len = CCP_SB_BYTES << 1;
sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
- rte_memcpy(sha_ctx, sess->auth.ctx, SHA512_DIGEST_SIZE);
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
if (sess->auth_opt) {
@@ -1173,13 +1161,8 @@ ccp_configure_session_aead(struct ccp_session *sess,
CCP_LOG_ERR("Unsupported aead algo");
return -ENOTSUP;
}
- if (iommu_mode == 2) {
- sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
- sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
- } else {
- sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
- sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
- }
+ sess->cipher.nonce_phys = rte_mem_virt2iova(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2iova(sess->cipher.key_ccp);
return 0;
}
@@ -1592,14 +1575,8 @@ ccp_perform_hmac(struct rte_crypto_op *op,
op->sym->auth.data.offset);
append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
- if (iommu_mode == 2) {
- dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
- pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
- } else {
- dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
- }
- dest_addr_t = dest_addr;
+ dest_addr_t = dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
/** Load PHash1 to LSB*/
pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
@@ -1681,10 +1658,7 @@ ccp_perform_hmac(struct rte_crypto_op *op,
/** Load PHash2 to LSB*/
addr += session->auth.ctx_len;
- if (iommu_mode == 2)
- pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
- else
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)addr);
pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
pst.len = session->auth.ctx_len;
pst.dir = 1;
@@ -1770,14 +1744,8 @@ ccp_perform_sha(struct rte_crypto_op *op,
op->sym->auth.data.offset);
append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
session->auth.ctx_len);
- if (iommu_mode == 2) {
- dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
- pst.src_addr = (phys_addr_t)sha_ctx;
- } else {
- dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
- session->auth.ctx);
- }
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)session->auth.ctx);
+ dest_addr = (phys_addr_t)rte_mem_virt2iova(append_ptr);
/** Passthru sha context*/
@@ -1865,15 +1833,8 @@ ccp_perform_sha3_hmac(struct rte_crypto_op *op,
CCP_LOG_ERR("CCP MBUF append failed\n");
return -1;
}
- if (iommu_mode == 2) {
- dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
- ctx_paddr = (phys_addr_t)rte_mem_virt2iova(
- session->auth.pre_compute);
- } else {
- dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
- ctx_paddr = (phys_addr_t)rte_mem_virt2phy(
- session->auth.pre_compute);
- }
+ dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2iova(session->auth.pre_compute);
dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
desc = &cmd_q->qbase_desc[cmd_q->qidx];
memset(desc, 0, Q_DESC_SIZE);
@@ -2009,13 +1970,8 @@ ccp_perform_sha3(struct rte_crypto_op *op,
CCP_LOG_ERR("CCP MBUF append failed\n");
return -1;
}
- if (iommu_mode == 2) {
- dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
- ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
- } else {
- dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
- ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
- }
+ dest_addr = (phys_addr_t)rte_mem_virt2iova((void *)append_ptr);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
ctx_addr = session->auth.sha3_ctx;
@@ -2089,13 +2045,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
ctx_addr = session->auth.pre_compute;
memset(ctx_addr, 0, AES_BLOCK_SIZE);
- if (iommu_mode == 2)
- pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
- (void *)ctx_addr);
- else
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
- (void *)ctx_addr);
-
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
pst.len = CCP_SB_BYTES;
pst.dir = 1;
@@ -2133,12 +2083,7 @@ ccp_perform_aes_cmac(struct rte_crypto_op *op,
} else {
ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
memset(ctx_addr, 0, AES_BLOCK_SIZE);
- if (iommu_mode == 2)
- pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
- (void *)ctx_addr);
- else
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
- (void *)ctx_addr);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *)ctx_addr);
pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
pst.len = CCP_SB_BYTES;
pst.dir = 1;
@@ -2328,12 +2273,7 @@ ccp_perform_3des(struct rte_crypto_op *op,
rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
iv, session->iv.length);
- if (iommu_mode == 2)
- pst.src_addr = (phys_addr_t)rte_mem_virt2iova(
- (void *) lsb_buf);
- else
- pst.src_addr = (phys_addr_t)rte_mem_virt2phy(
- (void *) lsb_buf);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2iova((void *) lsb_buf);
pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
pst.len = CCP_SB_BYTES;
pst.dir = 1;
@@ -2356,11 +2296,7 @@ ccp_perform_3des(struct rte_crypto_op *op,
else
dest_addr = src_addr;
- if (iommu_mode == 2)
- key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
- else
- key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
-
+ key_addr = rte_mem_virt2iova(session->cipher.key_ccp);
desc = &cmd_q->qbase_desc[cmd_q->qidx];
memset(desc, 0, Q_DESC_SIZE);
@@ -2746,12 +2682,7 @@ process_ops_to_enqueue(struct ccp_qp *qp,
b_info->lsb_buf_idx = 0;
b_info->desccnt = 0;
b_info->cmd_q = cmd_q;
- if (iommu_mode == 2)
- b_info->lsb_buf_phys =
- (phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
- else
- b_info->lsb_buf_phys =
- (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+ b_info->lsb_buf_phys = (phys_addr_t)rte_mem_virt2iova((void *)b_info->lsb_buf);
rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 410e62121e..14c54929c4 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -23,7 +23,6 @@
#include "ccp_pci.h"
#include "ccp_pmd_private.h"
-int iommu_mode;
struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
static int ccp_dev_id;
@@ -652,7 +651,7 @@ is_ccp_device(const char *dirname,
static int
ccp_probe_device(int ccp_type, struct rte_pci_device *pci_dev)
{
- struct ccp_device *ccp_dev = NULL;
+ struct ccp_device *ccp_dev;
ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
RTE_CACHE_LINE_SIZE);
@@ -683,16 +682,10 @@ ccp_probe_devices(struct rte_pci_device *pci_dev,
struct dirent *d;
DIR *dir;
int ret = 0;
- int module_idx = 0;
uint16_t domain;
uint8_t bus, devid, function;
char dirname[PATH_MAX];
- module_idx = ccp_check_pci_uio_module();
- if (module_idx < 0)
- return -1;
-
- iommu_mode = module_idx;
TAILQ_INIT(&ccp_list);
dir = opendir(SYSFS_PCI_DEVICES);
if (dir == NULL)
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
index c941e222c7..bd1a037f76 100644
--- a/drivers/crypto/ccp/ccp_pci.c
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -11,40 +11,6 @@
#include <rte_string_fns.h>
#include "ccp_pci.h"
-#include "ccp_pmd_private.h"
-
-static const char * const uio_module_names[] = {
- "igb_uio",
- "uio_pci_generic",
- "vfio_pci"
-};
-
-int
-ccp_check_pci_uio_module(void)
-{
- FILE *fp;
- int i;
- char buf[BUFSIZ];
-
- fp = fopen(PROC_MODULES, "r");
- if (fp == NULL)
- return -1;
- i = 0;
- while (uio_module_names[i] != NULL) {
- while (fgets(buf, sizeof(buf), fp) != NULL) {
- if (!strncmp(buf, uio_module_names[i],
- strlen(uio_module_names[i]))) {
- fclose(fp);
- return i;
- }
- }
- i++;
- rewind(fp);
- }
- fclose(fp);
- CCP_LOG_DBG("Insert igb_uio or uio_pci_generic kernel module(s)");
- return -1;/* uio not inserted */
-}
/*
* split up a pci address into its constituent parts.
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
index 6736bf8ad3..d9a8b9dcc6 100644
--- a/drivers/crypto/ccp/ccp_pci.h
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -10,9 +10,6 @@
#include <bus_pci_driver.h>
#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
-#define PROC_MODULES "/proc/modules"
-
-int ccp_check_pci_uio_module(void);
int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
uint8_t *bus, uint8_t *devid, uint8_t *function);
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index bcb621234c..661a796116 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -22,7 +22,6 @@
static unsigned int ccp_pmd_init_done;
uint8_t ccp_cryptodev_driver_id;
uint8_t cryptodev_cnt;
-extern void *sha_ctx;
struct ccp_pmd_init_params {
struct rte_cryptodev_pmd_init_params def_p;
@@ -200,7 +199,6 @@ cryptodev_ccp_remove(struct rte_pci_device *pci_dev)
return -ENODEV;
ccp_pmd_init_done = 0;
- rte_free(sha_ctx);
RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
name, rte_socket_id());
@@ -287,7 +285,6 @@ cryptodev_ccp_probe(struct rte_pci_driver *pci_drv __rte_unused,
.auth_opt = CCP_PMD_AUTH_OPT_CCP,
};
- sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64);
if (ccp_pmd_init_done) {
RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
return -EFAULT;
--
2.25.1
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2023-02-27 14:08:42.615257100 +0800
+++ 0053-crypto-ccp-fix-IOVA-handling.patch 2023-02-27 14:08:40.779237000 +0800
@@ -1 +1 @@
-From 9631fb8f10dfafb0547fbf9127b162e62b2bef43 Mon Sep 17 00:00:00 2001
+From 2d28201221df4758ab2c2124c07a3d935bd4999b Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 9631fb8f10dfafb0547fbf9127b162e62b2bef43 ]
@@ -17 +19,0 @@
-Cc: stable at dpdk.org