[dpdk-dev] [PATCH v5 10/15] crypto/mlx5: add maximum segments devarg

Shiri Kuzin shirik at nvidia.com
Thu Jul 1 15:26:04 CEST 2021


From: Suanming Mou <suanmingm at nvidia.com>

The mlx5 HW crypto operations are done by attaching a crypto property
to a memory region. Once done, every access to the memory via the
crypto-enabled memory region will result in in-line encryption or
decryption of the data.

As a result, the design choice is to provide two types of WQEs. One
is a UMR WQE, which sets the crypto property, and the other is an RDMA
write WQE, which sends a DMA command to copy data from the local MR to
the remote MR.

The size of the WQEs will be defined by a new devarg called
max_segs_num.

This devarg also defines the maximum number of segments in an mbuf
chain that is supported for crypto operations.
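
As a sizing illustration only (not part of the patch): the sketch below
repeats the WQE-set arithmetic that the probe code in this patch
performs, with made-up structure sizes. BSF_SEG_SIZE, UMR_WQE_SIZE,
RDMA_WRITE_WQE_SIZE and DSEG_SIZE are hypothetical placeholders for the
real mlx5 PRM structures; only the scaling with max_segs_num mirrors
the driver code.

    #include <stdio.h>

    /* Placeholder sizes; the driver takes the real values from the mlx5 PRM headers. */
    #define BSF_SEG_SIZE        64u /* stand-in for sizeof(struct mlx5_wqe_umr_bsf_seg) */
    #define UMR_WQE_SIZE        96u /* stand-in for sizeof(struct mlx5_umr_wqe) */
    #define RDMA_WRITE_WQE_SIZE 32u /* stand-in for sizeof(struct mlx5_rdma_write_wqe) */
    #define DSEG_SIZE           16u /* stand-in for sizeof(struct mlx5_wqe_dseg) */
    #define ALIGN4(x)           (((x) + 3u) & ~3u) /* same rounding as RTE_ALIGN(x, 4) */

    int main(void)
    {
        unsigned int max_segs_num = 8; /* devarg default used by this patch */
        /* UMR WQE: one data segment per (4-aligned) mbuf segment. */
        unsigned int umr = BSF_SEG_SIZE + UMR_WQE_SIZE +
                           ALIGN4(max_segs_num) * DSEG_SIZE;
        /* RDMA write WQE: at least 2 data segments, then growth in steps of 4. */
        unsigned int rdmw = RDMA_WRITE_WQE_SIZE + DSEG_SIZE *
                            (max_segs_num <= 2 ? 2 : 2 + ALIGN4(max_segs_num - 2));
        printf("wqe_set_size = %u (umr %u + rdma write %u)\n",
               umr + rdmw, umr, rdmw);
        return 0;
    }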

Signed-off-by: Suanming Mou <suanmingm at nvidia.com>
Signed-off-by: Matan Azrad <matan at nvidia.com>
---
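A usage note, outside the patch itself: the new devarg is passed on the
device probe string together with the existing crypto devargs. The
application, PCI address and values below are illustrative placeholders,
and the mandatory login devargs (credential_id, import_kek_id, keytag)
are omitted for brevity:

    dpdk-test-crypto-perf -a 0000:03:00.2,class=crypto,max_segs_num=16 ...
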
 doc/guides/cryptodevs/mlx5.rst    |  4 ++++
 drivers/crypto/mlx5/mlx5_crypto.c | 33 +++++++++++++++++++++++++++----
 drivers/crypto/mlx5/mlx5_crypto.h |  6 ++++++
 3 files changed, 39 insertions(+), 4 deletions(-)

diff --git a/doc/guides/cryptodevs/mlx5.rst b/doc/guides/cryptodevs/mlx5.rst
index c3632484a5..dd4705b744 100644
--- a/doc/guides/cryptodevs/mlx5.rst
+++ b/doc/guides/cryptodevs/mlx5.rst
@@ -120,6 +120,10 @@ Driver options
 
   The plaintext of the keytag appanded to the AES-XTS keys, default value is 0.
 
+- ``max_segs_num`` parameter [int]
+
+  Maximum number of mbuf chain segments (src or dest), default value is 8.
+
 
 Supported NICs
 --------------
diff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c
index 6fd6bd1511..a6949775a9 100644
--- a/drivers/crypto/mlx5/mlx5_crypto.c
+++ b/drivers/crypto/mlx5/mlx5_crypto.c
@@ -21,6 +21,7 @@
 #define MLX5_CRYPTO_DRIVER_NAME mlx5_crypto
 #define MLX5_CRYPTO_LOG_NAME pmd.crypto.mlx5
 #define MLX5_CRYPTO_MAX_QPS 1024
+#define MLX5_CRYPTO_MAX_SEGS 56
 
 #define MLX5_CRYPTO_FEATURE_FLAGS \
 	(RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_HW_ACCELERATED | \
@@ -496,14 +497,24 @@ mlx5_crypto_args_check_handler(const char *key, const char *val, void *opaque)
 		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
 		return -errno;
 	}
-	if (strcmp(key, "import_kek_id") == 0)
+	if (strcmp(key, "max_segs_num") == 0) {
+		if (!tmp || tmp > MLX5_CRYPTO_MAX_SEGS) {
+			DRV_LOG(WARNING, "Invalid max_segs_num: %u, must be"
+				" in range [1, %u].",
+				(uint32_t)tmp, MLX5_CRYPTO_MAX_SEGS);
+			rte_errno = EINVAL;
+			return -rte_errno;
+		}
+		devarg_prms->max_segs_num = (uint32_t)tmp;
+	} else if (strcmp(key, "import_kek_id") == 0) {
 		attr->session_import_kek_ptr = (uint32_t)tmp;
-	else if (strcmp(key, "credential_id") == 0)
+	} else if (strcmp(key, "credential_id") == 0) {
 		attr->credential_pointer = (uint32_t)tmp;
-	else if (strcmp(key, "keytag") == 0)
+	} else if (strcmp(key, "keytag") == 0) {
 		devarg_prms->keytag = tmp;
-	else
+	} else {
 		DRV_LOG(WARNING, "Invalid key %s.", key);
+	}
 	return 0;
 }
 
@@ -518,6 +529,7 @@ mlx5_crypto_parse_devargs(struct rte_devargs *devargs,
 	attr->credential_pointer = 0;
 	attr->session_import_kek_ptr = 0;
 	devarg_prms->keytag = 0;
+	devarg_prms->max_segs_num = 8;
 	if (devargs == NULL) {
 		DRV_LOG(ERR,
 	"No login devargs in order to enable crypto operations in the device.");
@@ -614,6 +626,7 @@ mlx5_crypto_pci_probe(struct rte_pci_driver *pci_drv,
 		.max_nb_queue_pairs =
 				RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
 	};
+	uint16_t rdmw_wqe_size;
 	int ret;
 
 	RTE_SET_USED(pci_drv);
@@ -692,6 +705,18 @@ mlx5_crypto_pci_probe(struct rte_pci_driver *pci_drv,
 	priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
 	priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
 	priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
+	priv->max_segs_num = devarg_prms.max_segs_num;
+	priv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) +
+			     sizeof(struct mlx5_umr_wqe) +
+			     RTE_ALIGN(priv->max_segs_num, 4) *
+			     sizeof(struct mlx5_wqe_dseg);
+	rdmw_wqe_size = sizeof(struct mlx5_rdma_write_wqe) +
+			      sizeof(struct mlx5_wqe_dseg) *
+			      (priv->max_segs_num <= 2 ? 2 : 2 +
+			       RTE_ALIGN(priv->max_segs_num - 2, 4));
+	priv->wqe_set_size = priv->umr_wqe_size + rdmw_wqe_size;
+	priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;
+	priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);
 	/* Register callback function for global shared MR cache management. */
 	if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
 		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
diff --git a/drivers/crypto/mlx5/mlx5_crypto.h b/drivers/crypto/mlx5/mlx5_crypto.h
index ad70052967..48ca1cb9a2 100644
--- a/drivers/crypto/mlx5/mlx5_crypto.h
+++ b/drivers/crypto/mlx5/mlx5_crypto.h
@@ -25,12 +25,17 @@ struct mlx5_crypto_priv {
 	struct rte_cryptodev *crypto_dev;
 	void *uar; /* User Access Region. */
 	uint32_t pdn; /* Protection Domain number. */
+	uint32_t max_segs_num; /* Maximum supported data segs. */
 	struct ibv_pd *pd;
 	struct mlx5_hlist *dek_hlist; /* Dek hash list. */
 	struct rte_cryptodev_config dev_config;
 	struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
 	struct mlx5_devx_obj *login_obj;
 	uint64_t keytag;
+	uint16_t wqe_set_size; /* WQE set size: UMR + RDMA write WQEs. */
+	uint16_t umr_wqe_size; /* UMR WQE size in bytes. */
+	uint16_t umr_wqe_stride; /* UMR WQE size in MLX5_SEND_WQE_BB units. */
+	uint16_t max_rdmar_ds; /* RDMA write WQE size in data-segment units. */
 };
 
 struct mlx5_crypto_qp {
@@ -54,6 +59,7 @@ struct mlx5_crypto_devarg_params {
 	bool login_devarg;
 	struct mlx5_devx_crypto_login_attr login_attr;
 	uint64_t keytag;
+	uint32_t max_segs_num;
 };
 
 int
-- 
2.27.0
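
An application-side note, also outside the patch: a crypto operation
whose source or destination mbuf chain has more segments than
max_segs_num cannot be described by the fixed-size WQE set, so callers
may want to guard against it before enqueueing. A minimal sketch; the
seg_limit value is a hypothetical application variable holding the
configured devarg:

    #include <rte_mbuf.h>

    /* Return 0 when the mbuf chain fits within the configured segment limit. */
    static int
    check_seg_limit(const struct rte_mbuf *m, unsigned int seg_limit)
    {
        return m->nb_segs <= seg_limit ? 0 : -1;
    }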
