patch 'crypto/qat: fix out-of-place chain/cipher/auth headers' has been queued to stable release 23.11.5

Xueming Li xuemingl at nvidia.com
Thu Jun 26 14:00:40 CEST 2025


Hi,

FYI, your patch has been queued to stable release 23.11.5

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 06/28/25. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.

Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=23.11-staging

This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=23.11-staging&id=34f3447290892361ff0cc71c3b3d407b2f0a5d1d

Thanks.

Xueming Li <xuemingl at nvidia.com>

---
From 34f3447290892361ff0cc71c3b3d407b2f0a5d1d Mon Sep 17 00:00:00 2001
From: Arkadiusz Kusztal <arkadiuszx.kusztal at intel.com>
Date: Mon, 28 Apr 2025 06:30:41 +0000
Subject: [PATCH] crypto/qat: fix out-of-place chain/cipher/auth headers
Cc: Xueming Li <xuemingl at nvidia.com>

[ upstream commit 317d05f3721c9a740614adf77aa89d00d5302cf7 ]

This commit fixes a problem with overwriting data in the OOP header
in RAW API crypto processing when using chain, cipher and auth algorithms.

Fixes: 85fec6fd9674 ("crypto/qat: unify raw data path functions")

Signed-off-by: Arkadiusz Kusztal <arkadiuszx.kusztal at intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 146 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  40 +++--
 2 files changed, 171 insertions(+), 15 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index a817c2bbb7..1c6ef0aae9 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -275,6 +275,152 @@ done:
 	return src_tot_length;
 }
 
+struct qat_sym_req_mid_info {
+	uint32_t data_len;
+	union rte_crypto_sym_ofs ofs;
+};
+
+static inline
+struct qat_sym_req_mid_info qat_sym_req_mid_set(
+	int *error, struct icp_qat_fw_la_bulk_req *const req,
+	struct qat_sym_op_cookie *const cookie, const void *const opaque,
+	const struct rte_crypto_sgl *sgl_src, const struct rte_crypto_sgl *sgl_dst,
+	const union rte_crypto_sym_ofs ofs)
+{
+	struct qat_sym_req_mid_info info = { };  /* Returned value */
+	uint32_t src_tot_length = 0;
+	uint32_t dst_tot_length = 0; /* Used only for input validity checks */
+	uint32_t src_length = 0;
+	uint32_t dst_length = 0;
+	uint64_t src_data_addr = 0;
+	uint64_t dst_data_addr = 0;
+	union rte_crypto_sym_ofs out_ofs = ofs;
+	const struct rte_crypto_vec * const vec_src = sgl_src->vec;
+	const struct rte_crypto_vec * const vec_dst = sgl_dst->vec;
+	const uint32_t n_src = sgl_src->num;
+	const uint32_t n_dst = sgl_dst->num;
+	const uint16_t offset = RTE_MIN(ofs.ofs.cipher.head, ofs.ofs.auth.head);
+	const uint8_t is_flat = !(n_src > 1 || n_dst > 1); /* Flat buffer or the SGL */
+	const uint8_t is_in_place = !n_dst; /* In-place or out-of-place */
+
+	*error = 0;
+	if (unlikely((n_src < 1 || n_src > QAT_SYM_SGL_MAX_NUMBER) ||
+			n_dst > QAT_SYM_SGL_MAX_NUMBER)) {
+		QAT_LOG(DEBUG,
+			"Invalid number of sgls, source no: %u, dst no: %u, opaque: %p",
+			n_src, n_dst, opaque);
+		*error = -1;
+		return info;
+	}
+
+	/* --- Flat buffer --- */
+	if (is_flat) {
+		src_data_addr = vec_src->iova;
+		dst_data_addr = vec_src->iova;
+		src_length = vec_src->len;
+		dst_length = vec_src->len;
+
+		if (is_in_place)
+			goto done;
+		/* Out-of-place
+		 * If OOP, we need to keep in mind that offset needs to
+		 * start where the aead starts
+		 */
+		dst_length = vec_dst->len;
+		/* Comparison between different types, intentional */
+		if (unlikely(offset > src_length || offset > dst_length)) {
+			QAT_LOG(DEBUG,
+				"Invalid size of the vector parameters, source length: %u, dst length: %u, opaque: %p",
+				src_length, dst_length, opaque);
+			*error = -1;
+			return info;
+		}
+		out_ofs.ofs.cipher.head -= offset;
+		out_ofs.ofs.auth.head -= offset;
+		src_data_addr += offset;
+		dst_data_addr = vec_dst->iova + offset;
+		src_length -= offset;
+		dst_length -= offset;
+		src_tot_length = src_length;
+		dst_tot_length = dst_length;
+		goto check;
+	}
+
+	/* --- Scatter-gather list --- */
+	struct qat_sgl * const qat_sgl_src = (struct qat_sgl *)&cookie->qat_sgl_src;
+	uint16_t i;
+
+	ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+		QAT_COMN_PTR_TYPE_SGL);
+	qat_sgl_src->num_bufs = n_src;
+	src_data_addr = cookie->qat_sgl_src_phys_addr;
+	/* Fill all the source buffers but the first one */
+	for (i = 1; i < n_src; i++) {
+		qat_sgl_src->buffers[i].len = (vec_src + i)->len;
+		qat_sgl_src->buffers[i].addr = (vec_src + i)->iova;
+		src_tot_length += qat_sgl_src->buffers[i].len;
+	}
+
+	if (is_in_place) {
+		/* SGL source first entry, no OOP */
+		qat_sgl_src->buffers[0].len = vec_src->len;
+		qat_sgl_src->buffers[0].addr = vec_src->iova;
+		dst_data_addr = src_data_addr;
+		goto done;
+	}
+	/* Out-of-place */
+	struct qat_sgl * const qat_sgl_dst =
+			(struct qat_sgl *)&cookie->qat_sgl_dst;
+	/*
+	 * Offset reaching outside of the first buffer is not supported (RAW api).
+	 * Integer promotion here, but it does not bother this time
+	 */
+	if (unlikely(offset > vec_src->len || offset > vec_dst->len)) {
+		QAT_LOG(DEBUG,
+			"Invalid size of the vector parameters, source length: %u, dst length: %u, opaque: %p",
+			vec_src->len, vec_dst->len, opaque);
+		*error = -1;
+		return info;
+	}
+	out_ofs.ofs.cipher.head -= offset;
+	out_ofs.ofs.auth.head -= offset;
+	/* SGL source first entry, adjusted to OOP offsets */
+	qat_sgl_src->buffers[0].addr = vec_src->iova + offset;
+	qat_sgl_src->buffers[0].len = vec_src->len - offset;
+	/* SGL destination first entry, adjusted to OOP offsets */
+	qat_sgl_dst->buffers[0].addr = vec_dst->iova + offset;
+	qat_sgl_dst->buffers[0].len = vec_dst->len - offset;
+	/* Fill the remaining destination buffers */
+	for (i = 1; i < n_dst; i++) {
+		qat_sgl_dst->buffers[i].len = (vec_dst + i)->len;
+		qat_sgl_dst->buffers[i].addr = (vec_dst + i)->iova;
+		dst_tot_length += qat_sgl_dst->buffers[i].len;
+	}
+	dst_tot_length += qat_sgl_dst->buffers[0].len;
+	qat_sgl_dst->num_bufs = n_dst;
+	dst_data_addr = cookie->qat_sgl_dst_phys_addr;
+
+check:	/* If error, return directly. If success, jump to one of these labels */
+	if (src_tot_length != dst_tot_length) {
+		QAT_LOG(DEBUG,
+			"Source length is not equal to the destination length %u, dst no: %u, opaque: %p",
+			src_tot_length, dst_tot_length, opaque);
+		*error = -1;
+		return info;
+	}
+done:
+	req->comn_mid.opaque_data = (uintptr_t)opaque;
+	req->comn_mid.src_data_addr = src_data_addr;
+	req->comn_mid.dest_data_addr = dst_data_addr;
+	req->comn_mid.src_length = src_length;
+	req->comn_mid.dst_length = dst_length;
+
+	info.data_len = src_tot_length;
+	info.ofs = out_ofs;
+
+	return info;
+}
+
 static __rte_always_inline int32_t
 qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
 		void *opaque, struct qat_sym_op_cookie *cookie,
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index b06514cd62..1e7c35afed 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -544,16 +544,20 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 	for (i = 0; i < n; i++) {
 		struct qat_sym_op_cookie *cookie =
 			qp->op_cookies[tail >> tx_queue->trailz];
+		struct qat_sym_req_mid_info info = { };
+		union rte_crypto_sym_ofs temp_ofs = ofs;
+		int error = 0;
 
+		temp_ofs.ofs.auth = temp_ofs.ofs.cipher;
 		req  = (struct icp_qat_fw_la_bulk_req *)(
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
 		if (vec->dest_sgl) {
-			data_len = qat_sym_build_req_set_data(req,
-				user_data[i], cookie,
-				vec->src_sgl[i].vec, vec->src_sgl[i].num,
-				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+			info = qat_sym_req_mid_set(&error, req, cookie, user_data[i],
+				&vec->src_sgl[i], &vec->dest_sgl[i], temp_ofs);
+			data_len = info.data_len;
+			ofs = info.ofs;
 		} else {
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
@@ -561,7 +565,7 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 				vec->src_sgl[i].num, NULL, 0);
 		}
 
-		if (unlikely(data_len < 0))
+		if (unlikely(data_len < 0 || error))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
 			(uint32_t)data_len, cookie);
@@ -658,16 +662,20 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 	for (i = 0; i < n; i++) {
 		struct qat_sym_op_cookie *cookie =
 			qp->op_cookies[tail >> tx_queue->trailz];
+		struct qat_sym_req_mid_info info = { };
+		union rte_crypto_sym_ofs temp_ofs = ofs;
+		int error = 0;
 
+		temp_ofs.ofs.cipher = temp_ofs.ofs.auth;
 		req  = (struct icp_qat_fw_la_bulk_req *)(
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
 		if (vec->dest_sgl) {
-			data_len = qat_sym_build_req_set_data(req,
-				user_data[i], cookie,
-				vec->src_sgl[i].vec, vec->src_sgl[i].num,
-				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+			info = qat_sym_req_mid_set(&error, req, cookie, user_data[i],
+				&vec->src_sgl[i], &vec->dest_sgl[i], temp_ofs);
+			data_len = info.data_len;
+			ofs = info.ofs;
 		} else {
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
@@ -675,7 +683,7 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 				vec->src_sgl[i].num, NULL, 0);
 		}
 
-		if (unlikely(data_len < 0))
+		if (unlikely(data_len < 0 || error))
 			break;
 
 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
@@ -781,16 +789,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 	for (i = 0; i < n; i++) {
 		struct qat_sym_op_cookie *cookie =
 			qp->op_cookies[tail >> tx_queue->trailz];
+		struct qat_sym_req_mid_info info = { };
+		int error = 0;
 
 		req  = (struct icp_qat_fw_la_bulk_req *)(
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
 		if (vec->dest_sgl) {
-			data_len = qat_sym_build_req_set_data(req,
-				user_data[i], cookie,
-				vec->src_sgl[i].vec, vec->src_sgl[i].num,
-				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+			info = qat_sym_req_mid_set(&error, req, cookie, user_data[i],
+				&vec->src_sgl[i], &vec->dest_sgl[i], ofs);
+			data_len = info.data_len;
+			ofs = info.ofs;
 		} else {
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
@@ -798,7 +808,7 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 				vec->src_sgl[i].num, NULL, 0);
 		}
 
-		if (unlikely(data_len < 0))
+		if (unlikely(data_len < 0 || error))
 			break;
 
 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
-- 
2.34.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2025-06-26 19:59:18.665600991 +0800
+++ 0020-crypto-qat-fix-out-of-place-chain-cipher-auth-header.patch	2025-06-26 19:59:17.234418050 +0800
@@ -1 +1 @@
-From 317d05f3721c9a740614adf77aa89d00d5302cf7 Mon Sep 17 00:00:00 2001
+From 34f3447290892361ff0cc71c3b3d407b2f0a5d1d Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 317d05f3721c9a740614adf77aa89d00d5302cf7 ]
@@ -10 +12,0 @@
-Cc: stable at dpdk.org
@@ -19 +21 @@
-index c447f2cb45..846636f57d 100644
+index a817c2bbb7..1c6ef0aae9 100644
@@ -22 +24 @@
-@@ -280,6 +280,152 @@ done:
+@@ -275,6 +275,152 @@ done:
@@ -176 +178 @@
-index 3976d03179..561166203c 100644
+index b06514cd62..1e7c35afed 100644
@@ -179 +181 @@
-@@ -567,16 +567,20 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -544,16 +544,20 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -204 +206 @@
-@@ -584,7 +588,7 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -561,7 +565,7 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -211,3 +213,3 @@
- 
- 		if (ctx->is_zuc256)
-@@ -688,16 +692,20 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+ 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
+ 			(uint32_t)data_len, cookie);
+@@ -658,16 +662,20 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -238 +240 @@
-@@ -705,7 +713,7 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -675,7 +683,7 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -246,2 +248,2 @@
- 		if (ctx->is_zuc256)
-@@ -819,16 +827,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+ 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {
+@@ -781,16 +789,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -270 +272 @@
-@@ -836,7 +846,7 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -798,7 +808,7 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -278 +280 @@
- 		if (ctx->is_zuc256) {
+ 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) {


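For anyone reviewing the backport, the following is a minimal, self-contained sketch (not part of the queued patch) of the head-offset normalization that the new qat_sym_req_mid_set() helper applies in the out-of-place flat-buffer path: the smaller of the cipher and auth head offsets is folded into the source and destination addresses, so the destination bytes in front of that point (the OOP header) are never written. The names below (vec, ofs, normalize_oop, MIN) are simplified stand-ins chosen for illustration, not the real rte_crypto_vec / rte_crypto_sym_ofs / RTE_MIN definitions from DPDK.

/*
 * Illustrative sketch only -- NOT part of the queued patch.  Simplified
 * stand-ins for rte_crypto_vec / rte_crypto_sym_ofs are used so the
 * example builds without any DPDK headers.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct vec { uint64_t iova; uint32_t len; };              /* stand-in for rte_crypto_vec */
struct ofs { uint16_t cipher_head; uint16_t auth_head; }; /* stand-in for the head offsets */

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/*
 * Out-of-place flat-buffer case: fold the common head offset into the
 * source/destination addresses so the destination bytes in front of it
 * (the OOP header) are never touched.
 */
static int
normalize_oop(const struct vec *src, const struct vec *dst, struct ofs *o,
	      uint64_t *src_addr, uint64_t *dst_addr,
	      uint32_t *src_len, uint32_t *dst_len)
{
	uint16_t offset = MIN(o->cipher_head, o->auth_head);

	/* Offsets reaching past the first buffer are rejected, as in the patch. */
	if (offset > src->len || offset > dst->len)
		return -1;

	o->cipher_head -= offset;        /* heads become relative to ...    */
	o->auth_head   -= offset;        /* ... the shifted start addresses */
	*src_addr = src->iova + offset;
	*dst_addr = dst->iova + offset;
	*src_len  = src->len - offset;
	*dst_len  = dst->len - offset;
	return 0;
}

int main(void)
{
	struct vec src = { .iova = 0x1000, .len = 96 };
	struct vec dst = { .iova = 0x2000, .len = 96 };
	struct ofs o = { .cipher_head = 16, .auth_head = 8 };
	uint64_t sa, da;
	uint32_t sl, dl;

	if (normalize_oop(&src, &dst, &o, &sa, &da, &sl, &dl) == 0)
		printf("src 0x%" PRIx64 "/%" PRIu32 " dst 0x%" PRIx64 "/%" PRIu32
		       " cipher_head %u auth_head %u\n",
		       sa, sl, da, dl,
		       (unsigned int)o.cipher_head, (unsigned int)o.auth_head);
	return 0;
}

Built with a plain C compiler this prints "src 0x1008/88 dst 0x2008/88 cipher_head 8 auth_head 0"; the actual helper in the diff above performs the same arithmetic on the crypto vectors before filling src_data_addr/dest_data_addr in the firmware request.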