patch 'crypto/qat: fix source buffer alignment' has been queued to stable release 22.11.11

luca.boccassi at gmail.com luca.boccassi at gmail.com
Mon Oct 27 17:19:51 CET 2025


Hi,

FYI, your patch has been queued to stable release 22.11.11

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 10/29/25. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Queued patches are on a temporary branch at:
https://github.com/bluca/dpdk-stable

This queued commit can be viewed at:
https://github.com/bluca/dpdk-stable/commit/4c7cbd6bf29276e8f874343f8c262756e90c4f02

Thanks.

Luca Boccassi

---
>From 4c7cbd6bf29276e8f874343f8c262756e90c4f02 Mon Sep 17 00:00:00 2001
From: Radu Nicolau <radu.nicolau at intel.com>
Date: Wed, 6 Aug 2025 14:48:32 +0000
Subject: [PATCH] crypto/qat: fix source buffer alignment

[ upstream commit 253174309ff7abf9eaba58d1bccf90cca7e6d215 ]

Fix performance regression resulting from using non cache-aligned
source buffers when using cryptodev API.

Fixes: fb3b9f492205 ("crypto/qat: rework burst data path")

Signed-off-by: Radu Nicolau <radu.nicolau at intel.com>
Acked-by: Kai Ji <kai.ji at intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 14 ++++++------
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  6 ++---
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 21 ++++++++++++++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 24 ++++++++++----------
 4 files changed, 42 insertions(+), 23 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index 989caabf17..4a114a8a79 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -368,7 +368,7 @@ qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
 	}
 
 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
-			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num, &ofs, op);
 	if (unlikely(total_len < 0)) {
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 		return -EINVAL;
@@ -414,7 +414,7 @@ qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
 	}
 
 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
-			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num, &ofs, op);
 	if (unlikely(total_len < 0)) {
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 		return -EINVAL;
@@ -503,7 +503,7 @@ qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
-			data, n_data_vecs, NULL, 0);
+			data, n_data_vecs, NULL, 0, NULL, NULL);
 	if (unlikely(data_len < 0))
 		return -1;
 
@@ -555,12 +555,12 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
 				vec->src_sgl[i].vec, vec->src_sgl[i].num,
-				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num, NULL, NULL);
 		} else {
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
 				vec->src_sgl[i].vec,
-				vec->src_sgl[i].num, NULL, 0);
+				vec->src_sgl[i].num, NULL, 0, NULL, NULL);
 		}
 
 		if (unlikely(data_len < 0))
@@ -616,7 +616,7 @@ qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
-			data, n_data_vecs, NULL, 0);
+			data, n_data_vecs, NULL, 0, NULL, NULL);
 	if (unlikely(data_len < 0))
 		return -1;
 
@@ -679,7 +679,7 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
 				vec->src_sgl[i].vec,
-				vec->src_sgl[i].num, NULL, 0);
+				vec->src_sgl[i].num, NULL, 0, NULL, NULL);
 		}
 
 		if (unlikely(data_len < 0) || error)
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 1ffc4528cf..54b3295647 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -207,7 +207,7 @@ qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
 	}
 
 	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
-			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num, &ofs, op);
 	if (unlikely(total_len < 0)) {
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 		return -EINVAL;
@@ -366,7 +366,7 @@ qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
-			data, n_data_vecs, NULL, 0);
+			data, n_data_vecs, NULL, 0, NULL, NULL);
 	if (unlikely(data_len < 0))
 		return -1;
 
@@ -426,7 +426,7 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
 				vec->src_sgl[i].vec,
-				vec->src_sgl[i].num, NULL, 0);
+				vec->src_sgl[i].num, NULL, 0, NULL, NULL);
 		}
 
 		if (unlikely(data_len < 0) || error)
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 6f676a2c44..3201e1ead8 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -411,7 +411,8 @@ static __rte_always_inline int32_t
 qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
 		void *opaque, struct qat_sym_op_cookie *cookie,
 		struct rte_crypto_vec *src_vec, uint16_t n_src,
-		struct rte_crypto_vec *dst_vec, uint16_t n_dst)
+		struct rte_crypto_vec *dst_vec, uint16_t n_dst,
+		union rte_crypto_sym_ofs *ofs, struct rte_crypto_op *op)
 {
 	struct qat_sgl *list;
 	uint32_t i;
@@ -483,6 +484,24 @@ qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
 			dst_data_start = src_data_start;
 	}
 
+	/* For crypto API only try to align the in-place buffers*/
+	if (op != NULL && likely(n_dst == 0)) {
+		uint16_t offset = src_data_start & RTE_CACHE_LINE_MASK;
+		if (offset) {
+			rte_iova_t buff_addr = rte_mbuf_iova_get(op->sym->m_src);
+			/* make sure src_data_start is still within the buffer */
+			if (src_data_start - offset >= buff_addr) {
+				src_data_start -= offset;
+				dst_data_start = src_data_start;
+				ofs->ofs.auth.head += offset;
+				ofs->ofs.cipher.head += offset;
+				tl_src += offset;
+				total_len_src = tl_src;
+				total_len_dst = tl_src;
+			}
+		}
+	}
+
 	req->comn_mid.src_data_addr = src_data_start;
 	req->comn_mid.dest_data_addr = dst_data_start;
 	req->comn_mid.src_length = total_len_src;
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 1856770522..be50f5049f 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -236,7 +236,7 @@ qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
 	}
 
 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
-			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num, &ofs, op);
 	if (unlikely(total_len < 0)) {
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 		return -EINVAL;
@@ -281,7 +281,7 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
 	}
 
 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
-			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num, &ofs, op);
 	if (unlikely(total_len < 0)) {
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 		return -EINVAL;
@@ -328,7 +328,7 @@ qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
 	}
 
 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
-			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num, &ofs, op);
 	if (unlikely(total_len < 0)) {
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 		return -EINVAL;
@@ -375,7 +375,7 @@ qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
 	}
 
 	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
-			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num, &ofs, op);
 	if (unlikely(total_len < 0)) {
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 		return -EINVAL;
@@ -508,7 +508,7 @@ qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
 
 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
-			data, n_data_vecs, NULL, 0);
+			data, n_data_vecs, NULL, 0, NULL, NULL);
 	if (unlikely(data_len < 0))
 		return -1;
 
@@ -569,7 +569,7 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
 				vec->src_sgl[i].vec,
-				vec->src_sgl[i].num, NULL, 0);
+				vec->src_sgl[i].num, NULL, 0, NULL, NULL);
 		}
 
 		if (unlikely(data_len < 0 || error))
@@ -622,7 +622,7 @@ qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
-			data, n_data_vecs, NULL, 0);
+			data, n_data_vecs, NULL, 0, NULL, NULL);
 	if (unlikely(data_len < 0))
 		return -1;
 
@@ -690,7 +690,7 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
 				vec->src_sgl[i].vec,
-				vec->src_sgl[i].num, NULL, 0);
+				vec->src_sgl[i].num, NULL, 0, NULL, NULL);
 		}
 
 		if (unlikely(data_len < 0 || error))
@@ -749,7 +749,7 @@ qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
-			data, n_data_vecs, NULL, 0);
+			data, n_data_vecs, NULL, 0, NULL, NULL);
 	if (unlikely(data_len < 0))
 		return -1;
 
@@ -818,7 +818,7 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
 				vec->src_sgl[i].vec,
-				vec->src_sgl[i].num, NULL, 0);
+				vec->src_sgl[i].num, NULL, 0, NULL, NULL);
 		}
 
 		if (unlikely(data_len < 0 || error))
@@ -882,7 +882,7 @@ qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
 	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
 	data_len = qat_sym_build_req_set_data(req, user_data, cookie,
-			data, n_data_vecs, NULL, 0);
+			data, n_data_vecs, NULL, 0, NULL, NULL);
 	if (unlikely(data_len < 0))
 		return -1;
 
@@ -942,7 +942,7 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			data_len = qat_sym_build_req_set_data(req,
 				user_data[i], cookie,
 				vec->src_sgl[i].vec,
-				vec->src_sgl[i].num, NULL, 0);
+				vec->src_sgl[i].num, NULL, 0, NULL, NULL);
 		}
 
 		if (unlikely(data_len < 0) || error)
-- 
2.47.3

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2025-10-27 15:54:37.440334027 +0000
+++ 0073-crypto-qat-fix-source-buffer-alignment.patch	2025-10-27 15:54:34.851950954 +0000
@@ -1 +1 @@
-From 253174309ff7abf9eaba58d1bccf90cca7e6d215 Mon Sep 17 00:00:00 2001
+From 4c7cbd6bf29276e8f874343f8c262756e90c4f02 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 253174309ff7abf9eaba58d1bccf90cca7e6d215 ]
+
@@ -10 +11,0 @@
-Cc: stable at dpdk.org
@@ -22 +23 @@
-index 0dcb5a7cb4..c196cf3cdb 100644
+index 989caabf17..4a114a8a79 100644
@@ -25 +26 @@
-@@ -422,7 +422,7 @@ qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+@@ -368,7 +368,7 @@ qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
@@ -34 +35 @@
-@@ -466,7 +466,7 @@ qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+@@ -414,7 +414,7 @@ qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
@@ -43 +44 @@
-@@ -564,7 +564,7 @@ qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
+@@ -503,7 +503,7 @@ qat_sym_dp_enqueue_single_aead_gen3(void *qp_data, uint8_t *drv_ctx,
@@ -52 +53,7 @@
-@@ -623,7 +623,7 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+@@ -555,12 +555,12 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
+ 			data_len = qat_sym_build_req_set_data(req,
+ 				user_data[i], cookie,
+ 				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+-				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
++				vec->dest_sgl[i].vec, vec->dest_sgl[i].num, NULL, NULL);
+ 		} else {
@@ -60,2 +67,2 @@
- 		if (unlikely(data_len < 0) || error)
-@@ -677,7 +677,7 @@ qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
+ 		if (unlikely(data_len < 0))
+@@ -616,7 +616,7 @@ qat_sym_dp_enqueue_single_auth_gen3(void *qp_data, uint8_t *drv_ctx,
@@ -70,7 +77 @@
-@@ -732,12 +732,12 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
- 			data_len = qat_sym_build_req_set_data(req,
- 				user_data[i], cookie,
- 				vec->src_sgl[i].vec, vec->src_sgl[i].num,
--				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
-+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num, NULL, NULL);
- 		} else {
+@@ -679,7 +679,7 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
@@ -84 +85 @@
- 		if (unlikely(data_len < 0))
+ 		if (unlikely(data_len < 0) || error)
@@ -86 +87 @@
-index 843580af72..82c5a40501 100644
+index 1ffc4528cf..54b3295647 100644
@@ -89 +90 @@
-@@ -289,7 +289,7 @@ qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+@@ -207,7 +207,7 @@ qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
@@ -98 +99 @@
-@@ -446,7 +446,7 @@ qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
+@@ -366,7 +366,7 @@ qat_sym_dp_enqueue_single_aead_gen4(void *qp_data, uint8_t *drv_ctx,
@@ -107 +108 @@
-@@ -505,7 +505,7 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
+@@ -426,7 +426,7 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
@@ -117 +118 @@
-index 1f19c69f88..67dc889b50 100644
+index 6f676a2c44..3201e1ead8 100644
@@ -120 +121 @@
-@@ -430,7 +430,8 @@ static __rte_always_inline int32_t
+@@ -411,7 +411,8 @@ static __rte_always_inline int32_t
@@ -130 +131 @@
-@@ -502,6 +503,24 @@ qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
+@@ -483,6 +484,24 @@ qat_sym_build_req_set_data(struct icp_qat_fw_la_bulk_req *req,
@@ -156 +157 @@
-index 8cb85fd8df..6da0f6c645 100644
+index 1856770522..be50f5049f 100644
@@ -159 +160 @@
-@@ -242,7 +242,7 @@ qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
+@@ -236,7 +236,7 @@ qat_sym_build_op_cipher_gen1(void *in_op, struct qat_sym_session *ctx,
@@ -168,2 +169,2 @@
-@@ -294,7 +294,7 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
- 				req->comn_hdr.serv_specif_flags, 0);
+@@ -281,7 +281,7 @@ qat_sym_build_op_auth_gen1(void *in_op, struct qat_sym_session *ctx,
+ 	}
@@ -177 +178 @@
-@@ -339,7 +339,7 @@ qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
+@@ -328,7 +328,7 @@ qat_sym_build_op_aead_gen1(void *in_op, struct qat_sym_session *ctx,
@@ -186 +187 @@
-@@ -384,7 +384,7 @@ qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
+@@ -375,7 +375,7 @@ qat_sym_build_op_chain_gen1(void *in_op, struct qat_sym_session *ctx,
@@ -195 +196 @@
-@@ -512,7 +512,7 @@ qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -508,7 +508,7 @@ qat_sym_dp_enqueue_single_cipher_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -204 +205 @@
-@@ -571,7 +571,7 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -569,7 +569,7 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -213 +214 @@
-@@ -623,7 +623,7 @@ qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -622,7 +622,7 @@ qat_sym_dp_enqueue_single_auth_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -231 +232 @@
-@@ -747,7 +747,7 @@ qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -749,7 +749,7 @@ qat_sym_dp_enqueue_single_chain_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -240 +241 @@
-@@ -815,7 +815,7 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -818,7 +818,7 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -249 +250 @@
-@@ -877,7 +877,7 @@ qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -882,7 +882,7 @@ qat_sym_dp_enqueue_single_aead_gen1(void *qp_data, uint8_t *drv_ctx,
@@ -258 +259 @@
-@@ -936,7 +936,7 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
+@@ -942,7 +942,7 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,


More information about the stable mailing list