patch 'crypto/qat: fix GEN4 write' has been queued to stable release 23.11.2
Xueming Li
xuemingl at nvidia.com
Mon Aug 12 14:49:42 CEST 2024
Hi,
FYI, your patch has been queued to stable release 23.11.2
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 08/14/24. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=23.11-staging
This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=23.11-staging&id=c6111cb5fd8bc40a1fb3c3eacd46c12793f4f72a
Thanks.
Xueming Li <xuemingl at nvidia.com>
---
>From c6111cb5fd8bc40a1fb3c3eacd46c12793f4f72a Mon Sep 17 00:00:00 2001
From: Brian Dooley <brian.dooley at intel.com>
Date: Fri, 12 Jul 2024 15:48:51 +0100
Subject: [PATCH] crypto/qat: fix GEN4 write
Cc: Xueming Li <xuemingl at nvidia.com>
[ upstream commit c355c2d8e65f02fa9621249c9b2a111477230c89 ]
All generations of QAT use the same Gen1 raw datapath.
Gen4 needs a different WRITE function than other generations.
Added separation for configuration of raw ctx for Gen4 from
the Gen1 codepath.
Fixes: 85fec6fd9674 ("crypto/qat: unify raw data path functions")
Signed-off-by: Brian Dooley <brian.dooley at intel.com>
Acked-by: Arkadiusz Kusztal <arkadiuszx.kusztal at intel.com>
---
drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 121 ++++++++++++++++++-
drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 6 +
2 files changed, 123 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index de72383d4b..b44acece7c 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -9,6 +9,7 @@
#include "qat_asym.h"
#include "qat_crypto.h"
#include "qat_crypto_pmd_gens.h"
+#include "adf_transport_access_macros_gen4vf.h"
static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen4[] = {
@@ -233,6 +234,78 @@ qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
return 0;
}
+int
+qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+ if (unlikely(dp_ctx->cached_enqueue != n))
+ return -1;
+
+ qp->enqueued += n;
+ qp->stats.enqueued_count += n;
+
+ tx_queue->tail = dp_ctx->tail;
+
+ WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number, tx_queue->tail);
+
+ tx_queue->csr_tail = tx_queue->tail;
+ dp_ctx->cached_enqueue = 0;
+
+ return 0;
+}
+
+int
+qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+
+ if (unlikely(dp_ctx->cached_dequeue != n))
+ return -1;
+
+ rx_queue->head = dp_ctx->head;
+ rx_queue->nb_processed_responses += n;
+ qp->dequeued += n;
+ qp->stats.dequeued_count += n;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+ uint32_t old_head, new_head;
+ uint32_t max_head;
+
+ old_head = rx_queue->csr_head;
+ new_head = rx_queue->head;
+ max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+ /* write out free descriptors */
+ void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+ if (new_head < old_head) {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+ max_head - old_head);
+ memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+ new_head);
+ } else {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+ old_head);
+ }
+ rx_queue->nb_processed_responses = 0;
+ rx_queue->csr_head = new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr,
+ rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+ new_head);
+ }
+
+ dp_ctx->cached_dequeue = 0;
+ return 0;
+}
+
static int
qat_sym_crypto_set_session_gen4(void *cdev, void *session)
{
@@ -390,11 +463,51 @@ qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
{
struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
struct qat_sym_session *ctx = _ctx;
- int ret;
- ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
- if (ret < 0)
- return ret;
+ raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen4;
+ raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
+ raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
+ raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen4;
+
+ if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+ !ctx->is_gmac) {
+ /* AES-GCM or AES-CCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+ raw_dp_ctx->enqueue_burst =
+ qat_sym_dp_enqueue_aead_jobs_gen1;
+ raw_dp_ctx->enqueue =
+ qat_sym_dp_enqueue_single_aead_gen1;
+ } else {
+ raw_dp_ctx->enqueue_burst =
+ qat_sym_dp_enqueue_chain_jobs_gen1;
+ raw_dp_ctx->enqueue =
+ qat_sym_dp_enqueue_single_chain_gen1;
+ }
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+ raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
+ raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+ raw_dp_ctx->enqueue_burst =
+ qat_sym_dp_enqueue_aead_jobs_gen1;
+ raw_dp_ctx->enqueue =
+ qat_sym_dp_enqueue_single_aead_gen1;
+ } else {
+ raw_dp_ctx->enqueue_burst =
+ qat_sym_dp_enqueue_cipher_jobs_gen1;
+ raw_dp_ctx->enqueue =
+ qat_sym_dp_enqueue_single_cipher_gen1;
+ }
+ } else
+ return -1;
if (ctx->is_single_pass && ctx->is_ucs) {
raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 24044bec13..b87253ae02 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -1009,6 +1009,12 @@ qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
int
qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+int
+qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
+int
+qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+
int
qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
--
2.34.1
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2024-08-12 20:44:05.841211335 +0800
+++ 0105-crypto-qat-fix-GEN4-write.patch 2024-08-12 20:44:02.405069355 +0800
@@ -1 +1 @@
-From c355c2d8e65f02fa9621249c9b2a111477230c89 Mon Sep 17 00:00:00 2001
+From c6111cb5fd8bc40a1fb3c3eacd46c12793f4f72a Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit c355c2d8e65f02fa9621249c9b2a111477230c89 ]
@@ -12 +14,0 @@
-Cc: stable at dpdk.org
@@ -22 +24 @@
-index 5e808a60bf..6a5d6e78b9 100644
+index de72383d4b..b44acece7c 100644
@@ -109 +111 @@
- int
+ static int
@@ -169 +171 @@
-index 1f5d2583c4..2c5816e696 100644
+index 24044bec13..b87253ae02 100644
@@ -172 +174 @@
-@@ -1040,6 +1040,12 @@ qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
+@@ -1009,6 +1009,12 @@ qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
More information about the stable
mailing list