[dpdk-dev] [PATCH v2 1/2] crypto/qat: add aes-sha224-hmac capability to Intel QAT driver

Deepak Kumar Jain deepak.k.jain at intel.com
Mon Sep 12 21:46:43 CEST 2016


From: "Jain, Deepak K" <deepak.k.jain at intel.com>

Added support for aes-sha224-hmac in the Intel(R) QuickAssist driver.

Signed-off-by: Deepak Kumar Jain <deepak.k.jain at intel.com>
---
 doc/guides/cryptodevs/qat.rst                    |  1 +
 doc/guides/rel_notes/release_16_11.rst           |  1 +
 drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 33 ++++++++++++++++++++++++
 drivers/crypto/qat/qat_crypto.c                  | 25 +++++++++++++++++-
 4 files changed, 59 insertions(+), 1 deletion(-)
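
Note for reviewers (not part of the commit): below is a minimal sketch of
how an application would request the new algorithm through the cryptodev
API, assuming the 16.11-era rte_crypto_sym.h symmetric xform layout. The
64-byte key and 28-byte digest mirror the capability entry added in this
patch; device and session setup around it is omitted.

#include <rte_crypto_sym.h>

/* Illustrative only: an auth-only transform selecting SHA224-HMAC.
 * Sizes mirror the qat_pmd_capabilities entry in this patch. */
static struct rte_crypto_sym_xform
sha224_hmac_xform(uint8_t *key) /* 64-byte key, per the capability */
{
	struct rte_crypto_sym_xform xform = {
		.next = NULL,
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
			.key = { .data = key, .length = 64 },
			.digest_length = 28, /* SHA-224 digest size */
		},
	};
	return xform;
}

The returned xform can then be passed to rte_cryptodev_sym_session_create()
on a QAT device once this patch is applied.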

diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 485abb4..7f630be 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -53,6 +53,7 @@ Cipher algorithms:
 Hash algorithms:
 
 * ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
 * ``RTE_CRYPTO_AUTH_SHA256_HMAC``
 * ``RTE_CRYPTO_AUTH_SHA512_HMAC``
 * ``RTE_CRYPTO_AUTH_AES_XCBC_MAC``
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index 4f7d784..040e250 100644
--- a/doc/guides/rel_notes/release_16_11.rst
+++ b/doc/guides/rel_notes/release_16_11.rst
@@ -40,6 +40,7 @@ New Features
   The QAT PMD was updated with changes including the following:
 
   * Added support for MD5_HMAC algorithm.
+  * Added support for SHA224-HMAC algorithm.
 
 
 Resolved Issues
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 521a9c4..77e6548 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -71,6 +71,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_SHA224:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
@@ -107,6 +110,8 @@ static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	switch (qat_hash_alg) {
 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
 		return ICP_QAT_HW_SHA1_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA224:
+		return ICP_QAT_HW_SHA224_STATE1_SZ;
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
 		return ICP_QAT_HW_SHA256_STATE1_SZ;
 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
@@ -129,6 +134,8 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	switch (qat_hash_alg) {
 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
 		return SHA_CBLOCK;
+	case ICP_QAT_HW_AUTH_ALGO_SHA224:
+		return SHA256_CBLOCK;
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
 		return SHA256_CBLOCK;
 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
@@ -158,6 +165,17 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA224_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
 {
 	SHA256_CTX ctx;
@@ -220,6 +238,13 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
 			*hash_state_out_be32 =
 				rte_bswap32(*(((uint32_t *)digest)+i));
 		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA224:
+		if (partial_hash_sha224(data_in, digest))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+			*hash_state_out_be32 =
+				rte_bswap32(*(((uint32_t *)digest)+i));
+		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
 		if (partial_hash_sha256(data_in, digest))
 			return -EFAULT;
@@ -575,6 +600,14 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		}
 		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
 		break;
+	case ICP_QAT_HW_AUTH_ALGO_SHA224:
+		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
+			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+			return -EFAULT;
+		}
+		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
+		break;
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
 		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
 			authkey, authkeylen, cdesc->cd_cur_ptr,	&state1_size)) {
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index ba1daa7..a9a3ef7 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -90,6 +90,27 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* SHA224 HMAC */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+				.block_size = 64,
+				.key_size = {
+					.min = 64,
+					.max = 64,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 28,
+					.max = 28,
+					.increment = 0
+				},
+				.aad_size = { 0 }
+			}, }
+		}, }
+	},
 	{	/* SHA256 HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
@@ -539,6 +560,9 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
 		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+		break;
 	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
 		break;
@@ -556,7 +580,6 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_AUTH_SHA256:
 	case RTE_CRYPTO_AUTH_SHA512:
 	case RTE_CRYPTO_AUTH_SHA224:
-	case RTE_CRYPTO_AUTH_SHA224_HMAC:
 	case RTE_CRYPTO_AUTH_SHA384:
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
 	case RTE_CRYPTO_AUTH_MD5:
-- 
2.5.5
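
Postscript (reviewer note, not part of the patch): partial_hash_sha224()
above runs a single SHA256_Transform() round because SHA-224 reuses the
SHA-256 compression function and 64-byte block; only the initial vector
set by SHA224_Init() differs, and OpenSSL's SHA256_CTX begins with the
h[0..7] state words, which is why copying SHA256_DIGEST_LENGTH bytes of
the context captures the partial state. For context, a hedged sketch of
the standard HMAC ipad/opad precompute that qat_alg_do_precomputes()
(not shown in this patch) performs around that helper; the function name
hmac_sha224_precompute is hypothetical:

#include <stdint.h>
#include <string.h>
#include <openssl/sha.h>

static int
hmac_sha224_precompute(const uint8_t *key, size_t keylen,
		       uint8_t ipad_state[SHA256_DIGEST_LENGTH],
		       uint8_t opad_state[SHA256_DIGEST_LENGTH])
{
	uint8_t ipad[SHA256_CBLOCK], opad[SHA256_CBLOCK];
	SHA256_CTX ctx;
	size_t i;

	if (keylen > SHA256_CBLOCK)
		return -1; /* longer keys are hashed down first (RFC 2104) */

	/* XOR the key into the HMAC inner/outer pad constants */
	memset(ipad, 0x36, sizeof(ipad));
	memset(opad, 0x5c, sizeof(opad));
	for (i = 0; i < keylen; i++) {
		ipad[i] ^= key[i];
		opad[i] ^= key[i];
	}

	/* One compression round per pad block; the resulting internal
	 * states become state1/state2 in the content descriptor. */
	if (!SHA224_Init(&ctx))
		return -1;
	SHA256_Transform(&ctx, ipad);
	memcpy(ipad_state, &ctx, SHA256_DIGEST_LENGTH);

	if (!SHA224_Init(&ctx))
		return -1;
	SHA256_Transform(&ctx, opad);
	memcpy(opad_state, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

The driver then byte-swaps each 32-bit state word to big-endian before
loading it into hardware, as partial_hash_compute() does in the hunk
above.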


