[dpdk-dev] [PATCH v3 2/3] qat: add support for Snow3G
Deepak Kumar JAIN
deepak.k.jain at intel.com
Thu Mar 3 14:01:18 CET 2016
Signed-off-by: Deepak Kumar JAIN <deepak.k.jain at intel.com>
---
doc/guides/cryptodevs/qat.rst | 8 ++-
doc/guides/rel_notes/release_16_04.rst | 6 ++
drivers/crypto/qat/qat_adf/qat_algs.h | 1 +
drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 86 +++++++++++++++++++++---
drivers/crypto/qat/qat_crypto.c | 12 +++-
5 files changed, 100 insertions(+), 13 deletions(-)
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 23402b4..af52047 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2015 Intel Corporation. All rights reserved.
+ Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -47,6 +47,7 @@ Cipher algorithms:
* ``RTE_CRYPTO_SYM_CIPHER_AES128_CBC``
* ``RTE_CRYPTO_SYM_CIPHER_AES192_CBC``
* ``RTE_CRYPTO_SYM_CIPHER_AES256_CBC``
+* ``RTE_CRYPTO_SYM_CIPHER_SNOW3G_UEA2``
Hash algorithms:
@@ -54,14 +55,15 @@ Hash algorithms:
* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
* ``RTE_CRYPTO_AUTH_AES_XCBC_MAC``
+* ``RTE_CRYPTO_AUTH_SNOW3G_UIA2``
Limitations
-----------
* Chained mbufs are not supported.
-* Hash only is not supported.
-* Cipher only is not supported.
+* Hash only is not supported, except for Snow3G UIA2.
+* Cipher only is not supported, except for Snow3G UEA2.
* Only in-place is currently supported (destination address is the same as source address).
* Only supports the session-oriented API implementation (session-less APIs are not supported).
* Not performance tuned.
diff --git a/doc/guides/rel_notes/release_16_04.rst b/doc/guides/rel_notes/release_16_04.rst
index 64e913d..d8ead62 100644
--- a/doc/guides/rel_notes/release_16_04.rst
+++ b/doc/guides/rel_notes/release_16_04.rst
@@ -35,6 +35,12 @@ This section should contain new features added in this release. Sample format:
Refer to the previous release notes for examples.
+* **Added support of Snow3G (UEA2 and UIA2) for Intel Quick Assist Devices.**
+
+ Enabled support for the Snow3G wireless algorithms (UEA2 cipher and UIA2 hash)
+ on Intel Quick Assist devices. Cipher-only and hash-only operations are
+ supported, in addition to algorithm-chaining operations.
+
* **Enabled bulk allocation of mbufs.**
A new function ``rte_pktmbuf_alloc_bulk()`` has been added to allow the user
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index b73a5d0..b47dbc2 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -125,5 +125,6 @@ void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
unsigned int keylen);
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
#endif
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index bef444b..dd27476 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -376,7 +376,8 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
PMD_INIT_FUNC_TRACE();
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
+ cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
cipher =
(struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
sizeof(struct icp_qat_hw_auth_algo_blk));
@@ -409,13 +410,20 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
else
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
/* For Snow3G, set key convert and other bits */
if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ }
}
cipher->aes.cipher_config.val =
@@ -431,7 +439,6 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
/* Request template setup */
qat_alg_init_common_hdr(header);
header->service_cmd_id = cdesc->qat_cmd;
-
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
/* Configure the common header protocol flags */
@@ -447,6 +454,10 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
cipher_cd_ctrl->cipher_state_sz =
ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
} else {
cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -492,6 +503,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
{
struct icp_qat_hw_cipher_algo_blk *cipher;
struct icp_qat_hw_auth_algo_blk *hash;
+ struct icp_qat_hw_cipher_algo_blk *cipherconfig;
struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
@@ -510,7 +522,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
PMD_INIT_FUNC_TRACE();
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
+ cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
hash = (struct icp_qat_hw_auth_algo_blk *)&cdesc->cd;
cipher =
(struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
@@ -549,11 +562,13 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
else
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
cipher->aes.cipher_config.val =
ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
cdesc->qat_cipher_alg, key_convert,
cdesc->qat_dir);
- memcpy(cipher->aes.key, authkey, authkeylen);
hash->sha.inner_setup.auth_config.reserved = 0;
hash->sha.inner_setup.auth_config.config =
@@ -561,6 +576,22 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
cdesc->qat_hash_alg, digestsize);
hash->sha.inner_setup.auth_counter.counter =
rte_bswap32(qat_hash_get_block_size(cdesc->qat_hash_alg));
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
+ hash->sha.inner_setup.auth_counter.counter = 0;
+ hash->sha.outer_setup.auth_config.reserved = 0;
+ cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
+ ((char *)&cdesc->cd +
+ sizeof(struct icp_qat_hw_auth_algo_blk)
+ + 16);
+ cipherconfig->aes.cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT,
+ ICP_QAT_HW_CIPHER_ENCRYPT);
+ memcpy(cipherconfig->aes.key, authkey, authkeylen);
+ memset(cipherconfig->aes.key + authkeylen, 0,
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
+ }
/* Do precomputes */
if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
@@ -587,6 +618,9 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
ICP_QAT_HW_GALOIS_H_SZ]) =
rte_bswap32(add_auth_data_length);
proto = ICP_QAT_FW_LA_GCM_PROTO;
+ } else if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
+ proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+ state1_size = qat_hash_get_state1_size(cdesc->qat_hash_alg);
} else {
if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
authkey, authkeylen, (uint8_t *)(hash->sha.state1),
@@ -606,10 +640,25 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ }
+
/* Cipher CD config setup */
- cipher_cd_ctrl->cipher_key_sz = authkeylen >> 3;
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+ if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_AUTH) {
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset>>3;
+ } else {
+ cipher_cd_ctrl->cipher_state_sz = 0;
+ cipher_cd_ctrl->cipher_cfg_offset = 0;
+ }
/* Auth CD config setup */
hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
@@ -644,6 +693,13 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
memset(hash->sha.state1, 0, ICP_QAT_HW_GALOIS_128_STATE1_SZ);
break;
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ hash_cd_ctrl->inner_state2_sz =
+ ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
+ hash_cd_ctrl->inner_state1_sz =
+ ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ;
+ memset(hash->sha.state1, 0, ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ);
+ break;
default:
PMD_DRV_LOG(ERR, "invalid HASH alg %u", cdesc->qat_hash_alg);
return -EFAULT;
@@ -753,3 +809,15 @@ int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
}
return 0;
}
+
+int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index ad06e85..cb16aae 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -169,6 +169,14 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid SNOW3G cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_3DES_CBC:
@@ -290,6 +298,9 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_GMAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+ break;
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1:
case RTE_CRYPTO_AUTH_SHA256:
@@ -302,7 +313,6 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_MD5_HMAC:
case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_KASUMI_F9:
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
--
2.1.0
More information about the dev
mailing list