[dpdk-dev v3] crypto/qat: use intel-ipsec-mb for partial hash & aes
Zhang, Roy Fan
roy.fan.zhang at intel.com
Thu May 26 10:42:06 CEST 2022
Hi Kai,
> -----Original Message-----
> From: Ji, Kai <kai.ji at intel.com>
> Sent: Wednesday, May 25, 2022 5:52 PM
> To: dev at dpdk.org
> Cc: Zhang, Roy Fan <roy.fan.zhang at intel.com>; Richardson, Bruce
> <bruce.richardson at intel.com>; gakhil at marvell.com; Ji, Kai <kai.ji at intel.com>
> Subject: [dpdk-dev v3] crypto/qat: use intel-ipsec-mb for partial hash & aes
>
> OpenSSL 3.0 deprecates the low-level APIs that QAT requires to perform
> partial hash & AES operations when creating a session. This patch adds
> the qat_ipsec_mb_lib driver parameter to allow the QAT PMD to switch
> APIs between OpenSSL and the intel-ipsec-mb library.
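Just to check that I read the intent correctly: session creation would pick the
precompute backend based on this devarg, roughly along the lines of the sketch
below. The dispatch helper, the use_ipsec_mb flag and the OpenSSL-path function
name are only illustrative assumptions on my side, not code from this patch:

/*
 * Illustrative dispatch only; every name here other than
 * qat_sym_do_precomputes_ipsec_mb() is an assumption for this sketch.
 */
static int
qat_sym_do_precomputes_dispatch(enum icp_qat_hw_auth_algo hash_alg,
                const uint8_t *auth_key, uint16_t auth_keylen,
                uint8_t *p_state_buf, uint16_t *p_state_len,
                uint8_t aes_cmac, uint8_t use_ipsec_mb)
{
        if (use_ipsec_mb)
                /* new path: intel-ipsec-mb based precompute */
                return qat_sym_do_precomputes_ipsec_mb(hash_alg, auth_key,
                                auth_keylen, p_state_buf, p_state_len,
                                aes_cmac);

        /* existing path: OpenSSL based precompute (hypothetical name) */
        return qat_sym_do_precomputes_openssl(hash_alg, auth_key,
                        auth_keylen, p_state_buf, p_state_len, aes_cmac);
}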
>
> Signed-off-by: Kai Ji <kai.ji at intel.com>
> Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
> ---
<snip>
> +static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
> +                const uint8_t *auth_key,
> +                uint16_t auth_keylen,
> +                uint8_t *p_state_buf,
> +                uint16_t *p_state_len,
> +                uint8_t aes_cmac)
> +{
> +        int block_size;
> +        uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
> +        uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
> +        int i;
> +
> +        IMB_MGR *m;
> +        m = alloc_mb_mgr(0);
> +        if (m == NULL)
> +                return -ENOMEM;
> +
> +        init_mb_mgr_auto(m, NULL);
> +
> +        if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
> +
> +                /* CMAC */
> +                if (aes_cmac) {
> +                        uint8_t *in = NULL;
> +                        uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
> +                        uint8_t *k1, *k2;
> +
> +                        auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
> +
> +                        in = rte_zmalloc("AES CMAC K1",
> +                                        ICP_QAT_HW_AES_128_KEY_SZ, 16);
> +
> +                        if (in == NULL) {
> +                                QAT_LOG(ERR, "Failed to alloc memory");
> +                                return -ENOMEM;
> +                        }
> +
> +                        rte_memcpy(in, AES_CMAC_SEED,
> +                                        ICP_QAT_HW_AES_128_KEY_SZ);
> +                        rte_memcpy(p_state_buf, auth_key, auth_keylen);
> +
> +                        DECLARE_ALIGNED(uint32_t expkey[4*15], 16);
> +                        DECLARE_ALIGNED(uint32_t dust[4*15], 16);
> +                        IMB_AES_KEYEXP_128(m, p_state_buf, expkey, dust);
> +                        k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
> +                        k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
> +
> +                        IMB_AES_CMAC_SUBKEY_GEN_128(m, expkey, k1, k2);
> +                        memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
> +                        *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> +                        rte_free(in);
> +                        free_mb_mgr(m);
> +                        return 0;
> +                }
> +
> +                static uint8_t qat_aes_xcbc_key_seed[
> +                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
> +                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> +                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
> +                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> +                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
> +                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> +                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
> +                };
> +
> +                uint8_t *in = NULL;
> +                uint8_t *out = p_state_buf;
> +                int x;
> +
> +                in = rte_zmalloc("working mem for key",
> +                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
> +                if (in == NULL) {
> +                        QAT_LOG(ERR, "Failed to alloc memory");
> +                        return -ENOMEM;
> +                }
> +
> +                rte_memcpy(in, qat_aes_xcbc_key_seed,
> +                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> +                for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
> +                        if (aes_ipsecmb_job(in, out, m, auth_key,
> +                                        auth_keylen)) {
> +                                rte_free(in -
> +                                        (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
> +                                memset(out -
> +                                        (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
> +                                        0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
> +                                return -EFAULT;
> +                        }
> +
> +                        in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> +                        out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
> +                }
> +                *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
> +                rte_free(in - x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
> +                free_mb_mgr(m);
> +                return 0;
> +
> +        } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
> +                        (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
> +                uint8_t *in = NULL;
> +                uint8_t *out = p_state_buf;
> +
> +                memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
> +                                ICP_QAT_HW_GALOIS_LEN_A_SZ +
> +                                ICP_QAT_HW_GALOIS_E_CTR0_SZ);
> +                in = rte_zmalloc("working mem for key",
> +                                ICP_QAT_HW_GALOIS_H_SZ, 16);
> +                if (in == NULL) {
> +                        QAT_LOG(ERR, "Failed to alloc memory");
> +                        return -ENOMEM;
> +                }
> +
> +                memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
> +                if (aes_ipsecmb_job(in, out, m, auth_key, auth_keylen))
> +                        return -EFAULT;
> +
> +                *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
> +                                ICP_QAT_HW_GALOIS_LEN_A_SZ +
> +                                ICP_QAT_HW_GALOIS_E_CTR0_SZ;
> +                rte_free(in);
> +                free_mb_mgr(m);
> +                return 0;
> +        }
> +
From this point on, mb_mgr is not freed, whether an error happens or the
function exits normally. One possible way to handle this is sketched after
the quoted function below.
> +        block_size = qat_hash_get_block_size(hash_alg);
> +        if (block_size < 0)
> +                return block_size;
> +        /* init ipad and opad from key and xor with fixed values */
> +        memset(ipad, 0, block_size);
> +        memset(opad, 0, block_size);
> +
> +        if (auth_keylen > (unsigned int)block_size) {
> +                QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
> +                return -EFAULT;
> +        }
> +        rte_memcpy(ipad, auth_key, auth_keylen);
> +        rte_memcpy(opad, auth_key, auth_keylen);
> +
> +        for (i = 0; i < block_size; i++) {
> +                uint8_t *ipad_ptr = ipad + i;
> +                uint8_t *opad_ptr = opad + i;
> +                *ipad_ptr ^= HMAC_IPAD_VALUE;
> +                *opad_ptr ^= HMAC_OPAD_VALUE;
> +        }
> +
> +        /* do partial hash of ipad and copy to state1 */
> +        if (partial_hash_compute_ipsec_mb(hash_alg, ipad, p_state_buf)) {
> +                memset(ipad, 0, block_size);
> +                memset(opad, 0, block_size);
> +                QAT_LOG(ERR, "ipad precompute failed");
> +                return -EFAULT;
> +        }
> +
> +        /*
> +         * State len is a multiple of 8, so may be larger than the digest.
> +         * Put the partial hash of opad state_len bytes after state1
> +         */
> +        *p_state_len = qat_hash_get_state1_size(hash_alg);
> +        if (partial_hash_compute_ipsec_mb(hash_alg, opad,
> +                        p_state_buf + *p_state_len)) {
> +                memset(ipad, 0, block_size);
> +                memset(opad, 0, block_size);
> +                QAT_LOG(ERR, "opad precompute failed");
> +                return -EFAULT;
> +        }
> +
> +        /* don't leave data lying around */
> +        memset(ipad, 0, block_size);
> +        memset(opad, 0, block_size);
> +        return 0;
> +}
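Coming back to the mb_mgr comment above: one way to address it, sketched very
roughly below, is to route every exit after the manager allocation through a
single cleanup label so free_mb_mgr() runs on both the success and the error
paths. The helper in this snippet is hypothetical and only stands in for the
real ipad/opad and key-expansion work, so please treat it as a pattern
suggestion rather than drop-in code for the patch:

#include <errno.h>
#include <stdint.h>
#include <stddef.h>
#include <intel-ipsec-mb.h>

/* Hypothetical stand-in for the real precompute steps. */
int
do_precompute_step(IMB_MGR *m, const uint8_t *key, uint16_t keylen)
{
        (void)m;
        return (key == NULL || keylen == 0) ? -EFAULT : 0;
}

int
precompute_with_cleanup(const uint8_t *key, uint16_t keylen)
{
        IMB_MGR *m;
        int ret;

        m = alloc_mb_mgr(0);
        if (m == NULL)
                return -ENOMEM;

        init_mb_mgr_auto(m, NULL);

        ret = do_precompute_step(m, key, keylen);
        if (ret != 0)
                goto out;

        /* any further step that fails would also jump to 'out' */

out:
        free_mb_mgr(m); /* released on every path, success or error */
        return ret;
}

With that structure, future additions to the function cannot forget to
release the manager either.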