[dpdk-dev] [PATCH v3 18/26] cryptodev: set AES-GMAC as auth-only algo

Pablo de Lara <pablo.de.lara.guarch@intel.com>
Thu Jun 29 13:35:13 CEST 2017


AES-GMAC is an authentication algorithm based on AES-GCM
without encryption. To simplify its usage, it can now be
configured with only the authentication parameters, without
requiring a chained cipher transform.

Therefore, setting the AAD is no longer required; instead, the
authentication data length and offset are set, which also gives
the user the option to use a scatter-gather list in the input
buffer, as long as the driver supports it.
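
For illustration, the new usage looks roughly as below. This is a
minimal sketch, not code from this patch: it assumes an already
configured device 'dev_id', an operation mempool 'op_mpool', a
filled mbuf 'm' holding 'plaintext_len' bytes, a 16-byte 'key', a
12-byte 'iv' copied at 'IV_OFFSET' in the operation private data,
and a digest buffer 'digest_buf'/'digest_phys'; all of these names
are illustrative.

    #include <string.h>
    #include <rte_cryptodev.h>
    #include <rte_crypto_sym.h>

    /* Single auth-only transform: no chained cipher xform is needed */
    struct rte_crypto_sym_xform auth_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .next = NULL,
        .auth = {
            .algo = RTE_CRYPTO_AUTH_AES_GMAC,
            .op = RTE_CRYPTO_AUTH_OP_GENERATE,
            .key = { .data = key, .length = 16 },
            .digest_length = 16,
            .add_auth_data_length = 0,  /* AAD is not used for GMAC */
            .iv = { .offset = IV_OFFSET, .length = 12 },
        },
    };

    struct rte_cryptodev_sym_session *sess =
        rte_cryptodev_sym_session_create(dev_id, &auth_xform);

    /*
     * Per operation, the data to authenticate is described by
     * auth.data.offset/length (instead of AAD), so it may live in a
     * segmented mbuf if the PMD supports SGL.
     */
    struct rte_crypto_op *op =
        rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
    rte_crypto_op_attach_sym_session(op, sess);
    op->sym->m_src = m;
    op->sym->auth.data.offset = 0;
    op->sym->auth.data.length = plaintext_len;
    op->sym->auth.digest.data = digest_buf;
    op->sym->auth.digest.phys_addr = digest_phys;
    memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET), iv, 12);

The digest is then generated (or, with RTE_CRYPTO_AUTH_OP_VERIFY,
checked) over auth.data, which is what the unit tests below exercise.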

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 app/test-crypto-perf/cperf_options_parsing.c     |   3 +-
 app/test-crypto-perf/cperf_test_vectors.c        |   5 -
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c         | 169 ++++++++++++++++-------
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c     |  12 +-
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h |   4 +-
 drivers/crypto/openssl/rte_openssl_pmd.c         |  52 +++++--
 drivers/crypto/openssl/rte_openssl_pmd_ops.c     |   9 +-
 drivers/crypto/qat/qat_crypto.c                  | 151 ++++++++++++++------
 drivers/crypto/qat/qat_crypto_capabilities.h     |  11 +-
 lib/librte_cryptodev/rte_crypto_sym.h            |  39 +-----
 test/test/test_cryptodev.c                       | 159 ++++++++++-----------
 test/test/test_cryptodev_gcm_test_vectors.h      |  29 +---
 12 files changed, 374 insertions(+), 269 deletions(-)

diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index 70b6a60..5c2dcff 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -820,8 +820,7 @@ cperf_options_check(struct cperf_options *options)
 	if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM ||
 			options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CCM ||
 			options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM ||
-			options->auth_algo == RTE_CRYPTO_AUTH_AES_CCM ||
-			options->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			options->auth_algo == RTE_CRYPTO_AUTH_AES_CCM) {
 		if (options->op_type != CPERF_AEAD) {
 			RTE_LOG(ERR, USER1, "Use --optype aead\n");
 			return -EINVAL;
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index b67d0f4..2e5339c 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -456,11 +456,6 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
 			t_vec->auth_key.data = NULL;
 			aad_alloc = 1;
 			break;
-		case RTE_CRYPTO_AUTH_AES_GMAC:
-			/* auth key should be the same as cipher key */
-			t_vec->auth_key.data = cipher_key;
-			aad_alloc = 1;
-			break;
 		default:
 			t_vec->auth_key.data = auth_key;
 			aad_alloc = 0;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index fcf0f8b..36372a6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -79,35 +79,74 @@ aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
 	const struct rte_crypto_sym_xform *auth_xform;
 	const struct rte_crypto_sym_xform *cipher_xform;
 	uint16_t digest_length;
+	uint8_t key_length;
+	uint8_t *key;
 
-	if (xform->next == NULL || xform->next->next != NULL) {
-		GCM_LOG_ERR("Two and only two chained xform required");
-		return -EINVAL;
-	}
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		auth_xform = xform->next;
-		cipher_xform = xform;
-	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
-			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+	/* AES-GMAC */
+	if (xform->next == NULL) {
 		auth_xform = xform;
-		cipher_xform = xform->next;
+		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
+			GCM_LOG_ERR("Only AES GMAC is supported as an "
+					"authentication only algorithm");
+			return -EINVAL;
+		}
+		/* Set IV parameters */
+		sess->iv.offset = auth_xform->auth.iv.offset;
+		sess->iv.length = auth_xform->auth.iv.length;
+
+		/* Select Crypto operation */
+		if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+			sess->op = AESNI_GMAC_OP_GENERATE;
+		else
+			sess->op = AESNI_GMAC_OP_VERIFY;
+
+		key_length = auth_xform->auth.key.length;
+		key = auth_xform->auth.key.data;
+	/* AES-GCM */
 	} else {
-		GCM_LOG_ERR("Cipher and auth xform required");
-		return -EINVAL;
-	}
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+				xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+			auth_xform = xform->next;
+			cipher_xform = xform;
+		} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+				xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+			auth_xform = xform;
+			cipher_xform = xform->next;
+		} else {
+			GCM_LOG_ERR("Cipher and auth xform required "
+					"when using AES GCM");
+			return -EINVAL;
+		}
 
-	if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
-		(auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
-			auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
-		GCM_LOG_ERR("We only support AES GCM and AES GMAC");
-		return -EINVAL;
-	}
+		if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+				(auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM))) {
+			GCM_LOG_ERR("The only combined operation "
+						"supported is AES GCM");
+			return -EINVAL;
+		}
 
-	/* Set IV parameters */
-	sess->iv.offset = cipher_xform->cipher.iv.offset;
-	sess->iv.length = cipher_xform->cipher.iv.length;
+		/* Set IV parameters */
+		sess->iv.offset = cipher_xform->cipher.iv.offset;
+		sess->iv.length = cipher_xform->cipher.iv.length;
+
+		/* Select Crypto operation */
+		if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+				auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+			sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+		else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+				auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+			sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+		else {
+			GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
+					" Decrypt/Verify are valid only");
+			return -EINVAL;
+		}
+
+		key_length = cipher_xform->cipher.key.length;
+		key = cipher_xform->cipher.key.data;
+
+		sess->aad_length = auth_xform->auth.add_auth_data_length;
+	}
 
 	/* IV check */
 	if (sess->iv.length != 16 && sess->iv.length != 12 &&
@@ -116,39 +155,25 @@ aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
 		return -EINVAL;
 	}
 
-	/* Select Crypto operation */
-	if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
-			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
-		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
-	else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
-			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
-		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-	else {
-		GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
-				" Decrypt/Verify are valid only");
-		return -EINVAL;
-	}
-
 	digest_length = auth_xform->auth.digest_length;
 
 	/* Check key length and calculate GCM pre-compute. */
-	switch (cipher_xform->cipher.key.length) {
+	switch (key_length) {
 	case 16:
-		aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
+		aesni_gcm128_pre(key, &sess->gdata);
 		sess->key = AESNI_GCM_KEY_128;
 
 		break;
 	case 32:
-		aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
+		aesni_gcm256_pre(key, &sess->gdata);
 		sess->key = AESNI_GCM_KEY_256;
 
 		break;
 	default:
-		GCM_LOG_ERR("Unsupported cipher key length");
+		GCM_LOG_ERR("Unsupported cipher/auth key length");
 		return -EINVAL;
 	}
 
-	sess->aad_length = auth_xform->auth.add_auth_data_length;
 	/* Digest check */
 	if (digest_length != 16 &&
 			digest_length != 12 &&
@@ -211,9 +236,20 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 	uint8_t *iv_ptr;
 	struct rte_crypto_sym_op *sym_op = op->sym;
 	struct rte_mbuf *m_src = sym_op->m_src;
-	uint32_t offset = sym_op->cipher.data.offset;
+	uint32_t offset, data_offset, data_length;
 	uint32_t part_len, total_len, data_len;
 
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
+			session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+		offset = sym_op->cipher.data.offset;
+		data_offset = offset;
+		data_length = sym_op->cipher.data.length;
+	} else {
+		offset = sym_op->auth.data.offset;
+		data_offset = offset;
+		data_length = sym_op->auth.data.length;
+	}
+
 	RTE_ASSERT(m_src != NULL);
 
 	while (offset >= m_src->data_len) {
@@ -224,12 +260,12 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 	}
 
 	data_len = m_src->data_len - offset;
-	part_len = (data_len < sym_op->cipher.data.length) ? data_len :
-			sym_op->cipher.data.length;
+	part_len = (data_len < data_length) ? data_len :
+			data_length;
 
 	/* Destination buffer is required when segmented source buffer */
-	RTE_ASSERT((part_len == sym_op->cipher.data.length) ||
-			((part_len != sym_op->cipher.data.length) &&
+	RTE_ASSERT((part_len == data_length) ||
+			((part_len != data_length) &&
 					(sym_op->m_dst != NULL)));
 	/* Segmented destination buffer is not supported */
 	RTE_ASSERT((sym_op->m_dst == NULL) ||
@@ -239,9 +275,9 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 
 	dst = sym_op->m_dst ?
 			rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
-					sym_op->cipher.data.offset) :
+					data_offset) :
 			rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
-					sym_op->cipher.data.offset);
+					data_offset);
 
 	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
 
@@ -265,7 +301,7 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 
 		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
 				(uint64_t)part_len);
-		total_len = sym_op->cipher.data.length - part_len;
+		total_len = data_length - part_len;
 
 		while (total_len) {
 			dst += part_len;
@@ -286,7 +322,7 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 		aesni_gcm_enc[session->key].finalize(&session->gdata,
 				sym_op->auth.digest.data,
 				(uint64_t)session->digest_length);
-	} else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
+	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
 		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
 				sym_op->m_dst : sym_op->m_src,
 				session->digest_length);
@@ -303,7 +339,7 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 
 		aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
 				(uint64_t)part_len);
-		total_len = sym_op->cipher.data.length - part_len;
+		total_len = data_length - part_len;
 
 		while (total_len) {
 			dst += part_len;
@@ -324,6 +360,32 @@ process_gcm_crypto_op(struct rte_crypto_op *op,
 		aesni_gcm_dec[session->key].finalize(&session->gdata,
 				auth_tag,
 				(uint64_t)session->digest_length);
+	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
+		aesni_gcm_enc[session->key].init(&session->gdata,
+				iv_ptr,
+				src,
+				(uint64_t)data_length);
+		aesni_gcm_enc[session->key].finalize(&session->gdata,
+				sym_op->auth.digest.data,
+				(uint64_t)session->digest_length);
+	} else { /* AESNI_GMAC_OP_VERIFY */
+		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+				sym_op->m_dst : sym_op->m_src,
+				session->digest_length);
+
+		if (!auth_tag) {
+			GCM_LOG_ERR("auth_tag");
+			return -1;
+		}
+
+		aesni_gcm_dec[session->key].init(&session->gdata,
+				iv_ptr,
+				src,
+				(uint64_t)data_length);
+
+		aesni_gcm_dec[session->key].finalize(&session->gdata,
+				auth_tag,
+				(uint64_t)session->digest_length);
 	}
 
 	return 0;
@@ -350,7 +412,8 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
 	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 
 	/* Verify digest if required */
-	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
+			session->op == AESNI_GMAC_OP_VERIFY) {
 
 		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
 				m->data_len - session->digest_length);
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 542e6c4..39285d0 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -56,12 +56,12 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
 					.max = 16,
 					.increment = 4
 				},
-				.aad_size = {
-					.min = 0,
-					.max = 65535,
-					.increment = 1
-				},
-				.iv_size = { 0 }
+				.aad_size = { 0 },
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
 			}, }
 		}, }
 	},
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 05fabe6..9dea80d 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -80,7 +80,9 @@ struct aesni_gcm_qp {
 
 enum aesni_gcm_operation {
 	AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
-	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
+	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION,
+	AESNI_GMAC_OP_GENERATE,
+	AESNI_GMAC_OP_VERIFY
 };
 
 enum aesni_gcm_key {
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 46b1dd8..11260d8 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -330,13 +330,41 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
 
 	/* Select auth algo */
 	switch (xform->auth.algo) {
-	case RTE_CRYPTO_AUTH_AES_GMAC:
 	case RTE_CRYPTO_AUTH_AES_GCM:
-		/* Check additional condition for AES_GMAC/GCM */
+		/* Check additional condition for AES_GCM */
 		if (sess->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM)
 			return -EINVAL;
 		sess->chain_order = OPENSSL_CHAIN_COMBINED;
 		break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
+		/* Set IV parameters */
+		sess->iv.offset = xform->auth.iv.offset;
+		sess->iv.length = xform->auth.iv.length;
+
+		/*
+		 * OpenSSL requires GMAC to be a GCM operation
+		 * with no cipher data length
+		 */
+		sess->cipher.mode = OPENSSL_CIPHER_LIB;
+		if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE)
+			sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+		else
+			sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+		sess->cipher.key.length = xform->auth.key.length;
+		sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+		if (get_cipher_algo(RTE_CRYPTO_CIPHER_AES_GCM,
+				sess->cipher.key.length,
+				&sess->cipher.evp_algo) != 0)
+			return -EINVAL;
+
+		get_cipher_key(xform->auth.key.data, xform->auth.key.length,
+			sess->cipher.key.data);
+
+		break;
 
 	case RTE_CRYPTO_AUTH_MD5:
 	case RTE_CRYPTO_AUTH_SHA1:
@@ -923,6 +951,7 @@ process_openssl_combined_op
 	/* cipher */
 	uint8_t *dst = NULL, *iv, *tag, *aad;
 	int srclen, ivlen, aadlen, status = -1;
+	uint32_t offset;
 
 	/*
 	 * Segmented destination buffer is not supported for
@@ -936,32 +965,37 @@ process_openssl_combined_op
 	iv = rte_crypto_op_ctod_offset(op, uint8_t *,
 			sess->iv.offset);
 	ivlen = sess->iv.length;
-	aad = op->sym->auth.aad.data;
-	aadlen = sess->auth.aad_length;
-
 	tag = op->sym->auth.digest.data;
 	if (tag == NULL)
 		tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
 				op->sym->cipher.data.offset +
 				op->sym->cipher.data.length);
 
-	if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
+	if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
 		srclen = 0;
-	else {
+		offset = op->sym->auth.data.offset;
+		aadlen = op->sym->auth.data.length;
+		aad = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+				op->sym->auth.data.offset);
+
+	} else {
 		srclen = op->sym->cipher.data.length;
 		dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
 				op->sym->cipher.data.offset);
+		offset = op->sym->cipher.data.offset;
+		aad = op->sym->auth.aad.data;
+		aadlen = sess->auth.aad_length;
 	}
 
 	if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
 		status = process_openssl_auth_encryption_gcm(
-				mbuf_src, op->sym->cipher.data.offset, srclen,
+				mbuf_src, offset, srclen,
 				aad, aadlen, iv, ivlen, sess->cipher.key.data,
 				dst, tag, sess->cipher.ctx,
 				sess->cipher.evp_algo);
 	else
 		status = process_openssl_auth_decryption_gcm(
-				mbuf_src, op->sym->cipher.data.offset, srclen,
+				mbuf_src, offset, srclen,
 				aad, aadlen, iv, ivlen, sess->cipher.key.data,
 				dst, tag, sess->cipher.ctx,
 				sess->cipher.evp_algo);
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 3026dbd..fc525d9 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -407,12 +407,11 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
 					.max = 16,
 					.increment = 0
 				},
-				.aad_size = {
-					.min = 8,
-					.max = 65532,
+				.iv_size = {
+					.min = 12,
+					.max = 16,
 					.increment = 4
-				},
-				.iv_size = { 0 }
+				}
 			}, }
 		}, }
 	},
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index b365c8d..81f7a1f 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -518,6 +518,8 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
 	struct qat_pmd_private *internals = dev->data->dev_private;
 	auth_xform = qat_get_auth_xform(xform);
+	uint8_t *key_data = auth_xform->key.data;
+	uint8_t key_length = auth_xform->key.length;
 
 	switch (auth_xform->algo) {
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
@@ -539,10 +541,22 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
 		break;
 	case RTE_CRYPTO_AUTH_AES_GCM:
+		cipher_xform = qat_get_cipher_xform(xform);
+
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
+		key_data = cipher_xform->key.data;
+		key_length = cipher_xform->key.length;
 		break;
 	case RTE_CRYPTO_AUTH_AES_GMAC:
+		if (qat_alg_validate_aes_key(auth_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			PMD_DRV_LOG(ERR, "Invalid AES key size");
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
 		break;
 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
@@ -582,30 +596,62 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 				auth_xform->algo);
 		goto error_out;
 	}
-	cipher_xform = qat_get_cipher_xform(xform);
 
 	session->auth_iv.offset = auth_xform->iv.offset;
 	session->auth_iv.length = auth_xform->iv.length;
 
-	if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
-			(session->qat_hash_alg ==
-				ICP_QAT_HW_AUTH_ALGO_GALOIS_64))  {
-		if (qat_alg_aead_session_create_content_desc_auth(session,
-				cipher_xform->key.data,
-				cipher_xform->key.length,
-				auth_xform->add_auth_data_length,
-				auth_xform->digest_length,
-				auth_xform->op))
-			goto error_out;
+	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+			/*
+			 * It needs to create cipher desc content first,
+			 * then authentication
+			 */
+			if (qat_alg_aead_session_create_content_desc_cipher(session,
+						auth_xform->key.data,
+						auth_xform->key.length))
+				goto error_out;
+
+			if (qat_alg_aead_session_create_content_desc_auth(session,
+						key_data,
+						key_length,
+						0,
+						auth_xform->digest_length,
+						auth_xform->op))
+				goto error_out;
+		} else {
+			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+			/*
+			 * It needs to create authentication desc content first,
+			 * then cipher
+			 */
+			if (qat_alg_aead_session_create_content_desc_auth(session,
+					key_data,
+					key_length,
+					0,
+					auth_xform->digest_length,
+					auth_xform->op))
+				goto error_out;
+
+			if (qat_alg_aead_session_create_content_desc_cipher(session,
+						auth_xform->key.data,
+						auth_xform->key.length))
+				goto error_out;
+		}
+		/* Restore to authentication only */
+		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
 	} else {
 		if (qat_alg_aead_session_create_content_desc_auth(session,
-				auth_xform->key.data,
-				auth_xform->key.length,
+				key_data,
+				key_length,
 				auth_xform->add_auth_data_length,
 				auth_xform->digest_length,
 				auth_xform->op))
 			goto error_out;
 	}
+
 	session->digest_length = auth_xform->digest_length;
 	return session;
 
@@ -892,6 +938,28 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
 	return 0;
 }
 
+static inline void
+set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
+		struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_op *op,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
+		rte_memcpy(cipher_param->u.cipher_IV_array,
+				rte_crypto_op_ctod_offset(op, uint8_t *,
+					iv_offset),
+				iv_length);
+	} else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr =
+				rte_crypto_op_ctophys_offset(op,
+					iv_offset);
+	}
+}
+
 static inline int
 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 		struct qat_crypto_op_cookie *qat_op_cookie)
@@ -907,7 +975,6 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 	uint32_t min_ofs = 0;
 	uint64_t src_buf_start = 0, dst_buf_start = 0;
 	uint8_t do_sgl = 0;
-	uint8_t *cipher_iv_ptr = NULL;
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
 	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
@@ -980,22 +1047,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 			cipher_ofs = op->sym->cipher.data.offset;
 		}
 
-		cipher_iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
-					ctx->cipher_iv.offset);
-		/* copy IV into request if it fits */
-		if (ctx->cipher_iv.length <=
-				sizeof(cipher_param->u.cipher_IV_array)) {
-			rte_memcpy(cipher_param->u.cipher_IV_array,
-					cipher_iv_ptr,
-					ctx->cipher_iv.length);
-		} else {
-			ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-					qat_req->comn_hdr.serv_specif_flags,
-					ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-			cipher_param->u.s.cipher_IV_ptr =
-					rte_crypto_op_ctophys_offset(op,
-						ctx->cipher_iv.offset);
-		}
+		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+				cipher_param, op, qat_req);
 		min_ofs = cipher_ofs;
 	}
 
@@ -1034,10 +1087,18 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
 				ctx->qat_hash_alg ==
 					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-			auth_ofs = op->sym->cipher.data.offset;
-			auth_len = op->sym->cipher.data.length;
-
-			auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
+			/* AES-GCM */
+			if (do_cipher) {
+				auth_ofs = op->sym->cipher.data.offset;
+				auth_len = op->sym->cipher.data.length;
+
+				auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
+			/* AES-GMAC */
+			} else {
+				set_cipher_iv(ctx->auth_iv.length,
+					ctx->auth_iv.offset,
+					cipher_param, op, qat_req);
+			}
 		} else {
 			auth_ofs = op->sym->auth.data.offset;
 			auth_len = op->sym->auth.data.length;
@@ -1154,7 +1215,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
-		if (ctx->cipher_iv.length == 12) {
+		if (ctx->cipher_iv.length == 12 ||
+				ctx->auth_iv.length == 12) {
 			/*
 			 * For GCM a 12 bit IV is allowed,
 			 * but we need to inform the f/w
@@ -1163,20 +1225,13 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 				qat_req->comn_hdr.serv_specif_flags,
 				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
 		}
-		if (op->sym->cipher.data.length == 0) {
-			/*
-			 * GMAC
-			 */
-			qat_req->comn_mid.dest_data_addr =
-				qat_req->comn_mid.src_data_addr =
-						op->sym->auth.aad.phys_addr;
+		/* GMAC */
+		if (!do_cipher) {
 			qat_req->comn_mid.dst_length =
 				qat_req->comn_mid.src_length =
 					rte_pktmbuf_data_len(op->sym->m_src);
-			cipher_param->cipher_length = 0;
-			cipher_param->cipher_offset = 0;
 			auth_param->u1.aad_adr = 0;
-			auth_param->auth_len = ctx->aad_len;
+			auth_param->auth_len = op->sym->auth.data.length;
 			auth_param->auth_off = op->sym->auth.data.offset;
 			auth_param->u2.aad_sz = 0;
 		}
@@ -1188,9 +1243,13 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
 	rte_hexdump(stdout, "src_data:",
 			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
 			rte_pktmbuf_data_len(op->sym->m_src));
-	if (do_cipher)
+	if (do_cipher) {
+		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
+						uint8_t *,
+						ctx->cipher_iv.offset);
 		rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
 				ctx->cipher_iv.length);
+	}
 
 	if (do_auth) {
 		if (ctx->auth_iv.length) {
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index fbff148..d863ccd 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -232,12 +232,11 @@
 					.max = 16,			\
 					.increment = 4			\
 				},					\
-				.aad_size = {				\
-					.min = 1,			\
-					.max = 65535,			\
-					.increment = 1			\
-				},					\
-				.iv_size = { 0 }			\
+				.iv_size = {				\
+					.min = 12,			\
+					.max = 12,			\
+					.increment = 0			\
+				}					\
 			}, }						\
 		}, }							\
 	},								\
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index de4031a..f174e12 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -84,11 +84,10 @@ enum rte_crypto_cipher_algorithm {
 	/**< AES algorithm in F8 mode */
 	RTE_CRYPTO_CIPHER_AES_GCM,
 	/**< AES algorithm in GCM mode. When this cipher algorithm is used the
-	 * *RTE_CRYPTO_AUTH_AES_GCM* or *RTE_CRYPTO_AUTH_AES_GMAC* element
-	 * of the *rte_crypto_auth_algorithm* enum MUST be used to set up
-	 * the related *rte_crypto_auth_setup_data* structure in the session
-	 * context or in the op_params of the crypto operation structure
-	 * in the case of a session-less crypto operation.
+	 * *RTE_CRYPTO_AUTH_AES_GCM* element of the *rte_crypto_auth_algorithm*
+	 * enum MUST be used to set up the related *rte_crypto_auth_setup_data*
+	 * structure in the session context or in the op_params of the crypto
+	 * operation structure in the case of a session-less crypto operation.
 	 */
 	RTE_CRYPTO_CIPHER_AES_XTS,
 	/**< AES algorithm in XTS mode */
@@ -268,13 +267,7 @@ enum rte_crypto_auth_algorithm {
 	 * op_params parameter MUST be set for a session-less crypto operation.
 	 */
 	RTE_CRYPTO_AUTH_AES_GMAC,
-	/**< AES GMAC algorithm. When this hash algorithm
-	* is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
-	* rte_crypto_cipher_algorithm enum MUST be used to set up the related
-	* rte_crypto_cipher_setup_data structure in the session context,  or
-	* the corresponding parameter in the crypto operation data structures
-	* op_params parameter MUST be set for a session-less crypto operation.
-	*/
+	/**< AES GMAC algorithm. */
 	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
 	/**< AES XCBC algorithm. */
 
@@ -384,11 +377,6 @@ struct rte_crypto_auth_xform {
 	 *   block B0 and the encoded length.  The maximum permitted value in
 	 *   this case is 222 bytes.
 	 *
-	 * @note
-	 *  For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
-	 *  this field is not used and should be set to 0. Instead the length
-	 *  of the AAD data is specified in additional authentication data
-	 *  length field of the rte_crypto_sym_op_data structure
 	 */
 
 	struct {
@@ -522,10 +510,6 @@ struct rte_crypto_sym_op {
 			  * values.
 			  *
 			  * @note
-			  * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this
-			  * field should be set to 0.
-			  *
-			  * @note
 			  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2,
 			  * KASUMI @ RTE_CRYPTO_CIPHER_KASUMI_F8
 			  * and ZUC @ RTE_CRYPTO_CIPHER_ZUC_EEA3,
@@ -547,11 +531,6 @@ struct rte_crypto_sym_op {
 			  * ignored. The field @ref aad field
 			  * should be set instead.
 			  *
-			  * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC)
-			  * mode of operation, this field is set to 0. aad data
-			  * pointer of rte_crypto_sym_op_data structure is
-			  * used instead
-			  *
 			  * @note
 			  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
 			  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
@@ -569,11 +548,6 @@ struct rte_crypto_sym_op {
 			  * instead.
 			  *
 			  * @note
-			  * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode
-			  * of operation, this field is set to 0.
-			  * Auth.aad.length is used instead.
-			  *
-			  * @note
 			  * For SNOW 3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2,
 			  * KASUMI @ RTE_CRYPTO_AUTH_KASUMI_F9
 			  * and ZUC @ RTE_CRYPTO_AUTH_ZUC_EIA3,
@@ -644,9 +618,6 @@ struct rte_crypto_sym_op {
 			 * any space to round this up to the nearest multiple
 			 * of the block size (16 bytes).
 			 *
-			 * @note
-			 * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
-			 * operation, this field is used to pass plaintext.
 			 */
 			phys_addr_t phys_addr;	/**< physical address */
 		} aad;
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 4698f26..00c32a4 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -6281,17 +6281,7 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
 	struct crypto_unittest_params *ut_params = &unittest_params;
 	struct rte_crypto_sym_op *sym_op;
 
-	unsigned aad_pad_len;
-
-	aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16);
-
-	/*
-	 * Runtime generate the large plain text instead of use hard code
-	 * plain text vector. It is done to avoid create huge source file
-	 * with the test vector.
-	 */
-	if (tdata->aad.len == GMAC_LARGE_PLAINTEXT_LENGTH)
-		generate_gmac_large_plaintext(tdata->aad.data);
+	uint32_t plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
 
 	/* Generate Crypto op data structure */
 	ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
@@ -6300,14 +6290,6 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
 			"Failed to allocate symmetric crypto operation struct");
 
 	sym_op = ut_params->op->sym;
-	sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-			aad_pad_len);
-	TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
-			"no room to append aad");
-
-	sym_op->auth.aad.phys_addr =
-			rte_pktmbuf_mtophys(ut_params->ibuf);
-	memcpy(sym_op->auth.aad.data, tdata->aad.data, tdata->aad.len);
 
 	sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
 			ut_params->ibuf, tdata->gmac_tag.len);
@@ -6315,7 +6297,7 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
 			"no room to append digest");
 
 	sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
-			ut_params->ibuf, aad_pad_len);
+			ut_params->ibuf, plaintext_pad_len);
 
 	if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
 		rte_memcpy(sym_op->auth.digest.data, tdata->gmac_tag.data,
@@ -6336,31 +6318,20 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
 	sym_op->cipher.data.offset = 0;
 
 	sym_op->auth.data.offset = 0;
-	sym_op->auth.data.length = 0;
+	sym_op->auth.data.length = tdata->plaintext.len;
 
 	return 0;
 }
 
 static int create_gmac_session(uint8_t dev_id,
-		enum rte_crypto_cipher_operation op,
 		const struct gmac_test_data *tdata,
 		enum rte_crypto_auth_operation auth_op)
 {
-	uint8_t cipher_key[tdata->key.len];
+	uint8_t auth_key[tdata->key.len];
 
 	struct crypto_unittest_params *ut_params = &unittest_params;
 
-	memcpy(cipher_key, tdata->key.data, tdata->key.len);
-
-	/* For GMAC we setup cipher parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
-	ut_params->cipher_xform.next = NULL;
-	ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
-	ut_params->cipher_xform.cipher.op = op;
-	ut_params->cipher_xform.cipher.key.data = cipher_key;
-	ut_params->cipher_xform.cipher.key.length = tdata->key.len;
-	ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
-	ut_params->cipher_xform.cipher.iv.length = tdata->iv.len;
+	memcpy(auth_key, tdata->key.data, tdata->key.len);
 
 	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.next = NULL;
@@ -6368,14 +6339,15 @@ static int create_gmac_session(uint8_t dev_id,
 	ut_params->auth_xform.auth.algo = RTE_CRYPTO_AUTH_AES_GMAC;
 	ut_params->auth_xform.auth.op = auth_op;
 	ut_params->auth_xform.auth.digest_length = tdata->gmac_tag.len;
-	ut_params->auth_xform.auth.add_auth_data_length = tdata->aad.len;
-	ut_params->auth_xform.auth.key.length = 0;
-	ut_params->auth_xform.auth.key.data = NULL;
+	ut_params->auth_xform.auth.add_auth_data_length = 0;
+	ut_params->auth_xform.auth.key.length = tdata->key.len;
+	ut_params->auth_xform.auth.key.data = auth_key;
+	ut_params->auth_xform.auth.iv.offset = IV_OFFSET;
+	ut_params->auth_xform.auth.iv.length = tdata->iv.len;
 
-	ut_params->cipher_xform.next = &ut_params->auth_xform;
 
 	ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
-			&ut_params->cipher_xform);
+			&ut_params->auth_xform);
 
 	TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
 
@@ -6390,20 +6362,19 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata)
 
 	int retval;
 
-	uint8_t *auth_tag, *p;
-	uint16_t aad_pad_len;
+	uint8_t *auth_tag, *plaintext;
+	uint16_t plaintext_pad_len;
 
 	TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0,
 			      "No GMAC length in the source data");
 
 	retval = create_gmac_session(ts_params->valid_devs[0],
-			RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 			tdata, RTE_CRYPTO_AUTH_OP_GENERATE);
 
 	if (retval < 0)
 		return retval;
 
-	if (tdata->aad.len > MBUF_SIZE)
+	if (tdata->plaintext.len > MBUF_SIZE)
 		ut_params->ibuf = rte_pktmbuf_alloc(ts_params->large_mbuf_pool);
 	else
 		ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
@@ -6413,9 +6384,22 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata)
 	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
 			rte_pktmbuf_tailroom(ut_params->ibuf));
 
-	aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16);
+	plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+	/*
+	 * Runtime generate the large plain text instead of use hard code
+	 * plain text vector. It is done to avoid create huge source file
+	 * with the test vector.
+	 */
+	if (tdata->plaintext.len == GMAC_LARGE_PLAINTEXT_LENGTH)
+		generate_gmac_large_plaintext(tdata->plaintext.data);
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				plaintext_pad_len);
+	TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
 
-	p = rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *);
+	memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+	TEST_HEXDUMP(stdout, "plaintext:", plaintext,
+			tdata->plaintext.len);
 
 	retval = create_gmac_operation(RTE_CRYPTO_AUTH_OP_GENERATE,
 			tdata);
@@ -6435,9 +6419,9 @@ test_AES_GMAC_authentication(const struct gmac_test_data *tdata)
 
 	if (ut_params->op->sym->m_dst) {
 		auth_tag = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_dst,
-				uint8_t *, aad_pad_len);
+				uint8_t *, plaintext_pad_len);
 	} else {
-		auth_tag = p + aad_pad_len;
+		auth_tag = plaintext + plaintext_pad_len;
 	}
 
 	TEST_HEXDUMP(stdout, "auth tag:", auth_tag, tdata->gmac_tag.len);
@@ -6481,18 +6465,19 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata)
 	struct crypto_testsuite_params *ts_params = &testsuite_params;
 	struct crypto_unittest_params *ut_params = &unittest_params;
 	int retval;
+	uint32_t plaintext_pad_len;
+	uint8_t *plaintext;
 
 	TEST_ASSERT_NOT_EQUAL(tdata->gmac_tag.len, 0,
 			      "No GMAC length in the source data");
 
 	retval = create_gmac_session(ts_params->valid_devs[0],
-			RTE_CRYPTO_CIPHER_OP_DECRYPT,
 			tdata, RTE_CRYPTO_AUTH_OP_VERIFY);
 
 	if (retval < 0)
 		return retval;
 
-	if (tdata->aad.len > MBUF_SIZE)
+	if (tdata->plaintext.len > MBUF_SIZE)
 		ut_params->ibuf = rte_pktmbuf_alloc(ts_params->large_mbuf_pool);
 	else
 		ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
@@ -6502,6 +6487,24 @@ test_AES_GMAC_authentication_verify(const struct gmac_test_data *tdata)
 	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
 			rte_pktmbuf_tailroom(ut_params->ibuf));
 
+	plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+
+	/*
+	 * Runtime generate the large plain text instead of use hard code
+	 * plain text vector. It is done to avoid create huge source file
+	 * with the test vector.
+	 */
+	if (tdata->plaintext.len == GMAC_LARGE_PLAINTEXT_LENGTH)
+		generate_gmac_large_plaintext(tdata->plaintext.data);
+
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+				plaintext_pad_len);
+	TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+
+	memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+	TEST_HEXDUMP(stdout, "plaintext:", plaintext,
+			tdata->plaintext.len);
+
 	retval = create_gmac_operation(RTE_CRYPTO_AUTH_OP_VERIFY,
 			tdata);
 
@@ -6615,8 +6618,7 @@ hmac_sha1_test_crypto_vector = {
 static const struct test_crypto_vector
 aes128_gmac_test_vector = {
 	.auth_algo = RTE_CRYPTO_AUTH_AES_GMAC,
-	.crypto_algo = RTE_CRYPTO_CIPHER_AES_GCM,
-	.aad = {
+	.plaintext = {
 		.data = plaintext_hash,
 		.len = 512
 	},
@@ -6627,7 +6629,7 @@ aes128_gmac_test_vector = {
 		},
 		.len = 12
 	},
-	.cipher_key = {
+	.auth_key = {
 		.data = {
 			0x42, 0x1A, 0x7D, 0x3D, 0xF5, 0x82, 0x80, 0xF1,
 			0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA
@@ -6745,22 +6747,28 @@ create_auth_cipher_session(struct crypto_unittest_params *ut_params,
 	/* Setup Authentication Parameters */
 	ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 	ut_params->auth_xform.auth.op = auth_op;
-	ut_params->auth_xform.next = &ut_params->cipher_xform;
 	ut_params->auth_xform.auth.algo = reference->auth_algo;
 	ut_params->auth_xform.auth.key.length = reference->auth_key.len;
 	ut_params->auth_xform.auth.key.data = auth_key;
 	ut_params->auth_xform.auth.digest_length = reference->digest.len;
-	ut_params->auth_xform.auth.add_auth_data_length = reference->aad.len;
 
-	/* Setup Cipher Parameters */
-	ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
-	ut_params->cipher_xform.next = NULL;
-	ut_params->cipher_xform.cipher.algo = reference->crypto_algo;
-	ut_params->cipher_xform.cipher.op = cipher_op;
-	ut_params->cipher_xform.cipher.key.data = cipher_key;
-	ut_params->cipher_xform.cipher.key.length = reference->cipher_key.len;
-	ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
-	ut_params->cipher_xform.cipher.iv.length = reference->iv.len;
+	if (reference->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		ut_params->auth_xform.auth.iv.offset = IV_OFFSET;
+		ut_params->auth_xform.auth.iv.length = reference->iv.len;
+	} else {
+		ut_params->auth_xform.next = &ut_params->cipher_xform;
+		ut_params->auth_xform.auth.add_auth_data_length = reference->aad.len;
+
+		/* Setup Cipher Parameters */
+		ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+		ut_params->cipher_xform.next = NULL;
+		ut_params->cipher_xform.cipher.algo = reference->crypto_algo;
+		ut_params->cipher_xform.cipher.op = cipher_op;
+		ut_params->cipher_xform.cipher.key.data = cipher_key;
+		ut_params->cipher_xform.cipher.key.length = reference->cipher_key.len;
+		ut_params->cipher_xform.cipher.iv.offset = IV_OFFSET;
+		ut_params->cipher_xform.cipher.iv.length = reference->iv.len;
+	}
 
 	/* Create Crypto session*/
 	ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
@@ -6838,16 +6846,6 @@ create_auth_GMAC_operation(struct crypto_testsuite_params *ts_params,
 	/* set crypto operation source mbuf */
 	sym_op->m_src = ut_params->ibuf;
 
-	/* aad */
-	sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-			reference->aad.len);
-	TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data, "no room to append AAD");
-	memcpy(sym_op->auth.aad.data, reference->aad.data, reference->aad.len);
-
-	TEST_HEXDUMP(stdout, "AAD:", sym_op->auth.aad.data, reference->aad.len);
-
-	sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
-
 	/* digest */
 	sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
 			ut_params->ibuf, reference->digest.len);
@@ -6875,7 +6873,7 @@ create_auth_GMAC_operation(struct crypto_testsuite_params *ts_params,
 	sym_op->cipher.data.length = 0;
 	sym_op->cipher.data.offset = 0;
 
-	sym_op->auth.data.length = 0;
+	sym_op->auth.data.length = reference->plaintext.len;
 	sym_op->auth.data.offset = 0;
 
 	return 0;
@@ -7025,6 +7023,7 @@ test_authentication_verify_GMAC_fail_when_corruption(
 		unsigned int data_corrupted)
 {
 	int retval;
+	uint8_t *plaintext;
 
 	/* Create session */
 	retval = create_auth_cipher_session(ut_params,
@@ -7043,6 +7042,13 @@ test_authentication_verify_GMAC_fail_when_corruption(
 	memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
 			rte_pktmbuf_tailroom(ut_params->ibuf));
 
+	plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+			reference->plaintext.len);
+	TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
+	memcpy(plaintext, reference->plaintext.data, reference->plaintext.len);
+
+	TEST_HEXDUMP(stdout, "plaintext:", plaintext, reference->plaintext.len);
+
 	/* Create operation */
 	retval = create_auth_verify_GMAC_operation(ts_params,
 			ut_params,
@@ -7052,10 +7058,9 @@ test_authentication_verify_GMAC_fail_when_corruption(
 		return retval;
 
 	if (data_corrupted)
-		data_corruption(ut_params->op->sym->auth.aad.data);
+		data_corruption(plaintext);
 	else
-		tag_corruption(ut_params->op->sym->auth.aad.data,
-				reference->aad.len);
+		tag_corruption(plaintext, reference->plaintext.len);
 
 	ut_params->op = process_crypto_request(ts_params->valid_devs[0],
 			ut_params->op);
diff --git a/test/test/test_cryptodev_gcm_test_vectors.h b/test/test/test_cryptodev_gcm_test_vectors.h
index 5764edb..ac4b0d4 100644
--- a/test/test/test_cryptodev_gcm_test_vectors.h
+++ b/test/test/test_cryptodev_gcm_test_vectors.h
@@ -92,11 +92,6 @@ struct gmac_test_data {
 	struct {
 		uint8_t *data;
 		unsigned len;
-	} aad;
-
-	struct {
-		uint8_t *data;
-		unsigned len;
 	} plaintext;
 
 	struct {
@@ -1484,14 +1479,10 @@ static const struct gmac_test_data gmac_test_case_1 = {
 			0xde, 0xca, 0xf8, 0x88 },
 		.len = 12
 	},
-	.aad = {
+	.plaintext = {
 		.data = gmac_plaintext,
 		.len = 160
 	},
-	.plaintext = {
-		.data = NULL,
-		.len = 0
-	},
 	.gmac_tag = {
 		.data = {
 			0x4C, 0x0C, 0x4F, 0x47, 0x2D, 0x78, 0xF6, 0xD8,
@@ -1516,14 +1507,10 @@ static const struct gmac_test_data gmac_test_case_2 = {
 		    0x55, 0x61, 0xf0, 0x43, 0x15, },
 		.len = 12
 	},
-	.aad = {
+	.plaintext = {
 		.data = gmac_plaintext,
 		.len = 80
 	},
-	.plaintext = {
-		.data = NULL,
-		.len = 0
-	},
 	.gmac_tag = {
 		.data = {
 		    0xCF, 0x82, 0x80, 0x64, 0x02, 0x46, 0xF4, 0xFB,
@@ -1550,14 +1537,10 @@ static const struct gmac_test_data gmac_test_case_3 = {
 		},
 		.len = 12
 	},
-	.aad = {
+	.plaintext = {
 		.data = gmac_plaintext,
 		.len = 65
 	},
-	.plaintext = {
-		.data = NULL,
-		.len = 0
-	},
 	.gmac_tag = {
 		.data = {
 			0x77, 0x46, 0x0D, 0x6F, 0xB1, 0x87, 0xDB, 0xA9,
@@ -2214,14 +2197,10 @@ static const struct gmac_test_data gmac_test_case_4 = {
 		},
 		.len = 12
 	},
-	.aad = {
+	.plaintext = {
 		.data = gmac_plaintext,
 		.len = GMAC_LARGE_PLAINTEXT_LENGTH
 	},
-	.plaintext = {
-		.data = NULL,
-		.len = 0
-	},
 	.gmac_tag = {
 		.data = {
 			0x3f, 0x07, 0xcb, 0xb9, 0x86, 0x3a, 0xea, 0xc2,
-- 
2.9.4


