[dpdk-dev] [PATCH v3 2/6] crypto/aesni_gcm: cpu crypto support
Marcin Smoczynski
marcinx.smoczynski at intel.com
Wed Jan 15 19:28:28 CET 2020
Add support for CPU crypto mode by introducing the required handler.
The crypto mode (sync/async) is chosen at sym session creation time:
sync mode is selected when the RTE_CRYPTO_SYM_CPU_CRYPTO flag is set
in the xform type field.
Authenticated encryption and decryption are supported with tag
generation/verification.
Signed-off-by: Marcin Smoczynski <marcinx.smoczynski at intel.com>
---
drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 9 ++
drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 149 +++++++++++++++++-
drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c | 3 +
.../crypto/aesni_gcm/aesni_gcm_pmd_private.h | 18 ++-
4 files changed, 169 insertions(+), 10 deletions(-)
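
Not part of the patch, but for reviewers: a minimal sketch of how an
application would request the sync (CPU crypto) mode at session creation
time, assuming the RTE_CRYPTO_SYM_CPU_CRYPTO xform flag introduced in
patch 1/6 of this series; the helper name, mempool parameters and the
key/AAD/digest sizes below are illustrative only.

	#include <string.h>
	#include <rte_cryptodev.h>
	#include <rte_crypto_sym.h>

	/* Illustrative: create an AES-128-GCM session in sync mode by
	 * OR-ing RTE_CRYPTO_SYM_CPU_CRYPTO into the xform type. */
	static struct rte_cryptodev_sym_session *
	gcm_cpu_session_create(uint8_t dev_id, struct rte_mempool *sess_mp,
		struct rte_mempool *sess_priv_mp, const uint8_t key[16])
	{
		struct rte_crypto_sym_xform xform;
		struct rte_cryptodev_sym_session *sess;

		memset(&xform, 0, sizeof(xform));
		xform.type = RTE_CRYPTO_SYM_XFORM_AEAD |
			RTE_CRYPTO_SYM_CPU_CRYPTO;
		xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
		xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
		xform.aead.key.data = key;
		xform.aead.key.length = 16;
		xform.aead.iv.offset = 0; /* IVs are passed by pointer in sync mode */
		xform.aead.iv.length = 12;
		xform.aead.aad_length = 16;
		xform.aead.digest_length = 16;

		sess = rte_cryptodev_sym_session_create(sess_mp);
		if (sess == NULL)
			return NULL;
		if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
				sess_priv_mp) != 0) {
			rte_cryptodev_sym_session_free(sess);
			return NULL;
		}
		return sess;
	}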
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index e272f1067..404c0adff 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -65,4 +65,13 @@ struct aesni_gcm_ops {
aesni_gcm_finalize_t finalize_dec;
};
+/** GCM per-session operation handlers */
+struct aesni_gcm_session_ops {
+ aesni_gcm_t cipher;
+ aesni_gcm_pre_t pre;
+ aesni_gcm_init_t init;
+ aesni_gcm_update_t update;
+ aesni_gcm_finalize_t finalize;
+};
+
#endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 1a03be31d..860e9b369 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -25,9 +25,16 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
const struct rte_crypto_sym_xform *aead_xform;
uint8_t key_length;
const uint8_t *key;
+ uint32_t xform_type;
+
+ /* check for CPU-crypto mode */
+ xform_type = xform->type;
+ sess->mode = (xform_type & RTE_CRYPTO_SYM_CPU_CRYPTO) ?
+ AESNI_GCM_MODE_SYNC : AESNI_GCM_MODE_ASYNC;
+ xform_type &= RTE_CRYPTO_SYM_XFORM_TYPE_MASK;
/* AES-GMAC */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = xform;
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
@@ -49,7 +56,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
sess->req_digest_length = auth_xform->auth.digest_length;
/* AES-GCM */
- } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ } else if (xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
aead_xform = xform;
if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
@@ -62,11 +69,24 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
sess->iv.offset = aead_xform->aead.iv.offset;
sess->iv.length = aead_xform->aead.iv.length;
+ /* setup session handlers */
+ sess->ops.pre = gcm_ops->pre;
+ sess->ops.init = gcm_ops->init;
+
/* Select Crypto operation */
- if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
- else
+ sess->ops.cipher = gcm_ops->enc;
+ sess->ops.update = gcm_ops->update_enc;
+ sess->ops.finalize = gcm_ops->finalize_enc;
+ /* op == RTE_CRYPTO_AEAD_OP_DECRYPT */
+ } else {
sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+ sess->ops.cipher = gcm_ops->dec;
+ sess->ops.update = gcm_ops->update_dec;
+ sess->ops.finalize = gcm_ops->finalize_dec;
+ }
key_length = aead_xform->aead.key.length;
key = aead_xform->aead.key.data;
@@ -78,7 +98,6 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
return -ENOTSUP;
}
-
/* IV check */
if (sess->iv.length != 16 && sess->iv.length != 12 &&
sess->iv.length != 0) {
@@ -356,6 +375,122 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
return 0;
}
+static inline void
+aesni_gcm_fill_error_code(struct rte_crypto_sym_vec *vec, int32_t errnum)
+{
+ uint32_t i;
+
+ for (i = 0; i < vec->num; i++)
+ vec->status[i] = errnum;
+}
+
+static inline int32_t
+aesni_gcm_sgl_op_finalize_encryption(struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, uint8_t *digest)
+{
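+ /* shorter digest requested: generate the full tag and truncate it */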
+ if (s->req_digest_length != s->gen_digest_length) {
+ uint8_t tmpdigest[s->gen_digest_length];
+
+ s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
+ s->gen_digest_length);
+ memcpy(digest, tmpdigest, s->req_digest_length);
+ } else {
+ s->ops.finalize(&s->gdata_key, gdata_ctx, digest,
+ s->gen_digest_length);
+ }
+
+ return 0;
+}
+
+static inline int32_t
+aesni_gcm_sgl_op_finalize_decryption(struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, uint8_t *digest)
+{
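+ /* generate the full tag, then compare only the requested length */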
+ uint8_t tmpdigest[s->gen_digest_length];
+
+ s->ops.finalize(&s->gdata_key, gdata_ctx, tmpdigest,
+ s->gen_digest_length);
+
+ return memcmp(digest, tmpdigest, s->req_digest_length) == 0 ? 0 :
+ EBADMSG;
+}
+
+static inline void
+aesni_gcm_process_gcm_sgl_op(struct aesni_gcm_session *s,
+ struct gcm_context_data *gdata_ctx, struct rte_crypto_sgl *sgl,
+ void *iv, void *aad)
+{
+ uint32_t i;
+
+ /* init crypto operation */
+ s->ops.init(&s->gdata_key, gdata_ctx, iv, aad,
+ (uint64_t)s->aad_length);
+
+ /* update with sgl data */
+ for (i = 0; i < sgl->num; i++) {
+ struct rte_crypto_vec *vec = &sgl->vec[i];
+
+ s->ops.update(&s->gdata_key, gdata_ctx, vec->base, vec->base,
+ vec->len);
+ }
+}
+
+/** Process CPU crypto bulk operations */
+uint32_t
+aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess,
+ __rte_unused union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_sym_vec *vec)
+{
+ void *sess_priv;
+ struct aesni_gcm_session *s;
+ uint32_t processed;
+ uint32_t i;
+
+ sess_priv = get_sym_session_private_data(sess, dev->driver_id);
+ if (unlikely(sess_priv == NULL)) {
+ aesni_gcm_fill_error_code(vec, EINVAL);
+ return 0;
+ }
+
+ s = sess_priv;
+ if (unlikely(s->mode != AESNI_GCM_MODE_SYNC)) {
+ aesni_gcm_fill_error_code(vec, EINVAL);
+ return 0;
+ }
+
+ processed = 0;
+ for (i = 0; i < vec->num; ++i) {
+ struct gcm_context_data gdata_ctx;
+ int32_t status;
+
+ aesni_gcm_process_gcm_sgl_op(s, &gdata_ctx, &vec->sgl[i],
+ vec->iv[i], vec->aad[i]);
+
+ switch (s->op) {
+ case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+ status = aesni_gcm_sgl_op_finalize_encryption(s,
+ &gdata_ctx, vec->digest[i]);
+ break;
+
+ case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+ status = aesni_gcm_sgl_op_finalize_decryption(s,
+ &gdata_ctx, vec->digest[i]);
+ break;
+
+ default:
+ status = EINVAL;
+ }
+
+ vec->status[i] = status;
+ if (status == 0)
+ processed++;
+ }
+
+ return processed;
+}
+
/**
* Process a completed job and return rte_mbuf which job processed
*
@@ -527,7 +662,8 @@ aesni_gcm_create(const char *name,
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_IN_PLACE_SGL |
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
- RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO;
/* Check CPU for support for AES instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES))
@@ -672,7 +808,6 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
cryptodev_driver_id);
-
RTE_INIT(aesni_gcm_init_log)
{
aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm");
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 2f66c7c58..5228d98b1 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -331,9 +331,12 @@ struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
.queue_pair_release = aesni_gcm_pmd_qp_release,
.queue_pair_count = aesni_gcm_pmd_qp_count,
+ .sym_cpu_process = aesni_gcm_pmd_cpu_crypto_process,
+
.sym_session_get_size = aesni_gcm_pmd_sym_session_get_size,
.sym_session_configure = aesni_gcm_pmd_sym_session_configure,
.sym_session_clear = aesni_gcm_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 2039adb53..dc8d3c653 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -73,6 +73,11 @@ enum aesni_gcm_operation {
AESNI_GMAC_OP_VERIFY
};
+enum aesni_gcm_mode {
+ AESNI_GCM_MODE_ASYNC,
+ AESNI_GCM_MODE_SYNC
+};
+
/** AESNI GCM private session structure */
struct aesni_gcm_session {
struct {
@@ -90,8 +95,12 @@ struct aesni_gcm_session {
/**< GCM operation type */
enum aesni_gcm_key key;
/**< GCM key type */
+ enum aesni_gcm_mode mode;
+ /**< Sync/async mode */
struct gcm_key_data gdata_key;
/**< GCM parameters */
+ struct aesni_gcm_session_ops ops;
+ /**< Session handlers */
};
@@ -109,10 +118,13 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform);
-
-/**
- * Device specific operations function pointer structure */
+/* Device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
+/** CPU crypto bulk process handler */
+uint32_t
+aesni_gcm_pmd_cpu_crypto_process(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_sym_vec *vec);
#endif /* _AESNI_GCM_PMD_PRIVATE_H_ */
--
2.17.1
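
For completeness, a sketch of driving the new handler through the
rte_cryptodev_sym_cpu_crypto_process() API proposed in patch 1/6, here
for a single contiguous in-place buffer; the helper name is illustrative
and the rte_crypto_sym_vec layout is assumed to match that patch.

	#include <rte_cryptodev.h>
	#include <rte_crypto_sym.h>

	/* Illustrative: process one buffer synchronously on the calling
	 * lcore; no crypto ops, mbufs or queue pairs are involved. */
	static int
	gcm_cpu_crypto_one(uint8_t dev_id,
		struct rte_cryptodev_sym_session *sess,
		void *buf, uint32_t len, void *iv, void *aad, void *digest)
	{
		struct rte_crypto_vec seg = { .base = buf, .len = len };
		struct rte_crypto_sgl sgl = { .vec = &seg, .num = 1 };
		union rte_crypto_sym_ofs ofs = { .raw = 0 };
		int32_t status = 0;
		struct rte_crypto_sym_vec vec = {
			.sgl = &sgl,
			.iv = &iv,
			.aad = &aad,
			.digest = &digest,
			.status = &status,
			.num = 1,
		};

		/* the return value is the number of elements processed OK */
		if (rte_cryptodev_sym_cpu_crypto_process(dev_id, sess, ofs,
				&vec) != 1)
			return -status; /* per-element status is a positive errno */
		return 0;
	}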