[dpdk-dev] [PATCH v2 3/3] crypto/dpaa_sec: support ipsec protocol offload

Akhil Goyal <akhil.goyal@nxp.com>
Thu Jan 11 12:33:04 CET 2018


Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 doc/guides/cryptodevs/features/dpaa_sec.ini |   1 +
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 410 ++++++++++++++++++++++++++--
 drivers/crypto/dpaa_sec/dpaa_sec.h          |  62 ++++-
 3 files changed, 450 insertions(+), 23 deletions(-)
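
The sketch below is illustrative only and not part of the patch: it shows how an
application could create a lookaside IPsec egress session against this PMD through
the rte_security API enabled here (18.02-era interface). The device id, session
mempool, SPI, tunnel addresses and key material are hypothetical placeholders; the
egress xform order (cipher first, auth chained via ->next) matches what
dpaa_sec_set_ipsec_session() expects below.

#include <rte_cryptodev.h>
#include <rte_ip.h>
#include <rte_security.h>

static struct rte_security_session *
create_ipsec_egress_session(uint8_t cdev_id, struct rte_mempool *sess_mp)
{
	static uint8_t cipher_key[16];	/* placeholder key material */
	static uint8_t auth_key[20];	/* placeholder key material */
	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
			rte_cryptodev_get_sec_ctx(cdev_id);
	/* Egress expects cipher first with auth chained via ->next,
	 * as parsed by dpaa_sec_set_ipsec_session() below.
	 */
	struct rte_crypto_sym_xform auth_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = {
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.key = { .data = auth_key, .length = sizeof(auth_key) },
			.digest_length = 12,	/* HMAC-SHA1-96 */
		},
	};
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xform,
		.cipher = {
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.key = { .data = cipher_key,
				 .length = sizeof(cipher_key) },
			/* IV is generated by the SEC block for protocol
			 * offload (PDBOPTS_ESP_IVSRC); only the length is
			 * meaningful here.
			 */
			.iv = { .offset = 0, .length = 16 },
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000,	/* placeholder */
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.tunnel = {
				.type = RTE_SECURITY_IPSEC_TUNNEL_IPV4,
				.ipv4 = {
					.src_ip.s_addr =
						rte_cpu_to_be_32(IPv4(172, 16, 1, 5)),
					.dst_ip.s_addr =
						rte_cpu_to_be_32(IPv4(172, 16, 2, 5)),
					.ttl = 64,
				},
			},
		},
		.crypto_xform = &cipher_xform,
	};

	if (ctx == NULL)
		return NULL;

	return rte_security_session_create(ctx, &conf, sess_mp);
}
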

diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini b/doc/guides/cryptodevs/features/dpaa_sec.ini
index 0e8f5b2..deab53a 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -7,6 +7,7 @@
 Symmetric crypto       = Y
 Sym operation chaining = Y
 HW Accelerated         = Y
+Protocol offload       = Y
 
 ;
 ; Supported crypto algorithms of the 'dpaa_sec' crypto driver.
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index f433f0a..d7b6f39 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -15,6 +15,7 @@
 #include <rte_cryptodev_pmd.h>
 #include <rte_crypto.h>
 #include <rte_cryptodev.h>
+#include <rte_security_driver.h>
 #include <rte_cycles.h>
 #include <rte_dev.h>
 #include <rte_kvargs.h>
@@ -196,8 +197,19 @@ dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
 	 * sg[1] for input
 	 */
 	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
 	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
 	ctx->fd_status = fd->status;
+	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+		struct qm_sg_entry *sg_out;
+		uint32_t len;
+
+		sg_out = &job->sg[0];
+		hw_sg_to_cpu(sg_out);
+		len = sg_out->length;
+		ctx->op->sym->m_src->pkt_len = len;
+		ctx->op->sym->m_src->data_len = len;
+	}
 	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
 	dpaa_sec_op_ending(ctx);
 
@@ -261,7 +273,13 @@ static inline int is_aead(dpaa_sec_session *ses)
 static inline int is_auth_cipher(dpaa_sec_session *ses)
 {
 	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
-		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
+		(ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+}
+
+static inline int is_proto_ipsec(dpaa_sec_session *ses)
+{
+	return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
 }
 
 static inline int is_encode(dpaa_sec_session *ses)
@@ -282,27 +300,39 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
 		ses->digest_length = 0;
 		break;
 	case RTE_CRYPTO_AUTH_MD5_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA1_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA256_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA384_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	case RTE_CRYPTO_AUTH_SHA512_HMAC:
-		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
+		alginfo_a->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
 		alginfo_a->algmode = OP_ALG_AAI_HMAC;
 		break;
 	default:
@@ -317,15 +347,21 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
 	case RTE_CRYPTO_CIPHER_NULL:
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CBC:
-		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
 		alginfo_c->algmode = OP_ALG_AAI_CBC;
 		break;
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
-		alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
 		alginfo_c->algmode = OP_ALG_AAI_CBC;
 		break;
 	case RTE_CRYPTO_CIPHER_AES_CTR:
-		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+		alginfo_c->algtype =
+			(ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+			OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
 		alginfo_c->algmode = OP_ALG_AAI_CTR;
 		break;
 	default:
@@ -471,14 +507,28 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 		cdb->sh_desc[0] = 0;
 		cdb->sh_desc[1] = 0;
 		cdb->sh_desc[2] = 0;
-
-		/* Auth_only_len is set as 0 here and it will be overwritten
-		 *  in fd for each packet.
-		 */
-		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
-				true, swap, &alginfo_c, &alginfo_a,
-				ses->iv.length, 0,
-				ses->digest_length, ses->dir);
+		if (is_proto_ipsec(ses)) {
+			if (ses->dir == DIR_ENC) {
+				shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+						cdb->sh_desc,
+						true, swap, &ses->encap_pdb,
+						(uint8_t *)&ses->ip4_hdr,
+						&alginfo_c, &alginfo_a);
+			} else if (ses->dir == DIR_DEC) {
+				shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+						cdb->sh_desc,
+						true, swap, &ses->decap_pdb,
+						&alginfo_c, &alginfo_a);
+			}
+		} else {
+			/* Auth_only_len is set as 0 here and it will be
+			 * overwritten in fd for each packet.
+			 */
+			shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+					true, swap, &alginfo_c, &alginfo_a,
+					ses->iv.length, 0,
+					ses->digest_length, ses->dir);
+		}
 	}
 	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
 	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
@@ -860,6 +910,45 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 	return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct dpaa_sec_job *cf;
+	struct dpaa_sec_op_ctx *ctx;
+	struct qm_sg_entry *sg;
+	phys_addr_t src_start_addr, dst_start_addr;
+
+	ctx = dpaa_sec_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+	cf = &ctx->job;
+	ctx->op = op;
+
+	src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+
+	if (sym->m_dst)
+		dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+	else
+		dst_start_addr = src_start_addr;
+
+	/* input */
+	sg = &cf->sg[1];
+	qm_sg_entry_set64(sg, src_start_addr);
+	sg->length = sym->m_src->pkt_len;
+	sg->final = 1;
+	cpu_to_hw_sg(sg);
+
+	sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
+	/* output */
+	sg = &cf->sg[0];
+	qm_sg_entry_set64(sg, dst_start_addr);
+	sg->length = sym->m_src->buf_len - sym->m_src->data_off;
+	cpu_to_hw_sg(sg);
+
+	return cf;
+}
+
 static int
 dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 {
@@ -870,8 +959,14 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 	uint32_t auth_only_len = op->sym->auth.data.length -
 				op->sym->cipher.data.length;
 
-	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
-					cryptodev_driver_id);
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		ses = (dpaa_sec_session *)get_session_private_data(
+				op->sym->session, cryptodev_driver_id);
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		ses = (dpaa_sec_session *)get_sec_session_private_data(
+				op->sym->sec_session);
+	else
+		return -ENOTSUP;
 
 	if (unlikely(!ses->qp || ses->qp != qp)) {
 		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
@@ -895,6 +990,8 @@ dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
 		auth_only_len = ses->auth_only_len;
 	} else if (is_auth_cipher(ses)) {
 		cf = build_cipher_auth(op, ses);
+	} else if (is_proto_ipsec(ses)) {
+		cf = build_proto(op, ses);
 	} else {
 		PMD_TX_LOG(ERR, "not supported sec op");
 		return -ENOTSUP;
@@ -933,7 +1030,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 
 	/*Prepare each packet which is to be sent*/
 	for (loop = 0; loop < nb_ops; loop++) {
-		if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
+		if (ops[loop]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
 			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
 			return 0;
 		}
@@ -1343,6 +1440,236 @@ dpaa_sec_session_clear(struct rte_cryptodev *dev,
 }
 
 static int
+dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
+			   struct rte_security_session_conf *conf,
+			   void *sess)
+{
+	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+	struct rte_crypto_auth_xform *auth_xform;
+	struct rte_crypto_cipher_xform *cipher_xform;
+	dpaa_sec_session *session = (dpaa_sec_session *)sess;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		cipher_xform = &conf->crypto_xform->cipher;
+		auth_xform = &conf->crypto_xform->next->auth;
+	} else {
+		auth_xform = &conf->crypto_xform->auth;
+		cipher_xform = &conf->crypto_xform->next->cipher;
+	}
+	session->proto_alg = conf->protocol;
+	session->cipher_key.data = rte_zmalloc(NULL,
+					       cipher_xform->key.length,
+					       RTE_CACHE_LINE_SIZE);
+	if (session->cipher_key.data == NULL &&
+			cipher_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+		return -ENOMEM;
+	}
+
+	session->cipher_key.length = cipher_xform->key.length;
+	session->auth_key.data = rte_zmalloc(NULL,
+					auth_xform->key.length,
+					RTE_CACHE_LINE_SIZE);
+	if (session->auth_key.data == NULL &&
+			auth_xform->key.length > 0) {
+		RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+		rte_free(session->cipher_key.data);
+		return -ENOMEM;
+	}
+	session->auth_key.length = auth_xform->key.length;
+	memcpy(session->cipher_key.data, cipher_xform->key.data,
+			cipher_xform->key.length);
+	memcpy(session->auth_key.data, auth_xform->key.data,
+			auth_xform->key.length);
+
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+		break;
+	case RTE_CRYPTO_AUTH_NULL:
+		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+	case RTE_CRYPTO_AUTH_SHA1:
+	case RTE_CRYPTO_AUTH_SHA256:
+	case RTE_CRYPTO_AUTH_SHA512:
+	case RTE_CRYPTO_AUTH_SHA224:
+	case RTE_CRYPTO_AUTH_SHA384:
+	case RTE_CRYPTO_AUTH_MD5:
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+			auth_xform->algo);
+		goto out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+			auth_xform->algo);
+		goto out;
+	}
+
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+		break;
+	case RTE_CRYPTO_CIPHER_NULL:
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+	case RTE_CRYPTO_CIPHER_3DES_ECB:
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+			cipher_xform->algo);
+		goto out;
+	default:
+		RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+			cipher_xform->algo);
+		goto out;
+	}
+
+	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+		memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
+				sizeof(session->ip4_hdr));
+		session->ip4_hdr.ip_v = IPVERSION;
+		session->ip4_hdr.ip_hl = 5;
+		session->ip4_hdr.ip_len = rte_cpu_to_be_16(
+						sizeof(session->ip4_hdr));
+		session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+		session->ip4_hdr.ip_id = 0;
+		session->ip4_hdr.ip_off = 0;
+		session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+		session->ip4_hdr.ip_p = (ipsec_xform->proto ==
+				RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
+				: IPPROTO_AH;
+		session->ip4_hdr.ip_sum = 0;
+		session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+		session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+		session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
+						(void *)&session->ip4_hdr,
+						sizeof(struct ip));
+
+		session->encap_pdb.options =
+			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+			PDBOPTS_ESP_OIHI_PDB_INL |
+			PDBOPTS_ESP_IVSRC |
+			PDBHMO_ESP_ENCAP_DTTL;
+		session->encap_pdb.spi = ipsec_xform->spi;
+		session->encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+		session->dir = DIR_ENC;
+	} else if (ipsec_xform->direction ==
+			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+		memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+		session->decap_pdb.options = sizeof(struct ip) << 16;
+		session->dir = DIR_DEC;
+	} else
+		goto out;
+	session->ctx_pool = internals->ctx_pool;
+	session->inq = dpaa_sec_attach_rxq(internals);
+	if (session->inq == NULL) {
+		PMD_DRV_LOG(ERR, "unable to attach sec queue");
+		goto out;
+	}
+
+
+	return 0;
+out:
+	rte_free(session->auth_key.data);
+	rte_free(session->cipher_key.data);
+	memset(session, 0, sizeof(dpaa_sec_session));
+	return -1;
+}
+
+static int
+dpaa_sec_security_session_create(void *dev,
+				 struct rte_security_session_conf *conf,
+				 struct rte_security_session *sess,
+				 struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	int ret;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CDEV_LOG_ERR(
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	switch (conf->protocol) {
+	case RTE_SECURITY_PROTOCOL_IPSEC:
+		ret = dpaa_sec_set_ipsec_session(cdev, conf,
+				sess_private_data);
+		break;
+	case RTE_SECURITY_PROTOCOL_MACSEC:
+		return -ENOTSUP;
+	default:
+		return -EINVAL;
+	}
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR,
+			"DPAA PMD: failed to configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+
+	set_sec_session_private_data(sess, sess_private_data);
+
+	return ret;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa_sec_security_session_destroy(void *dev __rte_unused,
+		struct rte_security_session *sess)
+{
+	PMD_INIT_FUNC_TRACE();
+	void *sess_priv = get_sec_session_private_data(sess);
+
+	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+	if (sess_priv) {
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		rte_free(s->cipher_key.data);
+		rte_free(s->auth_key.data);
+		memset(sess_priv, 0, sizeof(dpaa_sec_session));
+		set_sec_session_private_data(sess, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+	return 0;
+}
+
+
+static int
 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
 		       struct rte_cryptodev_config *config __rte_unused)
 {
@@ -1408,6 +1735,21 @@ static struct rte_cryptodev_ops crypto_ops = {
 	.qp_detach_session    = dpaa_sec_qp_detach_sess,
 };
 
+static const struct rte_security_capability *
+dpaa_sec_capabilities_get(void *device __rte_unused)
+{
+	return dpaa_sec_security_cap;
+}
+
+struct rte_security_ops dpaa_sec_security_ops = {
+	.session_create = dpaa_sec_security_session_create,
+	.session_update = NULL,
+	.session_stats_get = NULL,
+	.session_destroy = dpaa_sec_security_session_destroy,
+	.set_pkt_metadata = NULL,
+	.capabilities_get = dpaa_sec_capabilities_get
+};
+
 static int
 dpaa_sec_uninit(struct rte_cryptodev *dev)
 {
@@ -1416,6 +1758,8 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
 	if (dev == NULL)
 		return -ENODEV;
 
+	rte_free(dev->security_ctx);
+
 	rte_mempool_free(internals->ctx_pool);
 	rte_free(internals);
 
@@ -1429,6 +1773,7 @@ static int
 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
 	struct dpaa_sec_dev_private *internals;
+	struct rte_security_ctx *security_instance;
 	struct dpaa_sec_qp *qp;
 	uint32_t i, flags;
 	int ret;
@@ -1443,12 +1788,33 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_SECURITY;
 
 	internals = cryptodev->data->dev_private;
 	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
 	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
 
+	/*
+	 * For secondary processes, we don't initialise any further as primary
+	 * has already done this work. Only check we don't need a different
+	 * RX function
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+		return 0;
+	}
+
+	/* Initialize security_ctx only for primary process*/
+	security_instance = rte_malloc("rte_security_instances_ops",
+				sizeof(struct rte_security_ctx), 0);
+	if (security_instance == NULL)
+		return -ENOMEM;
+	security_instance->device = (void *)cryptodev;
+	security_instance->ops = &dpaa_sec_security_ops;
+	security_instance->sess_cnt = 0;
+	cryptodev->security_ctx = security_instance;
+
 	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
 		/* init qman fq for queue pair */
 		qp = &internals->qps[i];
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 93369e4..578c46a 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -91,7 +91,8 @@ typedef struct dpaa_sec_session_entry {
 	uint8_t dir;         /*!< Operation Direction */
 	enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
 	enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
-	enum rte_crypto_aead_algorithm aead_alg; /*!< Authentication Algorithm*/
+	enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
+	enum rte_security_session_protocol proto_alg; /*!< Security Protocol*/
 	union {
 		struct {
 			uint8_t *data;	/**< pointer to key data */
@@ -114,6 +115,9 @@ typedef struct dpaa_sec_session_entry {
 	} iv;	/**< Initialisation vector parameters */
 	uint16_t auth_only_len; /*!< Length of data for Auth only */
 	uint32_t digest_length;
+	struct ipsec_encap_pdb encap_pdb;
+	struct ip ip4_hdr;
+	struct ipsec_decap_pdb decap_pdb;
 	struct dpaa_sec_qp *qp;
 	struct qman_fq *inq;
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
@@ -378,4 +382,60 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
+static const struct rte_security_capability dpaa_sec_security_cap[] = {
+	{ /* IPsec Lookaside Protocol offload ESP Tunnel Egress */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = dpaa_sec_capabilities
+	},
+	{ /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+		.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = dpaa_sec_capabilities
+	},
+	{
+		.action = RTE_SECURITY_ACTION_TYPE_NONE
+	}
+};
+
+/**
+ * Checksum
+ *
+ * @param buffer buffer over which to compute the checksum
+ * @param len    buffer length
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+	uint16_t *buf = (uint16_t *)buffer;
+	uint32_t sum = 0;
+	uint16_t result;
+
+	for (sum = 0; len > 1; len -= 2)
+		sum += *buf++;
+
+	if (len == 1)
+		sum += *(unsigned char *)buf;
+
+	sum = (sum >> 16) + (sum & 0xFFFF);
+	sum += (sum >> 16);
+	result = ~sum;
+
+	return result;
+}
+
 #endif /* _DPAA_SEC_H_ */
-- 
2.9.3
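
For completeness, a matching datapath sketch under the same assumptions as the
session-creation example above (dev_id, qp_id and op_pool are hypothetical
application-owned resources): the whole mbuf is handed to the PMD with the security
session attached, no cipher/auth offsets are set, and on completion the PMD updates
pkt_len/data_len of m_src as done in dqrr_out_fq_cb_rx() in this patch.

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

static int
ipsec_process_one(uint8_t dev_id, uint16_t qp_id, struct rte_mempool *op_pool,
		  struct rte_security_session *sec_sess, struct rte_mbuf *pkt)
{
	struct rte_crypto_op *op;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return -1;

	/* The whole mbuf is processed; no cipher/auth offsets are needed
	 * for lookaside protocol offload.
	 */
	op->sym->m_src = pkt;
	rte_security_attach_session(op, sec_sess);

	if (rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) != 1) {
		rte_crypto_op_free(op);
		return -1;
	}

	/* Poll for completion; a real application would batch this. */
	while (rte_cryptodev_dequeue_burst(dev_id, qp_id, &op, 1) == 0)
		;

	/* On success, m_src now holds the ESP tunnel packet and its
	 * pkt_len/data_len were updated by the PMD completion path.
	 */
	if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		rte_crypto_op_free(op);
		return -1;
	}
	rte_crypto_op_free(op);
	return 0;
}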


