[dpdk-dev] [PATCH v6 6/8] examples/ipsec-secgw: cpu crypto support

Marcin Smoczynski marcinx.smoczynski at intel.com
Tue Feb 4 14:12:56 CET 2020


Add support for CPU accelerated crypto. A new 'cpu-crypto' SA type has
been introduced in the configuration, allowing the use of this
acceleration.

Legacy mode is not currently supported.
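
For example, a hypothetical SA entry selecting the new type could look
as follows (placeholder keys and addresses, following the existing
ipsec-secgw SA rule syntax):

    sa out 5 cipher_algo aes-128-cbc \
    cipher_key a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0:a0 \
    auth_algo sha1-hmac \
    auth_key a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1:a1 \
    mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 type cpu-crypto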

Signed-off-by: Konstantin Ananyev <konstantin.ananyev at intel.com>
Signed-off-by: Marcin Smoczynski <marcinx.smoczynski at intel.com>
Acked-by: Fan Zhang <roy.fan.zhang at intel.com>
---
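Note: the synchronous datapath introduced in ipsec_process_cpu_group()
below reduces to the following pattern. A minimal standalone sketch (not
part of the patch), assuming 'ss' is a fully initialized
rte_ipsec_session of type RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO; error
accounting is omitted:

    #include <rte_ipsec.h>
    #include <rte_mbuf.h>

    static uint16_t
    process_sync(const struct rte_ipsec_session *ss,
            struct rte_mbuf *mb[], uint16_t num)
    {
        uint16_t k;

        /* perform the crypto part synchronously on the calling lcore */
        k = rte_ipsec_pkt_cpu_prepare(ss, mb, num);

        /* finalize IPsec headers/trailers for the packets that
         * succeeded; bad mbufs are moved to the end of mb[] by the
         * library
         */
        return rte_ipsec_pkt_process(ss, mb, k);
    }
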
 examples/ipsec-secgw/ipsec.c         |  25 ++++-
 examples/ipsec-secgw/ipsec_process.c | 136 +++++++++++++++++----------
 examples/ipsec-secgw/sa.c            |  30 ++++--
 3 files changed, 131 insertions(+), 60 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index d4b57121a..6e8120702 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 #include <sys/types.h>
 #include <netinet/in.h>
@@ -10,6 +10,7 @@
 #include <rte_crypto.h>
 #include <rte_security.h>
 #include <rte_cryptodev.h>
+#include <rte_ipsec.h>
 #include <rte_ethdev.h>
 #include <rte_mbuf.h>
 #include <rte_hash.h>
@@ -86,7 +87,8 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa,
 			ipsec_ctx->tbl[cdev_id_qp].id,
 			ipsec_ctx->tbl[cdev_id_qp].qp);
 
-	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE) {
+	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
+		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
 		struct rte_security_session_conf sess_conf = {
 			.action_type = ips->type,
 			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
@@ -126,6 +128,18 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa,
 			return -1;
 		}
 	} else {
+		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
+			struct rte_cryptodev_info info;
+			uint16_t cdev_id;
+
+			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
+			rte_cryptodev_info_get(cdev_id, &info);
+			if (!(info.feature_flags &
+				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
+				return -ENOTSUP;
+
+			ips->crypto.dev_id = cdev_id;
+		}
 		ips->crypto.ses = rte_cryptodev_sym_session_create(
 				ipsec_ctx->session_pool);
 		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
@@ -476,6 +490,13 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 			rte_security_attach_session(&priv->cop,
 				ips->security.ses);
 			break;
+
+		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported "
+					"in legacy mode\n");
+			rte_pktmbuf_free(pkts[i]);
+			continue;
+
 		case RTE_SECURITY_ACTION_TYPE_NONE:
 
 			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 2eb5c8b34..bb2f2b82d 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 #include <sys/types.h>
 #include <netinet/in.h>
@@ -92,7 +92,8 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
 	int32_t rc;
 
 	/* setup crypto section */
-	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
+			ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
 		RTE_ASSERT(ss->crypto.ses == NULL);
 		rc = create_lookaside_session(ctx, sa, ss);
 		if (rc != 0)
@@ -215,6 +216,62 @@ ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
 	return k;
 }
 
+/*
+ * Helper routine for inline and CPU (synchronous) processing.
+ * This is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
+ * Should be removed in the future.
+ */
+static inline void
+prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint32_t j;
+	struct ipsec_mbuf_metadata *priv;
+
+	for (j = 0; j != cnt; j++) {
+		priv = get_priv(mb[j]);
+		priv->sa = sa;
+	}
+}
+
+/*
+ * Finish processing of packets successfully decrypted by an inline processor.
+ */
+static uint32_t
+ipsec_process_inline_group(struct rte_ipsec_session *ips, void *sa,
+	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint64_t satp;
+	uint32_t k;
+
+	/* get SA type */
+	satp = rte_ipsec_sa_type(ips->sa);
+	prep_process_group(sa, mb, cnt);
+
+	k = rte_ipsec_pkt_process(ips, mb, cnt);
+	copy_to_trf(trf, satp, mb, k);
+	return k;
+}
+
+/*
+ * Process packets synchronously (CPU crypto).
+ */
+static uint32_t
+ipsec_process_cpu_group(struct rte_ipsec_session *ips, void *sa,
+	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint64_t satp;
+	uint32_t k;
+
+	/* get SA type */
+	satp = rte_ipsec_sa_type(ips->sa);
+	prep_process_group(sa, mb, cnt);
+
+	k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
+	k = rte_ipsec_pkt_process(ips, mb, k);
+	copy_to_trf(trf, satp, mb, k);
+	return k;
+}
+
 /*
  * Process ipsec packets.
  * If packet belong to SA that is subject of inline-crypto,
@@ -225,10 +282,8 @@ ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
 void
 ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 {
-	uint64_t satp;
-	uint32_t i, j, k, n;
+	uint32_t i, k, n;
 	struct ipsec_sa *sa;
-	struct ipsec_mbuf_metadata *priv;
 	struct rte_ipsec_group *pg;
 	struct rte_ipsec_session *ips;
 	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];
@@ -236,10 +291,17 @@ ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);
 
 	for (i = 0; i != n; i++) {
+
 		pg = grp + i;
 		sa = ipsec_mask_saptr(pg->id.ptr);
 
-		ips = ipsec_get_primary_session(sa);
+		/* fall back to cryptodev for RX packets that the inline
+		 * processor was unable to process
+		 */
+		if (sa != NULL)
+			ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
+				ipsec_get_fallback_session(sa) :
+				ipsec_get_primary_session(sa);
 
 		/* no valid HW session for that SA, try to create one */
 		if (sa == NULL || (ips->crypto.ses == NULL &&
@@ -247,50 +309,26 @@ ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 			k = 0;
 
 		/* process packets inline */
-		else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
-				ips->type ==
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
-
-			/* get SA type */
-			satp = rte_ipsec_sa_type(ips->sa);
-
-			/*
-			 * This is just to satisfy inbound_sa_check()
-			 * and get_hop_for_offload_pkt().
-			 * Should be removed in future.
-			 */
-			for (j = 0; j != pg->cnt; j++) {
-				priv = get_priv(pg->m[j]);
-				priv->sa = sa;
+		else {
+			switch (ips->type) {
+			/* enqueue packets to crypto dev */
+			case RTE_SECURITY_ACTION_TYPE_NONE:
+			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+				k = ipsec_prepare_crypto_group(ctx, sa, ips,
+					pg->m, pg->cnt);
+				break;
+			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+				k = ipsec_process_inline_group(ips, sa,
+					trf, pg->m, pg->cnt);
+				break;
+			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+				k = ipsec_process_cpu_group(ips, sa,
+					trf, pg->m, pg->cnt);
+				break;
+			default:
+				k = 0;
 			}
-
-			/* fallback to cryptodev with RX packets which inline
-			 * processor was unable to process
-			 */
-			if (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) {
-				/* offload packets to cryptodev */
-				struct rte_ipsec_session *fallback;
-
-				fallback = ipsec_get_fallback_session(sa);
-				if (fallback->crypto.ses == NULL &&
-					fill_ipsec_session(fallback, ctx, sa)
-					!= 0)
-					k = 0;
-				else
-					k = ipsec_prepare_crypto_group(ctx, sa,
-						fallback, pg->m, pg->cnt);
-			} else {
-				/* finish processing of packets successfully
-				 * decrypted by an inline processor
-				 */
-				k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
-				copy_to_trf(trf, satp, pg->m, k);
-
-			}
-		/* enqueue packets to crypto dev */
-		} else {
-			k = ipsec_prepare_crypto_group(ctx, sa, ips, pg->m,
-				pg->cnt);
 		}
 
 		/* drop packets that cannot be enqueued/processed */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index c75a5a15f..e9e8d624c 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 
 /*
@@ -586,6 +586,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
 			else if (strcmp(tokens[ti], "no-offload") == 0)
 				ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
+			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
+				ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
 			else {
 				APP_CHECK(0, status, "Invalid input \"%s\"",
 						tokens[ti]);
@@ -679,10 +681,12 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 	if (status->status < 0)
 		return;
 
-	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
+	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
+			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
 		printf("Missing portid option, falling back to non-offload\n");
 
-	if (!type_p || !portid_p) {
+	if (!type_p || (!portid_p && ips->type !=
+			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
 		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
 		rule->portid = -1;
 	}
@@ -768,15 +772,25 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
 	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
 		printf("lookaside-protocol-offload ");
 		break;
+	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+		printf("cpu-crypto-accelerated ");
+		break;
 	}
 
 	fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
 	if (fallback_ips != NULL && sa->fallback_sessions > 0) {
 		printf("inline fallback: ");
-		if (fallback_ips->type == RTE_SECURITY_ACTION_TYPE_NONE)
+		switch (fallback_ips->type) {
+		case RTE_SECURITY_ACTION_TYPE_NONE:
 			printf("lookaside-none");
-		else
+			break;
+		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+			printf("cpu-crypto-accelerated");
+			break;
+		default:
 			printf("invalid");
+			break;
+		}
 	}
 	printf("\n");
 }
@@ -975,7 +989,6 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 				return -EINVAL;
 		}
 
-
 		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
 		case IP4_TUNNEL:
 			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
@@ -1026,7 +1039,6 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 					return -EINVAL;
 				}
 			}
-			print_one_sa_rule(sa, inbound);
 		} else {
 			switch (sa->cipher_algo) {
 			case RTE_CRYPTO_CIPHER_NULL:
@@ -1091,9 +1103,9 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
 			sa_ctx->xf[idx].b.next = NULL;
 			sa->xforms = &sa_ctx->xf[idx].a;
-
-			print_one_sa_rule(sa, inbound);
 		}
+
+		print_one_sa_rule(sa, inbound);
 	}
 
 	return 0;
-- 
2.17.1
