[dpdk-dev] [RFC PATCH 5/5] examples/ipsec-secgw: enable inline ipsec

Radu Nicolau radu.nicolau at intel.com
Fri Aug 25 16:57:26 CEST 2017

Enable inline IPsec processing in the ipsec-secgw sample application.
SAs of the proposed RTE_SECURITY_SESS_ETH_INLINE_CRYPTO type bypass the
cryptodev enqueue/dequeue path: on the outbound side only the plain ESP
header is built in software and the mbuf is flagged with
PKT_TX_SECURITY_OFFLOAD so the NIC encrypts it on transmit; on the
inbound side packets arrive already decrypted and only the rx offload
status is mapped onto the crypto op status. Such packets are carried
past the cryptodev in a new ol_pkts[] array on each queue pair.

Signed-off-by: Radu Nicolau <radu.nicolau at intel.com>
---
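Notes:

The datapath split introduced here, condensed into one sketch. All of
the rte_security names below (RTE_SECURITY_SESS_ETH_INLINE_CRYPTO,
rte_security_attach_session(), PKT_TX_SECURITY_OFFLOAD) are the API
proposed earlier in this RFC series, not mainline DPDK, so treat this
as illustrative rather than a drop-in replacement for the
ipsec_enqueue() hunk below:

	/* Per packet, in ipsec_enqueue(): */
	switch (sa->type) {
	case RTE_SECURITY_SESS_NONE:
		/* Lookaside path, unchanged: build a crypto op and
		 * queue it on the cryptodev bound to this SA. */
		enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
		break;
	case RTE_SECURITY_SESS_ETH_INLINE_CRYPTO:
		/* Inline path: attach the security session, let the
		 * esp handler build the plain ESP header and flag the
		 * mbuf for NIC-side crypto, then park the packet on
		 * the queue pair's ol_pkts[] array so ipsec_dequeue()
		 * hands it straight back. */
		rte_security_attach_session(&priv->cop, sa->sec_session);
		if (xform_func(pkts[i], sa, &priv->cop) == 0) {
			cqp = &ipsec_ctx->tbl[sa->cdev_id_qp];
			cqp->ol_pkts[cqp->ol_pkts_cnt++] = pkts[i];
		}
		break;
	}
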
 examples/ipsec-secgw/esp.c   | 24 ++++++++++++++--
 examples/ipsec-secgw/ipsec.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++------
 examples/ipsec-secgw/ipsec.h |  2 ++
 examples/ipsec-secgw/sa.c    | 65 +++++++++++++++++++++++++++++++-------------
 4 files changed, 120 insertions(+), 31 deletions(-)
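
On the dequeue side, ipsec_dequeue() drains that per-queue-pair array
before polling the cryptodev, so inline packets go through the same
post-processing callback and output burst as lookaside completions. A
minimal sketch of the drain loop (ol_pkts[] is MAX_PKT_BURST entries
deep, the burst size used throughout the application):

	/* In ipsec_dequeue(), ahead of rte_cryptodev_dequeue_burst(): */
	while (cqp->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = cqp->ol_pkts[--cqp->ol_pkts_cnt];
		priv = get_priv(pkt);
		if (xform_func(pkt, priv->sa, &priv->cop) != 0) {
			rte_pktmbuf_free(pkt);	/* post-processing failed */
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}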

diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 70bb81f..77ab232 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -58,6 +58,9 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	struct rte_crypto_sym_op *sym_cop;
 	int32_t payload_len, ip_hdr_len;
 
+	if (sa->type == RTE_SECURITY_SESS_ETH_INLINE_CRYPTO)
+		return 0;
+
 	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(sa != NULL);
 	RTE_ASSERT(cop != NULL);
@@ -175,6 +178,14 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 	RTE_ASSERT(sa != NULL);
 	RTE_ASSERT(cop != NULL);
 
+	if (sa->type == RTE_SECURITY_SESS_ETH_INLINE_CRYPTO) {
+		if (m->ol_flags & PKT_RX_SECURITY_OFFLOAD
+				&& m->ol_flags & PKT_RX_SECURITY_OFFLOAD_FAILED)
+			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		else
+			cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+
 	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
 		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
 		return -1;
@@ -321,6 +332,9 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	esp->spi = rte_cpu_to_be_32(sa->spi);
 	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
 
+	if (sa->type == RTE_SECURITY_SESS_ETH_INLINE_CRYPTO)
+		return 0;
+
 	uint64_t *iv = (uint64_t *)(esp + 1);
 
 	sym_cop = get_sym_cop(cop);
@@ -419,9 +433,13 @@ esp_outbound_post(struct rte_mbuf *m __rte_unused,
 	RTE_ASSERT(sa != NULL);
 	RTE_ASSERT(cop != NULL);
 
-	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
-		RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
-		return -1;
+	if (sa->type == RTE_SECURITY_SESS_ETH_INLINE_CRYPTO) {
+		m->ol_flags |= PKT_TX_SECURITY_OFFLOAD;
+	} else {
+		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+			RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+			return -1;
+		}
 	}
 
 	return 0;
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index c8fde1c..b14b23d 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -58,13 +58,17 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 	key.cipher_algo = (uint8_t)sa->cipher_algo;
 	key.auth_algo = (uint8_t)sa->auth_algo;
 
-	ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
-			(void **)&cdev_id_qp);
-	if (ret < 0) {
-		RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
-				"auth_algo %u\n", key.lcore_id, key.cipher_algo,
-				key.auth_algo);
-		return -1;
+	if (sa->type == RTE_SECURITY_SESS_NONE) {
+		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
+				(void **)&cdev_id_qp);
+		if (ret < 0) {
+			RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, "
+					"cipher_algo %u, "
+					"auth_algo %u\n",
+					key.lcore_id, key.cipher_algo,
+					key.auth_algo);
+			return -1;
+		}
 	}
 
 	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
@@ -79,7 +83,8 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 				sa->crypto_session, sa->xforms,
 				ipsec_ctx->session_pool);
 
-		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
+		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
+				&cdev_info);
 		if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
 			ret = rte_cryptodev_queue_pair_attach_sym_session(
 					ipsec_ctx->tbl[cdev_id_qp].id,
@@ -146,6 +151,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 	struct ipsec_mbuf_metadata *priv;
 	struct rte_crypto_sym_op *sym_cop;
 	struct ipsec_sa *sa;
+	struct cdev_qp *cqp;
 
 	for (i = 0; i < nb_pkts; i++) {
 		if (unlikely(sas[i] == NULL)) {
@@ -202,8 +208,31 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 			}
 			break;
 		case RTE_SECURITY_SESS_ETH_PROTO_OFFLOAD:
-		case RTE_SECURITY_SESS_ETH_INLINE_CRYPTO:
 			break;
+		case RTE_SECURITY_SESS_ETH_INLINE_CRYPTO:
+			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+			rte_prefetch0(&priv->sym_cop);
+
+			if ((unlikely(sa->sec_session == NULL)) &&
+					create_session(ipsec_ctx, sa)) {
+				rte_pktmbuf_free(pkts[i]);
+				continue;
+			}
+
+			rte_security_attach_session(&priv->cop,
+					sa->sec_session);
+
+			ret = xform_func(pkts[i], sa, &priv->cop);
+			if (unlikely(ret)) {
+				rte_pktmbuf_free(pkts[i]);
+				continue;
+			}
+
+			cqp = &ipsec_ctx->tbl[sa->cdev_id_qp];
+			cqp->ol_pkts[cqp->ol_pkts_cnt++] = pkts[i];
+			continue;
 		}
 
 		RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
@@ -228,6 +257,19 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
 			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
 
+		while (cqp->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
+			pkt = cqp->ol_pkts[--cqp->ol_pkts_cnt];
+			rte_prefetch0(pkt);
+			priv = get_priv(pkt);
+			sa = priv->sa;
+			ret = xform_func(pkt, sa, &priv->cop);
+			if (unlikely(ret)) {
+				rte_pktmbuf_free(pkt);
+				continue;
+			}
+			pkts[nb_pkts++] = pkt;
+		}
+
 		if (cqp->in_flight == 0)
 			continue;
 
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 6291d86..685304b 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -142,6 +142,8 @@ struct cdev_qp {
 	uint16_t in_flight;
 	uint16_t len;
 	struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+	struct rte_mbuf *ol_pkts[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+	uint16_t ol_pkts_cnt;
 };
 
 struct ipsec_ctx {
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 851262b..11b31d0 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -613,11 +613,13 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 		if (status->status < 0)
 			return;
 	} else {
-		APP_CHECK(cipher_algo_p == 1, status, "missing cipher or AEAD options");
+		APP_CHECK(cipher_algo_p == 1, status,
+			  "missing cipher or AEAD options");
 		if (status->status < 0)
 			return;
 
-		APP_CHECK(auth_algo_p == 1, status, "missing auth or AEAD options");
+		APP_CHECK(auth_algo_p == 1, status,
+			"missing auth or AEAD options");
 		if (status->status < 0)
 			return;
 	}
@@ -763,14 +765,31 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
 		}
 
-		if (sa->type == RTE_SECURITY_SESS_CRYPTO_PROTO_OFFLOAD) {
-			sa_ctx->xf[idx].c.cipher_alg = sa->cipher_algo;
-			sa_ctx->xf[idx].c.auth_alg = sa->auth_algo;
-			sa_ctx->xf[idx].c.cipher_key.data = sa->cipher_key;
-			sa_ctx->xf[idx].c.auth_key.data = sa->auth_key;
-			sa_ctx->xf[idx].c.cipher_key.length =
+		if (sa->type == RTE_SECURITY_SESS_CRYPTO_PROTO_OFFLOAD ||
+			sa->type == RTE_SECURITY_SESS_ETH_INLINE_CRYPTO) {
+
+			if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+				sa_ctx->xf[idx].c.aead_alg =
+						sa->aead_algo;
+				sa_ctx->xf[idx].c.aead_key.data =
+						sa->cipher_key;
+				sa_ctx->xf[idx].c.aead_key.length =
+						sa->cipher_key_len;
+
+			} else {
+				sa_ctx->xf[idx].c.cipher_alg = sa->cipher_algo;
+				sa_ctx->xf[idx].c.auth_alg = sa->auth_algo;
+				sa_ctx->xf[idx].c.cipher_key.data =
+						sa->cipher_key;
+				sa_ctx->xf[idx].c.auth_key.data =
+						sa->auth_key;
+				sa_ctx->xf[idx].c.cipher_key.length =
 						sa->cipher_key_len;
-			sa_ctx->xf[idx].c.auth_key.length = sa->auth_key_len;
+				sa_ctx->xf[idx].c.auth_key.length =
+						sa->auth_key_len;
+				sa_ctx->xf[idx].c.salt = sa->salt;
+			}
+
 			sa_ctx->xf[idx].c.op = (inbound == 1)?
 						RTE_SECURITY_IPSEC_OP_DECAP :
 						RTE_SECURITY_IPSEC_OP_ENCAP;
@@ -835,9 +854,11 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 			}
 
 			if (inbound) {
-				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+				sa_ctx->xf[idx].b.type =
+						RTE_CRYPTO_SYM_XFORM_CIPHER;
 				sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
-				sa_ctx->xf[idx].b.cipher.key.data = sa->cipher_key;
+				sa_ctx->xf[idx].b.cipher.key.data =
+						sa->cipher_key;
 				sa_ctx->xf[idx].b.cipher.key.length =
 					sa->cipher_key_len;
 				sa_ctx->xf[idx].b.cipher.op =
@@ -846,7 +867,8 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 				sa_ctx->xf[idx].b.cipher.iv.offset = IV_OFFSET;
 				sa_ctx->xf[idx].b.cipher.iv.length = iv_length;
 
-				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+				sa_ctx->xf[idx].a.type =
+						RTE_CRYPTO_SYM_XFORM_AUTH;
 				sa_ctx->xf[idx].a.auth.algo = sa->auth_algo;
 				sa_ctx->xf[idx].a.auth.key.data = sa->auth_key;
 				sa_ctx->xf[idx].a.auth.key.length =
@@ -856,9 +878,11 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 				sa_ctx->xf[idx].a.auth.op =
 					RTE_CRYPTO_AUTH_OP_VERIFY;
 			} else { /* outbound */
-				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+				sa_ctx->xf[idx].a.type =
+					RTE_CRYPTO_SYM_XFORM_CIPHER;
 				sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
-				sa_ctx->xf[idx].a.cipher.key.data = sa->cipher_key;
+				sa_ctx->xf[idx].a.cipher.key.data =
+					sa->cipher_key;
 				sa_ctx->xf[idx].a.cipher.key.length =
 					sa->cipher_key_len;
 				sa_ctx->xf[idx].a.cipher.op =
@@ -867,9 +891,12 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 				sa_ctx->xf[idx].a.cipher.iv.offset = IV_OFFSET;
 				sa_ctx->xf[idx].a.cipher.iv.length = iv_length;
 
-				sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_AUTH;
-				sa_ctx->xf[idx].b.auth.algo = sa->auth_algo;
-				sa_ctx->xf[idx].b.auth.key.data = sa->auth_key;
+				sa_ctx->xf[idx].b.type =
+					RTE_CRYPTO_SYM_XFORM_AUTH;
+				sa_ctx->xf[idx].b.auth.algo =
+					sa->auth_algo;
+				sa_ctx->xf[idx].b.auth.key.data =
+						sa->auth_key;
 				sa_ctx->xf[idx].b.auth.key.length =
 					sa->auth_key_len;
 				sa_ctx->xf[idx].b.auth.digest_length =
@@ -991,8 +1018,8 @@ single_inbound_lookup(struct ipsec_sa *sadb, struct rte_mbuf *pkt,
 	case IP6_TUNNEL:
 		src6_addr = RTE_PTR_ADD(ip, offsetof(struct ip6_hdr, ip6_src));
 		if ((ip->ip_v == IP6_VERSION) &&
-				!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
-				!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
+			!memcmp(&sa->src.ip.ip6.ip6, src6_addr, 16) &&
+			!memcmp(&sa->dst.ip.ip6.ip6, src6_addr + 16, 16))
 			*sa_ret = sa;
 		break;
 	case TRANSPORT:
-- 
2.7.5
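
A note on the sa.c hunks above: SAs of the crypto or inline offload
types now fill the proposed rte_security IPsec transform directly.
AES-GCM is keyed as a single AEAD transform, with the parsed cipher
key doubling as the AEAD key; every other combination keeps separate
cipher/auth algorithms and keys, and only that branch carries the
salt. A condensed sketch, with field names as this series defines
them and xf standing in for &sa_ctx->xf[idx]:

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		xf->c.aead_alg = sa->aead_algo;
		xf->c.aead_key.data = sa->cipher_key;
		xf->c.aead_key.length = sa->cipher_key_len;
	} else {
		xf->c.cipher_alg = sa->cipher_algo;
		xf->c.cipher_key.data = sa->cipher_key;
		xf->c.cipher_key.length = sa->cipher_key_len;
		xf->c.auth_alg = sa->auth_algo;
		xf->c.auth_key.data = sa->auth_key;
		xf->c.auth_key.length = sa->auth_key_len;
		xf->c.salt = sa->salt;
	}
	xf->c.op = (inbound == 1) ? RTE_SECURITY_IPSEC_OP_DECAP :
			RTE_SECURITY_IPSEC_OP_ENCAP;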


