[dpdk-dev] [PATCH 4/5] lib/gso: add GRE GSO support

Jiayu Hu jiayu.hu at intel.com
Thu Aug 24 16:15:43 CEST 2017


From: Mark Kavanagh <mark.b.kavanagh at intel.com>

This patch adds GSO support for GRE-tunneled packets. Supported GRE
packets must contain an outer IPv4 header and inner TCP/IPv4 headers,
and may also carry a single VLAN tag. GRE GSO assumes that all input
packets have correct checksums and doesn't update checksums for output
packets. Additionally, it doesn't process IP-fragmented packets.
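
For reference, here is a rough sketch (not part of the patch) of the
header-length fields that the parser in gso_common.c (parse_ipv4()/
parse_gre() below) is expected to end up writing for a minimal
Ethernet/IPv4/GRE/IPv4/TCP input packet. The byte values are purely
illustrative: no VLAN tag, no IP options, no GRE key/sequence fields.

    #include <rte_mbuf.h>

    /* Illustrative only: how the GSO parser maps a GRE-tunneled TCP/IPv4
     * packet onto the mbuf length/type fields (values assume minimal
     * headers).
     */
    static void
    example_gre_hdr_lens(struct rte_mbuf *m)
    {
    	m->outer_l2_len = 14;	/* outer Ethernet                        */
    	m->outer_l3_len = 20;	/* outer IPv4, i.e. IPv4_HDR_LEN()       */
    	m->l2_len = 4;		/* GRE base header, sizeof(struct gre_hdr) */
    	m->l3_len = 20;		/* inner IPv4                            */
    	m->l4_len = 20;		/* inner TCP, no options                 */
    	m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
    			RTE_PTYPE_TUNNEL_GRE |
    			RTE_PTYPE_INNER_L3_IPV4 |
    			RTE_PTYPE_INNER_L4_TCP;
    }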

As with VxLAN GSO, GRE GSO uses a two-segment mbuf to organize each
output packet, which requires multi-segment mbuf support in the TX
functions of the NIC driver. Also, when a packet is GSOed, GRE GSO
reduces its mbuf refcnt by 1; as a result, once all of its GSO output
segments are freed, the original packet is freed automatically.
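
The following usage sketch (again, not part of the patch) shows how an
application might enable GRE GSO. It assumes the rte_gso_segment()
prototype and rte_gso_ctx layout introduced earlier in this series:
gso_types/gso_size appear in the rte_gso.c hunk below, but the
direct_pool/indirect_pool field names, the by-value ctx argument, the
gre_gso_xmit()/GSO_MAX_SEGS names and the 1400-byte gso_size are
assumptions for illustration only.

    #include <rte_gso.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    #define GSO_MAX_SEGS 64	/* illustrative output array size */

    static int
    gre_gso_xmit(struct rte_mbuf *pkt,
    		struct rte_mempool *direct_pool,
    		struct rte_mempool *indirect_pool)
    {
    	struct rte_mbuf *segs[GSO_MAX_SEGS];
    	struct rte_gso_ctx ctx = {
    		.direct_pool = direct_pool,
    		.indirect_pool = indirect_pool,
    		.gso_types = RTE_GSO_IPV4_GRE_TCP_IPV4,
    		.gso_size = 1400,	/* max length of each output segment */
    	};
    	int ret;

    	ret = rte_gso_segment(pkt, ctx, segs, GSO_MAX_SEGS);
    	if (ret < 0)
    		return ret;	/* invalid parameters */

    	/* If the packet was GSOed (ret > 1), segs[] holds ret two-segment
    	 * mbufs, so they must be sent on a TX queue whose driver supports
    	 * multi-segment mbufs. The ret == 1 (no segmentation) case follows
    	 * rte_gso_segment()'s description in the first patch of this series.
    	 */
    	return ret;
    }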

Signed-off-by: Mark Kavanagh <mark.b.kavanagh at intel.com>
Signed-off-by: Jiayu Hu <jiayu.hu at intel.com>
---
 lib/librte_gso/gso_common.c | 66 +++++++++++++++++++++++++++++++++++++++++++--
 lib/librte_gso/gso_common.h | 21 +++++++++++++++
 lib/librte_gso/rte_gso.c    |  5 +++-
 lib/librte_gso/rte_gso.h    |  4 +++
 4 files changed, 93 insertions(+), 3 deletions(-)

diff --git a/lib/librte_gso/gso_common.c b/lib/librte_gso/gso_common.c
index 65cec44..b3e7f9d 100644
--- a/lib/librte_gso/gso_common.c
+++ b/lib/librte_gso/gso_common.c
@@ -37,6 +37,7 @@
 #include <rte_malloc.h>
 
 #include <rte_ether.h>
+#include <rte_gre.h>
 #include <rte_ip.h>
 #include <rte_tcp.h>
 #include <rte_udp.h>
@@ -159,6 +160,8 @@ gso_do_segment(struct rte_mbuf *pkt,
 
 static inline void parse_ethernet(struct ether_hdr *eth_hdr,
 		struct rte_mbuf *pkt);
+static inline void parse_ipv4(struct ipv4_hdr *ipv4_hdr,
+		struct rte_mbuf *pkt);
 
 static inline void
 parse_vxlan(struct udp_hdr *udp_hdr, struct rte_mbuf *pkt)
@@ -190,15 +193,29 @@ parse_udp(struct udp_hdr *udp_hdr, struct rte_mbuf *pkt)
 }
 
 static inline void
+parse_gre(struct gre_hdr *gre_hdr, struct rte_mbuf *pkt)
+{
+	struct ipv4_hdr *ipv4_hdr;
+
+	if (gre_hdr->proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+		ipv4_hdr = (struct ipv4_hdr *)(gre_hdr + 1);
+		pkt->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
+		parse_ipv4(ipv4_hdr, pkt);
+	}
+}
+
+static inline void
 parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct rte_mbuf *pkt)
 {
+	struct gre_hdr *gre_hdr;
 	struct tcp_hdr *tcp_hdr;
 	struct udp_hdr *udp_hdr;
 
 	switch (ipv4_hdr->next_proto_id) {
 	case IPPROTO_TCP:
-		if (IS_VXLAN_PKT(pkt)) {
-			pkt->outer_l3_len = pkt->l3_len;
+		if (IS_TUNNEL_PKT(pkt)) {
+			if (IS_VXLAN_PKT(pkt))
+				pkt->outer_l3_len = pkt->l3_len;
 			pkt->packet_type |= RTE_PTYPE_INNER_L4_TCP;
 		} else
 			pkt->packet_type |= RTE_PTYPE_L4_TCP;
@@ -211,6 +228,14 @@ parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct rte_mbuf *pkt)
 		udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
 		parse_udp(udp_hdr, pkt);
 		break;
+	case IPPROTO_GRE:
+		gre_hdr = (struct gre_hdr *)(ipv4_hdr + 1);
+		pkt->outer_l2_len = pkt->l2_len;
+		pkt->outer_l3_len = IPv4_HDR_LEN(ipv4_hdr);
+		pkt->l2_len = sizeof(*gre_hdr);
+		pkt->packet_type |= RTE_PTYPE_TUNNEL_GRE;
+		parse_gre(gre_hdr, pkt);
+		break;
 	}
 }
 
@@ -343,6 +368,43 @@ gso_update_pkt_headers(struct rte_mbuf *pkt, uint16_t nb_segments,
 			sent_seq += seg->next->data_len;
 		}
 		break;
+	case ETHER_VLAN_IPv4_GRE_IPv4_TCP_PKT:
+	case ETHER_IPv4_GRE_IPv4_TCP_PKT:
+		outer_ipv4_hdr =
+			(struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
+				pkt->outer_l2_len);
+		ipv4_hdr = (struct ipv4_hdr *)((char *)outer_ipv4_hdr +
+				pkt->outer_l3_len + pkt->l2_len);
+		tcp_hdr = (struct tcp_hdr *)(ipv4_hdr + 1);
+
+		/* Retrieve values from original packet */
+		id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
+		outer_id = rte_be_to_cpu_16(outer_ipv4_hdr->packet_id);
+		sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
+
+		for (i = 0; i < nb_segments; i++) {
+			seg = out_segments[i];
+
+			/* Update outer IPv4 header */
+			offset = seg->outer_l2_len;
+			update_ipv4_header(rte_pktmbuf_mtod(seg, char *),
+					offset, seg->pkt_len, outer_id);
+			outer_id++;
+
+			/* Update inner IPv4 header */
+			offset += seg->outer_l3_len + seg->l2_len;
+			update_ipv4_header(rte_pktmbuf_mtod(seg, char *),
+					offset, seg->pkt_len, id);
+			id++;
+
+			/* Update inner TCP header */
+			offset += seg->l3_len;
+			update_tcp_header(rte_pktmbuf_mtod(seg, char *),
+					offset, sent_seq, i < tail_seg_idx);
+
+			sent_seq += seg->next->data_len;
+		}
+		break;
 	case ETHER_VLAN_IPv4_TCP_PKT:
 	case ETHER_IPv4_TCP_PKT:
 		ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
diff --git a/lib/librte_gso/gso_common.h b/lib/librte_gso/gso_common.h
index 0ad95d3..2ed264a 100644
--- a/lib/librte_gso/gso_common.h
+++ b/lib/librte_gso/gso_common.h
@@ -87,6 +87,21 @@
 		RTE_PTYPE_INNER_L3_IPV4 | \
 		RTE_PTYPE_INNER_L4_TCP)
 
+/* GRE packet. */
+#define ETHER_IPv4_GRE_IPv4_TCP_PKT (\
+		ETHER_IPv4_PKT          | \
+		RTE_PTYPE_TUNNEL_GRE    | \
+		RTE_PTYPE_INNER_L3_IPV4 | \
+		RTE_PTYPE_INNER_L4_TCP)
+
+/* GRE packet with VLAN tag. */
+#define ETHER_VLAN_IPv4_GRE_IPv4_TCP_PKT (\
+		RTE_PTYPE_L2_ETHER_VLAN | \
+		RTE_PTYPE_L3_IPV4       | \
+		RTE_PTYPE_TUNNEL_GRE    | \
+		RTE_PTYPE_INNER_L3_IPV4 | \
+		RTE_PTYPE_INNER_L4_TCP)
+
 #define IS_VLAN_PKT(pkt) ((pkt->packet_type & RTE_PTYPE_L2_ETHER_VLAN) == \
 		RTE_PTYPE_L2_ETHER_VLAN)
 #define IS_INNER_VLAN_PKT(pkt) (\
@@ -96,6 +111,12 @@
 #define VXLAN_DEFAULT_PORT 4789
 #define IS_VXLAN_PKT(pkt) ((pkt->packet_type & RTE_PTYPE_TUNNEL_VXLAN) == \
 		RTE_PTYPE_TUNNEL_VXLAN)
+
+#define IS_GRE_PKT(pkt) ((pkt->packet_type & RTE_PTYPE_TUNNEL_GRE) == \
+		RTE_PTYPE_TUNNEL_GRE)
+
+#define IS_TUNNEL_PKT(pkt) ((pkt->packet_type & RTE_PTYPE_TUNNEL_VXLAN) | \
+		(pkt->packet_type & RTE_PTYPE_TUNNEL_GRE))
 /**
  * Internal function which parses a packet, setting outer_l2/l3_len and
  * l2/l3/l4_len and packet_type.
diff --git a/lib/librte_gso/rte_gso.c b/lib/librte_gso/rte_gso.c
index f110f18..244bbf6 100644
--- a/lib/librte_gso/rte_gso.c
+++ b/lib/librte_gso/rte_gso.c
@@ -53,7 +53,8 @@ rte_gso_segment(struct rte_mbuf *pkt,
 		return -EINVAL;
 
 	if ((gso_ctx.gso_types & (RTE_GSO_TCP_IPV4 |
-					RTE_GSO_IPV4_VXLAN_TCP_IPV4)) == 0 ||
+					RTE_GSO_IPV4_VXLAN_TCP_IPV4 |
+					RTE_GSO_IPV4_GRE_TCP_IPV4)) == 0 ||
 			gso_ctx.gso_size >= pkt->pkt_len ||
 			gso_ctx.gso_size == 0)
 		return 1;
@@ -77,6 +78,8 @@ rte_gso_segment(struct rte_mbuf *pkt,
 	case ETHER_VLAN_IPv4_UDP_VXLAN_IPv4_TCP_PKT:
 	case ETHER_IPv4_UDP_VXLAN_VLAN_IPv4_TCP_PKT:
 	case ETHER_IPv4_UDP_VXLAN_IPv4_TCP_PKT:
+	case ETHER_VLAN_IPv4_GRE_IPv4_TCP_PKT:
+	case ETHER_IPv4_GRE_IPv4_TCP_PKT:
 		nb_segments = gso_tunnel_segment(pkt, gso_size,
 				direct_pool, indirect_pool,
 				pkts_out, nb_pkts_out);
diff --git a/lib/librte_gso/rte_gso.h b/lib/librte_gso/rte_gso.h
index e1b2c23..86ca790 100644
--- a/lib/librte_gso/rte_gso.h
+++ b/lib/librte_gso/rte_gso.h
@@ -52,6 +52,10 @@ extern "C" {
 /**< GSO flag for VxLAN packets that contain outer IPv4, and inner
  * TCP/IPv4 headers (plus optional inner and/or outer VLAN tags).
  */
+#define RTE_GSO_IPV4_GRE_TCP_IPV4 (1ULL << 2)
+/**< GSO flag for GRE packets that contain outer IPv4, and inner
+ * TCP/IPv4 headers (with optional outer VLAN tag).
+ */
 
 /**
  * GSO context structure.
-- 
2.7.4
