[dpdk-dev] [PATCH v3 5/5] app/testpmd: enable TCP/IPv4, VxLAN and GRE GSO

Jiayu Hu jiayu.hu at intel.com
Tue Sep 12 04:43:31 CEST 2017


This patch adds GSO support to the csum forwarding engine. Oversized
packets transmitted over a GSO-enabled port will undergo segmentation
(with the exception of packet types unsupported by the GSO library).
GSO support is disabled by default.

GSO support may be toggled on a per-port basis, using the command:

        "set port <port_id> gso on|off"

The maximum packet length (including the packet header and payload) for
GSO segments may be set with the command:

        "set gso segsz <length>"

The GSO configuration of a given port may be displayed with the command:

        "show port <port_id> gso"

Signed-off-by: Jiayu Hu <jiayu.hu at intel.com>
Signed-off-by: Mark Kavanagh <mark.b.kavanagh at intel.com>
---
 app/test-pmd/cmdline.c  | 178 ++++++++++++++++++++++++++++++++++++++++++++++++
 app/test-pmd/config.c   |  24 +++++++
 app/test-pmd/csumonly.c | 102 +++++++++++++++++++++++++--
 app/test-pmd/testpmd.c  |  16 +++++
 app/test-pmd/testpmd.h  |  10 +++
 5 files changed, 326 insertions(+), 4 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index cd8c358..03b98a3 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -431,6 +431,17 @@ static void cmd_help_long_parsed(void *parsed_result,
 			"    Set max flow number and max packet number per-flow"
 			" for GRO.\n\n"
 
+			"set port (port_id) gso (on|off)\n"
+			"    Enable or disable Generic Segmentation Offload in"
+			" the csum forwarding engine.\n\n"
+
+			"set gso segsz (length)\n"
+			"    Set max packet length for output GSO segments,"
+			" including packet header and payload.\n\n"
+
+			"show port (port_id) gso\n"
+			"    Show GSO configuration.\n\n"
+
 			"set fwd (%s)\n"
 			"    Set packet forwarding mode.\n\n"
 
@@ -3963,6 +3974,170 @@ cmdline_parse_inst_t cmd_gro_set = {
 	},
 };
 
+/* *** ENABLE/DISABLE GSO *** */
+struct cmd_gso_enable_result {
+	cmdline_fixed_string_t cmd_set;
+	cmdline_fixed_string_t cmd_port;
+	cmdline_fixed_string_t cmd_keyword;
+	cmdline_fixed_string_t cmd_mode;
+	uint8_t cmd_pid;
+};
+
+static void
+cmd_gso_enable_parsed(void *parsed_result,
+		__attribute__((unused)) struct cmdline *cl,
+		__attribute__((unused)) void *data)
+{
+	struct cmd_gso_enable_result *res;
+
+	res = parsed_result;
+	if (!strcmp(res->cmd_keyword, "gso"))
+		setup_gso(res->cmd_mode, res->cmd_pid);
+}
+
+cmdline_parse_token_string_t cmd_gso_enable_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+			cmd_set, "set");
+cmdline_parse_token_string_t cmd_gso_enable_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+			cmd_port, "port");
+cmdline_parse_token_string_t cmd_gso_enable_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+			cmd_keyword, "gso");
+cmdline_parse_token_string_t cmd_gso_enable_mode =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result,
+			cmd_mode, "on#off");
+cmdline_parse_token_num_t cmd_gso_enable_pid =
+	TOKEN_NUM_INITIALIZER(struct cmd_gso_enable_result,
+			cmd_pid, UINT8);
+
+cmdline_parse_inst_t cmd_gso_enable = {
+	.f = cmd_gso_enable_parsed,
+	.data = NULL,
+	.help_str = "set port <port_id> gso on|off",
+	.tokens = {
+		(void *)&cmd_gso_enable_set,
+		(void *)&cmd_gso_enable_port,
+		(void *)&cmd_gso_enable_pid,
+		(void *)&cmd_gso_enable_keyword,
+		(void *)&cmd_gso_enable_mode,
+		NULL,
+	},
+};
+
+/* *** SET MAX PACKET LENGTH FOR GSO SEGMENTS *** */
+struct cmd_gso_size_result {
+	cmdline_fixed_string_t cmd_set;
+	cmdline_fixed_string_t cmd_keyword;
+	cmdline_fixed_string_t cmd_segsz;
+	uint16_t cmd_size;
+};
+
+static void
+cmd_gso_size_parsed(void *parsed_result,
+		       __attribute__((unused)) struct cmdline *cl,
+		       __attribute__((unused)) void *data)
+{
+	struct cmd_gso_size_result *res = parsed_result;
+
+	if (test_done == 0) {
+		printf("Before setting GSO segsz, please stop forwarding first\n");
+		return;
+	}
+
+	if (!strcmp(res->cmd_keyword, "gso") &&
+			!strcmp(res->cmd_segsz, "segsz")) {
+		if (res->cmd_size == 0) {
+			printf("gso_size should be larger than 0."
+					" Please input a valid value\n");
+		} else
+			gso_max_segment_size = res->cmd_size;
+	}
+}
+
+cmdline_parse_token_string_t cmd_gso_size_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result,
+				cmd_set, "set");
+cmdline_parse_token_string_t cmd_gso_size_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result,
+				cmd_keyword, "gso");
+cmdline_parse_token_string_t cmd_gso_size_segsz =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result,
+				cmd_segsz, "segsz");
+cmdline_parse_token_num_t cmd_gso_size_size =
+	TOKEN_NUM_INITIALIZER(struct cmd_gso_size_result,
+				cmd_size, UINT16);
+
+cmdline_parse_inst_t cmd_gso_size = {
+	.f = cmd_gso_size_parsed,
+	.data = NULL,
+	.help_str = "set gso segsz <length>",
+	.tokens = {
+		(void *)&cmd_gso_size_set,
+		(void *)&cmd_gso_size_keyword,
+		(void *)&cmd_gso_size_segsz,
+		(void *)&cmd_gso_size_size,
+		NULL,
+	},
+};
+
+/* *** SHOW GSO CONFIGURATION *** */
+struct cmd_gso_show_result {
+	cmdline_fixed_string_t cmd_show;
+	cmdline_fixed_string_t cmd_port;
+	cmdline_fixed_string_t cmd_keyword;
+	uint8_t cmd_pid;
+};
+
+static void
+cmd_gso_show_parsed(void *parsed_result,
+		       __attribute__((unused)) struct cmdline *cl,
+		       __attribute__((unused)) void *data)
+{
+	struct cmd_gso_show_result *res = parsed_result;
+
+	if (!rte_eth_dev_is_valid_port(res->cmd_pid)) {
+		printf("invalid port id %u\n", res->cmd_pid);
+		return;
+	}
+
+	if (!strcmp(res->cmd_keyword, "gso")) {
+		if (gso_ports[res->cmd_pid].enable) {
+			printf("Max GSO segment size: %uB\n"
+					"Supported GSO protocols: TCP/IPv4,"
+					" VxLAN and GRE\n",
+					gso_max_segment_size);
+		} else
+			printf("Port %u does not have GSO enabled\n", res->cmd_pid);
+	}
+}
+
+cmdline_parse_token_string_t cmd_gso_show_show =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result,
+				cmd_show, "show");
+cmdline_parse_token_string_t cmd_gso_show_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result,
+				cmd_port, "port");
+cmdline_parse_token_string_t cmd_gso_show_keyword =
+	TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result,
+				cmd_keyword, "gso");
+cmdline_parse_token_num_t cmd_gso_show_pid =
+	TOKEN_NUM_INITIALIZER(struct cmd_gso_show_result,
+				cmd_pid, UINT8);
+
+cmdline_parse_inst_t cmd_gso_show = {
+	.f = cmd_gso_show_parsed,
+	.data = NULL,
+	.help_str = "show port <port_id> gso",
+	.tokens = {
+		(void *)&cmd_gso_show_show,
+		(void *)&cmd_gso_show_port,
+		(void *)&cmd_gso_show_pid,
+		(void *)&cmd_gso_show_keyword,
+		NULL,
+	},
+};
+
 /* *** ENABLE/DISABLE FLUSH ON RX STREAMS *** */
 struct cmd_set_flush_rx {
 	cmdline_fixed_string_t set;
@@ -14251,6 +14426,9 @@ cmdline_parse_ctx_t main_ctx[] = {
 	(cmdline_parse_inst_t *)&cmd_tunnel_tso_show,
 	(cmdline_parse_inst_t *)&cmd_enable_gro,
 	(cmdline_parse_inst_t *)&cmd_gro_set,
+	(cmdline_parse_inst_t *)&cmd_gso_enable,
+	(cmdline_parse_inst_t *)&cmd_gso_size,
+	(cmdline_parse_inst_t *)&cmd_gso_show,
 	(cmdline_parse_inst_t *)&cmd_link_flow_control_set,
 	(cmdline_parse_inst_t *)&cmd_link_flow_control_set_rx,
 	(cmdline_parse_inst_t *)&cmd_link_flow_control_set_tx,
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 3ae3e1c..3434346 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2454,6 +2454,30 @@ setup_gro(const char *mode, uint8_t port_id)
 	}
 }
 
+void
+setup_gso(const char *mode, uint8_t port_id)
+{
+	if (!rte_eth_dev_is_valid_port(port_id)) {
+		printf("invalid port id %u\n", port_id);
+		return;
+	}
+	if (strcmp(mode, "on") == 0) {
+		if (test_done == 0) {
+			printf("before enabling GSO,"
+					" please stop forwarding first\n");
+			return;
+		}
+		gso_ports[port_id].enable = 1;
+	} else if (strcmp(mode, "off") == 0) {
+		if (test_done == 0) {
+			printf("before disabling GSO,"
+					" please stop forwarding first\n");
+			return;
+		}
+		gso_ports[port_id].enable = 0;
+	}
+}
+
 char*
 list_pkt_forwarding_modes(void)
 {
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 90c8119..8e9a8a1 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -66,10 +66,12 @@
 #include <rte_tcp.h>
 #include <rte_udp.h>
 #include <rte_sctp.h>
+#include <rte_net.h>
 #include <rte_prefetch.h>
 #include <rte_string_fns.h>
 #include <rte_flow.h>
 #include <rte_gro.h>
+#include <rte_gso.h>
 #include "testpmd.h"
 
 #define IP_DEFTTL  64   /* from RFC 1340. */
@@ -103,6 +105,7 @@ struct testpmd_offload_info {
 	uint16_t tso_segsz;
 	uint16_t tunnel_tso_segsz;
 	uint32_t pkt_len;
+	uint32_t packet_type;
 };
 
 /* simplified GRE header */
@@ -129,10 +132,25 @@ parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
 	info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
 	info->l4_proto = ipv4_hdr->next_proto_id;
 
+	if (info->is_tunnel)
+		info->packet_type |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+	else
+		info->packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
 	/* only fill l4_len for TCP, it's useful for TSO */
 	if (info->l4_proto == IPPROTO_TCP) {
 		tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len);
 		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+		if (info->is_tunnel)
+			info->packet_type |= RTE_PTYPE_INNER_L4_TCP;
+		else
+			info->packet_type |= RTE_PTYPE_L4_TCP;
+	} else if (info->l4_proto == IPPROTO_UDP) {
+		if (info->is_tunnel)
+			info->packet_type |= RTE_PTYPE_INNER_L4_UDP;
+		else
+			info->packet_type |= RTE_PTYPE_L4_UDP;
+		info->l4_len = 0;
 	} else
 		info->l4_len = 0;
 }
@@ -146,10 +164,25 @@ parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
 	info->l3_len = sizeof(struct ipv6_hdr);
 	info->l4_proto = ipv6_hdr->proto;
 
+	if (info->is_tunnel)
+		info->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+	else
+		info->packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
 	/* only fill l4_len for TCP, it's useful for TSO */
 	if (info->l4_proto == IPPROTO_TCP) {
 		tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
 		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
+		if (info->is_tunnel)
+			info->packet_type |= RTE_PTYPE_INNER_L4_TCP;
+		else
+			info->packet_type |= RTE_PTYPE_L4_TCP;
+	} else if (info->l4_proto == IPPROTO_UDP) {
+		if (info->is_tunnel)
+			info->packet_type |= RTE_PTYPE_INNER_L4_UDP;
+		else
+			info->packet_type |= RTE_PTYPE_L4_UDP;
+		info->l4_len = 0;
 	} else
 		info->l4_len = 0;
 }
@@ -164,16 +197,26 @@ parse_ethernet(struct ether_hdr *eth_hdr, struct testpmd_offload_info *info)
 {
 	struct ipv4_hdr *ipv4_hdr;
 	struct ipv6_hdr *ipv6_hdr;
+	uint32_t l2_type;
 
 	info->l2_len = sizeof(struct ether_hdr);
 	info->ethertype = eth_hdr->ether_type;
+	if (info->is_tunnel)
+		l2_type = RTE_PTYPE_INNER_L2_ETHER;
+	else
+		l2_type = RTE_PTYPE_L2_ETHER;
 
 	if (info->ethertype == _htons(ETHER_TYPE_VLAN)) {
 		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
 
 		info->l2_len  += sizeof(struct vlan_hdr);
 		info->ethertype = vlan_hdr->eth_proto;
+		if (info->is_tunnel)
+			l2_type = RTE_PTYPE_INNER_L2_ETHER_VLAN;
+		else
+			l2_type = RTE_PTYPE_L2_ETHER_VLAN;
 	}
+	info->packet_type |= l2_type;
 
 	switch (info->ethertype) {
 	case _htons(ETHER_TYPE_IPv4):
@@ -212,6 +255,7 @@ parse_vxlan(struct udp_hdr *udp_hdr,
 	info->outer_l2_len = info->l2_len;
 	info->outer_l3_len = info->l3_len;
 	info->outer_l4_proto = info->l4_proto;
+	info->packet_type |= RTE_PTYPE_TUNNEL_VXLAN;
 
 	eth_hdr = (struct ether_hdr *)((char *)udp_hdr +
 		sizeof(struct udp_hdr) +
@@ -245,6 +289,7 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
 		info->outer_l2_len = info->l2_len;
 		info->outer_l3_len = info->l3_len;
 		info->outer_l4_proto = info->l4_proto;
+		info->packet_type |= RTE_PTYPE_TUNNEL_GRE;
 
 		ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);
 
@@ -258,6 +303,7 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
 		info->outer_l2_len = info->l2_len;
 		info->outer_l3_len = info->l3_len;
 		info->outer_l4_proto = info->l4_proto;
+		info->packet_type |= RTE_PTYPE_TUNNEL_GRE;
 
 		ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);
 
@@ -271,6 +317,7 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
 		info->outer_l2_len = info->l2_len;
 		info->outer_l3_len = info->l3_len;
 		info->outer_l4_proto = info->l4_proto;
+		info->packet_type |= RTE_PTYPE_TUNNEL_GRE;
 
 		eth_hdr = (struct ether_hdr *)((char *)gre_hdr + gre_len);
 
@@ -299,6 +346,7 @@ parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
 	info->outer_ethertype = info->ethertype;
 	info->outer_l2_len = info->l2_len;
 	info->outer_l3_len = info->l3_len;
+	info->packet_type |= RTE_PTYPE_TUNNEL_IP;
 
 	if (ip_version == 4) {
 		parse_ipv4(ipv4_hdr, info);
@@ -627,6 +675,9 @@ static void
 pkt_burst_checksum_forward(struct fwd_stream *fs)
 {
 	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+	struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST];
+	struct rte_gso_ctx *gso_ctx;
+	struct rte_mbuf **tx_pkts_burst;
 	struct rte_port *txp;
 	struct rte_mbuf *m, *p;
 	struct ether_hdr *eth_hdr;
@@ -641,6 +692,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 	uint32_t rx_bad_ip_csum;
 	uint32_t rx_bad_l4_csum;
 	struct testpmd_offload_info info;
+	uint16_t nb_segments = 0;
+	int ret;
 
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
 	uint64_t start_tsc;
@@ -683,6 +736,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		m = pkts_burst[i];
 		info.is_tunnel = 0;
 		info.pkt_len = rte_pktmbuf_pkt_len(m);
+		info.packet_type = 0;
 		tx_ol_flags = 0;
 		rx_ol_flags = m->ol_flags;
 
@@ -790,6 +844,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 			m->tso_segsz = info.tso_segsz;
 		}
 		m->ol_flags = tx_ol_flags;
+		m->packet_type = info.packet_type;
 
 		/* Do split & copy for the packet. */
 		if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
@@ -851,13 +906,51 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		}
 	}
 
+	if (gso_ports[fs->tx_port].enable == 0)
+		tx_pkts_burst = pkts_burst;
+	else {
+		gso_ctx = &(current_fwd_lcore()->gso_ctx);
+		gso_ctx->gso_size = gso_max_segment_size;
+		for (i = 0; i < nb_rx; i++) {
+			ret = rte_gso_segment(pkts_burst[i], *gso_ctx,
+					&gso_segments[nb_segments],
+					GSO_MAX_PKT_BURST - nb_segments);
+			if (ret >= 1)
+				nb_segments += ret;
+			else if (ret < 0) {
+				/* insufficient MBUFs, stop GSO */
+				memcpy(&gso_segments[nb_segments],
+						&pkts_burst[i],
+						sizeof(struct rte_mbuf *) *
+						(nb_rx - i));
+				nb_segments += (nb_rx - i);
+				break;
+			}
+			if (unlikely(nb_rx - i >= GSO_MAX_PKT_BURST -
+						nb_segments)) {
+				/*
+				 * insufficient space in gso_segments,
+				 * stop GSO.
+				 */
+				memcpy(&gso_segments[nb_segments],
+						&pkts_burst[i],
+						sizeof(struct rte_mbuf *) *
+						(nb_rx - i));
+				nb_segments += (nb_rx - i);
+				break;
+			}
+		}
+		tx_pkts_burst = gso_segments;
+		nb_rx = nb_segments;
+	}
+
 	nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
-			pkts_burst, nb_rx);
+			tx_pkts_burst, nb_rx);
 	if (nb_prep != nb_rx)
 		printf("Preparing packet burst to transmit failed: %s\n",
 				rte_strerror(rte_errno));
 
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
+	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
 			nb_prep);
 
 	/*
@@ -868,7 +961,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
 			rte_delay_us(burst_tx_delay_time);
 			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&pkts_burst[nb_tx], nb_rx - nb_tx);
+					&tx_pkts_burst[nb_tx], nb_rx - nb_tx);
 		}
 	}
 	fs->tx_packets += nb_tx;
@@ -881,9 +974,10 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 	if (unlikely(nb_tx < nb_rx)) {
 		fs->fwd_dropped += (nb_rx - nb_tx);
 		do {
-			rte_pktmbuf_free(pkts_burst[nb_tx]);
+			rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
 		} while (++nb_tx < nb_rx);
 	}
+
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
 	end_tsc = rte_rdtsc();
 	core_cycles = (end_tsc - start_tsc);
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 7d40139..dd4b365 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -400,6 +400,9 @@ static int eth_event_callback(uint8_t port_id,
  */
 static int all_ports_started(void);
 
+struct gso_status gso_ports[RTE_MAX_ETHPORTS];
+uint16_t gso_max_segment_size = ETHER_MAX_LEN;
+
 /*
  * Helper function to check if socket is already discovered.
  * If yes, return positive value. If not, return zero.
@@ -570,6 +573,7 @@ init_config(void)
 	unsigned int nb_mbuf_per_pool;
 	lcoreid_t  lc_id;
 	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
+	uint32_t gso_types = 0;
 
 	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
 
@@ -654,6 +658,12 @@ init_config(void)
 
 	init_port_config();
 
+	gso_types = RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L2_ETHER |
+		RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L2_ETHER_VLAN |
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+		RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN |
+		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_TCP |
+		RTE_PTYPE_TUNNEL_GRE;
 	/*
 	 * Records which Mbuf pool to use by each logical core, if needed.
 	 */
@@ -664,6 +674,12 @@ init_config(void)
 		if (mbp == NULL)
 			mbp = mbuf_pool_find(0);
 		fwd_lcores[lc_id]->mbp = mbp;
+		/* initialize GSO context */
+		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
+		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
+		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
+		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN;
+		fwd_lcores[lc_id]->gso_ctx.ipid_flag = RTE_GSO_IPID_INCREASE;
 	}
 
 	/* Configuration of packet forwarding streams. */
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index c9d7739..725af1a 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -36,6 +36,7 @@
 
 #include <rte_pci.h>
 #include <rte_gro.h>
+#include <rte_gso.h>
 
 #define RTE_PORT_ALL            (~(portid_t)0x0)
 
@@ -205,6 +206,7 @@ struct rte_port {
  * CPU id. configuration table.
  */
 struct fwd_lcore {
+	struct rte_gso_ctx gso_ctx;     /**< GSO context */
 	struct rte_mempool *mbp; /**< The mbuf pool to use by this core */
 	streamid_t stream_idx;   /**< index of 1st stream in "fwd_streams" */
 	streamid_t stream_nb;    /**< number of streams in "fwd_streams" */
@@ -442,6 +444,13 @@ struct gro_status {
 };
 extern struct gro_status gro_ports[RTE_MAX_ETHPORTS];
 
+#define GSO_MAX_PKT_BURST 2048
+struct gso_status {
+	uint8_t enable;
+};
+extern struct gso_status gso_ports[RTE_MAX_ETHPORTS];
+extern uint16_t gso_max_segment_size;
+
 static inline unsigned int
 lcore_num(void)
 {
@@ -641,6 +650,7 @@ void get_5tuple_filter(uint8_t port_id, uint16_t index);
 int rx_queue_id_is_invalid(queueid_t rxq_id);
 int tx_queue_id_is_invalid(queueid_t txq_id);
 void setup_gro(const char *mode, uint8_t port_id);
+void setup_gso(const char *mode, uint8_t port_id);
 
 /* Functions to manage the set of filtered Multicast MAC addresses */
 void mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr);
-- 
2.7.4


