[dpdk-dev] [PATCH v2] net/enic: add support for TSO

John Daley johndale at cisco.com
Mon Jan 9 19:58:37 CET 2017


The enic TSO implementation requires that the combined length of the
Ethernet, IP, and TCP headers be passed to the NIC. Other than that, it is
just a matter of setting the MSS and offload mode on a per-packet basis.

In TSO mode, IP and TCP checksums are offloaded even if they were not
requested via mb->ol_flags.

Signed-off-by: John Daley <johndale at cisco.com>
---
v2: document TSO feature for enic

 doc/guides/nics/features/enic.ini |  1 +
 drivers/net/enic/enic_ethdev.c    |  3 +-
 drivers/net/enic/enic_rxtx.c      | 93 ++++++++++++++++++++++++++++++++++-----
 3 files changed, 84 insertions(+), 13 deletions(-)

diff --git a/doc/guides/nics/features/enic.ini b/doc/guides/nics/features/enic.ini
index 86576a7..51b330f 100644
--- a/doc/guides/nics/features/enic.ini
+++ b/doc/guides/nics/features/enic.ini
@@ -10,6 +10,7 @@ Queue start/stop     = Y
 MTU update           = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
+TSO                  = Y
 Promiscuous mode     = Y
 Unicast MAC filter   = Y
 Multicast MAC filter = Y
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index e5ceb98..c3ba2aa 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -475,7 +475,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
 		DEV_TX_OFFLOAD_VLAN_INSERT |
 		DEV_TX_OFFLOAD_IPV4_CKSUM  |
 		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
+		DEV_TX_OFFLOAD_TCP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_TSO;
 	device_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
 	};
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index f762a26..ed2b721 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -37,6 +37,9 @@
 #include "enic_compat.h"
 #include "rq_enet_desc.h"
 #include "enic.h"
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
 
 #define RTE_PMD_USE_PREFETCH
 
@@ -129,6 +132,60 @@ enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
 		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
 }
 
+/* Find the offset to L5. This is needed by enic TSO implementation.
+ * Return 0 if not a TCP packet or can't figure out the length.
+ */
+static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
+{
+	struct ether_hdr *eh;
+	struct vlan_hdr *vh;
+	struct ipv4_hdr *ip4;
+	struct ipv6_hdr *ip6;
+	struct tcp_hdr *th;
+	uint8_t hdr_len;
+	uint16_t ether_type;
+
+	/* offset past Ethernet header */
+	eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+	ether_type = eh->ether_type;
+	hdr_len = sizeof(struct ether_hdr);
+	if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+		vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
+		ether_type = vh->eth_proto;
+		hdr_len += sizeof(struct vlan_hdr);
+	}
+
+	/* offset past IP header */
+	switch (rte_be_to_cpu_16(ether_type)) {
+	case ETHER_TYPE_IPv4:
+		ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
+		if (ip4->next_proto_id != IPPROTO_TCP)
+			return 0;
+		hdr_len += (ip4->version_ihl & 0xf) * 4;
+		break;
+	case ETHER_TYPE_IPv6:
+		ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
+		if (ip6->proto != IPPROTO_TCP)
+			return 0;
+		hdr_len += sizeof(struct ipv6_hdr);
+		break;
+	default:
+		return 0;
+	}
+
+	if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
+		return 0;
+
+	/* offset past TCP header */
+	th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
+	hdr_len += (th->data_off >> 4) * 4;
+
+	if (hdr_len > mbuf->pkt_len)
+		return 0;
+
+	return hdr_len;
+}
+
 static inline uint8_t
 enic_cq_rx_check_err(struct cq_desc *cqd)
 {
@@ -462,10 +519,12 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct vnic_wq_buf *buf;
 	unsigned int desc_count;
 	struct wq_enet_desc *descs, *desc_p, desc_tmp;
-	uint16_t mss;
+	uint16_t mss = 0;
 	uint8_t vlan_tag_insert;
 	uint8_t eop;
 	uint64_t bus_addr;
+	uint8_t offload_mode;
+	uint16_t header_len;
 
 	enic_cleanup_wq(enic, wq);
 	wq_desc_avail = vnic_wq_desc_avail(wq);
@@ -487,7 +546,6 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		pkt_len = tx_pkt->pkt_len;
 		data_len = tx_pkt->data_len;
 		ol_flags = tx_pkt->ol_flags;
-		mss = 0;
 		vlan_id = 0;
 		vlan_tag_insert = 0;
 		bus_addr = (dma_addr_t)
@@ -497,13 +555,17 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		desc_p = descs + head_idx;
 
 		eop = (data_len == pkt_len);
-
-		if (ol_flags & ol_flags_mask) {
-			if (ol_flags & PKT_TX_VLAN_PKT) {
-				vlan_tag_insert = 1;
-				vlan_id = tx_pkt->vlan_tci;
+		offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
+		header_len = 0;
+
+		if (tx_pkt->tso_segsz) {
+			header_len = tso_header_len(tx_pkt);
+			if (header_len) {
+				offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
+				mss = tx_pkt->tso_segsz;
 			}
-
+		}
+		if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
 			if (ol_flags & PKT_TX_IP_CKSUM)
 				mss |= ENIC_CALC_IP_CKSUM;
 
@@ -516,8 +578,14 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			}
 		}
 
-		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
-				 eop, 0, vlan_tag_insert, vlan_id, 0);
+		if (ol_flags & PKT_TX_VLAN_PKT) {
+			vlan_tag_insert = 1;
+			vlan_id = tx_pkt->vlan_tci;
+		}
+
+		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
+				 offload_mode, eop, eop, 0, vlan_tag_insert,
+				 vlan_id, 0);
 
 		*desc_p = desc_tmp;
 		buf = &wq->bufs[head_idx];
@@ -537,8 +605,9 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 					   + tx_pkt->data_off);
 				wq_enet_desc_enc((struct wq_enet_desc *)
 						 &desc_tmp, bus_addr, data_len,
-						 mss, 0, 0, eop, eop, 0,
-						 vlan_tag_insert, vlan_id, 0);
+						 mss, 0, offload_mode, eop, eop,
+						 0, vlan_tag_insert, vlan_id,
+						 0);
 
 				*desc_p = desc_tmp;
 				buf = &wq->bufs[head_idx];
-- 
2.10.0



More information about the dev mailing list