[dpdk-dev] [PATCH v3 7/8] i40e: support VxLAN Tx checksum offload

Jijiang Liu jijiang.liu at intel.com
Fri Sep 12 08:33:58 CEST 2014


Support VxLAN Tx checksum offload, which includes:
  - outer L3 (IP) checksum offload
  - inner L3 (IP) checksum offload
  - inner L4 (UDP, TCP and SCTP) checksum offload

A usage sketch is given after the diffstat below.

Signed-off-by: Jijiang Liu <jijiang.liu at intel.com>
Acked-by: Helin Zhang <helin.zhang at intel.com>
Acked-by: Jingjing Wu <jingjing.wu at intel.com>
Acked-by: Jing Chen <jing.d.chen at intel.com>

---
 lib/librte_mbuf/rte_mbuf.h      |    2 +
 lib/librte_pmd_i40e/i40e_rxtx.c |   47 ++++++++++++++++++++++++++++++++++++--
 2 files changed, 46 insertions(+), 3 deletions(-)
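
For reference, here is a minimal sketch (not part of the patch) of how an
application could request this offload using the flags and fields added here.
It assumes an untagged inner Ethernet/IPv4/UDP frame, the current mbuf layout
(outer header lengths in pkt.vlan_macip, inner L3 header length in the mbuf's
reserved field) and the existing PKT_TX_IP_CKSUM/PKT_TX_UDP_CKSUM flags for the
checksums themselves:

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* Illustrative only: mark an mbuf for VxLAN Tx checksum offload. */
static void
setup_vxlan_tx_cksum(struct rte_mbuf *m)
{
	/* outer L2/L3 header lengths, set as for a non-tunnelled packet */
	m->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
	m->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);

	/* the inner L3 header length travels in the reserved field */
	m->reserved = sizeof(struct ipv4_hdr);

	/*
	 * PKT_TX_VXLAN_CKSUM selects the tunnelling context descriptor,
	 * PKT_TX_IP_CKSUM covers the IPv4 header checksum and
	 * PKT_TX_UDP_CKSUM the inner L4 checksum.  Add PKT_TX_IVLAN_PKT
	 * when the inner frame carries a VLAN tag.
	 */
	m->ol_flags |= PKT_TX_VXLAN_CKSUM | PKT_TX_IP_CKSUM |
		       PKT_TX_UDP_CKSUM;
}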

diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 1832e73..212ac3a 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -97,6 +97,8 @@ struct rte_ctrlmbuf {
 #define PKT_RX_IEEE1588_PTP  0x0200 /**< RX IEEE1588 L2 Ethernet PT Packet. */
 #define PKT_RX_IEEE1588_TMST 0x0400 /**< RX IEEE1588 L2/L4 timestamped packet.*/
 
+#define PKT_TX_VXLAN_CKSUM   0x0001 /**< Checksum of TX VxLAN pkt. computed by NIC. */
+#define PKT_TX_IVLAN_PKT     0x0002 /**< TX packet is VxLAN packet with an inner VLAN. */
 #define PKT_TX_VLAN_PKT      0x0800 /**< TX packet is a 802.1q VLAN packet. */
 #define PKT_TX_IP_CKSUM      0x1000 /**< IP cksum of TX pkt. computed by NIC. */
 #define PKT_TX_IPV4_CSUM     0x1000 /**< Alias of PKT_TX_IP_CKSUM. */
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index a1dce74..a382d74 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -412,12 +412,16 @@ i40e_rxd_ptype_to_pkt_flags(uint64_t qword)
 	return ip_ptype_map[ptype];
 }
 
+#define L4TUN_LEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr)\
+			 + sizeof(struct ether_hdr))
 static inline void
 i40e_txd_enable_checksum(uint32_t ol_flags,
 			uint32_t *td_cmd,
 			uint32_t *td_offset,
 			uint8_t l2_len,
-			uint8_t l3_len)
+			uint8_t l3_len,
+			uint8_t inner_l3_len,
+			uint32_t *cd_tunneling)
 {
 	if (!l2_len) {
 		PMD_DRV_LOG(DEBUG, "L2 length set to 0\n");
@@ -430,6 +434,31 @@ i40e_txd_enable_checksum(uint32_t ol_flags,
 		return;
 	}
 
+	/* VxLAN packet TX checksum offload */
+	if (unlikely(ol_flags & PKT_TX_VXLAN_CKSUM)) {
+		uint8_t l4tun_len;
+
+		/* packet with inner VLAN */
+		if (ol_flags  & PKT_TX_IVLAN_PKT)
+			l4tun_len = L4TUN_LEN + sizeof(struct vlan_hdr);
+		else
+			l4tun_len = L4TUN_LEN;
+
+		if (ol_flags & PKT_TX_IPV4_CSUM)
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+		else if (ol_flags & PKT_TX_IPV6)
+			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+
+		/* Now set the ctx descriptor fields */
+		*cd_tunneling |= (l3_len >> 2) <<
+				I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+				I40E_TXD_CTX_UDP_TUNNELING |
+				(l4tun_len >> 1) <<
+				I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+		l3_len = inner_l3_len;
+	}
+
 	/* Enable L3 checksum offloads */
 	if (ol_flags & PKT_TX_IPV4_CSUM) {
 		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
@@ -1063,6 +1092,9 @@ i40e_calc_context_desc(uint16_t flags)
 {
 	uint16_t mask = 0;
 
+	if (flags & PKT_TX_VXLAN_CKSUM)
+		mask |= PKT_TX_VXLAN_CKSUM;
+
 #ifdef RTE_LIBRTE_IEEE1588
 	mask |= PKT_TX_IEEE1588_TMST;
 #endif
@@ -1082,6 +1114,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	volatile struct i40e_tx_desc *txr;
 	struct rte_mbuf *tx_pkt;
 	struct rte_mbuf *m_seg;
+	uint32_t cd_tunneling_params;
 	uint16_t tx_id;
 	uint16_t nb_tx;
 	uint32_t td_cmd;
@@ -1091,6 +1124,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint16_t ol_flags;
 	uint8_t l2_len;
 	uint8_t l3_len;
+	uint8_t inner_l3_len;
 	uint16_t nb_used;
 	uint16_t nb_ctx;
 	uint16_t tx_last;
@@ -1120,6 +1154,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		l2_len = tx_pkt->pkt.vlan_macip.f.l2_len;
 		l3_len = tx_pkt->pkt.vlan_macip.f.l3_len;
 
+		/**
+		 * The reserved field in the mbuf is used to store the
+		 * inner L3 header length.
+		 */
+		inner_l3_len = tx_pkt->reserved;
+
 		/* Calculate the number of context descriptors needed. */
 		nb_ctx = i40e_calc_context_desc(ol_flags);
 
@@ -1166,15 +1206,16 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
 		/* Enable checksum offloading */
+		cd_tunneling_params = 0;
 		i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
-							l2_len, l3_len);
+						l2_len, l3_len, inner_l3_len,
+						&cd_tunneling_params);
 
 		if (unlikely(nb_ctx)) {
 			/* Setup TX context descriptor if required */
 			volatile struct i40e_tx_context_desc *ctx_txd =
 				(volatile struct i40e_tx_context_desc *)\
 							&txr[tx_id];
-			uint32_t cd_tunneling_params = 0;
 			uint16_t cd_l2tag2 = 0;
 			uint64_t cd_type_cmd_tso_mss =
 				I40E_TX_DESC_DTYPE_CONTEXT;
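
As a worked example of the tunnelling-field encoding above (illustrative
numbers, not part of the patch): for a tunnel without an inner VLAN tag,
l4tun_len = sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr) +
sizeof(struct ether_hdr) = 8 + 8 + 14 = 30 bytes, programmed as 30 >> 1 = 15
in the NATLEN field (2-byte units), while a 20-byte outer IPv4 header gives
20 >> 2 = 5 in the EXT_IPLEN field (4-byte words).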
-- 
1.7.7.6


