[dpdk-dev] [PATCH v3 37/40] net/dpaa: add support for checksum offload

Shreyansh Jain shreyansh.jain at nxp.com
Wed Aug 23 16:12:10 CEST 2017


Signed-off-by: Hemant Agrawal <hemant.agrawal at nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain at nxp.com>
---
 doc/guides/nics/features/dpaa.ini |  2 +
 drivers/net/dpaa/dpaa_ethdev.c    |  4 ++
 drivers/net/dpaa/dpaa_rxtx.c      | 89 +++++++++++++++++++++++++++++++++++++++
 drivers/net/dpaa/dpaa_rxtx.h      | 19 +++++++++
 4 files changed, 114 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini b/doc/guides/nics/features/dpaa.ini
index 2ef1b56..23626c0 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -13,6 +13,8 @@ Allmulticast mode    = Y
 Unicast MAC filter   = Y
 RSS hash             = Y
 Flow control         = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
 Packet type parsing  = Y
 Basic stats          = Y
 ARMv8                = Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index bcb69ad..96924b6 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -186,6 +186,10 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 		(DEV_RX_OFFLOAD_IPV4_CKSUM |
 		DEV_RX_OFFLOAD_UDP_CKSUM   |
 		DEV_RX_OFFLOAD_TCP_CKSUM);
+	dev_info->tx_offload_capa =
+		(DEV_TX_OFFLOAD_IPV4_CKSUM  |
+		DEV_TX_OFFLOAD_UDP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_CKSUM);
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 90be40d..0f43bb4 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -200,6 +200,82 @@ static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
 	/* Packet received without stripping the vlan */
 }
 
+static inline void dpaa_checksum(struct rte_mbuf *mbuf)
+{
+	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+	char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
+	struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+	struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+	DPAA_TX_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);
+
+	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+	    RTE_PTYPE_L3_IPV4_EXT)) {
+		ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+		ipv4_hdr->hdr_checksum = 0;
+		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+	} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+		   RTE_PTYPE_L3_IPV6) ||
+		   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+		   RTE_PTYPE_L3_IPV6_EXT))
+		ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
+		struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
+					  mbuf->l3_len);
+		tcp_hdr->cksum = 0;
+		if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+			tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+							       tcp_hdr);
+		else /* assume ethertype == ETHER_TYPE_IPv6 */
+			tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+							       tcp_hdr);
+	} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
+		   RTE_PTYPE_L4_UDP) {
+		struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
+							     mbuf->l3_len);
+		udp_hdr->dgram_cksum = 0;
+		if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
+			udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+								     udp_hdr);
+		else /* assume ethertype == ETHER_TYPE_IPv6 */
+			udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+								     udp_hdr);
+	}
+}
+
+static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
+					 struct qm_fd *fd, char *prs_buf)
+{
+	struct dpaa_eth_parse_results_t *prs;
+
+	DPAA_TX_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);
+
+	prs = GET_TX_PRS(prs_buf);
+	prs->l3r = 0;
+	prs->l4r = 0;
+	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+	   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+	   RTE_PTYPE_L3_IPV4_EXT))
+		prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
+	else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+		   RTE_PTYPE_L3_IPV6) ||
+		 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+		RTE_PTYPE_L3_IPV6_EXT))
+		prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;
+
+	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
+		prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
+	else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+		prs->l4r = DPAA_L4_PARSE_RESULT_UDP;
+
+	prs->ip_off[0] = mbuf->l2_len;
+	prs->l4_off = mbuf->l3_len + mbuf->l2_len;
+	/* Enable L3 (and L4, if TCP or UDP) HW checksum */
+	fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
+}
+
 static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
 							uint32_t ifid)
 {
@@ -358,6 +434,19 @@ tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
 		}
 		rte_pktmbuf_free(mbuf);
 	}
+
+	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
+		if (mbuf->data_off < (DEFAULT_TX_ICEOF +
+		    sizeof(struct dpaa_eth_parse_results_t))) {
+			DPAA_TX_LOG(DEBUG, "Checksum offload error: "
+				"not enough headroom "
+				"for hardware checksum offload; "
+				"falling back to software checksum.");
+			dpaa_checksum(mbuf);
+		} else {
+			dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
+		}
+	}
 }
 
 /* Handle all mbufs on dpaa BMAN managed pool */
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
index 68d2c41..624ddda 100644
--- a/drivers/net/dpaa/dpaa_rxtx.h
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -41,6 +41,22 @@
 
 /* IC offsets from buffer header address */
 #define DEFAULT_RX_ICEOF	16
+#define DEFAULT_TX_ICEOF	16
+
+/*
+ * Values for the L3R field of the FM Parse Results
+ */
+/* L3 Type field: First IP Present IPv4 */
+#define DPAA_L3_PARSE_RESULT_IPV4 0x80
+/* L3 Type field: First IP Present IPv6 */
+#define DPAA_L3_PARSE_RESULT_IPV6	0x40
+/* Values for the L4R field of the FM Parse Results
+ * See §8.8.4.7.20 - L4 HXS - L4 Results from DPAA-Rev2 Reference Manual.
+ */
+/* L4 Type field: UDP */
+#define DPAA_L4_PARSE_RESULT_UDP	0x40
+/* L4 Type field: TCP */
+#define DPAA_L4_PARSE_RESULT_TCP	0x20
 
 #define DPAA_MAX_DEQUEUE_NUM_FRAMES    63
 	/** <Maximum number of frames to be dequeued in a single rx call*/
@@ -257,6 +273,9 @@ struct annotations_t {
 #define GET_RX_PRS(_buf) \
 	(struct dpaa_eth_parse_results_t *)((uint8_t *)_buf + DEFAULT_RX_ICEOF)
 
+#define GET_TX_PRS(_buf) \
+	(struct dpaa_eth_parse_results_t *)((uint8_t *)_buf + DEFAULT_TX_ICEOF)
+
 uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
 
 uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
-- 
2.9.3



More information about the dev mailing list