[dpdk-dev] [RFC Patch 33/39] net/dpaa: support for checksum offload

Shreyansh Jain shreyansh.jain at nxp.com
Sat May 27 12:25:29 CEST 2017


Add support for L3 (IPv4) and L4 (UDP/TCP) checksum offload in the
DPAA PMD. The Rx and Tx offload capabilities are advertised in
dpaa_eth_dev_info(). On Tx, the checksum is offloaded to the hardware
by writing the parse results into the packet headroom; when the
headroom is too small to hold them, the checksum is computed in
software instead.

Signed-off-by: Hemant Agrawal <hemant.agrawal at nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain at nxp.com>
---
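
Note: the Tx path in this patch picks between the hardware offload and
a software fallback based on the mbuf's ol_flags, packet_type and
l2_len/l3_len fields, so the application has to describe each frame it
sends. A minimal per-packet sketch, assuming only the generic mbuf API
(request_tx_cksum is a hypothetical helper, and the exact flag set
behind DPAA_TX_CKSUM_OFFLOAD_MASK is defined by the driver):

	#include <rte_ether.h>
	#include <rte_ip.h>
	#include <rte_mbuf.h>

	/* Describe an IPv4/TCP frame and request Tx checksum offload. */
	static void request_tx_cksum(struct rte_mbuf *m)
	{
		/* The driver reads packet_type to tell IPv4/IPv6 and
		 * TCP/UDP apart. */
		m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				 RTE_PTYPE_L4_TCP;
		/* l2_len/l3_len locate the L3/L4 headers for both the
		 * hardware and the software checksum paths. */
		m->l2_len = sizeof(struct ether_hdr);
		m->l3_len = sizeof(struct ipv4_hdr);
		/* Request IP header and TCP checksum computation. */
		m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
	}
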
 doc/guides/nics/features/dpaa.ini |  2 +
 drivers/net/dpaa/dpaa_ethdev.c    |  8 ++++
 drivers/net/dpaa/dpaa_rxtx.c      | 88 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 98 insertions(+)

diff --git a/doc/guides/nics/features/dpaa.ini b/doc/guides/nics/features/dpaa.ini
index 7165e47..cb220e4 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -11,6 +11,8 @@ MTU update           = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 Unicast MAC filter   = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
 Basic stats          = Y
 ARMv8                = Y
 Usage doc            = Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 5d406be..c3f9eb5 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -158,6 +158,14 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->max_hash_mac_addrs = 0;
 	dev_info->max_vfs = 0;
 	dev_info->max_vmdq_pools = ETH_16_POOLS;
+	dev_info->rx_offload_capa =
+		(DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM);
+	dev_info->tx_offload_capa =
+		(DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM);
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 5978090..b51d66c 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -85,6 +85,82 @@
 		(_fd)->bpid = _bpid; \
 	} while (0)
 
+/* Software fallback: compute the IP and TCP/UDP checksums on the core
+ * when the frame cannot be offloaded to the hardware. */
+static inline void dpaa_checksum(struct rte_mbuf *mbuf)
+{
+	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+	char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
+	struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+	struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+	PMD_TX_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);
+
+	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+	    RTE_PTYPE_L3_IPV4_EXT)) {
+		ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+		ipv4_hdr->hdr_checksum = 0;
+		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+	} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+		   RTE_PTYPE_L3_IPV6) ||
+		   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+		   RTE_PTYPE_L3_IPV6_EXT))
+		ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+
+	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
+		struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
+					  mbuf->l3_len);
+		tcp_hdr->cksum = 0;
+		if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+			tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+							       tcp_hdr);
+		else /* assume ethertype == ETHER_TYPE_IPv6 */
+			tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+							       tcp_hdr);
+	} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
+		   RTE_PTYPE_L4_UDP) {
+		struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
+							     mbuf->l3_len);
+		udp_hdr->dgram_cksum = 0;
+		if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+			udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
+								     udp_hdr);
+		else /* assume ethertype == ETHER_TYPE_IPv6 */
+			udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
+								     udp_hdr);
+	}
+}
+
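+/* Hardware offload: fill in the parse results in the packet headroom
+ * so that the hardware computes and inserts the L3 (and L4) checksum
+ * on transmit. */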
+static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
+					 struct qm_fd *fd, char *prs_buf)
+{
+	struct dpaa_eth_parse_results_t *prs;
+
+	PMD_TX_LOG(DEBUG, "Offloading checksum for mbuf: %p", mbuf);
+
+	prs = GET_TX_PRS(prs_buf);
+	prs->l3r = 0;
+	prs->l4r = 0;
+	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
+	   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+	   RTE_PTYPE_L3_IPV4_EXT))
+		prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
+	else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+		   RTE_PTYPE_L3_IPV6) ||
+		 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
+		RTE_PTYPE_L3_IPV6_EXT))
+		prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;
+
+	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
+		prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
+	else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
+		prs->l4r = DPAA_L4_PARSE_RESULT_UDP;
+
+	prs->ip_off[0] = mbuf->l2_len;
+	prs->l4_off = mbuf->l3_len + mbuf->l2_len;
+	/* Enable L3 (and L4, if TCP or UDP) HW checksum */
+	fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
+}
+
 static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
 							uint32_t ifid)
 {
@@ -251,6 +327,18 @@ dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 						}
 						rte_pktmbuf_free(mbuf);
 					}
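+					/* Offload the checksum to hardware only if
+					 * the headroom can hold the parse results;
+					 * otherwise compute it in software. */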
+					if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
+						if (mbuf->data_off < DEFAULT_TX_ICEOF +
+							sizeof(struct dpaa_eth_parse_results_t)) {
+							PMD_DRV_LOG(DEBUG, "Checksum offload error: "
+								"not enough headroom for the "
+								"parse results; computing the "
+								"checksum in software instead.");
+							dpaa_checksum(mbuf);
+						} else
+							dpaa_checksum_offload(mbuf, &fd_arr[loop],
+								mbuf->buf_addr);
+					}
 				} else {
 					PMD_DRV_LOG(DEBUG, "Number of Segments not supported");
 					/* Set frames_to_send & nb_bufs so that
-- 
2.7.4


