[dpdk-dev] [PATCH v3 36/40] net/dpaa: add support for packet type parsing

Shreyansh Jain shreyansh.jain at nxp.com
Wed Aug 23 16:12:09 CEST 2017


Add support for parsing the packet type and L3/L4 checksum offload
capability information.

Signed-off-by: Hemant Agrawal <hemant.agrawal at nxp.com>
Signed-off-by: Shreyansh Jain <shreyansh.jain at nxp.com>
---
 doc/guides/nics/features/dpaa.ini |   2 +
 drivers/net/dpaa/dpaa_ethdev.c    |  27 +++++
 drivers/net/dpaa/dpaa_rxtx.c      | 116 +++++++++++++++++++++
 drivers/net/dpaa/dpaa_rxtx.h      | 206 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 351 insertions(+)
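
Note (not part of the patch): the snippet below is a rough sketch of how an
application could consume the packet type, RSS hash and checksum status
reported after this change. The port/queue ids, burst size and array sizes
are placeholders; only generic ethdev/mbuf APIs are used. The ptype list
returned by rte_eth_dev_get_supported_ptypes() is the one advertised by
dpaa_supported_ptypes_get() when dpaa_eth_queue_rx is the active Rx handler.

#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
dump_rx_parse_info(uint16_t port)
{
	struct rte_mbuf *pkts[32];
	uint32_t ptypes[16];
	uint16_t nb_rx;
	int i, n;

	/* Ptypes advertised through the dev_supported_ptypes_get() op */
	n = rte_eth_dev_get_supported_ptypes(port, RTE_PTYPE_ALL_MASK,
					     ptypes, 16);
	for (i = 0; i < n && i < 16; i++)
		printf("supported ptype: 0x%08x\n", ptypes[i]);

	nb_rx = rte_eth_rx_burst(port, 0, pkts, 32);
	for (i = 0; i < nb_rx; i++) {
		struct rte_mbuf *m = pkts[i];

		/* packet_type is filled from the FMan parse results */
		if ((m->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4 &&
		    (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
			printf("IPv4/UDP frame\n");

		if (m->ol_flags & PKT_RX_RSS_HASH)
			printf("RSS hash: 0x%08x\n", m->hash.rss);

		if (m->ol_flags & PKT_RX_IP_CKSUM_GOOD)
			printf("IP checksum validated by hardware\n");

		rte_pktmbuf_free(m);
	}
}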

diff --git a/doc/guides/nics/features/dpaa.ini b/doc/guides/nics/features/dpaa.ini
index 1ba6b11..2ef1b56 100644
--- a/doc/guides/nics/features/dpaa.ini
+++ b/doc/guides/nics/features/dpaa.ini
@@ -11,7 +11,9 @@ MTU update           = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 Unicast MAC filter   = Y
+RSS hash             = Y
 Flow control         = Y
+Packet type parsing  = Y
 Basic stats          = Y
 ARMv8                = Y
 Usage doc            = Y
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 55adc04..bcb69ad 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -112,6 +112,28 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 	return 0;
 }
 
+static const uint32_t *
+dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	static const uint32_t ptypes[] = {
+		/* TODO: add more types */
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4,
+		RTE_PTYPE_L3_IPV4_EXT,
+		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_L3_IPV6_EXT,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_L4_SCTP
+	};
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (dev->rx_pkt_burst == dpaa_eth_queue_rx)
+		return ptypes;
+	return NULL;
+}
+
 static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
 {
 	struct dpaa_if *dpaa_intf = dev->data->dev_private;
@@ -160,6 +182,10 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
 	dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 	dev_info->speed_capa = (ETH_LINK_SPEED_1G |
 				ETH_LINK_SPEED_10G);
+	dev_info->rx_offload_capa =
+		(DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM   |
+		DEV_RX_OFFLOAD_TCP_CKSUM);
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
@@ -465,6 +491,7 @@ static struct eth_dev_ops dpaa_devops = {
 	.dev_stop		  = dpaa_eth_dev_stop,
 	.dev_close		  = dpaa_eth_dev_close,
 	.dev_infos_get		  = dpaa_eth_dev_info,
+	.dev_supported_ptypes_get = dpaa_supported_ptypes_get,
 
 	.rx_queue_setup		  = dpaa_eth_rx_queue_setup,
 	.tx_queue_setup		  = dpaa_eth_tx_queue_setup,
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 80adf9c..90be40d 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -85,6 +85,121 @@
 		(_fd)->bpid = _bpid; \
 	} while (0)
 
+static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
+				     uint64_t prs __rte_unused)
+{
+	DPAA_RX_LOG(DEBUG, "Slow parsing");
+	/* TODO: slow-path parsing is not yet implemented */
+}
+
+static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
+					uint64_t fd_virt_addr)
+{
+	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
+	uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;
+
+	DPAA_RX_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
+
+	switch (prs) {
+	case DPAA_PKT_TYPE_NONE:
+		m->packet_type = 0;
+		break;
+	case DPAA_PKT_TYPE_ETHER:
+		m->packet_type = RTE_PTYPE_L2_ETHER;
+		break;
+	case DPAA_PKT_TYPE_IPV4:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4;
+		break;
+	case DPAA_PKT_TYPE_IPV6:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6;
+		break;
+	case DPAA_PKT_TYPE_IPV4_FRAG:
+	case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
+	case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
+	case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
+		break;
+	case DPAA_PKT_TYPE_IPV6_FRAG:
+	case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
+	case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
+	case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
+		break;
+	case DPAA_PKT_TYPE_IPV4_EXT:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4_EXT;
+		break;
+	case DPAA_PKT_TYPE_IPV6_EXT:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6_EXT;
+		break;
+	case DPAA_PKT_TYPE_IPV4_TCP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
+		break;
+	case DPAA_PKT_TYPE_IPV6_TCP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
+		break;
+	case DPAA_PKT_TYPE_IPV4_UDP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
+		break;
+	case DPAA_PKT_TYPE_IPV6_UDP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
+		break;
+	case DPAA_PKT_TYPE_IPV4_EXT_UDP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
+		break;
+	case DPAA_PKT_TYPE_IPV6_EXT_UDP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
+		break;
+	case DPAA_PKT_TYPE_IPV4_EXT_TCP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
+		break;
+	case DPAA_PKT_TYPE_IPV6_EXT_TCP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
+		break;
+	case DPAA_PKT_TYPE_IPV4_SCTP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
+		break;
+	case DPAA_PKT_TYPE_IPV6_SCTP:
+		m->packet_type = RTE_PTYPE_L2_ETHER |
+			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
+		break;
+	/* More switch cases can be added */
+	default:
+		dpaa_slow_parsing(m, prs);
+	}
+
+	m->tx_offload = annot->parse.ip_off[0];
+	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
+					<< DPAA_PKT_L3_LEN_SHIFT;
+
+	/* Set the hash values */
+	m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
+	m->ol_flags = PKT_RX_RSS_HASH;
+	/* Packets with a bad checksum are dropped by the interface (and a
+	 * corresponding notification is issued to the Rx error queues).
+	 */
+	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+	/* Check whether a VLAN tag is present */
+	if (prs & DPAA_PARSE_VLAN_MASK)
+		m->ol_flags |= PKT_RX_VLAN_PKT;
+	/* The packet is received without the VLAN being stripped */
+}
+
 static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
 							uint32_t ifid)
 {
@@ -117,6 +232,7 @@ static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
 	mbuf->ol_flags = 0;
 	mbuf->next = NULL;
 	rte_mbuf_refcnt_set(mbuf, 1);
+	dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
 
 	return mbuf;
 }
diff --git a/drivers/net/dpaa/dpaa_rxtx.h b/drivers/net/dpaa/dpaa_rxtx.h
index 45bfae8..68d2c41 100644
--- a/drivers/net/dpaa/dpaa_rxtx.h
+++ b/drivers/net/dpaa/dpaa_rxtx.h
@@ -44,6 +44,7 @@
 
 #define DPAA_MAX_DEQUEUE_NUM_FRAMES    63
 	/** <Maximum number of frames to be dequeued in a single rx call*/
+
 /* FD structure masks and offset */
 #define DPAA_FD_FORMAT_MASK 0xE0000000
 #define DPAA_FD_OFFSET_MASK 0x1FF00000
@@ -51,6 +52,211 @@
 #define DPAA_FD_FORMAT_SHIFT 29
 #define DPAA_FD_OFFSET_SHIFT 20
 
+/* Parsing mask (Little Endian) - 0x00E044ED00800000
+ *	Classification Plan ID 0x00
+ *	L4R 0xE0 -
+ *		0x20 - TCP
+ *		0x40 - UDP
+ *		0x80 - SCTP
+ *	L3R 0xED44 (in Big Endian) -
+ *		0x8000 - IPv4
+ *		0x4000 - IPv6
+ *		0x8140 - IPv4 Ext + Frag
+ *		0x8040 - IPv4 Frag
+ *		0x8100 - IPv4 Ext
+ *		0x4140 - IPv6 Ext + Frag
+ *		0x4040 - IPv6 Frag
+ *		0x4100 - IPv6 Ext
+ *	L2R 0x8000 (in Big Endian) -
+ *		0x8000 - Ethernet type
+ *	ShimR & Logical Port ID 0x0000
+ */
+#define DPAA_PARSE_MASK			0x00E044ED00800000
+#define DPAA_PARSE_VLAN_MASK		0x0000000000700000
+
+/* Parsed values (Little Endian) */
+#define DPAA_PKT_TYPE_NONE		0x0000000000000000
+#define DPAA_PKT_TYPE_ETHER		0x0000000000800000
+#define DPAA_PKT_TYPE_IPV4 \
+			(0x0000008000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_IPV6 \
+			(0x0000004000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_GRE \
+			(0x0000002000000000 | DPAA_PKT_TYPE_ETHER)
+#define DPAA_PKT_TYPE_IPV4_FRAG	\
+			(0x0000400000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_FRAG	\
+			(0x0000400000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_EXT \
+			(0x0000000100000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_EXT \
+			(0x0000000100000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_TCP \
+			(0x0020000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_TCP \
+			(0x0020000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_UDP \
+			(0x0040000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_UDP \
+			(0x0040000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_SCTP	\
+			(0x0080000000000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_IPV6_SCTP	\
+			(0x0080000000000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_IPV4_FRAG_TCP \
+			(0x0020000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_TCP \
+			(0x0020000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_FRAG_UDP \
+			(0x0040000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_UDP \
+			(0x0040000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_FRAG_SCTP \
+			(0x0080000000000000 | DPAA_PKT_TYPE_IPV4_FRAG)
+#define DPAA_PKT_TYPE_IPV6_FRAG_SCTP \
+			(0x0080000000000000 | DPAA_PKT_TYPE_IPV6_FRAG)
+#define DPAA_PKT_TYPE_IPV4_EXT_UDP \
+			(0x0040000000000000 | DPAA_PKT_TYPE_IPV4_EXT)
+#define DPAA_PKT_TYPE_IPV6_EXT_UDP \
+			(0x0040000000000000 | DPAA_PKT_TYPE_IPV6_EXT)
+#define DPAA_PKT_TYPE_IPV4_EXT_TCP \
+			(0x0020000000000000 | DPAA_PKT_TYPE_IPV4_EXT)
+#define DPAA_PKT_TYPE_IPV6_EXT_TCP \
+			(0x0020000000000000 | DPAA_PKT_TYPE_IPV6_EXT)
+#define DPAA_PKT_TYPE_TUNNEL_4_4 \
+			(0x0000000800000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6 \
+			(0x0000000400000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6 \
+			(0x0000000400000000 | DPAA_PKT_TYPE_IPV4)
+#define DPAA_PKT_TYPE_TUNNEL_6_4 \
+			(0x0000000800000000 | DPAA_PKT_TYPE_IPV6)
+#define DPAA_PKT_TYPE_TUNNEL_4_4_UDP \
+			(0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_4_4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6_UDP \
+			(0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_6_6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6_UDP \
+			(0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
+#define DPAA_PKT_TYPE_TUNNEL_6_4_UDP \
+			(0x0040000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+#define DPAA_PKT_TYPE_TUNNEL_4_4_TCP \
+			(0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_4)
+#define DPAA_PKT_TYPE_TUNNEL_6_6_TCP \
+			(0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_6)
+#define DPAA_PKT_TYPE_TUNNEL_4_6_TCP \
+			(0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_4_6)
+#define DPAA_PKT_TYPE_TUNNEL_6_4_TCP \
+			(0x0020000000000000 | DPAA_PKT_TYPE_TUNNEL_6_4)
+#define DPAA_PKT_L3_LEN_SHIFT	7
+
+/**
+ * FMan parse result array
+ */
+struct dpaa_eth_parse_results_t {
+	 uint8_t     lpid;		 /**< Logical port id */
+	 uint8_t     shimr;		 /**< Shim header result  */
+	 union {
+		uint16_t              l2r;	/**< Layer 2 result */
+		struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+			uint16_t      ethernet:1;
+			uint16_t      vlan:1;
+			uint16_t      llc_snap:1;
+			uint16_t      mpls:1;
+			uint16_t      pppoe_ppp:1;
+			uint16_t      unused_1:3;
+			uint16_t      unknown_eth_proto:1;
+			uint16_t      eth_frame_type:2;
+			uint16_t      l2r_err:5;
+			/*00-unicast, 01-multicast, 11-broadcast*/
+#else
+			uint16_t      l2r_err:5;
+			uint16_t      eth_frame_type:2;
+			uint16_t      unknown_eth_proto:1;
+			uint16_t      unused_1:3;
+			uint16_t      pppoe_ppp:1;
+			uint16_t      mpls:1;
+			uint16_t      llc_snap:1;
+			uint16_t      vlan:1;
+			uint16_t      ethernet:1;
+#endif
+		} __attribute__((__packed__));
+	 } __attribute__((__packed__));
+	 union {
+		uint16_t              l3r;	/**< Layer 3 result */
+		struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+			uint16_t      first_ipv4:1;
+			uint16_t      first_ipv6:1;
+			uint16_t      gre:1;
+			uint16_t      min_enc:1;
+			uint16_t      last_ipv4:1;
+			uint16_t      last_ipv6:1;
+			uint16_t      first_info_err:1;/*0 info, 1 error*/
+			uint16_t      first_ip_err_code:5;
+			uint16_t      last_info_err:1;	/*0 info, 1 error*/
+			uint16_t      last_ip_err_code:3;
+#else
+			uint16_t      last_ip_err_code:3;
+			uint16_t      last_info_err:1;	/*0 info, 1 error*/
+			uint16_t      first_ip_err_code:5;
+			uint16_t      first_info_err:1;/*0 info, 1 error*/
+			uint16_t      last_ipv6:1;
+			uint16_t      last_ipv4:1;
+			uint16_t      min_enc:1;
+			uint16_t      gre:1;
+			uint16_t      first_ipv6:1;
+			uint16_t      first_ipv4:1;
+#endif
+		} __attribute__((__packed__));
+	 } __attribute__((__packed__));
+	 union {
+		uint8_t               l4r;	/**< Layer 4 result */
+		struct{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+			uint8_t	       l4_type:3;
+			uint8_t	       l4_info_err:1;
+			uint8_t	       l4_result:4;
+					/* if type IPSec: 1 ESP, 2 AH */
+#else
+			uint8_t        l4_result:4;
+					/* if type IPSec: 1 ESP, 2 AH */
+			uint8_t        l4_info_err:1;
+			uint8_t        l4_type:3;
+#endif
+		} __attribute__((__packed__));
+	 } __attribute__((__packed__));
+	 uint8_t     cplan;		 /**< Classification plan id */
+	 uint16_t    nxthdr;		 /**< Next Header  */
+	 uint16_t    cksum;		 /**< Checksum */
+	 uint32_t    lcv;		 /**< LCV */
+	 uint8_t     shim_off[3];	 /**< Shim offset */
+	 uint8_t     eth_off;		 /**< ETH offset */
+	 uint8_t     llc_snap_off;	 /**< LLC_SNAP offset */
+	 uint8_t     vlan_off[2];	 /**< VLAN offset */
+	 uint8_t     etype_off;		 /**< ETYPE offset */
+	 uint8_t     pppoe_off;		 /**< PPP offset */
+	 uint8_t     mpls_off[2];	 /**< MPLS offset */
+	 uint8_t     ip_off[2];		 /**< IP offset */
+	 uint8_t     gre_off;		 /**< GRE offset */
+	 uint8_t     l4_off;		 /**< Layer 4 offset */
+	 uint8_t     nxthdr_off;	 /**< Parser end point */
+} __attribute__ ((__packed__));
+
+/* Annotation data prepended to the frame by FMan (parse results, hash, etc.) */
+struct annotations_t {
+	uint8_t reserved[DEFAULT_RX_ICEOF];
+	struct dpaa_eth_parse_results_t parse;	/**< Parse results */
+	uint64_t reserved1;
+	uint64_t hash;			/**< Hash Result */
+};
+
+#define GET_ANNOTATIONS(_buf) \
+	(struct annotations_t *)(_buf)
+
+#define GET_RX_PRS(_buf) \
+	(struct dpaa_eth_parse_results_t *)((uint8_t *)_buf + DEFAULT_RX_ICEOF)
+
 uint16_t dpaa_eth_queue_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
 
 uint16_t dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs);
-- 
2.9.3


