[dpdk-dev] [PATCH v2 03/37] net/txgbe: add ntuple parse rule

Jiawen Wu jiawenwu at trustnetic.com
Wed Nov 11 07:49:02 CET 2020


Add support to parse flow rules for the ntuple filter.
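
For reference (not part of this patch), a rule that this parser
accepts can be built through the generic flow API roughly as
sketched below. The helper name, port id, queue index, addresses
and ports are illustrative only:

  #include <stdint.h>
  #include <netinet/in.h>

  #include <rte_byteorder.h>
  #include <rte_flow.h>
  #include <rte_ip.h>

  /* IPv4/UDP 192.168.1.20 -> 192.167.3.50, port 80 -> 80,
   * forwarded to Rx queue 3 (same values as the pattern example
   * in the parser comment below).
   */
  static struct rte_flow *
  create_example_ntuple_rule(uint16_t port_id,
			     struct rte_flow_error *err)
  {
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
		.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
		.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
		.next_proto_id = IPPROTO_UDP,
	} };
	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
		.src_addr = RTE_BE32(UINT32_MAX),
		.dst_addr = RTE_BE32(UINT32_MAX),
		.next_proto_id = UINT8_MAX,
	} };
	struct rte_flow_item_udp udp_spec = { .hdr = {
		.src_port = RTE_BE16(80),
		.dst_port = RTE_BE16(80),
	} };
	struct rte_flow_item_udp udp_mask = { .hdr = {
		.src_port = RTE_BE16(UINT16_MAX),
		.dst_port = RTE_BE16(UINT16_MAX),
	} };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
  }

With testpmd, the same rule should look like:

  flow create 0 priority 1 ingress pattern eth / \
    ipv4 src is 192.168.1.20 dst is 192.167.3.50 proto is 17 / \
    udp src is 80 dst is 80 / end actions queue index 3 / end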

Signed-off-by: Jiawen Wu <jiawenwu at trustnetic.com>
---
 drivers/net/txgbe/meson.build  |   1 +
 drivers/net/txgbe/txgbe_flow.c | 536 +++++++++++++++++++++++++++++++++
 2 files changed, 537 insertions(+)
 create mode 100644 drivers/net/txgbe/txgbe_flow.c

diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index 345dffaf6..45379175d 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -6,6 +6,7 @@ objs = [base_objs]
 
 sources = files(
 	'txgbe_ethdev.c',
+	'txgbe_flow.c',
 	'txgbe_ptypes.c',
 	'txgbe_pf.c',
 	'txgbe_rxtx.c',
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
new file mode 100644
index 000000000..6f8be3b7f
--- /dev/null
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_rxtx.h"
+
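+/* rule priorities accepted by the parser; an out-of-range value is
+ * replaced with TXGBE_MIN_N_TUPLE_PRIO in cons_parse_ntuple_filter()
+ */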
+#define TXGBE_MIN_N_TUPLE_PRIO 1
+#define TXGBE_MAX_N_TUPLE_PRIO 7
+
+/**
+ * An endless loop cannot occur given the assumptions below:
+ * 1. there is at least one not-void item (END)
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_item *cur)
+{
+	const struct rte_flow_item *next =
+		cur ? cur + 1 : &pattern[0];
+	while (1) {
+		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+			return next;
+		next++;
+	}
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+		const struct rte_flow_action actions[],
+		const struct rte_flow_action *cur)
+{
+	const struct rte_flow_action *next =
+		cur ? cur + 1 : &actions[0];
+	while (1) {
+		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+			return next;
+		next++;
+	}
+}
+
+/**
+ * Please be aware of an assumption shared by all the parsers:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order, because the pattern describes
+ * packets, which normally carry their fields in network order.
+ */
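+/*
+ * For example (illustrative values): a TCP destination port of 80
+ * must be given in rte_flow_item_tcp as RTE_BE16(80), while a
+ * priority of 1 in rte_flow_attr is a plain CPU-order 1.
+ */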
+
+/**
+ * Parse the rule to see if it is an n-tuple rule,
+ * and fill in the n-tuple filter info along the way.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 or VLAN if the first
+ * one is ETH; a VLAN item must carry NULL spec and mask and
+ * be followed by IPV4.
+ * The next not void item must be UDP, TCP or SCTP,
+ * or END if no L4 match is required.
+ * The next not void item must be END.
+ * action:
+ * The first not void action must be QUEUE.
+ * The next not void action must be END.
+ * pattern example:
+ * ITEM		Spec			Mask
+ * ETH		NULL			NULL
+ * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
+ *		dst_addr 192.167.3.50	0xFFFFFFFF
+ *		next_proto_id	17	0xFF
+ * UDP/TCP/	src_port	80	0xFFFF
+ * SCTP		dst_port	80	0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+			 const struct rte_flow_item pattern[],
+			 const struct rte_flow_action actions[],
+			 struct rte_eth_ntuple_filter *filter,
+			 struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item;
+	const struct rte_flow_action *act;
+	const struct rte_flow_item_ipv4 *ipv4_spec;
+	const struct rte_flow_item_ipv4 *ipv4_mask;
+	const struct rte_flow_item_tcp *tcp_spec;
+	const struct rte_flow_item_tcp *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec;
+	const struct rte_flow_item_udp *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec;
+	const struct rte_flow_item_sctp *sctp_mask;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	const struct rte_flow_item_vlan *vlan_spec;
+	const struct rte_flow_item_vlan *vlan_mask;
+	struct rte_flow_item_eth eth_null;
+	struct rte_flow_item_vlan vlan_null;
+
+	if (!pattern) {
+		rte_flow_error_set(error,
+			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+			NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
+	/* the first not void item can be MAC or IPv4 */
+	item = next_no_void_pattern(pattern, NULL);
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
+	/* Skip Ethernet */
+	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		eth_spec = item->spec;
+		eth_mask = item->mask;
+		/* Not supported last point for range */
+		if (item->last) {
+			rte_flow_error_set(error,
+			  EINVAL,
+			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			  item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* if the first item is MAC, the content should be NULL */
+		if ((item->spec || item->mask) &&
+			(memcmp(eth_spec, &eth_null,
+				sizeof(struct rte_flow_item_eth)) ||
+			 memcmp(eth_mask, &eth_null,
+				sizeof(struct rte_flow_item_eth)))) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		/* check if the next not void item is IPv4 or Vlan */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+			rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+		vlan_spec = item->spec;
+		vlan_mask = item->mask;
+		/* Not supported last point for range */
+		if (item->last) {
+			rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+		/* the content should be NULL */
+		if ((item->spec || item->mask) &&
+			(memcmp(vlan_spec, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)) ||
+			 memcmp(vlan_mask, &vlan_null,
+				sizeof(struct rte_flow_item_vlan)))) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		/* check if the next not void item is IPv4 */
+		item = next_no_void_pattern(pattern, item);
+		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+			rte_flow_error_set(error,
+			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			  item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+	}
+
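+	/* the IPv4 item may carry no mask at all, in which case the
+	 * addresses and the protocol are left wildcarded
+	 */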
+	if (item->mask) {
+		/* get the IPv4 info; a mask without a spec is invalid */
+		if (!item->spec) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Invalid ntuple mask");
+			return -rte_errno;
+		}
+		/* Not supported last point for range */
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				item, "Not supported last point for range");
+			return -rte_errno;
+		}
+
+		ipv4_mask = item->mask;
+		/**
+		 * Only src & dst addresses and protocol are supported;
+		 * all other fields must be left unmatched (mask 0).
+		 */
+		if (ipv4_mask->hdr.version_ihl ||
+		    ipv4_mask->hdr.type_of_service ||
+		    ipv4_mask->hdr.total_length ||
+		    ipv4_mask->hdr.packet_id ||
+		    ipv4_mask->hdr.fragment_offset ||
+		    ipv4_mask->hdr.time_to_live ||
+		    ipv4_mask->hdr.hdr_checksum) {
+			rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		if ((ipv4_mask->hdr.src_addr != 0 &&
+			ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+			(ipv4_mask->hdr.dst_addr != 0 &&
+			ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+			(ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
+			ipv4_mask->hdr.next_proto_id != 0)) {
+			rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+		ipv4_spec = item->spec;
+		filter->dst_ip = ipv4_spec->hdr.dst_addr;
+		filter->src_ip = ipv4_spec->hdr.src_addr;
+		filter->proto  = ipv4_spec->hdr.next_proto_id;
+	}
+
+	/* check if the next not void item is TCP or UDP */
+	item = next_no_void_pattern(pattern, item);
+	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+	    item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
+
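+	/* an L4 item with neither spec nor mask wildcards both ports */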
+	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
+		(!item->spec && !item->mask)) {
+		goto action;
+	}
+
+	/* get the TCP/UDP/SCTP info */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
+		(!item->spec || !item->mask)) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Invalid ntuple mask");
+		return -rte_errno;
+	}
+
+	/* Not supported last point for range */
+	if (item->last) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			item, "Not supported last point for range");
+		return -rte_errno;
+	}
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+		tcp_mask = item->mask;
+
+		/**
+		 * Only src & dst ports and TCP flags are supported;
+		 * all other fields must be left unmatched (mask 0).
+		 */
+		if (tcp_mask->hdr.sent_seq ||
+		    tcp_mask->hdr.recv_ack ||
+		    tcp_mask->hdr.data_off ||
+		    tcp_mask->hdr.rx_win ||
+		    tcp_mask->hdr.cksum ||
+		    tcp_mask->hdr.tcp_urp) {
+			memset(filter, 0,
+				sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		if ((tcp_mask->hdr.src_port != 0 &&
+			tcp_mask->hdr.src_port != UINT16_MAX) ||
+			(tcp_mask->hdr.dst_port != 0 &&
+			tcp_mask->hdr.dst_port != UINT16_MAX)) {
+			rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
+		filter->src_port_mask  = tcp_mask->hdr.src_port;
+		if (tcp_mask->hdr.tcp_flags == 0xFF) {
+			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else if (!tcp_mask->hdr.tcp_flags) {
+			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+		} else {
+			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		tcp_spec = item->spec;
+		filter->dst_port  = tcp_spec->hdr.dst_port;
+		filter->src_port  = tcp_spec->hdr.src_port;
+		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+		udp_mask = item->mask;
+
+		/**
+		 * Only src & dst ports are supported;
+		 * all other fields must be left unmatched (mask 0).
+		 */
+		if (udp_mask->hdr.dgram_len ||
+		    udp_mask->hdr.dgram_cksum) {
+			memset(filter, 0,
+				sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+		if ((udp_mask->hdr.src_port != 0 &&
+			udp_mask->hdr.src_port != UINT16_MAX) ||
+			(udp_mask->hdr.dst_port != 0 &&
+			udp_mask->hdr.dst_port != UINT16_MAX)) {
+			rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		filter->dst_port_mask = udp_mask->hdr.dst_port;
+		filter->src_port_mask = udp_mask->hdr.src_port;
+
+		udp_spec = item->spec;
+		filter->dst_port = udp_spec->hdr.dst_port;
+		filter->src_port = udp_spec->hdr.src_port;
+	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+		sctp_mask = item->mask;
+
+		/**
+		 * Only src & dst ports are supported;
+		 * all other fields must be left unmatched (mask 0).
+		 */
+		if (sctp_mask->hdr.tag ||
+		    sctp_mask->hdr.cksum) {
+			memset(filter, 0,
+				sizeof(struct rte_eth_ntuple_filter));
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item, "Not supported by ntuple filter");
+			return -rte_errno;
+		}
+
+		filter->dst_port_mask = sctp_mask->hdr.dst_port;
+		filter->src_port_mask = sctp_mask->hdr.src_port;
+
+		sctp_spec = item->spec;
+		filter->dst_port = sctp_spec->hdr.dst_port;
+		filter->src_port = sctp_spec->hdr.src_port;
+	} else {
+		goto action;
+	}
+
+	/* check if the next not void item is END */
+	item = next_no_void_pattern(pattern, item);
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
+
+action:
+
+	/**
+	 * n-tuple only supports forwarding,
+	 * check if the first not void action is QUEUE.
+	 */
+	act = next_no_void_action(actions, NULL);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+	filter->queue =
+		((const struct rte_flow_action_queue *)act->conf)->index;
+
+	/* check if the next not void item is END */
+	act = next_no_void_action(actions, act);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	/* parse attr */
+	/* must be input direction */
+	if (!attr->ingress) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->egress) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Egress not supported.");
+		return -rte_errno;
+	}
+
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				   attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
+	if (attr->priority > 0xFFFF) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Invalid priority.");
+		return -rte_errno;
+	}
+	filter->priority = (uint16_t)attr->priority;
+	if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
+		attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
+		filter->priority = TXGBE_MIN_N_TUPLE_PRIO;
+
+	return 0;
+}
+
+/* a txgbe-specific wrapper, because the supported flags are device-specific */
+static int
+txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
+			  const struct rte_flow_attr *attr,
+			  const struct rte_flow_item pattern[],
+			  const struct rte_flow_action actions[],
+			  struct rte_eth_ntuple_filter *filter,
+			  struct rte_flow_error *error)
+{
+	int ret;
+
+	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+	if (ret)
+		return ret;
+
+	/* txgbe doesn't support tcp flags */
+	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   NULL, "Not supported by ntuple filter");
+		return -rte_errno;
+	}
+
+	/* txgbe supports only a limited range of priorities */
+	if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
+	    filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			NULL, "Priority not supported by ntuple filter");
+		return -rte_errno;
+	}
+
+	/* the queue index must refer to an existing Rx queue */
+	if (filter->queue >= dev->data->nb_rx_queues) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			NULL, "Invalid queue index");
+		return -rte_errno;
+	}
+
+	/* the flags are fixed to the full 5-tuple for txgbe */
+	filter->flags = RTE_5TUPLE_FLAGS;
+	return 0;
+}
+
-- 
2.18.4