[dpdk-dev] [PATCH] net/i40e: enable port filter by switch filter

Jeff Guo jia.guo at intel.com
Sun Jun 21 14:28:34 CEST 2020


Hi Guinan,

On 6/11/2020 1:24 PM, Guinan Sun wrote:
> This patch enables a filter that supports creating the
> following two rules for the same packet type:
> one selects only the source port as the input set and the
> other selects only the destination port.
>
> Signed-off-by: Guinan Sun <guinanx.sun at intel.com>
> ---
>   doc/guides/rel_notes/release_20_08.rst |   7 +
>   drivers/net/i40e/i40e_ethdev.c         | 195 ++++++++++++++++++++-
>   drivers/net/i40e/i40e_ethdev.h         |  17 ++
>   drivers/net/i40e/i40e_flow.c           | 223 +++++++++++++++++++++++++
>   4 files changed, 441 insertions(+), 1 deletion(-)
>
> diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> index 7a67c960c..16870100d 100644
> --- a/doc/guides/rel_notes/release_20_08.rst
> +++ b/doc/guides/rel_notes/release_20_08.rst
> @@ -68,6 +68,13 @@ New Features
>   
>     * Added new PMD devarg ``reclaim_mem_mode``.
>   
> +* **Updated Intel i40e driver.**
> +
> +  Updated i40e PMD with new features and improvements, including:
> +
> +  * Added a new type of cloud filter to support the coexistence of the
> +    following two rules. One selects L4 destination as input set and
> +    the other one selects L4 source port.
>   
>   Removed Items
>   -------------
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 970a31cb2..97e6e948a 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -7956,6 +7956,13 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
>   #define I40E_TR_GRE_KEY_MASK			0x400
>   #define I40E_TR_GRE_KEY_WITH_XSUM_MASK		0x800
>   #define I40E_TR_GRE_NO_KEY_MASK			0x8000
> +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0 0x49
> +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0 0x41
> +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0 0x80
> +#define I40E_DIRECTION_INGRESS_KEY		0x8000
> +#define I40E_TR_L4_TYPE_TCP			0x2
> +#define I40E_TR_L4_TYPE_UDP			0x4
> +#define I40E_TR_L4_TYPE_SCTP			0x8
>   
>   static enum
>   i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
> @@ -8254,6 +8261,131 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
>   	return status;
>   }
>   
> +static enum i40e_status_code
> +i40e_replace_port_l1_filter(struct i40e_pf *pf, enum i40e_l4_port_type port_type)
> +{
> +	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
> +	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
> +	enum i40e_status_code status = I40E_SUCCESS;
> +	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> +	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
> +


Would reverse Christmas tree ordering of these local variable declarations look better?
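Something like this sketch is what I mean (longest declaration first). Note
that 'dev' is initialized from 'hw', so its assignment has to move below the
declarations:

	struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
	struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct rte_eth_dev *dev;

	dev = ((struct i40e_adapter *)hw->back)->eth_dev;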


> +	if (pf->support_multi_driver) {
> +		PMD_DRV_LOG(ERR, "Replace l1 filter is not supported.");
> +		return I40E_NOT_SUPPORTED;
> +	}
> +
> +	memset(&filter_replace, 0,
> +	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
> +	memset(&filter_replace_buf, 0,
> +	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
> +
> +	/* create L1 filter */
> +	if (port_type == I40E_L4_PORT_TYPE_SRC) {
> +		filter_replace.old_filter_type =
> +			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY;
> +		filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X11;
> +		filter_replace_buf.data[8] =
> +			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT;
> +	} else {
> +		filter_replace.old_filter_type =
> +			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
> +		filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_0X10;
> +		filter_replace_buf.data[8] =
> +			I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT;
> +	}
> +
> +	filter_replace.tr_bit = 0;
> +	/* Prepare the buffer, 3 entries */
> +	filter_replace_buf.data[0] =
> +		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_DIRECTION_WORD0;
> +	filter_replace_buf.data[0] |=
> +		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
> +	filter_replace_buf.data[2] = 0x00;
> +	filter_replace_buf.data[3] =
> +		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_INGRESS_WORD0;
> +	filter_replace_buf.data[4] =
> +		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_PORT_TR_WORD0;
> +	filter_replace_buf.data[4] |=
> +		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
> +	filter_replace_buf.data[5] = 0x00;
> +	filter_replace_buf.data[6] = I40E_TR_L4_TYPE_UDP |
> +		I40E_TR_L4_TYPE_TCP |
> +		I40E_TR_L4_TYPE_SCTP;
> +	filter_replace_buf.data[7] = 0x00;
> +	filter_replace_buf.data[8] |=
> +		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
> +	filter_replace_buf.data[9] = 0x00;
> +	filter_replace_buf.data[10] = 0xFF;
> +	filter_replace_buf.data[11] = 0xFF;
> +
> +	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
> +						&filter_replace_buf);


Please check the alignment of the continuation line here and below.
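For example, just a sketch with the continuation argument aligned under the
opening parenthesis:

	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
					       &filter_replace_buf);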


> +	if (!status && (filter_replace.old_filter_type !=
> +			filter_replace.new_filter_type))
> +		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
> +			    " original: 0x%x, new: 0x%x",
> +			    dev->device->name,
> +			    filter_replace.old_filter_type,
> +			    filter_replace.new_filter_type);
> +
> +	return status;
> +}
> +
> +static enum
> +i40e_status_code i40e_replace_port_cloud_filter(struct i40e_pf *pf,
> +						enum i40e_l4_port_type port_type)


Please check the line length limit here and below. I think checkpatch could
help you with that.
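A rough sketch of what would fit within the 80-character limit, keeping
"static" together with the return type:

static enum i40e_status_code
i40e_replace_port_cloud_filter(struct i40e_pf *pf,
			       enum i40e_l4_port_type port_type)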


> +{
> +	struct i40e_aqc_replace_cloud_filters_cmd_buf  filter_replace_buf;
> +	struct i40e_aqc_replace_cloud_filters_cmd  filter_replace;
> +	enum i40e_status_code status = I40E_SUCCESS;
> +	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> +	struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
> +
> +	if (pf->support_multi_driver) {
> +		PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
> +		return I40E_NOT_SUPPORTED;
> +	}
> +
> +	memset(&filter_replace, 0,
> +	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
> +	memset(&filter_replace_buf, 0,
> +	       sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
> +
> +	if (port_type == I40E_L4_PORT_TYPE_SRC) {
> +		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
> +		filter_replace.new_filter_type =
> +			I40E_AQC_ADD_L1_FILTER_0X11;
> +		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X11;
> +	} else {
> +		filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
> +		filter_replace.new_filter_type =
> +			I40E_AQC_ADD_CLOUD_FILTER_0X10;
> +		filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_0X10;
> +	}
> +
> +	filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
> +	filter_replace.tr_bit = 0;
> +	/* Prepare the buffer, 2 entries */
> +	filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
> +	filter_replace_buf.data[0] |=
> +		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
> +	filter_replace_buf.data[4] |=
> +		I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
> +	status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
> +						&filter_replace_buf);
> +
> +	if (!status && (filter_replace.old_filter_type !=
> +			filter_replace.new_filter_type))
> +		PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
> +			    " original: 0x%x, new: 0x%x",
> +			    dev->device->name,
> +			    filter_replace.old_filter_type,
> +			    filter_replace.new_filter_type);
> +
> +	return status;
> +}
> +
>   int
>   i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
>   		      struct i40e_tunnel_filter_conf *tunnel_filter,
> @@ -8401,6 +8533,58 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
>   		pfilter->general_fields[0] = tunnel_filter->inner_vlan;
>   		pfilter->general_fields[1] = tunnel_filter->outer_vlan;
>   		big_buffer = 1;
> +		break;
> +	case I40E_TUNNEL_TYPE_UDP:
> +	case I40E_TUNNEL_TYPE_TCP:
> +	case I40E_TUNNEL_TYPE_SCTP:
> +		if (tunnel_filter->port_type == I40E_L4_PORT_TYPE_SRC) {
> +			if (!pf->sport_replace_flag) {
> +				i40e_replace_port_l1_filter(pf, tunnel_filter->port_type);
> +				i40e_replace_port_cloud_filter(pf, tunnel_filter->port_type);
> +				pf->sport_replace_flag = 1;
> +			}
> +			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
> +			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
> +				I40E_DIRECTION_INGRESS_KEY;
> +
> +			if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_UDP)
> +				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
> +					I40E_TR_L4_TYPE_UDP;
> +			else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_TCP)
> +				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
> +					I40E_TR_L4_TYPE_TCP;
> +			else
> +				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
> +					I40E_TR_L4_TYPE_SCTP;
> +
> +			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
> +				(teid_le >> 16) & 0xFFFF;
> +			big_buffer = 1;
> +		} else {
> +			if (!pf->dport_replace_flag) {
> +				i40e_replace_port_l1_filter(pf, tunnel_filter->port_type);
> +				i40e_replace_port_cloud_filter(pf, tunnel_filter->port_type);
> +				pf->dport_replace_flag = 1;
> +			}
> +			teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
> +			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0] =
> +				I40E_DIRECTION_INGRESS_KEY;
> +
> +			if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_UDP)
> +				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
> +					I40E_TR_L4_TYPE_UDP;
> +			else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_TCP)
> +				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
> +					I40E_TR_L4_TYPE_TCP;
> +			else
> +				pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1] =
> +					I40E_TR_L4_TYPE_SCTP;
> +
> +			pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2] =
> +				(teid_le >> 16) & 0xFFFF;
> +			big_buffer = 1;
> +		}
> +
>   		break;
>   	default:
>   		/* Other tunnel types is not supported. */
> @@ -8424,7 +8608,16 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
>   	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
>   		pfilter->element.flags |=
>   			I40E_AQC_ADD_CLOUD_FILTER_0X10;
> -	else {
> +	else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_UDP ||
> +		tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_TCP ||
> +		tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_SCTP) {
> +		if (tunnel_filter->port_type == I40E_L4_PORT_TYPE_SRC)
> +			pfilter->element.flags |=
> +				I40E_AQC_ADD_L1_FILTER_0X11;
> +		else
> +			pfilter->element.flags |=
> +				I40E_AQC_ADD_CLOUD_FILTER_0X10;
> +	} else {
>   		val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
>   						&pfilter->element.flags);
>   		if (val < 0) {
> diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> index e5d0ce53f..b79ab5880 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -767,6 +767,8 @@ struct i40e_rss_pattern_info {
>   
>   #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
>   #define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
> +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_SRC_PORT 29
> +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_DST_PORT 30
>   #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOUDP	8
>   #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSOGRE	9
>   #define I40E_AQC_ADD_CLOUD_FILTER_0X10		0x10
> @@ -828,9 +830,20 @@ enum i40e_tunnel_type {
>   	I40E_TUNNEL_TYPE_GTPU,
>   	I40E_TUNNEL_TYPE_ESPoUDP,
>   	I40E_TUNNEL_TYPE_ESPoIP,
> +	I40E_TUNNEL_TYPE_UDP,
> +	I40E_TUNNEL_TYPE_TCP,
> +	I40E_TUNNEL_TYPE_SCTP,
>   	I40E_TUNNEL_TYPE_MAX,
>   };
>   
> +/**
> + * Port type.
> + */
> +enum i40e_l4_port_type {
> +	I40E_L4_PORT_TYPE_SRC = 0,
> +	I40E_L4_PORT_TYPE_DST,
> +};
> +
>   /**
>    * Tunneling Packet filter configuration.
>    */
> @@ -852,6 +865,7 @@ struct i40e_tunnel_filter_conf {
>   	/** Flags from ETH_TUNNEL_FILTER_XX - see above. */
>   	uint16_t filter_type;
>   	enum i40e_tunnel_type tunnel_type; /**< Tunnel Type. */
> +	enum i40e_l4_port_type port_type; /**< L4 Port Type. */


Would l4_port_type be a better name for this field?
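Just a sketch of what I mean, the naming is up to you:

	enum i40e_l4_port_type l4_port_type; /**< L4 port type. */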


>   	uint32_t tenant_id;     /**< Tenant ID to match. VNI, GRE key... */
>   	uint16_t queue_id;      /**< Queue assigned to if match. */
>   	uint8_t is_to_vf;       /**< 0 - to PF, 1 - to VF */
> @@ -1073,6 +1087,9 @@ struct i40e_pf {
>   	bool mpls_replace_flag;  /* 1 - MPLS filter replace is done */
>   	bool gtp_replace_flag;   /* 1 - GTP-C/U filter replace is done */
>   	bool qinq_replace_flag;  /* QINQ filter replace is done */
> +	/* l4 port flag */
> +	bool sport_replace_flag;   /* source port replace is done */
> +	bool dport_replace_flag;   /* dest port replace is done */
>   	struct i40e_tm_conf tm_conf;
>   	bool support_multi_driver; /* 1 - support multiple driver */
>   
> diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
> index 8f8df6fae..a268ff3d2 100644
> --- a/drivers/net/i40e/i40e_flow.c
> +++ b/drivers/net/i40e/i40e_flow.c
> @@ -124,6 +124,12 @@ i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
>   			      struct rte_flow_error *error,
>   			      struct i40e_tunnel_filter_conf *filter);
>   
> +static int i40e_flow_parse_l4_proto_filter(struct rte_eth_dev *dev,
> +				      const struct rte_flow_attr *attr,
> +				      const struct rte_flow_item pattern[],
> +				      const struct rte_flow_action actions[],
> +				      struct rte_flow_error *error,
> +				      union i40e_filter_t *filter);
>   const struct rte_flow_ops i40e_flow_ops = {
>   	.validate = i40e_flow_validate,
>   	.create = i40e_flow_create,
> @@ -1845,6 +1851,13 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
>   	/* L2TPv3 over IP */
>   	{ pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
>   	{ pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
> +	/* L4 over port */
> +	{ pattern_fdir_ipv4_udp, i40e_flow_parse_l4_proto_filter },
> +	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_proto_filter },
> +	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_proto_filter },
> +	{ pattern_fdir_ipv6_udp, i40e_flow_parse_l4_proto_filter },
> +	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_proto_filter },
> +	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_proto_filter },
>   };
>   
>   #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
> @@ -3541,6 +3554,216 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
>   	return 0;
>   }
>   
> +static int
> +i40e_flow_parse_l4_proto_pattern(const struct rte_flow_item *pattern,
> +				 struct rte_flow_error *error,
> +				 struct i40e_tunnel_filter_conf *filter)
> +{
> +	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> +	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> +	const struct rte_flow_item_udp *udp_spec, *udp_mask;
> +	const struct rte_flow_item *item = pattern;
> +	enum rte_flow_item_type item_type;
> +
> +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> +		if (item->last) {
> +			rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Not support range");
> +			return -rte_errno;
> +		}


A blank line would help separate the range and type checking.
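For example, only a sketch:

		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}

		item_type = item->type;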


> +		item_type = item->type;
> +		switch (item_type) {
> +		case RTE_FLOW_ITEM_TYPE_ETH:
> +			if (item->spec || item->mask) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid ETH item");


Is there any need to keep this line separate from the one above? The same applies below.
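For example, a sketch of what I mean:

				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid ETH item");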


> +				return -rte_errno;
> +			}
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
> +			/* IPv4 is used to describe protocol,
> +			 * spec and mask should be NULL.
> +			 */
> +			if (item->spec || item->mask) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid IPv4 item");
> +				return -rte_errno;
> +			}
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV6:
> +			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
> +			/* IPv6 is used to describe protocol,
> +			 * spec and mask should be NULL.
> +			 */
> +			if (item->spec || item->mask) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid IPv6 item");
> +				return -rte_errno;
> +			}
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_UDP:
> +			udp_spec = item->spec;
> +			udp_mask = item->mask;
> +
> +			if (!udp_spec || !udp_mask) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid udp item");
> +				return -rte_errno;
> +			}
> +
> +			if (udp_spec->hdr.src_port != 0 &&
> +			    udp_spec->hdr.dst_port != 0) {
> +				rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM,
> +						item,
> +						"Invalid udp spec");
> +				return -rte_errno;
> +			}
> +
> +			if (udp_spec->hdr.src_port != 0) {
> +				filter->port_type =
> +					I40E_L4_PORT_TYPE_SRC;
> +				filter->tenant_id =
> +				rte_be_to_cpu_32(udp_spec->hdr.src_port);
> +			}
> +
> +			if (udp_spec->hdr.dst_port != 0) {
> +				filter->port_type =
> +					I40E_L4_PORT_TYPE_DST;
> +				filter->tenant_id =
> +				rte_be_to_cpu_32(udp_spec->hdr.dst_port);
> +			}
> +
> +			filter->tunnel_type = I40E_TUNNEL_TYPE_UDP;
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_TCP:
> +			tcp_spec = item->spec;
> +			tcp_mask = item->mask;
> +
> +			if (!tcp_spec || !tcp_mask) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid tcp item");
> +				return -rte_errno;
> +			}
> +
> +			if (tcp_spec->hdr.src_port != 0 &&
> +			    tcp_spec->hdr.dst_port != 0) {
> +				rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM,
> +						item,
> +						"Invalid tcp spec");
> +				return -rte_errno;
> +			}
> +
> +			if (tcp_spec->hdr.src_port != 0) {
> +				filter->port_type =
> +					I40E_L4_PORT_TYPE_SRC;
> +				filter->tenant_id =
> +				rte_be_to_cpu_32(tcp_spec->hdr.src_port);
> +			}
> +
> +			if (tcp_spec->hdr.dst_port != 0) {
> +				filter->port_type =
> +					I40E_L4_PORT_TYPE_DST;
> +				filter->tenant_id =
> +				rte_be_to_cpu_32(tcp_spec->hdr.dst_port);
> +			}
> +
> +			filter->tunnel_type = I40E_TUNNEL_TYPE_TCP;
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_SCTP:
> +			sctp_spec = item->spec;
> +			sctp_mask = item->mask;
> +
> +			if (!sctp_spec || !sctp_mask) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid sctp item");
> +				return -rte_errno;
> +			}
> +
> +			if (sctp_spec->hdr.src_port != 0 &&
> +			    sctp_spec->hdr.dst_port != 0) {
> +				rte_flow_error_set(error, EINVAL,
> +						RTE_FLOW_ERROR_TYPE_ITEM,
> +						item,
> +						"Invalid sctp spec");
> +				return -rte_errno;
> +			}
> +
> +			if (sctp_spec->hdr.src_port != 0) {
> +				filter->port_type =
> +					I40E_L4_PORT_TYPE_SRC;
> +				filter->tenant_id =
> +					rte_be_to_cpu_32(sctp_spec->hdr.src_port);
> +			}
> +
> +			if (sctp_spec->hdr.dst_port != 0) {
> +				filter->port_type =
> +					I40E_L4_PORT_TYPE_DST;
> +				filter->tenant_id =
> +					rte_be_to_cpu_32(sctp_spec->hdr.dst_port);
> +			}
> +
> +			filter->tunnel_type = I40E_TUNNEL_TYPE_SCTP;
> +
> +			break;
> +		default:
> +			break;
> +		}
> +	}
> +


I see duplicated handling for UDP/TCP/SCTP in this switch; do you think it is
worth finding a better way to combine them?
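As a sketch only, reusing the types and macros already in this patch: the
UDP, TCP and SCTP headers all begin with 16-bit source and destination port
fields, so a hypothetical helper like the one below could hold the shared
logic, and each case would only extract the two ports and pass its tunnel
type. The byte-order handling is kept exactly as in the patch.

static int
i40e_flow_parse_l4_ports(const struct rte_flow_item *item,
			 rte_be16_t src_port, rte_be16_t dst_port,
			 enum i40e_tunnel_type type,
			 struct rte_flow_error *error,
			 struct i40e_tunnel_filter_conf *filter)
{
	/* Only one of the two ports may be given for a single rule. */
	if (src_port != 0 && dst_port != 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Invalid L4 spec");
		return -rte_errno;
	}

	if (src_port != 0) {
		filter->port_type = I40E_L4_PORT_TYPE_SRC;
		filter->tenant_id = rte_be_to_cpu_32(src_port);
	} else if (dst_port != 0) {
		filter->port_type = I40E_L4_PORT_TYPE_DST;
		filter->tenant_id = rte_be_to_cpu_32(dst_port);
	}

	filter->tunnel_type = type;

	return 0;
}

Each case in the switch would then reduce to the NULL spec/mask check plus
one call, e.g. i40e_flow_parse_l4_ports(item, udp_spec->hdr.src_port,
udp_spec->hdr.dst_port, I40E_TUNNEL_TYPE_UDP, error, filter).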


> +	return 0;
> +}
> +
> +static int
> +i40e_flow_parse_l4_proto_filter(struct rte_eth_dev *dev,
> +			   const struct rte_flow_attr *attr,
> +			   const struct rte_flow_item pattern[],
> +			   const struct rte_flow_action actions[],
> +			   struct rte_flow_error *error,
> +			   union i40e_filter_t *filter)
> +{
> +	struct i40e_tunnel_filter_conf *tunnel_filter =
> +		&filter->consistent_tunnel_filter;
> +	int ret;
> +
> +	ret = i40e_flow_parse_l4_proto_pattern(pattern, error, tunnel_filter);
> +	if (ret)
> +		return ret;
> +
> +	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
> +	if (ret)
> +		return ret;
> +
> +	ret = i40e_flow_parse_attr(attr, error);
> +	if (ret)
> +		return ret;
> +
> +	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
> +
> +	return ret;
> +}
> +
>   static uint16_t i40e_supported_tunnel_filter_types[] = {
>   	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
>   	ETH_TUNNEL_FILTER_IVLAN,

