[dpdk-dev] [PATCH v3 4/4] net/iavf: support FDIR for IP fragment packet

Xu, Ting ting.xu at intel.com
Mon Apr 12 10:45:03 CEST 2021


Hi, Jeff

Best Regards,
Xu Ting

> -----Original Message-----
> From: Guo, Jia <jia.guo at intel.com>
> Sent: Sunday, April 11, 2021 2:02 PM
> To: orika at nvidia.com; Zhang, Qi Z <qi.z.zhang at intel.com>; Xing, Beilei
> <beilei.xing at intel.com>; Li, Xiaoyun <xiaoyun.li at intel.com>; Wu, Jingjing
> <jingjing.wu at intel.com>
> Cc: dev at dpdk.org; Xu, Ting <ting.xu at intel.com>; Guo, Jia <jia.guo at intel.com>
> Subject: [PATCH v3 4/4] net/iavf: support FDIR for IP fragment packet
> 
> New FDIR parsing is added to handle fragmented IPv4/IPv6 packets.
> 
> Signed-off-by: Ting Xu <ting.xu at intel.com>
> Signed-off-by: Jeff Guo <jia.guo at intel.com>
> ---
>  drivers/net/iavf/iavf_fdir.c         | 376 ++++++++++++++++++---------
>  drivers/net/iavf/iavf_generic_flow.h |   5 +
>  2 files changed, 257 insertions(+), 124 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
> index 62f032985a..64c169f8c4 100644
> --- a/drivers/net/iavf/iavf_fdir.c
> +++ b/drivers/net/iavf/iavf_fdir.c
> @@ -34,7 +34,7 @@
>  #define IAVF_FDIR_INSET_ETH_IPV4 (\
>  	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
>  	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
> -	IAVF_INSET_IPV4_TTL)
> +	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_ID)
> 

Skip...

> +			if (ipv4_mask->hdr.version_ihl ||
> +			    ipv4_mask->hdr.total_length ||
> +			    ipv4_mask->hdr.hdr_checksum) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item, "Invalid IPv4 mask.");
> +				return -rte_errno;
> +			}
> 
> -				if (tun_inner) {
> -					input_set &= ~IAVF_PROT_IPV4_OUTER;
> -					input_set |= IAVF_PROT_IPV4_INNER;
> -				}

This "tun_inner" part was newly added and is needed for GTPU inner matching, so it cannot be deleted (see the sketch below).
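For reference, this is roughly the block that should stay (taken from the removed lines above), so that a GTPU inner IPv4 match is steered to the inner input-set bits instead of the outer ones:

	/* when parsing the inner IPv4 of a tunnel (e.g. GTPU),
	 * move the collected fields from the outer to the inner set
	 */
	if (tun_inner) {
		input_set &= ~IAVF_PROT_IPV4_OUTER;
		input_set |= IAVF_PROT_IPV4_INNER;
	}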

> +			if (ipv4_last &&
> +			    (ipv4_last->hdr.version_ihl ||
> +			     ipv4_last->hdr.type_of_service ||
> +			     ipv4_last->hdr.time_to_live ||
> +			     ipv4_last->hdr.total_length ||
> +			     ipv4_last->hdr.next_proto_id ||
> +			     ipv4_last->hdr.hdr_checksum ||
> +			     ipv4_last->hdr.src_addr ||
> +			     ipv4_last->hdr.dst_addr)) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item, "Invalid IPv4 last.");
> +				return -rte_errno;
> +			}
> 
> -				rte_memcpy(hdr->buffer,
> -					&ipv4_spec->hdr,
> -					sizeof(ipv4_spec->hdr));
> +			if (ipv4_mask->hdr.type_of_service ==
> +			    UINT8_MAX) {
> +				input_set |= IAVF_INSET_IPV4_TOS;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
> +								 DSCP);
> +			}
> +
> +			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
> +				input_set |= IAVF_INSET_IPV4_PROTO;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
> +								 PROT);
> +			}
> +
> +			if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
> +				input_set |= IAVF_INSET_IPV4_TTL;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
> +								 TTL);
> +			}
> +
> +			if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
> +				input_set |= IAVF_INSET_IPV4_SRC;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
> +								 SRC);
> +			}
> +
> +			if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
> +				input_set |= IAVF_INSET_IPV4_DST;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4,
> +								 DST);
> +			}
> +
> +			rte_memcpy(hdr->buffer, &ipv4_spec->hdr,
> +				   sizeof(ipv4_spec->hdr));
> +
> +			hdrs->count = ++layer;
> +
> +			/* only support any packet id for fragment IPv4
> +			 * any packet_id:
> +			 * spec is 0, last is 0xffff, mask is 0xffff
> +			 */
> +			if (ipv4_last && ipv4_spec->hdr.packet_id == 0 &&
> +			    ipv4_last->hdr.packet_id == UINT16_MAX &&
> +			    ipv4_mask->hdr.packet_id == UINT16_MAX &&
> +			    ipv4_mask->hdr.fragment_offset == UINT16_MAX) {
> +				/* all IPv4 fragment packet has the same
> +				 * ethertype, if the spec is for all valid
> +				 * packet id, set ethertype into input set.
> +				 */
> +				input_set |= IAVF_INSET_ETHERTYPE;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr1, ETH,
> +								 ETHERTYPE);
> +
> +				/* add dummy header for IPv4 Fragment */
> +				iavf_fdir_add_fragment_hdr(hdrs, layer);
> +			} else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item, "Invalid IPv4 mask.");
> +				return -rte_errno;
>  			}
> 
> -			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
>  			break;
> 
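Minor note on the "any packet id" convention in the comment above: from the application side the IPv4 item would be filled in roughly as below. This is only a sketch built on the public rte_flow structures to illustrate spec 0 / last 0xffff / mask 0xffff on packet_id; it is not code from this patch.

	#include <rte_byteorder.h>
	#include <rte_flow.h>

	/* sketch: request FDIR on any fragmented IPv4 packet */
	struct rte_flow_item_ipv4 frag_spec = {
		.hdr = { .packet_id = 0 },
	};
	struct rte_flow_item_ipv4 frag_last = {
		.hdr = { .packet_id = RTE_BE16(0xffff) },
	};
	struct rte_flow_item_ipv4 frag_mask = {
		.hdr = {
			.packet_id = RTE_BE16(0xffff),
			.fragment_offset = RTE_BE16(0xffff),
		},
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &frag_spec, .last = &frag_last, .mask = &frag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};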
>  		case RTE_FLOW_ITEM_TYPE_IPV6:
> @@ -707,63 +787,109 @@ iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
>  			ipv6_spec = item->spec;
>  			ipv6_mask = item->mask;
> 
> -			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
> +			hdr = &hdrs->proto_hdr[layer];
> 
>  			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
> 
> -			if (ipv6_spec && ipv6_mask) {
> -				if (ipv6_mask->hdr.payload_len) {
> -					rte_flow_error_set(error, EINVAL,
> -						RTE_FLOW_ERROR_TYPE_ITEM,
> -						item, "Invalid IPv6 mask");
> -					return -rte_errno;
> -				}
> +			if (!(ipv6_spec && ipv6_mask)) {
> +				hdrs->count = ++layer;
> +				break;
> +			}
> 
> -				if ((ipv6_mask->hdr.vtc_flow &
> -					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
> -					== rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
> -					input_set |= IAVF_INSET_IPV6_TC;
> -					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
> -				}
> -				if (ipv6_mask->hdr.proto == UINT8_MAX) {
> -					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
> -					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
> -				}
> -				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
> -					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
> -					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
> -				}
> -				if (!memcmp(ipv6_mask->hdr.src_addr,
> -					ipv6_addr_mask,
> -					RTE_DIM(ipv6_mask->hdr.src_addr))) {
> -					input_set |= IAVF_INSET_IPV6_SRC;
> -					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
> -				}
> -				if (!memcmp(ipv6_mask->hdr.dst_addr,
> -					ipv6_addr_mask,
> -					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
> -					input_set |= IAVF_INSET_IPV6_DST;
> -					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
> -				}
> +			if (ipv6_mask->hdr.payload_len) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item, "Invalid IPv6 mask");
> +				return -rte_errno;
> +			}
> 
> -				if (tun_inner) {
> -					input_set &= ~IAVF_PROT_IPV6_OUTER;
> -					input_set |= IAVF_PROT_IPV6_INNER;
> -				}

Same as for IPv4: this "tun_inner" part is needed for GTPU inner matching and cannot be deleted.

> +			if ((ipv6_mask->hdr.vtc_flow &
> +			      rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
> +			     == rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
> +				input_set |= IAVF_INSET_IPV6_TC;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
> +								 TC);
> +			}
> 
> -				rte_memcpy(hdr->buffer,
> -					&ipv6_spec->hdr,
> -					sizeof(ipv6_spec->hdr));
> +			if (ipv6_mask->hdr.proto == UINT8_MAX) {
> +				input_set |= IAVF_INSET_IPV6_NEXT_HDR;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
> +								 PROT);
> +			}
> +
> +			if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
> +				input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
> +								 HOP_LIMIT);
> +			}
> +
> +			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
> +				    RTE_DIM(ipv6_mask->hdr.src_addr))) {
> +				input_set |= IAVF_INSET_IPV6_SRC;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
> +								 SRC);
> +			}
> +			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
> +				    RTE_DIM(ipv6_mask->hdr.dst_addr))) {
> +				input_set |= IAVF_INSET_IPV6_DST;
> +				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6,
> +								 DST);
> +			}
> +
> +			rte_memcpy(hdr->buffer, &ipv6_spec->hdr,
> +				   sizeof(ipv6_spec->hdr));
> +
> +			hdrs->count = ++layer;
> +			break;
> +

Skip...

> @@ -84,6 +85,8 @@
>  	(IAVF_PROT_IPV4_OUTER | IAVF_IP_PROTO)
>  #define IAVF_INSET_IPV4_TTL \
>  	(IAVF_PROT_IPV4_OUTER | IAVF_IP_TTL)
> +#define IAVF_INSET_IPV4_ID \
> +	(IAVF_PROT_IPV4_OUTER | IAVF_IP_PKID)
>  #define IAVF_INSET_IPV6_SRC \
>  	(IAVF_PROT_IPV6_OUTER | IAVF_IP_SRC)
>  #define IAVF_INSET_IPV6_DST \
> @@ -94,6 +97,8 @@
>  	(IAVF_PROT_IPV6_OUTER | IAVF_IP_TTL)
>  #define IAVF_INSET_IPV6_TC \
>  	(IAVF_PROT_IPV6_OUTER | IAVF_IP_TOS)
> +#define IAVF_INSET_IPV6_ID \
> +	(IAVF_PROT_IPV6_OUTER | IAVF_IP_PKID)
> 
>  #define IAVF_INSET_TUN_IPV4_SRC \
>  	(IAVF_PROT_IPV4_INNER | IAVF_IP_SRC)
> --
> 2.20.1


