[dpdk-dev] [PATCH 4/5] net/mlx5: fix wildcard item for Direct Verbs

Yongseok Koh yskoh at mellanox.com
Tue Oct 23 19:22:52 CEST 2018


On Tue, Oct 23, 2018 at 08:25:15AM -0700, Ori Kam wrote:
> PSB
> 
> > -----Original Message-----
> > From: Shahaf Shuler
> > Sent: Tuesday, October 23, 2018 10:42 AM
> > To: Yongseok Koh <yskoh at mellanox.com>
> > Cc: dev at dpdk.org; Ori Kam <orika at mellanox.com>
> > Subject: RE: [PATCH 4/5] net/mlx5: fix wildcard item for Direct Verbs
> > 
> > Wednesday, October 17, 2018 5:08 AM, Yongseok Koh:
> > > Subject: [PATCH 4/5] net/mlx5: fix wildcard item for Direct Verbs
> > >
> > > If a network layer is specified with no spec, it means a wildcard match.
> > > flow_dv_translate_item_*() returns without writing anything if the spec is
> > > null, and this causes creation of a wrong flow. E.g., the following flow
> > > has to match any IPv4 packet.
> > >
> > >   flow create 0 ingress pattern eth / ipv4 / end actions ...
> > >
> > > But, with the current code, it matches any packet because the PMD doesn't
> > > write anything about IPv4. The matcher value and mask become completely
> > > zero. It should have written the IP version at least. The same applies to
> > > the rest of the items.
> > >
> > > Even if the spec is null, the PMD has to write the constant fields before
> > > returning, e.g. the IP version and the IP protocol number.
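Condensed, the pattern the fix applies to each translate function looks like
this (a sketch abbreviated from the diff below, not the literal function body):

	/* ... select inner_headers or outer_headers ... */
	/* Constant fields are written even for a wildcard (null spec) item. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	/* Bail out on a wildcard match only after that. */
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	/* ... then write the spec/mask-dependent fields ... */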
> > >
> > > Fixes: fc2c498ccb94 ("net/mlx5: add Direct Verbs translate items")
> > > Cc: orika at mellanox.com
> > >
> > > Signed-off-by: Yongseok Koh <yskoh at mellanox.com>
> > > Acked-by: Ori Kam <orika at mellanox.com>
> > 
> > [...]
> > 
> > >  #include <sys/queue.h>
> > >  #include <stdalign.h>
> > >  #include <stdint.h>
> > > @@ -474,10 +473,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key,
> > >  	char *l24_v;
> > >  	uint8_t tos;
> > >
> > > -	if (!ipv4_v)
> > > -		return;
> > > -	if (!ipv4_m)
> > > -		ipv4_m = &nic_mask;
> > >  	if (inner) {
> > >  		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
> > >  					 inner_headers);
> > > @@ -489,6 +484,10 @@ flow_dv_translate_item_ipv4(void *matcher, void *key,
> > >  	}
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
> > 
> > Is matching on the IP version enough? Don't we need to also match the
> > ethertype?
> > Meaning, maybe the value at the IP version offset can be 4 even though it is
> > not an IPv4 header.
> > 
> > Same question for IPv6.
> 
> I think you are correct;
> we should also test the ethertype.

Nope, if the kernel driver code is right, we only need to test one of them.
I checked the driver code when I wrote this fix:


static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
			   u32 *match_v, const union ib_flow_spec *ib_spec,
			   const struct ib_flow_attr *flow_attr,
			   struct mlx5_flow_act *action, u32 prev_type)
{
[...]

	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 inner_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version);
	} else {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 outer_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	}
[...]

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
[...]
	case IB_FLOW_SPEC_IPV4:
		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, MLX5_FS_IPV4_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IP);
		}

It does look like it depends on a device capability. So, I thought Xueming/Ori
already knew the result and made a decision not to check the capability. Any
comment?
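
If the PMD were to mirror that kernel logic, the shape would be roughly the
following (a hypothetical sketch; ip_version_cap stands in for a query of
ft_field_support.outer_ip_version, which the DV code does not read today):

	if (ip_version_cap) {
		/* Device can match on the parsed IP version directly. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	} else {
		/* Fall back to matching the ethertype, as the kernel does. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 0x0800 /* IPv4 ethertype, host order */);
	}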


Thanks,
Yongseok

> > 
> > > +	if (!ipv4_v)
> > > +		return;
> > > +	if (!ipv4_m)
> > > +		ipv4_m = &nic_mask;
> > >  	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
> > >  			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
> > >  	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
> > > @@ -557,10 +556,6 @@ flow_dv_translate_item_ipv6(void *matcher, void *key,
> > >  	int i;
> > >  	int size;
> > >
> > > -	if (!ipv6_v)
> > > -		return;
> > > -	if (!ipv6_m)
> > > -		ipv6_m = &nic_mask;
> > >  	if (inner) {
> > >  		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
> > >  					 inner_headers);
> > > @@ -570,6 +565,12 @@ flow_dv_translate_item_ipv6(void *matcher, void *key,
> > >  					 outer_headers);
> > >  		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
> > >  	}
> > > +	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
> > > +	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
> > > +	if (!ipv6_v)
> > > +		return;
> > > +	if (!ipv6_m)
> > > +		ipv6_m = &nic_mask;
> > >  	size = sizeof(ipv6_m->hdr.dst_addr);
> > >  	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
> > >  			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
> > > @@ -585,8 +586,6 @@ flow_dv_translate_item_ipv6(void *matcher, void *key,
> > >  	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
> > >  	for (i = 0; i < size; ++i)
> > >  		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
> > > -	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
> > > -	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
> > >  	/* TOS. */
> > >  	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
> > >  	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
> > > @@ -635,10 +634,6 @@ flow_dv_translate_item_tcp(void *matcher, void *key,
> > >  	void *headers_m;
> > >  	void *headers_v;
> > >
> > > -	if (!tcp_v)
> > > -		return;
> > > -	if (!tcp_m)
> > > -		tcp_m = &rte_flow_item_tcp_mask;
> > >  	if (inner) {
> > >  		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
> > >  					 inner_headers);
> > > @@ -650,6 +645,10 @@ flow_dv_translate_item_tcp(void *matcher, void *key,
> > >  	}
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
> > > +	if (!tcp_v)
> > > +		return;
> > > +	if (!tcp_m)
> > > +		tcp_m = &rte_flow_item_tcp_mask;
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
> > >  		 rte_be_to_cpu_16(tcp_m->hdr.src_port));
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
> > > @@ -682,10 +681,6 @@ flow_dv_translate_item_udp(void *matcher, void *key,
> > >  	void *headers_m;
> > >  	void *headers_v;
> > >
> > > -	if (!udp_v)
> > > -		return;
> > > -	if (!udp_m)
> > > -		udp_m = &rte_flow_item_udp_mask;
> > >  	if (inner) {
> > >  		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
> > >  					 inner_headers);
> > > @@ -697,6 +692,10 @@ flow_dv_translate_item_udp(void *matcher, void *key,
> > >  	}
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
> > > +	if (!udp_v)
> > > +		return;
> > > +	if (!udp_m)
> > > +		udp_m = &rte_flow_item_udp_mask;
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
> > >  		 rte_be_to_cpu_16(udp_m->hdr.src_port));
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
> > > @@ -731,10 +730,6 @@ flow_dv_translate_item_gre(void *matcher, void *key,
> > >  	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
> > >  	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
> > >
> > > -	if (!gre_v)
> > > -		return;
> > > -	if (!gre_m)
> > > -		gre_m = &rte_flow_item_gre_mask;
> > >  	if (inner) {
> > >  		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
> > >  					 inner_headers);
> > > @@ -746,6 +741,10 @@ flow_dv_translate_item_gre(void *matcher, void *key,
> > >  	}
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
> > >  	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
> > > +	if (!gre_v)
> > > +		return;
> > > +	if (!gre_m)
> > > +		gre_m = &rte_flow_item_gre_mask;
> > >  	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
> > >  		 rte_be_to_cpu_16(gre_m->protocol));
> > >  	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
> > > @@ -780,6 +779,7 @@ flow_dv_translate_item_nvgre(void *matcher, void *key,
> > >  	int size;
> > >  	int i;
> > >
> > > +	flow_dv_translate_item_gre(matcher, key, item, inner);
> > >  	if (!nvgre_v)
> > >  		return;
> > >  	if (!nvgre_m)
> > > @@ -790,7 +790,6 @@ flow_dv_translate_item_nvgre(void *matcher, void *key,
> > >  	memcpy(gre_key_m, tni_flow_id_m, size);
> > >  	for (i = 0; i < size; ++i)
> > >  		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
> > > -	flow_dv_translate_item_gre(matcher, key, item, inner);
> > >  }
> > >
> > >  /**
> > > @@ -822,10 +821,6 @@ flow_dv_translate_item_vxlan(void *matcher, void *key,
> > >  	int size;
> > >  	int i;
> > >
> > > -	if (!vxlan_v)
> > > -		return;
> > > -	if (!vxlan_m)
> > > -		vxlan_m = &rte_flow_item_vxlan_mask;
> > >  	if (inner) {
> > >  		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
> > >  					 inner_headers);
> > > @@ -841,6 +836,10 @@ flow_dv_translate_item_vxlan(void *matcher, void *key,
> > >  		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
> > >  		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
> > >  	}
> > > +	if (!vxlan_v)
> > > +		return;
> > > +	if (!vxlan_m)
> > > +		vxlan_m = &rte_flow_item_vxlan_mask;
> > >  	size = sizeof(vxlan_m->vni);
> > >  	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
> > >  	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
> > > --
> > > 2.11.0
> 

