[dpdk-dev] [PATCH v2 08/20] net/mlx5: add flow IPv4 item

Yongseok Koh yskoh at mellanox.com
Wed Jul 4 02:12:03 CEST 2018


On Wed, Jun 27, 2018 at 05:07:40PM +0200, Nelio Laranjeiro wrote:
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
> ---
>  drivers/net/mlx5/mlx5_flow.c | 83 ++++++++++++++++++++++++++++++++++++
>  1 file changed, 83 insertions(+)
> 
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 6a576ddd9..8e7a0bb5a 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -540,6 +540,86 @@ mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
>  	return size;
>  }
>  
> +/**
> + * Validate IPv4 layer and possibly create the Verbs specification.

Instead of 'possibly', please describe what it does in detail, as I asked for
the previous patch:
	net/mlx5: support flow Ethernet item among with drop action

Thanks,
Yongseok

> + *
> + * @param item[in]
> + *   Item specification.
> + * @param flow[in, out]
> + *   Pointer to flow structure.
> + * @param flow_size[in]
> + *   Size in bytes of the available space to store the flow information.
> + * @param error
> + *   Pointer to error structure.
> + *
> + * @return
> + *   Size in bytes necessary for the conversion on success, a negative
> + *   errno value otherwise and rte_errno is set.
> + */
> +static int
> +mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
> +		    const size_t flow_size, struct rte_flow_error *error)
> +{
> +	const struct rte_flow_item_ipv4 *spec = item->spec;
> +	const struct rte_flow_item_ipv4 *mask = item->mask;
> +	const struct rte_flow_item_ipv4 nic_mask = {
> +		.hdr = {
> +			.src_addr = RTE_BE32(0xffffffff),
> +			.dst_addr = RTE_BE32(0xffffffff),
> +			.type_of_service = 0xff,
> +			.next_proto_id = 0xff,
> +		},
> +	};
> +	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
> +	struct ibv_flow_spec_ipv4_ext ipv4 = {
> +		.type = IBV_FLOW_SPEC_IPV4_EXT,
> +		.size = size,
> +	};
> +	int ret;
> +
> +	if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3)
> +		return rte_flow_error_set(error, ENOTSUP,
> +					  RTE_FLOW_ERROR_TYPE_ITEM,
> +					  item,
> +					  "multiple L3 layers not supported");
> +	else if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
> +		return rte_flow_error_set(error, ENOTSUP,
> +					  RTE_FLOW_ERROR_TYPE_ITEM,
> +					  item,
> +					  "L3 cannot follow an L4 layer.");
> +	if (!mask)
> +		mask = &rte_flow_item_ipv4_mask;
> +	ret = mlx5_flow_item_validate(item, (const uint8_t *)mask,
> +				      (const uint8_t *)&nic_mask,
> +				      sizeof(struct rte_flow_item_ipv4), error);
> +	if (ret < 0)
> +		return ret;
> +	flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
> +	if (size > flow_size)
> +		return size;
> +	if (spec) {
> +		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
> +			.src_ip = spec->hdr.src_addr,
> +			.dst_ip = spec->hdr.dst_addr,
> +			.proto = spec->hdr.next_proto_id,
> +			.tos = spec->hdr.type_of_service,
> +		};
> +		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
> +			.src_ip = mask->hdr.src_addr,
> +			.dst_ip = mask->hdr.dst_addr,
> +			.proto = mask->hdr.next_proto_id,
> +			.tos = mask->hdr.type_of_service,
> +		};
> +		/* Remove unwanted bits from values. */
> +		ipv4.val.src_ip &= ipv4.mask.src_ip;
> +		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
> +		ipv4.val.proto &= ipv4.mask.proto;
> +		ipv4.val.tos &= ipv4.mask.tos;
> +	}
> +	mlx5_flow_spec_verbs_add(flow, &ipv4, size);
> +	return size;
> +}
> +
>  /**
>   * Validate items provided by the user.
>   *
> @@ -576,6 +656,9 @@ mlx5_flow_items(const struct rte_flow_item items[],
>  		case RTE_FLOW_ITEM_TYPE_VLAN:
>  			ret = mlx5_flow_item_vlan(items, flow, remain, error);
>  			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			ret = mlx5_flow_item_ipv4(items, flow, remain, error);
> +			break;
>  		default:
>  			return rte_flow_error_set(error, ENOTSUP,
>  						  RTE_FLOW_ERROR_TYPE_ITEM,
> -- 
> 2.18.0
> 


More information about the dev mailing list