[dpdk-dev] [PATCH 11/13] net/mlx5: add default flows for hairpin

Slava Ovsiienko viacheslavo at mellanox.com
Thu Sep 26 11:34:13 CEST 2019


> -----Original Message-----
> From: Ori Kam <orika at mellanox.com>
> Sent: Thursday, September 26, 2019 9:29
> To: Matan Azrad <matan at mellanox.com>; Shahaf Shuler
> <shahafs at mellanox.com>; Slava Ovsiienko <viacheslavo at mellanox.com>
> Cc: dev at dpdk.org; Ori Kam <orika at mellanox.com>; jingjing.wu at intel.com;
> stephen at networkplumber.org
> Subject: [PATCH 11/13] net/mlx5: add default flows for hairpin
> 
> When using hairpin, all traffic from Tx hairpin queues should jump to a
> dedicated table where matching can be done using registers.
> 
> Signed-off-by: Ori Kam <orika at mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo at mellanox.com>
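
A small illustration for anyone following the series (not part of the patch;
the function name and the hairpin_rxq index below are made up): once the Rx
and Tx hairpin queues are peered, the application only needs to steer ingress
traffic into the Rx hairpin queue with an ordinary rte_flow rule. The
hairpinned packets then leave through the peer Tx queue, where the default
flow added by this patch jumps them to the dedicated MLX5_HAIRPIN_TX_TABLE so
matching on registers can happen there. A minimal sketch using only standard
rte_flow calls:

    /* Illustrative only -- generic rte_flow usage, not code from this patch. */
    #include <rte_flow.h>

    /*
     * Steer all IPv4 ingress traffic on "port_id" to the Rx hairpin queue
     * "hairpin_rxq" (an assumed index the application configured earlier).
     * The packets loop back out of the peer Tx hairpin queue and hit the
     * default jump flow installed by this patch.
     */
    static struct rte_flow *
    steer_ipv4_to_hairpin(uint16_t port_id, uint16_t hairpin_rxq,
                          struct rte_flow_error *error)
    {
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = hairpin_rxq };
        const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
    }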

> ---
>  drivers/net/mlx5/mlx5.h         |  2 ++
>  drivers/net/mlx5/mlx5_flow.c    | 60 +++++++++++++++++++++++++++++++++++++++
>  drivers/net/mlx5/mlx5_flow.h    |  9 ++++++
>  drivers/net/mlx5/mlx5_flow_dv.c | 63 +++++++++++++++++++++++++++++++++++++++--
>  drivers/net/mlx5/mlx5_trigger.c | 18 ++++++++++++
>  5 files changed, 150 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> index 41eb35a..5f1a25d 100644
> --- a/drivers/net/mlx5/mlx5.h
> +++ b/drivers/net/mlx5/mlx5.h
> @@ -556,6 +556,7 @@ struct mlx5_flow_tbl_resource {
>  };
> 
>  #define MLX5_MAX_TABLES UINT16_MAX
> +#define MLX5_HAIRPIN_TX_TABLE (UINT16_MAX - 1)
>  #define MLX5_MAX_TABLES_FDB UINT16_MAX
> 
>  #define MLX5_DBR_PAGE_SIZE 4096 /* Must be >= 512. */
> @@ -872,6 +873,7 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
>  int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
>  void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
>  int mlx5_flow_verify(struct rte_eth_dev *dev);
> +int mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev, uint32_t queue);
>  int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
>  			struct rte_flow_item_eth *eth_spec,
>  			struct rte_flow_item_eth *eth_mask,
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 00afc18..33ed204 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -2712,6 +2712,66 @@ struct rte_flow *
>  }
> 
>  /**
> + * Enable default hairpin egress flow.
> + *
> + * @param dev
> + *   Pointer to Ethernet device.
> + * @param queue
> + *   The queue index.
> + *
> + * @return
> + *   0 on success, a negative errno value otherwise and rte_errno is set.
> + */
> +int
> +mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
> +			    uint32_t queue)
> +{
> +	struct mlx5_priv *priv = dev->data->dev_private;
> +	const struct rte_flow_attr attr = {
> +		.egress = 1,
> +		.priority = 0,
> +	};
> +	struct mlx5_rte_flow_item_tx_queue queue_spec = {
> +		.queue = queue,
> +	};
> +	struct mlx5_rte_flow_item_tx_queue queue_mask = {
> +		.queue = UINT32_MAX,
> +	};
> +	struct rte_flow_item items[] = {
> +		{
> +			.type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
> +			.spec = &queue_spec,
> +			.last = NULL,
> +			.mask = &queue_mask,
> +		},
> +		{
> +			.type = RTE_FLOW_ITEM_TYPE_END,
> +		},
> +	};
> +	struct rte_flow_action_jump jump = {
> +		.group = MLX5_HAIRPIN_TX_TABLE,
> +	};
> +	struct rte_flow_action actions[2];
> +	struct rte_flow *flow;
> +	struct rte_flow_error error;
> +
> +	actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
> +	actions[0].conf = &jump;
> +	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
> +	flow = flow_list_create(dev, &priv->ctrl_flows,
> +				&attr, items, actions, false, &error);
> +	if (!flow) {
> +		DRV_LOG(DEBUG,
> +			"Failed to create ctrl flow: rte_errno(%d),"
> +			" type(%d), message(%s)\n",
> +			rte_errno, error.type,
> +			error.message ? error.message : " (no stated reason)");
> +		return -rte_errno;
> +	}
> +	return 0;
> +}
> +
> +/**
>   * Enable a control flow configured from the control plane.
>   *
>   * @param dev
> diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
> index 1b14fb7..bb67380 100644
> --- a/drivers/net/mlx5/mlx5_flow.h
> +++ b/drivers/net/mlx5/mlx5_flow.h
> @@ -44,6 +44,7 @@ enum modify_reg {
>  enum mlx5_rte_flow_item_type {
>  	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
>  	MLX5_RTE_FLOW_ITEM_TYPE_TAG,
> +	MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
>  };
> 
>  /* Private rte flow actions. */
> @@ -64,6 +65,11 @@ struct mlx5_rte_flow_action_set_tag {
>  	rte_be32_t data;
>  };
> 
> +/* Matches on source queue. */
> +struct mlx5_rte_flow_item_tx_queue {
> +	uint32_t queue;
> +};
> +
>  /* Pattern outer Layer bits. */
>  #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
>  #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
> @@ -102,6 +108,9 @@ struct mlx5_rte_flow_action_set_tag {
>  #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 23)
>  #define MLX5_FLOW_LAYER_NVGRE (1u << 24)
> 
> +/* Queue items. */
> +#define MLX5_FLOW_ITEM_TX_QUEUE (1u << 25)
> +
>  /* Outer Masks. */
>  #define MLX5_FLOW_LAYER_OUTER_L3 \
>  	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
> diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
> index dde0831..2b48680 100644
> --- a/drivers/net/mlx5/mlx5_flow_dv.c
> +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> @@ -3357,7 +3357,9 @@ struct field_modify_info modify_tcp[] = {
>  		return ret;
>  	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
>  		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> -		switch (items->type) {
> +		int type = items->type;
> +
> +		switch (type) {
>  		case RTE_FLOW_ITEM_TYPE_VOID:
>  			break;
>  		case RTE_FLOW_ITEM_TYPE_PORT_ID:
> @@ -3518,6 +3520,9 @@ struct field_modify_info modify_tcp[] = {
>  				return ret;
>  			last_item = MLX5_FLOW_LAYER_ICMP6;
>  			break;
> +		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
> +		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
> +			break;
>  		default:
>  			return rte_flow_error_set(error, ENOTSUP,
>  						  RTE_FLOW_ERROR_TYPE_ITEM,
> @@ -3526,11 +3531,12 @@ struct field_modify_info modify_tcp[] = {
>  		item_flags |= last_item;
>  	}
>  	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
> +		int type = actions->type;
>  		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
>  			return rte_flow_error_set(error, ENOTSUP,
>  						  RTE_FLOW_ERROR_TYPE_ACTION,
>  						  actions, "too many actions");
> -		switch (actions->type) {
> +		switch (type) {
>  		case RTE_FLOW_ACTION_TYPE_VOID:
>  			break;
>  		case RTE_FLOW_ACTION_TYPE_PORT_ID:
> @@ -3796,6 +3802,8 @@ struct field_modify_info modify_tcp[] = {
>  					MLX5_FLOW_ACTION_INC_TCP_ACK :
>  					MLX5_FLOW_ACTION_DEC_TCP_ACK;
>  			break;
> +		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
> +			break;
>  		default:
>  			return rte_flow_error_set(error, ENOTSUP,
>  						  RTE_FLOW_ERROR_TYPE_ACTION,
> @@ -5291,6 +5299,51 @@ struct field_modify_info modify_tcp[] = {
>  }
> 
>  /**
> + * Add Tx queue matcher.
> + *
> + * @param[in] dev
> + *   Pointer to the dev struct.
> + * @param[in, out] matcher
> + *   Flow matcher.
> + * @param[in, out] key
> + *   Flow matcher value.
> + * @param[in] item
> + *   Flow pattern to translate.
> + */
> +static void
> +flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
> +				void *matcher, void *key,
> +				const struct rte_flow_item *item)
> +{
> +	const struct mlx5_rte_flow_item_tx_queue *queue_m;
> +	const struct mlx5_rte_flow_item_tx_queue *queue_v;
> +	void *misc_m =
> +		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
> +	void *misc_v =
> +		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
> +	struct mlx5_txq_ctrl *txq;
> +	uint32_t queue;
> +
> +
> +	queue_m = (const void *)item->mask;
> +	if (!queue_m)
> +		return;
> +	queue_v = (const void *)item->spec;
> +	if (!queue_v)
> +		return;
> +	txq = mlx5_txq_get(dev, queue_v->queue);
> +	if (!txq)
> +		return;
> +	queue = txq->obj->sq->id;
> +	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
> +	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
> +		 queue & queue_m->queue);
> +	mlx5_txq_release(dev, queue_v->queue);
> +}
> +
> +/**
>   * Fill the flow with DV spec.
>   *
>   * @param[in] dev
> @@ -5866,6 +5919,12 @@ struct field_modify_info modify_tcp[] = {
>  						   items);
>  			last_item = MLX5_FLOW_ITEM_TAG;
>  			break;
> +		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
> +			flow_dv_translate_item_tx_queue(dev, match_mask,
> +							match_value,
> +							items);
> +			last_item = MLX5_FLOW_ITEM_TX_QUEUE;
> +			break;
>  		default:
>  			break;
>  		}
> diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
> index a4fcdb3..a476cd5 100644
> --- a/drivers/net/mlx5/mlx5_trigger.c
> +++ b/drivers/net/mlx5/mlx5_trigger.c
> @@ -396,6 +396,24 @@
>  	unsigned int j;
>  	int ret;
> 
> +	/*
> +	 * The hairpin Txq default flow should be created no matter whether
> +	 * isolated mode is enabled, otherwise all packets to be sent would
> +	 * go out directly without the Tx flow actions, e.g. encapsulation.
> +	 */
> +	for (i = 0; i != priv->txqs_n; ++i) {
> +		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
> +		if (!txq_ctrl)
> +			continue;
> +		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
> +			ret = mlx5_ctrl_flow_source_queue(dev, i);
> +			if (ret) {
> +				mlx5_txq_release(dev, i);
> +				goto error;
> +			}
> +		}
> +		mlx5_txq_release(dev, i);
> +	}
>  	if (priv->config.dv_esw_en && !priv->config.vf)
>  		if (!mlx5_flow_create_esw_table_zero_flow(dev))
>  			goto error;
> --
> 1.8.3.1