[dpdk-dev] [PATCH v3 14/21] net/mlx5: add RSS flow action

Yongseok Koh yskoh at mellanox.com
Wed Jul 11 21:57:19 CEST 2018


On Wed, Jul 11, 2018 at 09:22:47AM +0200, Nelio Laranjeiro wrote:
> Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
> ---
>  drivers/net/mlx5/mlx5_flow.c | 682 +++++++++++++++++++++++++++--------
>  1 file changed, 537 insertions(+), 145 deletions(-)
> 
[...]
> @@ -1322,26 +1583,122 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
>  		struct rte_flow_error *error)
>  {
>  	struct rte_flow local_flow = { .layers = 0, };
> -	size_t size = sizeof(*flow) + sizeof(struct ibv_flow_attr);
> -	int remain = (flow_size > size) ? flow_size - size : 0;
> +	size_t size = sizeof(*flow);
> +	union {
> +		struct rte_flow_expand_rss buf;
> +		uint8_t buffer[2048];
> +	} expand_buffer;
> +	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
> +	struct mlx5_flow_verbs *original_verbs = NULL;
> +	size_t original_verbs_size = 0;
> +	uint32_t original_layers = 0;
>  	int ret;
> +	uint32_t i;
>  
> -	if (!remain)
> +	if (size > flow_size)
>  		flow = &local_flow;
>  	ret = mlx5_flow_attributes(dev, attributes, flow, error);
>  	if (ret < 0)
>  		return ret;
> -	ret = mlx5_flow_items(pattern, flow, remain, error);
> -	if (ret < 0)
> -		return ret;
> -	size += ret;
> -	remain = (flow_size > size) ? flow_size - size : 0;
> -	ret = mlx5_flow_actions(dev, actions, flow, remain, error);
> +	ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
>  	if (ret < 0)
>  		return ret;
> -	size += ret;
> +	if (local_flow.rss.types) {
> +		ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
> +					  pattern, local_flow.rss.types,
> +					  mlx5_support_expansion,
> +					  MLX5_EXPANSION_ROOT);
> +		assert(ret > 0 &&
> +		       (unsigned int)ret < sizeof(expand_buffer.buffer));
> +	} else {
> +		buf->entries = 1;
> +		buf->entry[0].pattern = (void *)(uintptr_t)pattern;
> +	}
> +	size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
> +			       sizeof(void *));
>  	if (size <= flow_size)
> -		flow->verbs.attr->priority = flow->attributes.priority;
> +		flow->queue = (void *)(flow + 1);
> +	LIST_INIT(&flow->verbs);
> +	flow->layers = 0;
> +	flow->modifier = 0;
> +	flow->fate = 0;
> +	for (i = 0; i != buf->entries; ++i) {
> +		size_t off = size;
> +		size_t off2;
> +		int pattern_start_idx = 0;
> +
> +		flow->layers = original_layers;
> +		size += sizeof(struct ibv_flow_attr) +
> +			sizeof(struct mlx5_flow_verbs);
> +		off2 = size;
> +		if (size < flow_size) {
> +			flow->cur_verbs = (void *)((uintptr_t)flow + off);
> +			flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
> +			flow->cur_verbs->specs =
> +				(void *)(flow->cur_verbs->attr + 1);
> +		}
> +		/* First iteration convert the pattern into Verbs. */
> +		if (i == 0) {
> +			/* Actions don't need to be converted several times. */
> +			ret = mlx5_flow_actions(dev, actions, flow,
> +						(size < flow_size) ?
> +						flow_size - size : 0,
> +						error);
> +			if (ret < 0)
> +				return ret;
> +			size += ret;
> +		} else {
> +			/*
> +			 * Next iteration means the pattern has already been
> +			 * converted and an expansion is necessary to match
> +			 * the user RSS request.  For that only the expanded
> +			 * items will be converted, the common part with the
> +			 * user pattern are just copied into the next buffer
> +			 * zone.
> +			 */
> +			const struct rte_flow_item *item = pattern;
> +
> +			size += original_verbs_size;
> +			if (size < flow_size) {
> +				rte_memcpy(flow->cur_verbs->attr,
> +					   original_verbs->attr,
> +					   original_verbs_size +
> +					   sizeof(struct ibv_flow_attr));
> +				flow->cur_verbs->size = original_verbs_size;
> +			}
> +			if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
> +				pattern_start_idx++;
> +			} else {
> +				for (item = pattern;
> +				     item->type != RTE_FLOW_ITEM_TYPE_END;
> +				     ++item)
> +					pattern_start_idx++;
> +			}

Small nit.
Can't this loop be run only once? Since the pattern doesn't change, running it
again for i > 1 is redundant. Also, how about expanded_pattern_idx instead of
pattern_start_idx?

Acked-by: Yongseok Koh <yskoh at mellanox.com>

Thanks

> +		}
> +		ret = mlx5_flow_items
> +			((const struct rte_flow_item *)
> +			 &buf->entry[i].pattern[pattern_start_idx],
> +			 flow,
> +			 (size < flow_size) ? flow_size - size : 0, error);
> +		if (ret < 0)
> +			return ret;
> +		size += ret;
> +		if (size <= flow_size) {
> +			mlx5_flow_adjust_priority(dev, flow);
> +			LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
> +		}
> +		/*
> +		 * Keep a pointer of the first verbs conversion and the layers
> +		 * it has encountered.
> +		 */
> +		if (i == 0) {
> +			original_verbs = flow->cur_verbs;
> +			original_verbs_size = size - off2;
> +			original_layers = flow->layers;
> +		}
> +	}
> +	/* Restore the origin layers in the flow. */
> +	flow->layers = original_layers;
>  	return size;
>  }
>  
> @@ -1359,12 +1716,17 @@ mlx5_flow_rxq_mark_set(struct rte_eth_dev *dev, struct rte_flow *flow)
>  	struct priv *priv = dev->data->dev_private;
>  
>  	if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)) {
> -		struct mlx5_rxq_ctrl *rxq_ctrl =
> -			container_of((*priv->rxqs)[flow->queue],
> -				     struct mlx5_rxq_ctrl, rxq);
> +		unsigned int i;
> +
> +		for (i = 0; i != flow->rss.queue_num; ++i) {
> +			int idx = (*flow->queue)[i];
> +			struct mlx5_rxq_ctrl *rxq_ctrl =
> +				container_of((*priv->rxqs)[idx],
> +					     struct mlx5_rxq_ctrl, rxq);
>  
> -		rxq_ctrl->rxq.mark = 1;
> -		rxq_ctrl->flow_mark_n++;
> +			rxq_ctrl->rxq.mark = 1;
> +			rxq_ctrl->flow_mark_n++;
> +		}
>  	}
>  }
>  
> @@ -1383,12 +1745,17 @@ mlx5_flow_rxq_mark_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
>  	struct priv *priv = dev->data->dev_private;
>  
>  	if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK)) {
> -		struct mlx5_rxq_ctrl *rxq_ctrl =
> -			container_of((*priv->rxqs)[flow->queue],
> -				     struct mlx5_rxq_ctrl, rxq);
> +		unsigned int i;
> +
> +		for (i = 0; i != flow->rss.queue_num; ++i) {
> +			int idx = (*flow->queue)[i];
> +			struct mlx5_rxq_ctrl *rxq_ctrl =
> +				container_of((*priv->rxqs)[idx],
> +					     struct mlx5_rxq_ctrl, rxq);
>  
> -		rxq_ctrl->flow_mark_n--;
> -		rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
> +			rxq_ctrl->flow_mark_n--;
> +			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
> +		}
>  	}
>  }
>  
> @@ -1449,18 +1816,20 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
>  static void
>  mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
>  {
> -	if (flow->fate & MLX5_FLOW_FATE_DROP) {
> -		if (flow->verbs.flow) {
> -			claim_zero(mlx5_glue->destroy_flow(flow->verbs.flow));
> -			flow->verbs.flow = NULL;
> +	struct mlx5_flow_verbs *verbs;
> +
> +	LIST_FOREACH(verbs, &flow->verbs, next) {
> +		if (verbs->flow) {
> +			claim_zero(mlx5_glue->destroy_flow(verbs->flow));
> +			verbs->flow = NULL;
> +		}
> +		if (verbs->hrxq) {
> +			if (flow->fate & MLX5_FLOW_FATE_DROP)
> +				mlx5_hrxq_drop_release(dev);
> +			else
> +				mlx5_hrxq_release(dev, verbs->hrxq);
> +			verbs->hrxq = NULL;
>  		}
> -	}
> -	if (flow->verbs.hrxq) {
> -		if (flow->fate & MLX5_FLOW_FATE_DROP)
> -			mlx5_hrxq_drop_release(dev);
> -		else if (flow->fate & MLX5_FLOW_FATE_QUEUE)
> -			mlx5_hrxq_release(dev, flow->verbs.hrxq);
> -		flow->verbs.hrxq = NULL;
>  	}
>  }
>  
> @@ -1481,46 +1850,68 @@ static int
>  mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
>  		struct rte_flow_error *error)
>  {
> -	if (flow->fate & MLX5_FLOW_FATE_DROP) {
> -		flow->verbs.hrxq = mlx5_hrxq_drop_new(dev);
> -		if (!flow->verbs.hrxq)
> -			return rte_flow_error_set
> -				(error, errno,
> -				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> -				 NULL,
> -				 "cannot allocate Drop queue");
> -	} else if (flow->fate & MLX5_FLOW_FATE_QUEUE) {
> -		struct mlx5_hrxq *hrxq;
> -
> -		hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
> -				     MLX5_RSS_HASH_KEY_LEN, 0,
> -				     &flow->queue, 1, 0, 0);
> -		if (!hrxq)
> -			hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
> -					     MLX5_RSS_HASH_KEY_LEN, 0,
> -					     &flow->queue, 1, 0, 0);
> -		if (!hrxq)
> -			return rte_flow_error_set(error, rte_errno,
> -					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> -					NULL,
> -					"cannot create flow");
> -		flow->verbs.hrxq = hrxq;
> -	}
> -	flow->verbs.flow =
> -		mlx5_glue->create_flow(flow->verbs.hrxq->qp, flow->verbs.attr);
> -	if (!flow->verbs.flow) {
> -		if (flow->fate & MLX5_FLOW_FATE_DROP)
> -			mlx5_hrxq_drop_release(dev);
> -		else
> -			mlx5_hrxq_release(dev, flow->verbs.hrxq);
> -		flow->verbs.hrxq = NULL;
> -		return rte_flow_error_set(error, errno,
> -					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> -					  NULL,
> -					  "kernel module refuses to create"
> -					  " flow");
> +	struct mlx5_flow_verbs *verbs;
> +	int err;
> +
> +	LIST_FOREACH(verbs, &flow->verbs, next) {
> +		if (flow->fate & MLX5_FLOW_FATE_DROP) {
> +			verbs->hrxq = mlx5_hrxq_drop_new(dev);
> +			if (!verbs->hrxq) {
> +				rte_flow_error_set
> +					(error, errno,
> +					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +					 NULL,
> +					 "cannot get drop hash queue");
> +				goto error;
> +			}
> +		} else {
> +			struct mlx5_hrxq *hrxq;
> +
> +			hrxq = mlx5_hrxq_get(dev, flow->key,
> +					     MLX5_RSS_HASH_KEY_LEN,
> +					     verbs->hash_fields,
> +					     (*flow->queue),
> +					     flow->rss.queue_num, 0, 0);
> +			if (!hrxq)
> +				hrxq = mlx5_hrxq_new(dev, flow->key,
> +						     MLX5_RSS_HASH_KEY_LEN,
> +						     verbs->hash_fields,
> +						     (*flow->queue),
> +						     flow->rss.queue_num, 0, 0);
> +			if (!hrxq) {
> +				rte_flow_error_set
> +					(error, rte_errno,
> +					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +					 NULL,
> +					 "cannot get hash queue");
> +				goto error;
> +			}
> +			verbs->hrxq = hrxq;
> +		}
> +		verbs->flow =
> +			mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
> +		if (!verbs->flow) {
> +			rte_flow_error_set(error, errno,
> +					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> +					   NULL,
> +					   "hardware refuses to create flow");
> +			goto error;
> +		}
>  	}
>  	return 0;
> +error:
> +	err = rte_errno; /* Save rte_errno before cleanup. */
> +	LIST_FOREACH(verbs, &flow->verbs, next) {
> +		if (verbs->hrxq) {
> +			if (flow->fate & MLX5_FLOW_FATE_DROP)
> +				mlx5_hrxq_drop_release(dev);
> +			else
> +				mlx5_hrxq_release(dev, verbs->hrxq);
> +			verbs->hrxq = NULL;
> +		}
> +	}
> +	rte_errno = err; /* Restore rte_errno. */
> +	return -rte_errno;
>  }
>  
>  /**
> @@ -1550,42 +1941,43 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
>  		      const struct rte_flow_action actions[],
>  		      struct rte_flow_error *error)
>  {
> -	struct rte_flow *flow;
> -	size_t size;
> +	struct rte_flow *flow = NULL;
> +	size_t size = 0;
>  	int ret;
>  
> -	ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
> +	ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
>  	if (ret < 0)
>  		return NULL;
>  	size = ret;
> -	flow = rte_zmalloc(__func__, size, 0);
> +	flow = rte_calloc(__func__, 1, size, 0);
>  	if (!flow) {
>  		rte_flow_error_set(error, ENOMEM,
>  				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
>  				   NULL,
> -				   "cannot allocate memory");
> +				   "not enough memory to create flow");
>  		return NULL;
>  	}
> -	flow->verbs.attr = (struct ibv_flow_attr *)(flow + 1);
> -	flow->verbs.specs = (uint8_t *)(flow->verbs.attr + 1);
>  	ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
> -	if (ret < 0)
> -		goto error;
> +	if (ret < 0) {
> +		rte_free(flow);
> +		return NULL;
> +	}
>  	assert((size_t)ret == size);
>  	if (dev->data->dev_started) {
>  		ret = mlx5_flow_apply(dev, flow, error);
> -		if (ret < 0)
> -			goto error;
> +		if (ret < 0) {
> +			ret = rte_errno; /* Save rte_errno before cleanup. */
> +			if (flow) {
> +				mlx5_flow_remove(dev, flow);
> +				rte_free(flow);
> +			}
> +			rte_errno = ret; /* Restore rte_errno. */
> +			return NULL;
> +		}
>  	}
>  	mlx5_flow_rxq_mark_set(dev, flow);
>  	TAILQ_INSERT_TAIL(list, flow, next);
>  	return flow;
> -error:
> -	ret = rte_errno; /* Save rte_errno before cleanup. */
> -	mlx5_flow_remove(dev, flow);
> -	rte_free(flow);
> -	rte_errno = ret; /* Restore rte_errno. */
> -	return NULL;
>  }
>  
>  /**
> @@ -1745,7 +2137,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
>  	struct priv *priv = dev->data->dev_private;
>  	const struct rte_flow_attr attr = {
>  		.ingress = 1,
> -		.priority = priv->config.flow_prio - 1,
> +		.priority = MLX5_FLOW_PRIO_RSVD,
>  	};
>  	struct rte_flow_item items[] = {
>  		{
> -- 
> 2.18.0
> 


More information about the dev mailing list