[dpdk-dev] [PATCH v2] net/mlx5: make flow operation thread safe

Suanming Mou suanmingm at nvidia.com
Thu Nov 19 02:01:30 CET 2020


Hi,

(Seems I replied to the wrong version yesterday.)
The issue happens only in the flow flush when the port status changes.
The root cause is that the LSC callback should not be called before the
port start is done.
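
For illustration only, a minimal sketch of that ordering rule (the handler
name is hypothetical, not the actual mlx5 code), assuming the dev_started
flag is a suitable guard: the link-status handler simply returns until the
port start has finished.

#include <rte_ethdev.h>

/* Hypothetical handler; it shows only the guard, not real mlx5 logic. */
static void
lsc_intr_handler_sketch(struct rte_eth_dev *dev)
{
	/* Skip LSC work while the port is not (yet) fully started. */
	if (!dev->data->dev_started)
		return;
	/* ... normal link-status-change processing would go here ... */
}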

BR,
SuanmingMou

> -----Original Message-----
> From: dev <dev-bounces at dpdk.org> On Behalf Of Weifeng Li
> Sent: Sunday, November 8, 2020 3:54 PM
> To: Matan Azrad <matan at nvidia.com>
> Cc: dev at dpdk.org; Weifeng Li <liweifeng96 at 126.com>
> Subject: [dpdk-dev] [PATCH v2] net/mlx5: make flow operation thread safe
> 
> Does flow handling need a lock for the scenario below?
> Thread1: flow_list_destroy----flow_list_create
> Thread2: ---------flow_list_destroy----
> The same flow might be operated on by both threads at the same time.
> 
> When I start an mlx5 bond and trigger an LSC at the same time, it is
> possible to hit the assert in mlx5_rx_queue_release() with the message
> "port 4 Rx queue 0 is still used by a flow and cannot be removed".
> I use dpdk-testpmd to simulate the test.
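
A minimal sketch of the serialization the patch below goes for, with
simplified names (not the exact driver code): one per-port spinlock,
initialized once at device spawn, is taken around both list create and
list destroy, so two threads can no longer operate on the same flow at
the same time.

#include <stdint.h>
#include <rte_spinlock.h>

/* Simplified stand-in for the per-port private data. */
struct port_flows_sketch {
	rte_spinlock_t flow_lock; /* init once with rte_spinlock_init() */
	uint32_t flows;           /* head index of the flow list */
};

static uint32_t
flow_create_sketch(struct port_flows_sketch *p)
{
	uint32_t idx = 0;

	rte_spinlock_lock(&p->flow_lock);
	/* Allocate the flow, translate/apply it, link it into p->flows. */
	rte_spinlock_unlock(&p->flow_lock);
	return idx;
}

static void
flow_destroy_sketch(struct port_flows_sketch *p, uint32_t idx)
{
	rte_spinlock_lock(&p->flow_lock);
	/* Look up idx; if it is still valid, unlink it and release it. */
	(void)idx;
	rte_spinlock_unlock(&p->flow_lock);
}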
> 
> Signed-off-by: Weifeng Li <liweifeng96 at 126.com>
> ---
> v2: adjust coding style issue.
> ---
>  drivers/net/mlx5/linux/mlx5_os.c |  1 +
>  drivers/net/mlx5/mlx5.h          |  1 +
>  drivers/net/mlx5/mlx5_flow.c     | 13 +++++++++++--
>  3 files changed, 13 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
> index c78d56f..59c074e 100644
> --- a/drivers/net/mlx5/linux/mlx5_os.c
> +++ b/drivers/net/mlx5/linux/mlx5_os.c
> @@ -1426,6 +1426,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
>  				      MLX5_MAX_MAC_ADDRESSES);
>  	priv->flows = 0;
>  	priv->ctrl_flows = 0;
> +	rte_spinlock_init(&priv->flow_lock);
>  	rte_spinlock_init(&priv->flow_list_lock);
>  	TAILQ_INIT(&priv->flow_meters);
>  	TAILQ_INIT(&priv->flow_meter_profiles);
> diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> index b43a8c9..860bf2f 100644
> --- a/drivers/net/mlx5/mlx5.h
> +++ b/drivers/net/mlx5/mlx5.h
> @@ -963,6 +963,7 @@ struct mlx5_priv {
>  	unsigned int reta_idx_n; /* RETA index size. */
>  	struct mlx5_drop drop_queue; /* Flow drop queues. */
>  	uint32_t flows; /* RTE Flow rules. */
> +	rte_spinlock_t flow_lock;
>  	uint32_t ctrl_flows; /* Control flow rules. */
>  	rte_spinlock_t flow_list_lock;
>  	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index e9c0ddd..69d8159 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -5577,6 +5577,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
>  				external, hairpin_flow, error);
>  	if (ret < 0)
>  		goto error_before_hairpin_split;
> +	rte_spinlock_lock(&priv->flow_lock);
>  	flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
>  	if (!flow) {
>  		rte_errno = ENOMEM;
> @@ -5598,8 +5599,10 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
>  	memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
>  	rss = flow_get_rss_action(p_actions_rx);
>  	if (rss) {
> -		if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
> +		if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num)) {
> +			rte_spinlock_unlock(&priv->flow_lock);
>  			return 0;
> +		}
>  		/*
>  		 * The following information is required by
>  		 * mlx5_flow_hashfields_adjust() in advance.
> @@ -5723,6 +5726,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
>  		__atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
>  		mlx5_free(default_miss_ctx.queue);
>  	}
> +	rte_spinlock_unlock(&priv->flow_lock);
>  	return idx;
>  error:
>  	MLX5_ASSERT(flow);
> @@ -5738,6 +5742,7 @@ flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
>  		wks->flow_nested_idx = 0;
>  error_before_hairpin_split:
>  	rte_free(translated_actions);
> +	rte_spinlock_unlock(&priv->flow_lock);
>  	return 0;
>  }
> 
> @@ -5877,11 +5882,14 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
>  		  uint32_t flow_idx)
>  {
>  	struct mlx5_priv *priv = dev->data->dev_private;
> +	rte_spinlock_lock(&priv->flow_lock);
>  	struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
>  					       [MLX5_IPOOL_RTE_FLOW], flow_idx);
> 
> -	if (!flow)
> +	if (!flow) {
> +		rte_spinlock_unlock(&priv->flow_lock);
>  		return;
> +	}
>  	/*
>  	 * Update RX queue flags only if port is started, otherwise it is
>  	 * already clean.
> @@ -5908,6 +5916,7 @@ flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
>  	}
>  	flow_mreg_del_copy_action(dev, flow);
>  	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
> +	rte_spinlock_unlock(&priv->flow_lock);
>  }
> 
>  /**
> --
> 2.9.5


