[dpdk-dev] [PATCH v5 06/34] net/mlx5: make rte flow list thread safe

Suanming Mou suanmingm at nvidia.com
Wed Oct 28 09:59:47 CET 2020


From: Xueming Li <xuemingl at nvidia.com>

To support multi-thread flow operations, this patch introduces a list lock
for the rte_flow list that manages all the rte_flow handlers.

Signed-off-by: Xueming Li <xuemingl at nvidia.com>
Acked-by: Matan Azrad <matan at nvidia.com>
---
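For reference, below is a minimal standalone sketch of the locking pattern
this patch applies, built on the DPDK rte_spinlock API. The names here
("port_ctx", "flow_entry", "flow_list_insert"/"flow_list_remove") are
illustrative placeholders, not driver symbols; the driver itself wraps its
index-based ILIST_INSERT/ILIST_REMOVE in the same lock/unlock sequence.

/*
 * Illustrative sketch only: serialize list insert/remove with a
 * per-port spinlock so control-path threads can create and destroy
 * flows concurrently.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <rte_spinlock.h>

struct flow_entry {
	LIST_ENTRY(flow_entry) next;
	int id;
};

struct port_ctx {
	rte_spinlock_t flow_list_lock; /* Protects the flows list. */
	LIST_HEAD(, flow_entry) flows;
};

static void
flow_list_insert(struct port_ctx *ctx, struct flow_entry *flow)
{
	/* Short critical section: only the list linkage is protected. */
	rte_spinlock_lock(&ctx->flow_list_lock);
	LIST_INSERT_HEAD(&ctx->flows, flow, next);
	rte_spinlock_unlock(&ctx->flow_list_lock);
}

static void
flow_list_remove(struct port_ctx *ctx, struct flow_entry *flow)
{
	rte_spinlock_lock(&ctx->flow_list_lock);
	LIST_REMOVE(flow, next);
	rte_spinlock_unlock(&ctx->flow_list_lock);
}

int
main(void)
{
	struct port_ctx ctx;
	struct flow_entry e = { .id = 1 };

	rte_spinlock_init(&ctx.flow_list_lock);
	LIST_INIT(&ctx.flows);
	flow_list_insert(&ctx, &e);
	flow_list_remove(&ctx, &e);
	printf("flow %d inserted and removed under the list lock\n", e.id);
	return 0;
}

As in the diff below, the critical sections are kept tiny: only the list
linkage is serialized, while driver/hardware work (flow translation,
flow_drv_destroy, Rx queue flag updates) stays outside the lock.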
 drivers/net/mlx5/linux/mlx5_os.c |  1 +
 drivers/net/mlx5/mlx5.h          |  1 +
 drivers/net/mlx5/mlx5_flow.c     | 10 ++++++++--
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 0b59e74..a579dde 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1358,6 +1358,7 @@
 				      MLX5_MAX_MAC_ADDRESSES);
 	priv->flows = 0;
 	priv->ctrl_flows = 0;
+	rte_spinlock_init(&priv->flow_list_lock);
 	TAILQ_INIT(&priv->flow_meters);
 	TAILQ_INIT(&priv->flow_meter_profiles);
 	/* Hint libmlx5 to use PMD allocator for data plane resources */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5bda233..9ab2976 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -856,6 +856,7 @@ struct mlx5_priv {
 	struct mlx5_drop drop_queue; /* Flow drop queues. */
 	uint32_t flows; /* RTE Flow rules. */
 	uint32_t ctrl_flows; /* Control flow rules. */
+	rte_spinlock_t flow_list_lock; /* rte_flow list lock. */
 	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ed2acd1..cc31801 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5774,9 +5774,12 @@ struct tunnel_default_miss_ctx {
 		if (ret < 0)
 			goto error;
 	}
-	if (list)
+	if (list) {
+		rte_spinlock_lock(&priv->flow_list_lock);
 		ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
 			     flow, next);
+		rte_spinlock_unlock(&priv->flow_list_lock);
+	}
 	flow_rxq_flags_set(dev, flow);
 	rte_free(translated_actions);
 	/* Nested flow creation index recovery. */
@@ -5957,9 +5960,12 @@ struct rte_flow *
 	if (dev->data->dev_started)
 		flow_rxq_flags_trim(dev, flow);
 	flow_drv_destroy(dev, flow);
-	if (list)
+	if (list) {
+		rte_spinlock_lock(&priv->flow_list_lock);
 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
 			     flow_idx, flow, next);
+		rte_spinlock_unlock(&priv->flow_list_lock);
+	}
 	flow_mreg_del_copy_action(dev, flow);
 	if (flow->fdir) {
 		LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
-- 
1.8.3.1


