[dpdk-dev] [PATCH v5 32/34] net/mlx5: make tunnel hub list thread safe

Suanming Mou suanmingm at nvidia.com
Wed Oct 28 10:33:51 CET 2020


This commit uses a spinlock to protect the tunnel hub list against
concurrent access from multiple threads.

Signed-off-by: Suanming Mou <suanmingm at nvidia.com>
Acked-by: Matan Azrad <matan at nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c | 20 +++++++++++++++++---
 drivers/net/mlx5/mlx5_flow.h |  1 +
 2 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 1e82030..a6e60af 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -669,10 +669,14 @@ enum mlx5_expansion {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
 
+	rte_spinlock_lock(&thub->sl);
 	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->item == pmd_items)
+		if (&tun->item == pmd_items) {
+			LIST_REMOVE(tun, chain);
 			break;
+		}
 	}
+	rte_spinlock_unlock(&thub->sl);
 	if (!tun || num_items != 1)
 		return rte_flow_error_set(err, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -690,10 +694,14 @@ enum mlx5_expansion {
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
 
+	rte_spinlock_lock(&thub->sl);
 	LIST_FOREACH(tun, &thub->tunnels, chain) {
-		if (&tun->action == pmd_actions)
+		if (&tun->action == pmd_actions) {
+			LIST_REMOVE(tun, chain);
 			break;
+		}
 	}
+	rte_spinlock_unlock(&thub->sl);
 	if (!tun || num_actions != 1)
 		return rte_flow_error_set(err, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -5880,8 +5888,12 @@ struct rte_flow *
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
 	if (flow->tunnel) {
 		struct mlx5_flow_tunnel *tunnel;
+
+		rte_spinlock_lock(&mlx5_tunnel_hub(dev)->sl);
 		tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
 		RTE_VERIFY(tunnel);
+		LIST_REMOVE(tunnel, chain);
+		rte_spinlock_unlock(&mlx5_tunnel_hub(dev)->sl);
 		if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
 			mlx5_flow_tunnel_free(dev, tunnel);
 	}
@@ -7940,7 +7952,6 @@ struct mlx5_meter_domains_infos *
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
 	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	LIST_REMOVE(tunnel, chain);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
 			tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups);
@@ -8029,6 +8040,7 @@ struct mlx5_meter_domains_infos *
 	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
 	struct mlx5_flow_tunnel *tun;
 
+	rte_spinlock_lock(&thub->sl);
 	LIST_FOREACH(tun, &thub->tunnels, chain) {
 		if (!memcmp(app_tunnel, &tun->app_tunnel,
 			    sizeof(*app_tunnel))) {
@@ -8046,6 +8058,7 @@ struct mlx5_meter_domains_infos *
 			ret = -ENOMEM;
 		}
 	}
+	rte_spinlock_unlock(&thub->sl);
 	if (tun)
 		__atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
 
@@ -8074,6 +8087,7 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 	if (!thub)
 		return -ENOMEM;
 	LIST_INIT(&thub->tunnels);
+	rte_spinlock_init(&thub->sl);
 	thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
 					 0, mlx5_flow_tunnel_grp2tbl_create_cb,
 					 NULL,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4a8c2bf..8ef2a85 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -945,6 +945,7 @@ struct mlx5_flow_tunnel {
 /** PMD tunnel related context */
 struct mlx5_flow_tunnel_hub {
 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
+	rte_spinlock_t sl;			/* Tunnel list spinlock. */
 	struct mlx5_hlist *groups;		/** non tunnel groups */
 };
 
-- 
1.8.3.1



More information about the dev mailing list