[dpdk-dev] [PATCH 1/4] net/mlx5: fix offloaded tunnel allocation

Gregory Etelson getelson at nvidia.com
Wed Nov 11 08:14:14 CET 2020


The original patch allocated tunnel offload objects with invalid
indexes. As a result, PMD tunnel object allocation failed.

With this patch, the indexed pool provides both the index and the
memory for a new tunnel offload object.
Also, the tunnel offload ipool is moved to DV-enabled code only.
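
In short (a minimal sketch condensed from the diff below; the
MLX5_MAX_TUNNELS bound check and error paths are omitted):

    /* Before: the id came from an unrelated ipool and the object
     * memory from a separate mlx5_malloc() call.
     */
    mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
                      &id);
    tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
                         0, SOCKET_ID_ANY);

    /* After: one dedicated ipool hands out the index and the zeroed
     * object memory in a single call.
     */
    ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
    tunnel = mlx5_ipool_zmalloc(ipool, &id);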

Fixes: f2e8093 ("net/mlx5: use indexed pool as id generator")

Signed-off-by: Gregory Etelson <getelson at nvidia.com>
---
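Note on the mlx5_ipool_cfg[] conversion to designated initializers:
part of the array is compiled only under HAVE_IBV_FLOW_DV_SUPPORT, so
with positional initializers each entry's slot depends on entry order
and on the #ifdef, and an enum value can silently index the wrong pool
configuration. Designated initializers pin every entry to its
mlx5_ipool_index value. A hypothetical stand-alone illustration (the
names are not from the driver):

    /* With positional initializers, a reordered or compiled-out
     * entry silently shifts every entry after it; the designated
     * form keeps pool_size[POOL_B] correct in all configurations.
     */
    enum pool_index {
    #ifdef HAVE_FEATURE
        POOL_A,
    #endif
        POOL_B,
        POOL_MAX,
    };

    static const int pool_size[POOL_MAX] = {
    #ifdef HAVE_FEATURE
        [POOL_A] = 64,
    #endif
        [POOL_B] = 128,
    };
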
 drivers/net/mlx5/mlx5.c      | 50 ++++++++++++++++++------------------
 drivers/net/mlx5/mlx5.h      |  4 +--
 drivers/net/mlx5/mlx5_flow.c | 41 ++++++++++-------------------
 3 files changed, 40 insertions(+), 55 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 43344391df..e1faa819a3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -186,7 +186,7 @@ static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	{
+	[MLX5_IPOOL_DECAP_ENCAP] = {
 		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -197,7 +197,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_encap_decap_ipool",
 	},
-	{
+	[MLX5_IPOOL_PUSH_VLAN] = {
 		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -208,7 +208,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_push_vlan_ipool",
 	},
-	{
+	[MLX5_IPOOL_TAG] = {
 		.size = sizeof(struct mlx5_flow_dv_tag_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -219,7 +219,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_tag_ipool",
 	},
-	{
+	[MLX5_IPOOL_PORT_ID] = {
 		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -230,7 +230,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_port_id_ipool",
 	},
-	{
+	[MLX5_IPOOL_JUMP] = {
 		.size = sizeof(struct mlx5_flow_tbl_data_entry),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -241,7 +241,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_jump_ipool",
 	},
-	{
+	[MLX5_IPOOL_SAMPLE] = {
 		.size = sizeof(struct mlx5_flow_dv_sample_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -252,7 +252,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_sample_ipool",
 	},
-	{
+	[MLX5_IPOOL_DEST_ARRAY] = {
 		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -263,8 +263,19 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_dest_array_ipool",
 	},
+	[MLX5_IPOOL_TUNNEL_OFFLOAD] = {
+		.size = sizeof(struct mlx5_flow_tunnel),
+		.need_lock = 1,
+		.release_mem_en = 1,
+		.type = "mlx5_tunnel_offload",
+	},
+	[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID] = {
+		.size = 0,
+		.need_lock = 1,
+		.type = "mlx5_flow_tnl_tbl_ipool",
+	},
 #endif
-	{
+	[MLX5_IPOOL_MTR] = {
 		.size = sizeof(struct mlx5_flow_meter),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -275,7 +286,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_meter_ipool",
 	},
-	{
+	[MLX5_IPOOL_MCP] = {
 		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -286,7 +297,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_mcp_ipool",
 	},
-	{
+	[MLX5_IPOOL_HRXQ] = {
 		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -297,7 +308,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_hrxq_ipool",
 	},
-	{
+	[MLX5_IPOOL_MLX5_FLOW] = {
 		/*
 		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
 		 * It set in run time according to PCI function configuration.
@@ -312,7 +323,7 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_flow_handle_ipool",
 	},
-	{
+	[MLX5_IPOOL_RTE_FLOW] = {
 		.size = sizeof(struct rte_flow),
 		.trunk_size = 4096,
 		.need_lock = 1,
@@ -321,22 +332,12 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "rte_flow_ipool",
 	},
-	{
+	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
 		.size = 0,
 		.need_lock = 1,
 		.type = "mlx5_flow_rss_id_ipool",
 	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_flow_ipool",
-	},
-	{
-		.size = 0,
-		.need_lock = 1,
-		.type = "mlx5_flow_tnl_tbl_ipool",
-	},
-	{
+	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
 		.size = sizeof(struct mlx5_shared_action_rss),
 		.trunk_size = 64,
 		.grow_trunk = 3,
@@ -347,7 +348,6 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
 		.free = mlx5_free,
 		.type = "mlx5_shared_action_rss",
 	},
-
 };
 
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 7ee63a7a14..af097d6a7e 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -44,6 +44,8 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_JUMP, /* Pool for jump resource. */
 	MLX5_IPOOL_SAMPLE, /* Pool for sample resource. */
 	MLX5_IPOOL_DEST_ARRAY, /* Pool for destination array resource. */
+	MLX5_IPOOL_TUNNEL_OFFLOAD, /* Pool for tunnel offload context. */
+	MLX5_IPOOL_TUNNEL_FLOW_TBL_ID, /* Pool for tunnel table ID. */
 #endif
 	MLX5_IPOOL_MTR, /* Pool for meter resource. */
 	MLX5_IPOOL_MCP, /* Pool for metadata resource. */
@@ -51,8 +53,6 @@ enum mlx5_ipool_index {
 	MLX5_IPOOL_MLX5_FLOW, /* Pool for mlx5 flow handle. */
 	MLX5_IPOOL_RTE_FLOW, /* Pool for rte_flow. */
 	MLX5_IPOOL_RSS_EXPANTION_FLOW_ID, /* Pool for Queue/RSS flow ID. */
-	MLX5_IPOOL_TUNNEL_ID, /* Pool for flow tunnel ID. */
-	MLX5_IPOOL_TNL_TBL_ID, /* Pool for tunnel table ID. */
 	MLX5_IPOOL_RSS_SHARED_ACTIONS, /* Pool for RSS shared actions. */
 	MLX5_IPOOL_MAX,
 };
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 92adfcacca..31c9d82b4a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6934,7 +6934,7 @@ mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
 	struct mlx5_dev_ctx_shared *sh = list->ctx;
 	struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
 
-	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
 			tunnel_flow_tbl_to_id(tte->flow_table));
 	mlx5_free(tte);
 }
@@ -6952,12 +6952,12 @@ mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
 			  SOCKET_ID_ANY);
 	if (!tte)
 		goto err;
-	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+	mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
 			  &tte->flow_table);
 	if (tte->flow_table >= MLX5_MAX_TABLES) {
 		DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
 			tte->flow_table);
-		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
+		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TUNNEL_FLOW_TBL_ID],
 				tte->flow_table);
 		goto err;
 	} else if (!tte->flow_table) {
@@ -7465,14 +7465,13 @@ mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
 		      struct mlx5_flow_tunnel *tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 
 	DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
 		dev->data->port_id, tunnel->tunnel_id);
-	RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
-			tunnel->tunnel_id);
 	mlx5_hlist_destroy(tunnel->groups);
-	mlx5_free(tunnel);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
+	mlx5_ipool_free(ipool, tunnel->tunnel_id);
 }
 
 static struct mlx5_flow_tunnel *
@@ -7494,39 +7493,25 @@ mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
 			  const struct rte_flow_tunnel *app_tunnel)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_indexed_pool *ipool;
 	struct mlx5_flow_tunnel *tunnel;
 	uint32_t id;
 
-	mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
-			  &id);
+	ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_OFFLOAD];
+	tunnel = mlx5_ipool_zmalloc(ipool, &id);
+	if (!tunnel)
+		return NULL;
 	if (id >= MLX5_MAX_TUNNELS) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
+		mlx5_ipool_free(ipool, id);
 		DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
 		return NULL;
-	} else if (!id) {
-		return NULL;
-	}
-	/**
-	 * mlx5 flow tunnel is an auxlilary data structure
-	 * It's not part of IO. No need to allocate it from
-	 * huge pages pools dedicated for IO
-	 */
-	tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
-			     0, SOCKET_ID_ANY);
-	if (!tunnel) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		return NULL;
 	}
 	tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
 					   mlx5_flow_tunnel_grp2tbl_create_cb,
 					   NULL,
 					   mlx5_flow_tunnel_grp2tbl_remove_cb);
 	if (!tunnel->groups) {
-		mlx5_ipool_free(priv->sh->ipool
-				[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
-		mlx5_free(tunnel);
+		mlx5_ipool_free(ipool, id);
 		return NULL;
 	}
 	tunnel->groups->ctx = priv->sh;
-- 
2.29.2


