patch 'net/mlx5: fix crash on flow rule destruction' has been queued to stable release 24.11.4
Kevin Traynor
ktraynor at redhat.com
Fri Nov 21 12:21:19 CET 2025
Hi,
FYI, your patch has been queued to stable release 24.11.4
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/26/25. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable
This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/f9ba4e61a235f07e6f4ea92dadbb24c0c62213fc
Thanks.
Kevin
---
From f9ba4e61a235f07e6f4ea92dadbb24c0c62213fc Mon Sep 17 00:00:00 2001
From: Maayan Kashani <mkashani at nvidia.com>
Date: Mon, 17 Nov 2025 09:15:36 +0200
Subject: [PATCH] net/mlx5: fix crash on flow rule destruction
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
[ upstream commit ad7db900a05397773b812e76655918086b07ab36 ]
The mlx5_ipool_free() function was called with a NULL pool pointer
during HW flow destruction, causing a segmentation fault. This occurred
when flow creation failed and the cleanup path attempted to free
resources from an uninitialized flow pool.
The crash happened in the following scenario:
1. During device start, a default NTA copy action flow is created
2. If the flow creation fails, mlx5_flow_hw_list_destroy() is called
3. In hw_cmpl_flow_update_or_destroy(), table->flow pool could be NULL
4. mlx5_ipool_free(table->flow, flow->idx) was called without checking
if table->flow is NULL
5. Inside mlx5_ipool_free(), accessing pool->cfg.per_core_cache caused
a segmentation fault due to NULL pointer dereference
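To make the failure concrete, here is a minimal C sketch of the broken
pattern (simplified stand-in types, not the actual mlx5 structures):

    /* Simplified stand-ins for mlx5_indexed_pool and its config. */
    struct ipool_cfg { unsigned int per_core_cache; };
    struct indexed_pool { struct ipool_cfg cfg; };

    void ipool_free(struct indexed_pool *pool, unsigned int idx)
    {
            if (!idx)
                    return;
            /* pool is dereferenced before any NULL check, so a caller
             * passing pool == NULL faults right here. */
            if (pool->cfg.per_core_cache) {
                    /* ... per-core cache free path ... */
            }
    }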
The fix adds two layers of protection:
1. Add NULL check for table->flow before calling mlx5_ipool_free() in
hw_cmpl_flow_update_or_destroy(), consistent with the existing check
for table->resource on the previous line
2. Add NULL check for pool parameter in mlx5_ipool_free() as a defensive
measure to prevent similar crashes in other code paths
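In the same simplified terms, the two guards look like this (fragments
only; table and flow stand for the variables used in
hw_cmpl_flow_update_or_destroy, as in the diff below):

    /* Caller side, in the destroy completion path: skip the free
     * when the flow pool was never successfully created. */
    if (table->flow_pool)
            ipool_free(table->flow_pool, flow->idx);

    /* Callee side: turn a free on a NULL pool into a no-op. */
    void ipool_free(struct indexed_pool *pool, unsigned int idx)
    {
            if (!pool || !idx)
                    return;
            /* ... normal free path ... */
    }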
The fix also renames the ‘flow’ field in rte_flow_template_table
to ‘flow_pool’ for better code readability.
Stack trace of the fault:
mlx5_ipool_free (pool=0x0) at mlx5_utils.c:753
hw_cmpl_flow_update_or_destroy at mlx5_flow_hw.c:4481
mlx5_flow_hw_destroy at mlx5_flow_hw.c:14219
mlx5_flow_hw_list_destroy at mlx5_flow_hw.c:14279
flow_hw_list_create at mlx5_flow_hw.c:14415
mlx5_flow_start_default at mlx5_flow.c:8263
mlx5_dev_start at mlx5_trigger.c:1420
Fixes: 27d171b88031 ("net/mlx5: abstract flow action and enable reconfigure")
Signed-off-by: Maayan Kashani <mkashani at nvidia.com>
Acked-by: Bing Zhao <bingz at nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 2 +-
drivers/net/mlx5/mlx5_flow_hw.c | 25 +++++++++++++------------
drivers/net/mlx5/mlx5_utils.c | 2 +-
3 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ee6ad206b8..264d4ad60f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1702,5 +1702,5 @@ struct rte_flow_template_table {
/* Action templates bind to the table. */
struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
- struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
+ struct mlx5_indexed_pool *flow_pool; /* The table's flow ipool. */
struct rte_flow_hw_aux *flow_aux; /**< Auxiliary data stored per flow. */
struct mlx5_indexed_pool *resource; /* The table's resource ipool. */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 9bd237b82a..a1274c93f8 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -3918,5 +3918,5 @@ flow_hw_async_flow_create_generic(struct rte_eth_dev *dev,
return NULL;
}
- flow = mlx5_ipool_malloc(table->flow, &flow_idx);
+ flow = mlx5_ipool_malloc(table->flow_pool, &flow_idx);
if (!flow) {
rte_errno = ENOMEM;
@@ -4007,5 +4007,5 @@ error:
mlx5_ipool_free(table->resource, res_idx);
if (flow_idx)
- mlx5_ipool_free(table->flow, flow_idx);
+ mlx5_ipool_free(table->flow_pool, flow_idx);
if (sub_error.cause != RTE_FLOW_ERROR_TYPE_NONE && error != NULL)
*error = sub_error;
@@ -4457,5 +4457,6 @@ hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,
if (table->resource)
mlx5_ipool_free(table->resource, res_idx);
- mlx5_ipool_free(table->flow, flow->idx);
+ if (table->flow_pool)
+ mlx5_ipool_free(table->flow_pool, flow->idx);
}
}
@@ -4745,5 +4746,5 @@ flow_hw_q_flow_flush(struct rte_eth_dev *dev,
if (!tbl->cfg.external)
continue;
- MLX5_IPOOL_FOREACH(tbl->flow, fidx, flow) {
+ MLX5_IPOOL_FOREACH(tbl->flow_pool, fidx, flow) {
if (flow_hw_async_flow_destroy(dev,
MLX5_DEFAULT_FLUSH_QUEUE,
@@ -5051,6 +5052,6 @@ flow_hw_table_create(struct rte_eth_dev *dev,
tbl->cfg = *table_cfg;
/* Allocate flow indexed pool. */
- tbl->flow = mlx5_ipool_create(&cfg);
- if (!tbl->flow)
+ tbl->flow_pool = mlx5_ipool_create(&cfg);
+ if (!tbl->flow_pool)
goto error;
/* Allocate table of auxiliary flow rule structs. */
@@ -5200,6 +5201,6 @@ error:
if (tbl->flow_aux)
mlx5_free(tbl->flow_aux);
- if (tbl->flow)
- mlx5_ipool_destroy(tbl->flow);
+ if (tbl->flow_pool)
+ mlx5_ipool_destroy(tbl->flow_pool);
mlx5_free(tbl);
}
@@ -5422,8 +5423,8 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
if (table->resource)
mlx5_ipool_flush_cache(table->resource);
- mlx5_ipool_flush_cache(table->flow);
+ mlx5_ipool_flush_cache(table->flow_pool);
/* Check if ipool has allocated objects. */
if (table->refcnt ||
- mlx5_ipool_get_next(table->flow, &fidx) ||
+ mlx5_ipool_get_next(table->flow_pool, &fidx) ||
(table->resource && mlx5_ipool_get_next(table->resource, &ridx))) {
DRV_LOG(WARNING, "Table %p is still in use.", (void *)table);
@@ -5455,5 +5456,5 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
mlx5_ipool_destroy(table->resource);
mlx5_free(table->flow_aux);
- mlx5_ipool_destroy(table->flow);
+ mlx5_ipool_destroy(table->flow_pool);
mlx5_free(table);
return 0;
@@ -14937,5 +14938,5 @@ flow_hw_table_resize(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
table, "shrinking table is not supported");
- ret = mlx5_ipool_resize(table->flow, nb_flows, error);
+ ret = mlx5_ipool_resize(table->flow_pool, nb_flows, error);
if (ret)
return ret;
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index d882af6047..a6db03fcc4 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -588,5 +588,5 @@ mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
uint32_t entry_idx;
- if (!idx)
+ if (!pool || !idx)
return;
if (pool->cfg.per_core_cache) {
--
2.51.0
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2025-11-21 11:05:12.537156034 +0000
+++ 0095-net-mlx5-fix-crash-on-flow-rule-destruction.patch 2025-11-21 11:05:09.643201937 +0000
@@ -1 +1 @@
-From ad7db900a05397773b812e76655918086b07ab36 Mon Sep 17 00:00:00 2001
+From f9ba4e61a235f07e6f4ea92dadbb24c0c62213fc Mon Sep 17 00:00:00 2001
@@ -8,0 +9,2 @@
+[ upstream commit ad7db900a05397773b812e76655918086b07ab36 ]
+
@@ -43 +44,0 @@
-Cc: stable at dpdk.org
@@ -54 +55 @@
-index d7c9d4d0ea..218b55d536 100644
+index ee6ad206b8..264d4ad60f 100644
@@ -57 +58 @@
-@@ -1745,5 +1745,5 @@ struct rte_flow_template_table {
+@@ -1702,5 +1702,5 @@ struct rte_flow_template_table {
@@ -65 +66 @@
-index c60f836de4..f8995b53cc 100644
+index 9bd237b82a..a1274c93f8 100644
@@ -68 +69 @@
-@@ -3959,5 +3959,5 @@ flow_hw_async_flow_create_generic(struct rte_eth_dev *dev,
+@@ -3918,5 +3918,5 @@ flow_hw_async_flow_create_generic(struct rte_eth_dev *dev,
@@ -75 +76 @@
-@@ -4049,5 +4049,5 @@ error:
+@@ -4007,5 +4007,5 @@ error:
@@ -82 +83 @@
-@@ -4499,5 +4499,6 @@ hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,
+@@ -4457,5 +4457,6 @@ hw_cmpl_flow_update_or_destroy(struct rte_eth_dev *dev,
@@ -90 +91 @@
-@@ -4787,5 +4788,5 @@ flow_hw_q_flow_flush(struct rte_eth_dev *dev,
+@@ -4745,5 +4746,5 @@ flow_hw_q_flow_flush(struct rte_eth_dev *dev,
@@ -97 +98 @@
-@@ -5109,6 +5110,6 @@ flow_hw_table_create(struct rte_eth_dev *dev,
+@@ -5051,6 +5052,6 @@ flow_hw_table_create(struct rte_eth_dev *dev,
@@ -106 +107 @@
-@@ -5265,6 +5266,6 @@ error:
+@@ -5200,6 +5201,6 @@ error:
@@ -115 +116 @@
-@@ -5496,8 +5497,8 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
+@@ -5422,8 +5423,8 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
@@ -126 +127 @@
-@@ -5529,5 +5530,5 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
+@@ -5455,5 +5456,5 @@ flow_hw_table_destroy(struct rte_eth_dev *dev,
@@ -133 +134 @@
-@@ -15228,5 +15229,5 @@ flow_hw_table_resize(struct rte_eth_dev *dev,
+@@ -14937,5 +14938,5 @@ flow_hw_table_resize(struct rte_eth_dev *dev,
@@ -141 +142 @@
-index cba8cc3f49..defcf80dd7 100644
+index d882af6047..a6db03fcc4 100644
@@ -144 +145 @@
-@@ -749,5 +749,5 @@ mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)
+@@ -588,5 +588,5 @@ mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx)