patch 'net/hns3: fix inconsistent lock' has been queued to stable release 24.11.4
Kevin Traynor
ktraynor at redhat.com
Fri Oct 31 15:33:34 CET 2025
Hi,

FYI, your patch has been queued to stable release 24.11.4

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/05/25. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.

Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable

This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/bdc2be7156ac4f9a5b07c58d7c138166f28e4395

Thanks.

Kevin
---
From bdc2be7156ac4f9a5b07c58d7c138166f28e4395 Mon Sep 17 00:00:00 2001
From: Dengdui Huang <huangdengdui at huawei.com>
Date: Wed, 13 Aug 2025 15:33:15 +0800
Subject: [PATCH] net/hns3: fix inconsistent lock
[ upstream commit d441169bd20415691ea86707e7bf852eb6fcda46 ]
The hns3 driver supports configuring RSS through both the ops API and the
rte_flow API. The ops API serializes on a spin lock (hw->lock), while the
rte_flow API serializes on a pthread mutex (hw->flows_lock). Because the two
paths take different locks, concurrent calls are not mutually exclusive and
can race on the shared filter state.

This patch replaces the mutex in the flow API with the same spin lock used
by the ops API. With the pthread mutex no longer needed, the pthread mutex
attributes can also be removed.
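The following is a minimal sketch (not the driver's actual code; the
demo_* names are hypothetical) of why two different locks provide no
mutual exclusion, and how taking the same spin lock on both paths,
as this patch does, closes the race:

/* Hypothetical illustration, assuming only DPDK's rte_spinlock API. */
#include <pthread.h>
#include <rte_spinlock.h>

struct demo_hw {
	rte_spinlock_t lock;        /* taken by the ops API path */
	pthread_mutex_t flows_lock; /* taken by the rte_flow path (pre-fix) */
	int filter_state;           /* shared state both paths modify */
};

/* ops API path: protected only by the spin lock */
static void
demo_ops_path(struct demo_hw *hw)
{
	rte_spinlock_lock(&hw->lock);
	hw->filter_state++;              /* not serialized against flow path */
	rte_spinlock_unlock(&hw->lock);
}

/* rte_flow path before the fix: protected only by the mutex,
 * so it can run concurrently with demo_ops_path() and race. */
static void
demo_flow_path_pre_fix(struct demo_hw *hw)
{
	pthread_mutex_lock(&hw->flows_lock);
	hw->filter_state++;
	pthread_mutex_unlock(&hw->flows_lock);
}

/* rte_flow path after the fix: both paths serialize on hw->lock */
static void
demo_flow_path_post_fix(struct demo_hw *hw)
{
	rte_spinlock_lock(&hw->lock);
	hw->filter_state++;
	rte_spinlock_unlock(&hw->lock);
}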
Fixes: 1bdcca8006e4 ("net/hns3: fix flow director lock")
Signed-off-by: Dengdui Huang <huangdengdui at huawei.com>
---
drivers/net/hns3/hns3_ethdev.h | 2 --
drivers/net/hns3/hns3_fdir.c | 13 --------
drivers/net/hns3/hns3_flow.c | 60 +++++++++++++---------------------
3 files changed, 22 insertions(+), 53 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 01d473fd2e..9b8566139b 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -6,5 +6,4 @@
#define HNS3_ETHDEV_H
-#include <pthread.h>
#include <ethdev_driver.h>
#include <rte_byteorder.h>
@@ -681,5 +680,4 @@ struct hns3_hw {
struct hns3_port_base_vlan_config port_base_vlan_cfg;
- pthread_mutex_t flows_lock; /* rte_flow ops lock */
struct hns3_fdir_rule_list flow_fdir_list; /* flow fdir rule list */
struct hns3_rss_filter_list flow_rss_list; /* flow RSS rule list */
diff --git a/drivers/net/hns3/hns3_fdir.c b/drivers/net/hns3/hns3_fdir.c
index aacad40e61..50572ae430 100644
--- a/drivers/net/hns3/hns3_fdir.c
+++ b/drivers/net/hns3/hns3_fdir.c
@@ -1146,15 +1146,4 @@ int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
return 0;
- /*
- * This API is called in the reset recovery process, the parent function
- * must hold hw->lock.
- * There maybe deadlock if acquire hw->flows_lock directly because rte
- * flow driver ops first acquire hw->flows_lock and then may acquire
- * hw->lock.
- * So here first release the hw->lock and then acquire the
- * hw->flows_lock to avoid deadlock.
- */
- rte_spinlock_unlock(&hw->lock);
- pthread_mutex_lock(&hw->flows_lock);
TAILQ_FOREACH(fdir_filter, &fdir_info->fdir_list, entries) {
ret = hns3_config_action(hw, &fdir_filter->fdir_conf);
@@ -1167,6 +1156,4 @@ int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
}
}
- pthread_mutex_unlock(&hw->flows_lock);
- rte_spinlock_lock(&hw->lock);
if (err) {
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index c0238d2bfa..f2d1e4ec3a 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -2211,16 +2211,4 @@ hns3_reconfig_all_rss_filter(struct hns3_hw *hw)
}
-static int
-hns3_restore_rss_filter(struct hns3_hw *hw)
-{
- int ret;
-
- pthread_mutex_lock(&hw->flows_lock);
- ret = hns3_reconfig_all_rss_filter(hw);
- pthread_mutex_unlock(&hw->flows_lock);
-
- return ret;
-}
-
int
hns3_restore_filter(struct hns3_adapter *hns)
@@ -2233,5 +2221,5 @@ hns3_restore_filter(struct hns3_adapter *hns)
return ret;
- return hns3_restore_rss_filter(hw);
+ return hns3_reconfig_all_rss_filter(hw);
}
@@ -2625,8 +2613,8 @@ hns3_flow_validate_wrap(struct rte_eth_dev *dev,
int ret;
- pthread_mutex_lock(&hw->flows_lock);
+ rte_spinlock_lock(&hw->lock);
ret = hns3_flow_validate(dev, attr, pattern, actions, error,
&filter_info);
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return ret;
@@ -2642,7 +2630,7 @@ hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
struct rte_flow *flow;
- pthread_mutex_lock(&hw->flows_lock);
+ rte_spinlock_lock(&hw->lock);
flow = hns3_flow_create(dev, attr, pattern, actions, error);
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return flow;
@@ -2656,7 +2644,7 @@ hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
int ret;
- pthread_mutex_lock(&hw->flows_lock);
+ rte_spinlock_lock(&hw->lock);
ret = hns3_flow_destroy(dev, flow, error);
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return ret;
@@ -2669,7 +2657,7 @@ hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
int ret;
- pthread_mutex_lock(&hw->flows_lock);
+ rte_spinlock_lock(&hw->lock);
ret = hns3_flow_flush(dev, error);
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return ret;
@@ -2684,7 +2672,7 @@ hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
int ret;
- pthread_mutex_lock(&hw->flows_lock);
+ rte_spinlock_lock(&hw->lock);
ret = hns3_flow_query(dev, flow, actions, data, error);
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return ret;
@@ -2734,5 +2722,5 @@ hns3_flow_action_create(struct rte_eth_dev *dev,
return NULL;
- pthread_mutex_lock(&hw->flows_lock);
+ rte_spinlock_lock(&hw->lock);
act_count = (const struct rte_flow_action_count *)action->conf;
@@ -2759,9 +2747,9 @@ hns3_flow_action_create(struct rte_eth_dev *dev,
handle.counter_id = counter->id;
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return (struct rte_flow_action_handle *)handle.val64;
err_exit:
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return NULL;
}
@@ -2776,9 +2764,9 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
struct hns3_flow_counter *counter;
- pthread_mutex_lock(&hw->flows_lock);
+ rte_spinlock_lock(&hw->lock);
indir.val64 = (uint64_t)handle;
if (indir.indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -2788,5 +2776,5 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
counter = hns3_counter_lookup(dev, indir.counter_id);
if (counter == NULL) {
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -2795,5 +2783,5 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
if (counter->ref_cnt > 1) {
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -2803,5 +2791,5 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
(void)hns3_counter_release(dev, indir.counter_id);
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return 0;
}
@@ -2818,9 +2806,9 @@ hns3_flow_action_query(struct rte_eth_dev *dev,
int ret;
- pthread_mutex_lock(&hw->flows_lock);
+ rte_spinlock_lock(&hw->lock);
indir.val64 = (uint64_t)handle;
if (indir.indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -2832,5 +2820,5 @@ hns3_flow_action_query(struct rte_eth_dev *dev,
ret = hns3_counter_query(dev, &flow,
(struct rte_flow_query_count *)data, error);
- pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_unlock(&hw->lock);
return ret;
}
@@ -2866,12 +2854,8 @@ hns3_flow_init(struct rte_eth_dev *dev)
{
struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pthread_mutexattr_t attr;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return;
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
- pthread_mutex_init(&hw->flows_lock, &attr);
dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
--
2.51.0
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2025-10-31 13:53:55.051343982 +0000
+++ 0092-net-hns3-fix-inconsistent-lock.patch 2025-10-31 13:53:52.266524086 +0000
@@ -1 +1 @@
-From d441169bd20415691ea86707e7bf852eb6fcda46 Mon Sep 17 00:00:00 2001
+From bdc2be7156ac4f9a5b07c58d7c138166f28e4395 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit d441169bd20415691ea86707e7bf852eb6fcda46 ]
+
@@ -15 +16,0 @@
-Cc: stable at dpdk.org
@@ -25 +26 @@
-index d602bfa02f..f6bb1b5d43 100644
+index 01d473fd2e..9b8566139b 100644
@@ -34 +35 @@
-@@ -680,5 +679,4 @@ struct hns3_hw {
+@@ -681,5 +680,4 @@ struct hns3_hw {