patch 'net/hns3: fix inconsistent lock' has been queued to stable release 22.11.11
    luca.boccassi at gmail.com 
    luca.boccassi at gmail.com
       
    Mon Oct 27 17:19:31 CET 2025
    
    
  
Hi,
FYI, your patch has been queued to stable release 22.11.11
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 10/29/25. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://github.com/bluca/dpdk-stable
This queued commit can be viewed at:
https://github.com/bluca/dpdk-stable/commit/a7f48b8165af74c3a7577b200f3576cd82034118
Thanks.
Luca Boccassi
---
>From a7f48b8165af74c3a7577b200f3576cd82034118 Mon Sep 17 00:00:00 2001
From: Dengdui Huang <huangdengdui at huawei.com>
Date: Wed, 13 Aug 2025 15:33:15 +0800
Subject: [PATCH] net/hns3: fix inconsistent lock
[ upstream commit d441169bd20415691ea86707e7bf852eb6fcda46 ]
The hns3 driver supports configuring RSS through both the ops API and the
rte_flow API. The ops API uses a spin lock, while the rte_flow API uses a
pthread mutex. When concurrent calls occur, issues may arise.
This patch replaces the lock in the flow API with spin lock.
With the pthread mutex no longer needed, the pthread attributes
can also be removed.
Fixes: 1bdcca8006e4 ("net/hns3: fix flow director lock")
Signed-off-by: Dengdui Huang <huangdengdui at huawei.com>
---
 drivers/net/hns3/hns3_ethdev.h |  2 --
 drivers/net/hns3/hns3_fdir.c   | 13 --------
 drivers/net/hns3/hns3_flow.c   | 60 +++++++++++++---------------------
 3 files changed, 22 insertions(+), 53 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 5445170c8b..9ebf807202 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -5,7 +5,6 @@
 #ifndef HNS3_ETHDEV_H
 #define HNS3_ETHDEV_H
 
-#include <pthread.h>
 #include <ethdev_driver.h>
 #include <rte_byteorder.h>
 #include <rte_io.h>
@@ -655,7 +654,6 @@ struct hns3_hw {
 
 	struct hns3_port_base_vlan_config port_base_vlan_cfg;
 
-	pthread_mutex_t flows_lock; /* rte_flow ops lock */
 	struct hns3_fdir_rule_list flow_fdir_list; /* flow fdir rule list */
 	struct hns3_rss_filter_list flow_rss_list; /* flow RSS rule list */
 	struct hns3_flow_mem_list flow_list;
diff --git a/drivers/net/hns3/hns3_fdir.c b/drivers/net/hns3/hns3_fdir.c
index 73d4a25d63..c53a26f57b 100644
--- a/drivers/net/hns3/hns3_fdir.c
+++ b/drivers/net/hns3/hns3_fdir.c
@@ -1072,17 +1072,6 @@ int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
 	if (hns->is_vf)
 		return 0;
 
-	/*
-	 * This API is called in the reset recovery process, the parent function
-	 * must hold hw->lock.
-	 * There maybe deadlock if acquire hw->flows_lock directly because rte
-	 * flow driver ops first acquire hw->flows_lock and then may acquire
-	 * hw->lock.
-	 * So here first release the hw->lock and then acquire the
-	 * hw->flows_lock to avoid deadlock.
-	 */
-	rte_spinlock_unlock(&hw->lock);
-	pthread_mutex_lock(&hw->flows_lock);
 	TAILQ_FOREACH(fdir_filter, &fdir_info->fdir_list, entries) {
 		ret = hns3_config_action(hw, &fdir_filter->fdir_conf);
 		if (!ret)
@@ -1093,8 +1082,6 @@ int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
 				break;
 		}
 	}
-	pthread_mutex_unlock(&hw->flows_lock);
-	rte_spinlock_lock(&hw->lock);
 
 	if (err) {
 		hns3_err(hw, "Fail to restore FDIR filter, ret = %d", ret);
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index e1180e4fec..cf71c6766d 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -2032,18 +2032,6 @@ hns3_reconfig_all_rss_filter(struct hns3_hw *hw)
 	return 0;
 }
 
-static int
-hns3_restore_rss_filter(struct hns3_hw *hw)
-{
-	int ret;
-
-	pthread_mutex_lock(&hw->flows_lock);
-	ret = hns3_reconfig_all_rss_filter(hw);
-	pthread_mutex_unlock(&hw->flows_lock);
-
-	return ret;
-}
-
 int
 hns3_restore_filter(struct hns3_adapter *hns)
 {
@@ -2054,7 +2042,7 @@ hns3_restore_filter(struct hns3_adapter *hns)
 	if (ret != 0)
 		return ret;
 
-	return hns3_restore_rss_filter(hw);
+	return hns3_reconfig_all_rss_filter(hw);
 }
 
 static int
@@ -2446,10 +2434,10 @@ hns3_flow_validate_wrap(struct rte_eth_dev *dev,
 	struct hns3_filter_info filter_info = {0};
 	int ret;
 
-	pthread_mutex_lock(&hw->flows_lock);
+	rte_spinlock_lock(&hw->lock);
 	ret = hns3_flow_validate(dev, attr, pattern, actions, error,
 				 &filter_info);
-	pthread_mutex_unlock(&hw->flows_lock);
+	rte_spinlock_unlock(&hw->lock);
 
 	return ret;
 }
@@ -2463,9 +2451,9 @@ hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_flow *flow;
 
-	pthread_mutex_lock(&hw->flows_lock);
+	rte_spinlock_lock(&hw->lock);
 	flow = hns3_flow_create(dev, attr, pattern, actions, error);
-	pthread_mutex_unlock(&hw->flows_lock);
+	rte_spinlock_unlock(&hw->lock);
 
 	return flow;
 }
@@ -2477,9 +2465,9 @@ hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret;
 
-	pthread_mutex_lock(&hw->flows_lock);
+	rte_spinlock_lock(&hw->lock);
 	ret = hns3_flow_destroy(dev, flow, error);
-	pthread_mutex_unlock(&hw->flows_lock);
+	rte_spinlock_unlock(&hw->lock);
 
 	return ret;
 }
@@ -2490,9 +2478,9 @@ hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret;
 
-	pthread_mutex_lock(&hw->flows_lock);
+	rte_spinlock_lock(&hw->lock);
 	ret = hns3_flow_flush(dev, error);
-	pthread_mutex_unlock(&hw->flows_lock);
+	rte_spinlock_unlock(&hw->lock);
 
 	return ret;
 }
@@ -2505,9 +2493,9 @@ hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	int ret;
 
-	pthread_mutex_lock(&hw->flows_lock);
+	rte_spinlock_lock(&hw->lock);
 	ret = hns3_flow_query(dev, flow, actions, data, error);
-	pthread_mutex_unlock(&hw->flows_lock);
+	rte_spinlock_unlock(&hw->lock);
 
 	return ret;
 }
@@ -2555,7 +2543,7 @@ hns3_flow_action_create(struct rte_eth_dev *dev,
 	if (hns3_check_indir_action(conf, action, error))
 		return NULL;
 
-	pthread_mutex_lock(&hw->flows_lock);
+	rte_spinlock_lock(&hw->lock);
 
 	act_count = (const struct rte_flow_action_count *)action->conf;
 	if (act_count->id >= pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]) {
@@ -2580,11 +2568,11 @@ hns3_flow_action_create(struct rte_eth_dev *dev,
 	handle.indirect_type = HNS3_INDIRECT_ACTION_TYPE_COUNT;
 	handle.counter_id = counter->id;
 
-	pthread_mutex_unlock(&hw->flows_lock);
+	rte_spinlock_unlock(&hw->lock);
 	return (struct rte_flow_action_handle *)handle.val64;
 
 err_exit:
-	pthread_mutex_unlock(&hw->flows_lock);
+	rte_spinlock_unlock(&hw->lock);
 	return NULL;
 }
 
@@ -2597,11 +2585,11 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
 	struct rte_flow_action_handle indir;
 	struct hns3_flow_counter *counter;
 
-	pthread_mutex_lock(&hw->flows_lock);
+	rte_spinlock_lock(&hw->lock);
 
 	indir.val64 = (uint64_t)handle;
 	if (indir.indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
-		pthread_mutex_unlock(&hw->flows_lock);
+		rte_spinlock_unlock(&hw->lock);
 		return rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					handle, "Invalid indirect type");
@@ -2609,14 +2597,14 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
 
 	counter = hns3_counter_lookup(dev, indir.counter_id);
 	if (counter == NULL) {
-		pthread_mutex_unlock(&hw->flows_lock);
+		rte_spinlock_unlock(&hw->lock);
 		return rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 				handle, "Counter id not exist");
 	}
 
 	if (counter->ref_cnt > 1) {
-		pthread_mutex_unlock(&hw->flows_lock);
+		rte_spinlock_unlock(&hw->lock);
 		return rte_flow_error_set(error, EBUSY,
 				RTE_FLOW_ERROR_TYPE_HANDLE,
 				handle, "Counter id in use");
@@ -2624,7 +2612,7 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
 
 	(void)hns3_counter_release(dev, indir.counter_id);
 
-	pthread_mutex_unlock(&hw->flows_lock);
+	rte_spinlock_unlock(&hw->lock);
 	return 0;
 }
 
@@ -2639,11 +2627,11 @@ hns3_flow_action_query(struct rte_eth_dev *dev,
 	struct rte_flow flow;
 	int ret;
 
-	pthread_mutex_lock(&hw->flows_lock);
+	rte_spinlock_lock(&hw->lock);
 
 	indir.val64 = (uint64_t)handle;
 	if (indir.indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
-		pthread_mutex_unlock(&hw->flows_lock);
+		rte_spinlock_unlock(&hw->lock);
 		return rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					handle, "Invalid indirect type");
@@ -2653,7 +2641,7 @@ hns3_flow_action_query(struct rte_eth_dev *dev,
 	flow.counter_id = indir.counter_id;
 	ret = hns3_counter_query(dev, &flow,
 				 (struct rte_flow_query_count *)data, error);
-	pthread_mutex_unlock(&hw->flows_lock);
+	rte_spinlock_unlock(&hw->lock);
 	return ret;
 }
 
@@ -2687,14 +2675,10 @@ void
 hns3_flow_init(struct rte_eth_dev *dev)
 {
 	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	pthread_mutexattr_t attr;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return;
 
-	pthread_mutexattr_init(&attr);
-	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
-	pthread_mutex_init(&hw->flows_lock, &attr);
 	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
 
 	TAILQ_INIT(&hw->flow_fdir_list);
-- 
2.47.3
---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2025-10-27 15:54:36.756185384 +0000
+++ 0053-net-hns3-fix-inconsistent-lock.patch	2025-10-27 15:54:34.827950352 +0000
@@ -1 +1 @@
-From d441169bd20415691ea86707e7bf852eb6fcda46 Mon Sep 17 00:00:00 2001
+From a7f48b8165af74c3a7577b200f3576cd82034118 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit d441169bd20415691ea86707e7bf852eb6fcda46 ]
+
@@ -15 +16,0 @@
-Cc: stable at dpdk.org
@@ -25 +26 @@
-index d602bfa02f..f6bb1b5d43 100644
+index 5445170c8b..9ebf807202 100644
@@ -36 +37 @@
-@@ -679,7 +678,6 @@ struct hns3_hw {
+@@ -655,7 +654,6 @@ struct hns3_hw {
@@ -45 +46 @@
-index aacad40e61..50572ae430 100644
+index 73d4a25d63..c53a26f57b 100644
@@ -48 +49 @@
-@@ -1145,17 +1145,6 @@ int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
+@@ -1072,17 +1072,6 @@ int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
@@ -66 +67 @@
-@@ -1166,8 +1155,6 @@ int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
+@@ -1093,8 +1082,6 @@ int hns3_restore_all_fdir_filter(struct hns3_adapter *hns)
@@ -76 +77 @@
-index c0238d2bfa..f2d1e4ec3a 100644
+index e1180e4fec..cf71c6766d 100644
@@ -79 +80 @@
-@@ -2210,18 +2210,6 @@ hns3_reconfig_all_rss_filter(struct hns3_hw *hw)
+@@ -2032,18 +2032,6 @@ hns3_reconfig_all_rss_filter(struct hns3_hw *hw)
@@ -98 +99 @@
-@@ -2232,7 +2220,7 @@ hns3_restore_filter(struct hns3_adapter *hns)
+@@ -2054,7 +2042,7 @@ hns3_restore_filter(struct hns3_adapter *hns)
@@ -107 +108 @@
-@@ -2624,10 +2612,10 @@ hns3_flow_validate_wrap(struct rte_eth_dev *dev,
+@@ -2446,10 +2434,10 @@ hns3_flow_validate_wrap(struct rte_eth_dev *dev,
@@ -120 +121 @@
-@@ -2641,9 +2629,9 @@ hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+@@ -2463,9 +2451,9 @@ hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
@@ -132 +133 @@
-@@ -2655,9 +2643,9 @@ hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+@@ -2477,9 +2465,9 @@ hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
@@ -144 +145 @@
-@@ -2668,9 +2656,9 @@ hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
+@@ -2490,9 +2478,9 @@ hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
@@ -156 +157 @@
-@@ -2683,9 +2671,9 @@ hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+@@ -2505,9 +2493,9 @@ hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
@@ -168 +169 @@
-@@ -2733,7 +2721,7 @@ hns3_flow_action_create(struct rte_eth_dev *dev,
+@@ -2555,7 +2543,7 @@ hns3_flow_action_create(struct rte_eth_dev *dev,
@@ -177 +178 @@
-@@ -2758,11 +2746,11 @@ hns3_flow_action_create(struct rte_eth_dev *dev,
+@@ -2580,11 +2568,11 @@ hns3_flow_action_create(struct rte_eth_dev *dev,
@@ -191 +192 @@
-@@ -2775,11 +2763,11 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
+@@ -2597,11 +2585,11 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
@@ -205 +206 @@
-@@ -2787,14 +2775,14 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
+@@ -2609,14 +2597,14 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
@@ -222 +223 @@
-@@ -2802,7 +2790,7 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
+@@ -2624,7 +2612,7 @@ hns3_flow_action_destroy(struct rte_eth_dev *dev,
@@ -231 +232 @@
-@@ -2817,11 +2805,11 @@ hns3_flow_action_query(struct rte_eth_dev *dev,
+@@ -2639,11 +2627,11 @@ hns3_flow_action_query(struct rte_eth_dev *dev,
@@ -245 +246 @@
-@@ -2831,7 +2819,7 @@ hns3_flow_action_query(struct rte_eth_dev *dev,
+@@ -2653,7 +2641,7 @@ hns3_flow_action_query(struct rte_eth_dev *dev,
@@ -254 +255 @@
-@@ -2865,14 +2853,10 @@ void
+@@ -2687,14 +2675,10 @@ void
    
    
More information about the stable
mailing list