patch 'net/mlx5: fix hairpin queue release' has been queued to stable release 23.11.4
Xueming Li
xuemingl at nvidia.com
Tue Apr 8 10:10:41 CEST 2025
Hi,
FYI, your patch has been queued to stable release 23.11.4
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 04/10/25. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e., not only metadata diffs), please double check that the rebase was
correctly done.
Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=23.11-staging
This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=23.11-staging&id=59d1fbfd973b943ac420afe477c84bfd9afd0533
Thanks.
Xueming Li <xuemingl at nvidia.com>
---
From 59d1fbfd973b943ac420afe477c84bfd9afd0533 Mon Sep 17 00:00:00 2001
From: Maayan Kashani <mkashani at nvidia.com>
Date: Thu, 27 Feb 2025 12:14:14 +0200
Subject: [PATCH] net/mlx5: fix hairpin queue release
Cc: Xueming Li <xuemingl at nvidia.com>
[ upstream commit 6886b5f39d66770fb7e233fa1c8fc74ed1935116 ]
Fix an assert failure that occurs when releasing a hairpin queue. The issue
arises from incorrect handling of shared Rx queues during release.
Fixes: 09c2555303be ("net/mlx5: support shared Rx queue")
Signed-off-by: Maayan Kashani <mkashani at nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski at nvidia.com>
---
drivers/net/mlx5/mlx5.h | 1 +
drivers/net/mlx5/mlx5_flow.c | 4 ++--
drivers/net/mlx5/mlx5_rx.h | 1 +
drivers/net/mlx5/mlx5_rxq.c | 12 ++++++++----
4 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 55c29e31a2..a9129bf61b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1886,6 +1886,7 @@ struct mlx5_priv {
uint32_t ctrl_flows; /* Control flow rules. */
rte_spinlock_t flow_list_lock;
struct mlx5_obj_ops obj_ops; /* HW objects operations. */
+ LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
struct mlx5_list *hrxqs; /* Hash Rx queues. */
LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 97f678ff4e..bca16a916b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1748,13 +1748,13 @@ flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
opriv->domain_id != priv->domain_id ||
opriv->mark_enabled)
continue;
- LIST_FOREACH(rxq_ctrl, &opriv->sh->shared_rxqs, share_entry) {
+ LIST_FOREACH(rxq_ctrl, &opriv->rxqsctrl, next) {
rxq_ctrl->rxq.mark = 1;
}
opriv->mark_enabled = 1;
}
} else {
- LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
rxq_ctrl->rxq.mark = 1;
}
priv->mark_enabled = 1;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index db912adf2a..2205149458 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -151,6 +151,7 @@ struct mlx5_rxq_data {
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
struct mlx5_rxq_data rxq; /* Data path structure. */
+ LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
struct mlx5_dev_ctx_shared *sh; /* Shared context. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dccfc4eb36..7b377974d3 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1037,6 +1037,7 @@ mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
rte_errno = ENOMEM;
return -rte_errno;
}
+ rte_atomic_fetch_add_explicit(&rxq_ctrl->ctrl_ref, 1, rte_memory_order_relaxed);
DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
dev->data->port_id, idx);
dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
@@ -1971,8 +1972,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->rxq.shared = 1;
tmpl->share_group = conf->share_group;
tmpl->share_qid = conf->share_qid;
+ LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
}
- LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+ LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
rte_atomic_store_explicit(&tmpl->ctrl_ref, 1, rte_memory_order_relaxed);
return tmpl;
error:
@@ -2026,7 +2028,7 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
tmpl->rxq.idx = idx;
rxq->hairpin_conf = *hairpin_conf;
mlx5_rxq_ref(dev, idx);
- LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+ LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
rte_atomic_store_explicit(&tmpl->ctrl_ref, 1, rte_memory_order_relaxed);
return tmpl;
}
@@ -2301,7 +2303,9 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
if (!rxq_ctrl->is_hairpin)
mlx5_mr_btree_free
(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
- LIST_REMOVE(rxq_ctrl, share_entry);
+ if (rxq_ctrl->rxq.shared)
+ LIST_REMOVE(rxq_ctrl, share_entry);
+ LIST_REMOVE(rxq_ctrl, next);
mlx5_free(rxq_ctrl);
}
dev->data->rx_queues[idx] = NULL;
@@ -2327,7 +2331,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;
- LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
+ LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
dev->data->port_id, rxq_ctrl->rxq.idx);
++ret;
--
2.34.1
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2025-04-08 15:39:07.013131230 +0800
+++ 0037-net-mlx5-fix-hairpin-queue-release.patch 2025-04-08 15:39:05.996436559 +0800
@@ -1 +1 @@
-From 6886b5f39d66770fb7e233fa1c8fc74ed1935116 Mon Sep 17 00:00:00 2001
+From 59d1fbfd973b943ac420afe477c84bfd9afd0533 Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit 6886b5f39d66770fb7e233fa1c8fc74ed1935116 ]
@@ -10 +12,0 @@
-Cc: stable at dpdk.org
@@ -22 +24 @@
-index 545ba48b3c..6df99c25e2 100644
+index 55c29e31a2..a9129bf61b 100644
@@ -25 +27 @@
-@@ -2023,6 +2023,7 @@ struct mlx5_priv {
+@@ -1886,6 +1886,7 @@ struct mlx5_priv {
@@ -34 +36 @@
-index f8b3e504b3..6169ebc13f 100644
+index 97f678ff4e..bca16a916b 100644
@@ -37 +39 @@
-@@ -1648,13 +1648,13 @@ flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
+@@ -1748,13 +1748,13 @@ flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
@@ -54 +56 @@
-index f80a2e3227..6380895502 100644
+index db912adf2a..2205149458 100644
@@ -57 +59 @@
-@@ -169,6 +169,7 @@ struct __rte_cache_aligned mlx5_rxq_data {
+@@ -151,6 +151,7 @@ struct mlx5_rxq_data {
@@ -66 +68 @@
-index a5971b5cdd..5cf7d4971b 100644
+index dccfc4eb36..7b377974d3 100644
@@ -77 +79 @@
-@@ -2006,8 +2007,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+@@ -1971,8 +1972,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@@ -88 +90 @@
-@@ -2061,7 +2063,7 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+@@ -2026,7 +2028,7 @@ mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
@@ -97 +99 @@
-@@ -2336,7 +2338,9 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
+@@ -2301,7 +2303,9 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
@@ -108 +110 @@
-@@ -2362,7 +2366,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
+@@ -2327,7 +2331,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
More information about the stable
mailing list