patch 'net/mlx5: fix flow configure validation' has been queued to stable release 23.11.1
Xueming Li
xuemingl at nvidia.com
Sat Apr 13 14:49:44 CEST 2024
Hi,
FYI, your patch has been queued to stable release 23.11.1.
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 04/15/24, so please
shout if you have any objections.
Also note that after the patch there is a diff of the upstream commit vs. the
patch applied to the branch. This indicates whether any rebasing was needed
to apply the patch to the stable branch. If there were code changes for
rebasing (i.e., not only metadata diffs), please double-check that the rebase
was done correctly.
Queued patches are on a temporary branch at:
https://git.dpdk.org/dpdk-stable/log/?h=23.11-staging
This queued commit can be viewed at:
https://git.dpdk.org/dpdk-stable/commit/?h=23.11-staging&id=8117b4b2f7fa51f50686aa90939b8d8ac41a4ddc
Thanks.
Xueming Li <xuemingl at nvidia.com>
---
From 8117b4b2f7fa51f50686aa90939b8d8ac41a4ddc Mon Sep 17 00:00:00 2001
From: Dariusz Sosnowski <dsosnowski at nvidia.com>
Date: Wed, 6 Mar 2024 21:21:50 +0100
Subject: [PATCH] net/mlx5: fix flow configure validation
Cc: Xueming Li <xuemingl at nvidia.com>
[ upstream commit ff9433b578195be8c6cb44443ad199defdbf3c99 ]
There is an existing limitation in the mlx5 PMD that all configured flow
queues must have the same size. Even though this condition is checked,
some allocations are done before the check. This led to a segmentation
fault during rollback on error in the rte_flow_configure() implementation.
This patch fixes that by reorganizing validation so that configuration
options are validated before any allocations are done, and by adding the
necessary NULL checks to the error rollback.
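
For context, here is a minimal sketch (not part of the patch; the port ID
and resource counts are hypothetical) of the calling convention this new
validation enforces: every entry in the queue_attr array passed to
rte_flow_configure() must request the same queue size.

    #include <rte_flow.h>

    static int
    configure_flow_queues(uint16_t port_id)
    {
            /* Hypothetical resource count; only the queue sizes matter here. */
            const struct rte_flow_port_attr port_attr = { .nb_counters = 128 };
            /* Both queues request the same size; mixing sizes now fails the
             * up-front validation with EINVAL instead of faulting later. */
            const struct rte_flow_queue_attr queue_attr = { .size = 64 };
            const struct rte_flow_queue_attr *queue_attr_list[] = {
                    &queue_attr, &queue_attr,
            };
            struct rte_flow_error error;

            return rte_flow_configure(port_id, &port_attr, 2,
                                      queue_attr_list, &error);
    }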
Bugzilla ID: 1199
Fixes: b401400db24e ("net/mlx5: add port flow configuration")
Signed-off-by: Dariusz Sosnowski <dsosnowski at nvidia.com>
Acked-by: Suanming Mou <suanmingm at nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 58 +++++++++++++++++++++++----------
1 file changed, 41 insertions(+), 17 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 938d9b5824..a54075ed7e 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -9291,6 +9291,38 @@ flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
return true;
}
+static int
+flow_hw_validate_attributes(const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *error)
+{
+ uint32_t size;
+ unsigned int i;
+
+ if (port_attr == NULL)
+ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Port attributes must be non-NULL");
+
+ if (nb_queue == 0)
+ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "At least one flow queue is required");
+
+ if (queue_attr == NULL)
+ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Queue attributes must be non-NULL");
+
+ size = queue_attr[0]->size;
+ for (i = 1; i < nb_queue; ++i) {
+ if (queue_attr[i]->size != size)
+ return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "All flow queues must have the same size");
+ }
+
+ return 0;
+}
+
/**
* Configure port HWS resources.
*
@@ -9342,10 +9374,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
int ret = 0;
uint32_t action_flags;
- if (!port_attr || !nb_queue || !queue_attr) {
- rte_errno = EINVAL;
- goto err;
- }
+ if (flow_hw_validate_attributes(port_attr, nb_queue, queue_attr, error))
+ return -rte_errno;
/*
* Calling rte_flow_configure() again is allowed if and only if
* provided configuration matches the initially provided one.
@@ -9392,14 +9422,6 @@ flow_hw_configure(struct rte_eth_dev *dev,
/* Allocate the queue job descriptor LIFO. */
mem_size = sizeof(priv->hw_q[0]) * nb_q_updated;
for (i = 0; i < nb_q_updated; i++) {
- /*
- * Check if the queues' size are all the same as the
- * limitation from HWS layer.
- */
- if (_queue_attr[i]->size != _queue_attr[0]->size) {
- rte_errno = EINVAL;
- goto err;
- }
mem_size += (sizeof(struct mlx5_hw_q_job *) +
sizeof(struct mlx5_hw_q_job) +
sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
@@ -9681,12 +9703,14 @@ err:
flow_hw_destroy_vlan(dev);
if (dr_ctx)
claim_zero(mlx5dr_context_close(dr_ctx));
- for (i = 0; i < nb_q_updated; i++) {
- rte_ring_free(priv->hw_q[i].indir_iq);
- rte_ring_free(priv->hw_q[i].indir_cq);
+ if (priv->hw_q) {
+ for (i = 0; i < nb_q_updated; i++) {
+ rte_ring_free(priv->hw_q[i].indir_iq);
+ rte_ring_free(priv->hw_q[i].indir_cq);
+ }
+ mlx5_free(priv->hw_q);
+ priv->hw_q = NULL;
}
- mlx5_free(priv->hw_q);
- priv->hw_q = NULL;
if (priv->acts_ipool) {
mlx5_ipool_destroy(priv->acts_ipool);
priv->acts_ipool = NULL;
--
2.34.1
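
To summarize the shape of the fix outside the driver specifics, here is a
generic, illustrative sketch (all names below are invented, not the
driver's) of the ordering the patch establishes: validate every input
before the first allocation, and make the error rollback tolerant of state
that was never allocated.

    #include <errno.h>
    #include <stdlib.h>

    static int
    validate_attrs(const unsigned int *sizes, unsigned int n)
    {
            unsigned int i;

            if (sizes == NULL || n == 0)
                    return -EINVAL;
            for (i = 1; i < n; i++)
                    if (sizes[i] != sizes[0])
                            return -EINVAL; /* all queues must share one size */
            return 0;
    }

    static int
    configure(void ***queues_out, const unsigned int *sizes, unsigned int n)
    {
            void **queues = NULL;
            unsigned int i;

            /* 1. Validate before touching any resources. */
            if (validate_attrs(sizes, n) != 0)
                    return -EINVAL;
            /* 2. Allocate only after validation has passed. */
            queues = calloc(n, sizeof(*queues));
            if (queues == NULL)
                    goto err;
            for (i = 0; i < n; i++) {
                    queues[i] = malloc(sizes[i]);
                    if (queues[i] == NULL)
                            goto err;
            }
            *queues_out = queues;
            return 0;
    err:
            /* 3. Rollback must guard against never-allocated state;
             * free(NULL) is harmless for the untouched tail entries. */
            if (queues != NULL) {
                    for (i = 0; i < n; i++)
                            free(queues[i]);
                    free(queues);
            }
            return -ENOMEM;
    }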
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2024-04-13 20:43:08.204017644 +0800
+++ 0104-net-mlx5-fix-flow-configure-validation.patch 2024-04-13 20:43:05.107753788 +0800
@@ -1 +1 @@
-From ff9433b578195be8c6cb44443ad199defdbf3c99 Mon Sep 17 00:00:00 2001
+From 8117b4b2f7fa51f50686aa90939b8d8ac41a4ddc Mon Sep 17 00:00:00 2001
@@ -4,0 +5,3 @@
+Cc: Xueming Li <xuemingl at nvidia.com>
+
+[ upstream commit ff9433b578195be8c6cb44443ad199defdbf3c99 ]
@@ -17 +19,0 @@
-Cc: stable at dpdk.org
@@ -22,2 +24,2 @@
- drivers/net/mlx5/mlx5_flow_hw.c | 62 +++++++++++++++++++++++----------
- 1 file changed, 43 insertions(+), 19 deletions(-)
+ drivers/net/mlx5/mlx5_flow_hw.c | 58 +++++++++++++++++++++++----------
+ 1 file changed, 41 insertions(+), 17 deletions(-)
@@ -26 +28 @@
-index d88959e36d..35f1ed7a03 100644
+index 938d9b5824..a54075ed7e 100644
@@ -29,2 +31,2 @@
-@@ -10289,6 +10289,38 @@ mlx5_hwq_ring_create(uint16_t port_id, uint32_t queue, uint32_t size, const char
- RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
+@@ -9291,6 +9291,38 @@ flow_hw_compare_config(const struct mlx5_flow_hw_attr *hw_attr,
+ return true;
@@ -68 +70 @@
-@@ -10340,10 +10372,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
+@@ -9342,10 +9374,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
@@ -81 +83 @@
-@@ -10390,14 +10420,6 @@ flow_hw_configure(struct rte_eth_dev *dev,
+@@ -9392,14 +9422,6 @@ flow_hw_configure(struct rte_eth_dev *dev,
@@ -94,6 +96,6 @@
- sizeof(struct mlx5_hw_q_job)) * _queue_attr[i]->size;
- }
-@@ -10679,14 +10701,16 @@ err:
- __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
- priv->shared_host = NULL;
- }
+ sizeof(struct mlx5_hw_q_job) +
+ sizeof(uint8_t) * MLX5_ENCAP_MAX_LEN +
+@@ -9681,12 +9703,14 @@ err:
+ flow_hw_destroy_vlan(dev);
+ if (dr_ctx)
+ claim_zero(mlx5dr_context_close(dr_ctx));
@@ -103,2 +104,0 @@
-- rte_ring_free(priv->hw_q[i].flow_transfer_pending);
-- rte_ring_free(priv->hw_q[i].flow_transfer_completed);
@@ -109,2 +108,0 @@
-+ rte_ring_free(priv->hw_q[i].flow_transfer_pending);
-+ rte_ring_free(priv->hw_q[i].flow_transfer_completed);