patch 'net/mlx5: fix control flow leakage for external SQ' has been queued to stable release 23.11.6
Shani Peretz
shperetz at nvidia.com
Thu Dec 25 10:18:13 CET 2025
Hi,
FYI, your patch has been queued to stable release 23.11.6
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/30/25. So please
shout if anyone has objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.
Queued patches are on a temporary branch at:
https://github.com/shanipr/dpdk-stable
This queued commit can be viewed at:
https://github.com/shanipr/dpdk-stable/commit/db2a376e8bd7ca39e35b895e4b8427967b1fbf78
Thanks.
Shani
---
From db2a376e8bd7ca39e35b895e4b8427967b1fbf78 Mon Sep 17 00:00:00 2001
From: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
Date: Tue, 18 Nov 2025 18:51:58 +0200
Subject: [PATCH] net/mlx5: fix control flow leakage for external SQ
[ upstream commit 3bf9f0f9f0beb8dcd4f3b316c3216a87bc9ab49f ]
There is a private API, rte_pmd_mlx5_external_sq_enable(),
that allows an application to create a Send Queue (SQ) on its
own and then enable this queue for usage as an "external SQ".
On this enabling call some implicit flows are created to provide
compliant SQ behavior - copying the metadata register, forwarding
queue-originated packets to the correct VF, etc.
These implicit flows are marked as "external" ones, and there is
no cleanup on device start and stop for this kind of flow.
Also, the PMD has no knowledge of whether an external SQ is still
in use by the application, so implicit cleanup cannot be performed.
As a result, over multiple device start/stop cycles the application
re-creates and re-enables many external SQs, causing the implicit
flow tables to overflow.
To resolve this issue, the rte_pmd_mlx5_external_sq_disable()
API is provided, which allows the application to notify the PMD
that an external SQ is no longer in use and its related implicit
flows can be dismissed.
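
A minimal usage sketch (illustrative only, not part of this patch)
of how an application is expected to pair the two calls; the helper
name is hypothetical, and sq_num is assumed to be the HW number of
an SQ the application created on its own:

    #include <rte_ethdev.h>
    #include <rte_pmd_mlx5.h>

    static int
    app_run_external_sq(uint16_t port_id, uint32_t sq_num)
    {
            int ret;

            /* Install the implicit control flows for the external SQ. */
            ret = rte_pmd_mlx5_external_sq_enable(port_id, sq_num);
            if (ret < 0)
                    return ret;
            /* ... transmit via the external SQ ... */
            /*
             * Notify the PMD before destroying the SQ so that the
             * implicit flows are removed instead of accumulating
             * across device start/stop cycles.
             */
            return rte_pmd_mlx5_external_sq_disable(port_id, sq_num);
    }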
Fixes: 26e1eaf2dac4 ("net/mlx5: support device control for E-Switch default rule")
Cc: stable at dpdk.org
Signed-off-by: Viacheslav Ovsiienko <viacheslavo at nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski at nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 12 ++--
drivers/net/mlx5/mlx5_flow_hw.c | 106 +++++++++++++++++++++++++++++++-
drivers/net/mlx5/mlx5_trigger.c | 2 +-
drivers/net/mlx5/mlx5_txq.c | 54 ++++++++++++++--
drivers/net/mlx5/rte_pmd_mlx5.h | 18 ++++++
drivers/net/mlx5/version.map | 1 +
6 files changed, 181 insertions(+), 12 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 219ea462c9..1ebf584078 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2917,12 +2917,16 @@ int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
uint32_t sqn, bool external);
int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev,
- uint32_t sqn);
+ uint32_t sqn, bool external);
int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev,
- uint32_t sqn,
- bool external);
-int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external);
+ uint32_t sqn, bool external);
+int mlx5_flow_hw_destroy_tx_default_mreg_copy_flow(struct rte_eth_dev *dev,
+ uint32_t sqn, bool external);
+int mlx5_flow_hw_create_tx_repr_matching_flow(struct rte_eth_dev *dev,
+ uint32_t sqn, bool external);
+int mlx5_flow_hw_destroy_tx_repr_matching_flow(struct rte_eth_dev *dev,
+ uint32_t sqn, bool external);
int mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev);
int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
const struct rte_flow_actions_template_attr *attr,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 41910d801b..b66ed53141 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -12197,7 +12197,7 @@ flow_hw_is_matching_sq_miss_flow(struct mlx5_hw_ctrl_flow *cf,
}
int
-mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
+mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
{
uint16_t port_id = dev->data->port_id;
uint16_t proxy_port_id = dev->data->port_id;
@@ -12224,7 +12224,8 @@ mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
!proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_root_tbl ||
!proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl)
return 0;
- cf = LIST_FIRST(&proxy_priv->hw_ctrl_flows);
+ cf = external ? LIST_FIRST(&proxy_priv->hw_ext_ctrl_flows) :
+ LIST_FIRST(&proxy_priv->hw_ctrl_flows);
while (cf != NULL) {
cf_next = LIST_NEXT(cf, next);
if (flow_hw_is_matching_sq_miss_flow(cf, dev, sqn)) {
@@ -12358,8 +12359,58 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev, uint32_t
items, 0, copy_reg_action, 0, &flow_info, external);
}
+static bool
+flow_hw_is_matching_tx_mreg_copy_flow(struct mlx5_hw_ctrl_flow *cf,
+ struct rte_eth_dev *dev,
+ uint32_t sqn)
+{
+ if (cf->owner_dev != dev)
+ return false;
+ if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY && cf->info.tx_repr_sq == sqn)
+ return true;
+ return false;
+}
+
+int
+mlx5_flow_hw_destroy_tx_default_mreg_copy_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
+{
+ uint16_t port_id = dev->data->port_id;
+ uint16_t proxy_port_id = dev->data->port_id;
+ struct rte_eth_dev *proxy_dev;
+ struct mlx5_priv *proxy_priv;
+ struct mlx5_hw_ctrl_flow *cf;
+ struct mlx5_hw_ctrl_flow *cf_next;
+ int ret;
+
+ ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
+ if (ret) {
+ DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
+ "port must be present for default SQ miss flow rules to exist.",
+ port_id);
+ return ret;
+ }
+ proxy_dev = &rte_eth_devices[proxy_port_id];
+ proxy_priv = proxy_dev->data->dev_private;
+ if (!proxy_priv->dr_ctx ||
+ !proxy_priv->hw_ctrl_fdb ||
+ !proxy_priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl)
+ return 0;
+ cf = external ? LIST_FIRST(&proxy_priv->hw_ext_ctrl_flows) :
+ LIST_FIRST(&proxy_priv->hw_ctrl_flows);
+ while (cf != NULL) {
+ cf_next = LIST_NEXT(cf, next);
+ if (flow_hw_is_matching_tx_mreg_copy_flow(cf, dev, sqn)) {
+ claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow));
+ LIST_REMOVE(cf, next);
+ mlx5_free(cf);
+ }
+ cf = cf_next;
+ }
+ return 0;
+}
+
int
-mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
+mlx5_flow_hw_create_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rte_flow_item_sq sq_spec = {
@@ -12416,6 +12467,55 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool e
items, 0, actions, 0, &flow_info, external);
}
+static bool
+flow_hw_is_tx_matching_repr_matching_flow(struct mlx5_hw_ctrl_flow *cf,
+ struct rte_eth_dev *dev,
+ uint32_t sqn)
+{
+ if (cf->owner_dev != dev)
+ return false;
+ if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH && cf->info.tx_repr_sq == sqn)
+ return true;
+ return false;
+}
+
+int
+mlx5_flow_hw_destroy_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
+{
+ uint16_t port_id = dev->data->port_id;
+ uint16_t proxy_port_id = dev->data->port_id;
+ struct rte_eth_dev *proxy_dev;
+ struct mlx5_priv *proxy_priv;
+ struct mlx5_hw_ctrl_flow *cf;
+ struct mlx5_hw_ctrl_flow *cf_next;
+ int ret;
+
+ ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
+ if (ret) {
+ DRV_LOG(ERR, "Unable to pick transfer proxy port for port %u. Transfer proxy "
+ "port must be present for default SQ miss flow rules to exist.",
+ port_id);
+ return ret;
+ }
+ proxy_dev = &rte_eth_devices[proxy_port_id];
+ proxy_priv = proxy_dev->data->dev_private;
+ if (!proxy_priv->dr_ctx ||
+ !proxy_priv->hw_tx_repr_tagging_tbl)
+ return 0;
+ cf = external ? LIST_FIRST(&proxy_priv->hw_ext_ctrl_flows) :
+ LIST_FIRST(&proxy_priv->hw_ctrl_flows);
+ while (cf != NULL) {
+ cf_next = LIST_NEXT(cf, next);
+ if (flow_hw_is_tx_matching_repr_matching_flow(cf, dev, sqn)) {
+ claim_zero(flow_hw_destroy_ctrl_flow(proxy_dev, cf->flow));
+ LIST_REMOVE(cf, next);
+ mlx5_free(cf);
+ }
+ cf = cf_next;
+ }
+ return 0;
+}
+
int
mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
{
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 51d848158c..3bda84e963 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1495,7 +1495,7 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
}
}
if (config->dv_esw_en && config->repr_matching) {
- if (mlx5_flow_hw_tx_repr_matching_flow(dev, queue, false)) {
+ if (mlx5_flow_hw_create_tx_repr_matching_flow(dev, queue, false)) {
mlx5_txq_release(dev, i);
goto error;
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 3f8d861180..d6f5790983 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1308,7 +1308,7 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
priv = dev->data->dev_private;
if ((!priv->representor && !priv->master) ||
!priv->sh->config.dv_esw_en) {
- DRV_LOG(ERR, "Port %u must be represetnor or master port in E-Switch mode.",
+ DRV_LOG(ERR, "Port %u must be representor or master port in E-Switch mode.",
port_id);
rte_errno = EINVAL;
return -rte_errno;
@@ -1329,9 +1329,9 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
}
if (priv->sh->config.repr_matching &&
- mlx5_flow_hw_tx_repr_matching_flow(dev, sq_num, true)) {
+ mlx5_flow_hw_create_tx_repr_matching_flow(dev, sq_num, true)) {
if (sq_miss_created)
- mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
+ mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num, true);
return -rte_errno;
}
@@ -1339,7 +1339,7 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
mlx5_flow_hw_create_tx_default_mreg_copy_flow(dev, sq_num, true)) {
if (sq_miss_created)
- mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
+ mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num, true);
return -rte_errno;
}
return 0;
@@ -1353,6 +1353,52 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
return -rte_errno;
}
+int
+rte_pmd_mlx5_external_sq_disable(uint16_t port_id, uint32_t sq_num)
+{
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+
+ if (rte_eth_dev_is_valid_port(port_id) < 0) {
+ DRV_LOG(ERR, "There is no Ethernet device for port %u.",
+ port_id);
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ if ((!priv->representor && !priv->master) ||
+ !priv->sh->config.dv_esw_en) {
+ DRV_LOG(ERR, "Port %u must be representor or master port in E-Switch mode.",
+ port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (sq_num == 0) {
+ DRV_LOG(ERR, "Invalid SQ number.");
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+#ifdef HAVE_MLX5_HWS_SUPPORT
+ if (priv->sh->config.dv_flow_en == 2) {
+ if (priv->sh->config.fdb_def_rule &&
+ mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num, true))
+ return -rte_errno;
+ if (priv->sh->config.repr_matching &&
+ mlx5_flow_hw_destroy_tx_repr_matching_flow(dev, sq_num, true))
+ return -rte_errno;
+ if (!priv->sh->config.repr_matching &&
+ priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
+ mlx5_flow_hw_destroy_tx_default_mreg_copy_flow(dev, sq_num, true))
+ return -rte_errno;
+ return 0;
+ }
+#endif
+ /* Not supported for software steering. */
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
/**
* Set the Tx queue dynamic timestamp (mask and offset)
*
diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
index cc9340f71e..ee5c4a08e9 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5.h
+++ b/drivers/net/mlx5/rte_pmd_mlx5.h
@@ -232,6 +232,24 @@ enum rte_pmd_mlx5_flow_engine_mode {
__rte_experimental
int rte_pmd_mlx5_flow_engine_set_mode(enum rte_pmd_mlx5_flow_engine_mode mode, uint32_t flags);
+/**
+ * Disable traffic for external SQ. Should be invoked by application
+ * before destroying the external SQ.
+ *
+ * @param[in] port_id
+ * The port identifier of the Ethernet device.
+ * @param[in] sq_num
+ * SQ HW number.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * Possible values for rte_errno:
+ * - EINVAL - invalid sq_number or port type.
+ * - ENODEV - there is no Ethernet device for this port id.
+ */
+__rte_experimental
+int rte_pmd_mlx5_external_sq_disable(uint16_t port_id, uint32_t sq_num);
+
#ifdef __cplusplus
}
#endif
diff --git a/drivers/net/mlx5/version.map b/drivers/net/mlx5/version.map
index 99f5ab754a..3561a1db2a 100644
--- a/drivers/net/mlx5/version.map
+++ b/drivers/net/mlx5/version.map
@@ -17,4 +17,5 @@ EXPERIMENTAL {
rte_pmd_mlx5_external_sq_enable;
# added in 23.03
rte_pmd_mlx5_flow_engine_set_mode;
+ rte_pmd_mlx5_external_sq_disable;
};
--
2.43.0
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2025-12-25 11:16:38.912054998 +0200
+++ 0052-net-mlx5-fix-control-flow-leakage-for-external-SQ.patch 2025-12-25 11:16:35.824918000 +0200
@@ -1 +1 @@
-From 3bf9f0f9f0beb8dcd4f3b316c3216a87bc9ab49f Mon Sep 17 00:00:00 2001
+From db2a376e8bd7ca39e35b895e4b8427967b1fbf78 Mon Sep 17 00:00:00 2001
@@ -3 +3 @@
-Date: Wed, 29 Oct 2025 17:57:09 +0200
+Date: Tue, 18 Nov 2025 18:51:58 +0200
@@ -5,0 +6,2 @@
+[ upstream commit 3bf9f0f9f0beb8dcd4f3b316c3216a87bc9ab49f ]
+
@@ -37 +39 @@
- drivers/net/mlx5/mlx5_txq.c | 55 +++++++++++++++--
+ drivers/net/mlx5/mlx5_txq.c | 54 ++++++++++++++--
@@ -39 +41,2 @@
- 5 files changed, 181 insertions(+), 12 deletions(-)
+ drivers/net/mlx5/version.map | 1 +
+ 6 files changed, 181 insertions(+), 12 deletions(-)
@@ -42 +45 @@
-index c5905ebfac..6da3c74eb9 100644
+index 219ea462c9..1ebf584078 100644
@@ -45 +48 @@
-@@ -3563,12 +3563,16 @@ int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
+@@ -2917,12 +2917,16 @@ int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
@@ -67 +70 @@
-index d945c88eb0..eb3dcce59d 100644
+index 41910d801b..b66ed53141 100644
@@ -70 +73 @@
-@@ -15897,7 +15897,7 @@ flow_hw_is_matching_sq_miss_flow(struct mlx5_ctrl_flow_entry *cf,
+@@ -12197,7 +12197,7 @@ flow_hw_is_matching_sq_miss_flow(struct mlx5_hw_ctrl_flow *cf,
@@ -79 +82 @@
-@@ -15924,7 +15924,8 @@ mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
+@@ -12224,7 +12224,8 @@ mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
@@ -89 +92 @@
-@@ -16058,8 +16059,58 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev, uint32_t
+@@ -12358,8 +12359,58 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev, uint32_t
@@ -94 +97 @@
-+flow_hw_is_matching_tx_mreg_copy_flow(struct mlx5_ctrl_flow_entry *cf,
++flow_hw_is_matching_tx_mreg_copy_flow(struct mlx5_hw_ctrl_flow *cf,
@@ -100 +103 @@
-+ if (cf->info.type == MLX5_CTRL_FLOW_TYPE_TX_META_COPY && cf->info.tx_repr_sq == sqn)
++ if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY && cf->info.tx_repr_sq == sqn)
@@ -112,2 +115,2 @@
-+ struct mlx5_ctrl_flow_entry *cf;
-+ struct mlx5_ctrl_flow_entry *cf_next;
++ struct mlx5_hw_ctrl_flow *cf;
++ struct mlx5_hw_ctrl_flow *cf_next;
@@ -149 +152 @@
-@@ -16116,6 +16167,55 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool e
+@@ -12416,6 +12467,55 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool e
@@ -154 +157 @@
-+flow_hw_is_tx_matching_repr_matching_flow(struct mlx5_ctrl_flow_entry *cf,
++flow_hw_is_tx_matching_repr_matching_flow(struct mlx5_hw_ctrl_flow *cf,
@@ -160 +163 @@
-+ if (cf->info.type == MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH && cf->info.tx_repr_sq == sqn)
++ if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH && cf->info.tx_repr_sq == sqn)
@@ -172,2 +175,2 @@
-+ struct mlx5_ctrl_flow_entry *cf;
-+ struct mlx5_ctrl_flow_entry *cf_next;
++ struct mlx5_hw_ctrl_flow *cf;
++ struct mlx5_hw_ctrl_flow *cf_next;
@@ -206 +209 @@
-index e6acb56d4d..6acf398ccc 100644
+index 51d848158c..3bda84e963 100644
@@ -209 +212 @@
-@@ -1622,7 +1622,7 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
+@@ -1495,7 +1495,7 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
@@ -219 +222 @@
-index 834ca541d5..1d258f979c 100644
+index 3f8d861180..d6f5790983 100644
@@ -222 +225 @@
-@@ -1433,7 +1433,7 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
+@@ -1308,7 +1308,7 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
@@ -231 +234 @@
-@@ -1454,9 +1454,9 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
+@@ -1329,9 +1329,9 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
@@ -243 +246 @@
-@@ -1464,7 +1464,7 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
+@@ -1339,7 +1339,7 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
@@ -252 +255 @@
-@@ -1478,6 +1478,53 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
+@@ -1353,6 +1353,52 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
@@ -256 +258,0 @@
-+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_pmd_mlx5_external_sq_disable, 25.11)
@@ -307 +309 @@
-index 4d4821afae..31f99e7a78 100644
+index cc9340f71e..ee5c4a08e9 100644
@@ -310,3 +312,3 @@
-@@ -484,6 +484,24 @@ typedef void (*rte_pmd_mlx5_driver_event_callback_t)(uint16_t port_id,
- const void *opaque);
-
+@@ -232,6 +232,24 @@ enum rte_pmd_mlx5_flow_engine_mode {
+ __rte_experimental
+ int rte_pmd_mlx5_flow_engine_set_mode(enum rte_pmd_mlx5_flow_engine_mode mode, uint32_t flags);
@@ -332,3 +334,13 @@
- /**
- * Register mlx5 driver event callback.
- *
+ #ifdef __cplusplus
+ }
+ #endif
+diff --git a/drivers/net/mlx5/version.map b/drivers/net/mlx5/version.map
+index 99f5ab754a..3561a1db2a 100644
+--- a/drivers/net/mlx5/version.map
++++ b/drivers/net/mlx5/version.map
+@@ -17,4 +17,5 @@ EXPERIMENTAL {
+ rte_pmd_mlx5_external_sq_enable;
+ # added in 23.03
+ rte_pmd_mlx5_flow_engine_set_mode;
++ rte_pmd_mlx5_external_sq_disable;
+ };