[PATCH 1/3] net/mlx5: fix multi process Tx default rules
Gregory Etelson
getelson at nvidia.com
Wed Oct 29 16:57:08 CET 2025
From: Michael Baum <michaelba at nvidia.com>
When representor matching is disabled, an egress default rule is
inserted which matches all packets, copies REG_A to REG_C_1 (when
dv_xmeta_en == 4) and jumps to group 1. All user rules start from group 1.
When 2 processes are working together, the first one creates this flow
rule and the second one fails with errno EEXIST. This renders all
user egress rules in the 2nd process invalid.
This patch changes this default rule to match on SQs.
Fixes: 483181f7b6dd ("net/mlx5: support device control of representor matching")
Cc: dsosnowski at nvidia.com
Signed-off-by: Michael Baum <michaelba at nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski at nvidia.com>
# Conflicts:
# drivers/net/mlx5/mlx5_flow_hw.c
---
drivers/net/mlx5/mlx5_flow.h | 4 +++-
drivers/net/mlx5/mlx5_flow_hw.c | 24 +++++++++++-------------
drivers/net/mlx5/mlx5_trigger.c | 25 +++++++++++++------------
drivers/net/mlx5/mlx5_txq.c | 8 ++++++++
4 files changed, 35 insertions(+), 26 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index ff61706054..07d2f4185c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -3582,7 +3582,9 @@ int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev,
uint32_t sqn);
int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
-int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
+int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev,
+ uint32_t sqn,
+ bool external);
int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external);
int mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev);
int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 491a78a0de..d945c88eb0 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -10643,7 +10643,7 @@ flow_hw_create_tx_default_mreg_copy_table(struct rte_eth_dev *dev,
.priority = MLX5_HW_LOWEST_PRIO_ROOT,
.egress = 1,
},
- .nb_flows = 1, /* One default flow rule for all. */
+ .nb_flows = MLX5_HW_CTRL_FLOW_NB_RULES,
};
struct mlx5_flow_template_table_cfg tx_tbl_cfg = {
.attr = tx_tbl_attr,
@@ -16004,21 +16004,18 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
}
int
-mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
+mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow_item_eth promisc = {
- .hdr.dst_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .hdr.src_addr.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .hdr.ether_type = 0,
+ struct mlx5_rte_flow_item_sq sq_spec = {
+ .queue = sqn,
};
- struct rte_flow_item eth_all[] = {
- [0] = {
- .type = RTE_FLOW_ITEM_TYPE_ETH,
- .spec = &promisc,
- .mask = &promisc,
+ struct rte_flow_item items[] = {
+ {
+ .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ,
+ .spec = &sq_spec,
},
- [1] = {
+ {
.type = RTE_FLOW_ITEM_TYPE_END,
},
};
@@ -16048,6 +16045,7 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
};
struct mlx5_ctrl_flow_info flow_info = {
.type = MLX5_CTRL_FLOW_TYPE_TX_META_COPY,
+ .tx_repr_sq = sqn,
};
MLX5_ASSERT(priv->master);
@@ -16057,7 +16055,7 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
return 0;
return flow_hw_create_ctrl_flow(dev, dev,
priv->hw_ctrl_fdb->hw_tx_meta_cpy_tbl,
- eth_all, 0, copy_reg_action, 0, &flow_info, false);
+ items, 0, copy_reg_action, 0, &flow_info, external);
}
int
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 916ac03c16..e6acb56d4d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1606,18 +1606,6 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
unsigned int i;
int ret;
- /*
- * With extended metadata enabled, the Tx metadata copy is handled by default
- * Tx tagging flow rules, so default Tx flow rule is not needed. It is only
- * required when representor matching is disabled.
- */
- if (config->dv_esw_en &&
- !config->repr_matching &&
- config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
- priv->master) {
- if (mlx5_flow_hw_create_tx_default_mreg_copy_flow(dev))
- goto error;
- }
for (i = 0; i < priv->txqs_n; ++i) {
struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);
uint32_t queue;
@@ -1639,6 +1627,19 @@ mlx5_traffic_enable_hws(struct rte_eth_dev *dev)
goto error;
}
}
+ /*
+ * With extended metadata enabled, the Tx metadata copy is handled by default
+ * Tx tagging flow rules, so default Tx flow rule is not needed. It is only
+ * required when representor matching is disabled.
+ */
+ if (config->dv_esw_en && !config->repr_matching &&
+ config->dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
+ (priv->master || priv->representor)) {
+ if (mlx5_flow_hw_create_tx_default_mreg_copy_flow(dev, queue, false)) {
+ mlx5_txq_release(dev, i);
+ goto error;
+ }
+ }
mlx5_txq_release(dev, i);
}
if (config->fdb_def_rule) {
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index b090d8274d..834ca541d5 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1459,6 +1459,14 @@ rte_pmd_mlx5_external_sq_enable(uint16_t port_id, uint32_t sq_num)
mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
return -rte_errno;
}
+
+ if (!priv->sh->config.repr_matching &&
+ priv->sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS &&
+ mlx5_flow_hw_create_tx_default_mreg_copy_flow(dev, sq_num, true)) {
+ if (sq_miss_created)
+ mlx5_flow_hw_esw_destroy_sq_miss_flow(dev, sq_num);
+ return -rte_errno;
+ }
return 0;
}
#endif
--
2.51.0
More information about the dev
mailing list