[PATCH 4/9] net/mlx5: lazily allocate tag HWS action

Dariusz Sosnowski dsosnowski at nvidia.com
Wed Feb 25 12:59:12 CET 2026


The HWS tag action is used to implement the FLAG and MARK rte_flow actions.
It was allocated either on port start or on rte_flow_configure().
This could cause unnecessary FW resource usage
if the user did not use any FLAG/MARK actions.

This patch extends the global actions internal API,
introduced in the previous commit, to allow lazy allocation
of the HWS tag action. It will be allocated on the first use of
a FLAG/MARK action, and allocated per domain to minimize
FW resource usage.

Signed-off-by: Dariusz Sosnowski <dsosnowski at nvidia.com>
Acked-by: Ori Kam <orika at nvidia.com>
---
 drivers/net/mlx5/mlx5.h                    |  2 -
 drivers/net/mlx5/mlx5_flow_hw.c            | 47 +++++------
 drivers/net/mlx5/mlx5_hws_global_actions.c | 92 ++++++++++++++++++----
 drivers/net/mlx5/mlx5_hws_global_actions.h |  5 ++
 4 files changed, 100 insertions(+), 46 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 54683cce7a..43553b1f35 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2115,8 +2115,6 @@ struct mlx5_priv {
 	struct mlx5dr_action **hw_vport;
 	/* HWS global actions. */
 	struct mlx5_hws_global_actions hw_global_actions;
-	/* HW steering global tag action. */
-	struct mlx5dr_action *hw_tag[2];
 	/* HW steering global default miss action. */
 	struct mlx5dr_action *hw_def_miss;
 	/* HW steering global send to kernel action. */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 80e156f26a..54c30264b2 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -2692,16 +2692,33 @@ __flow_hw_translate_actions_template(struct rte_eth_dev *dev,
 			acts->rule_acts[dr_pos].action = priv->hw_def_miss;
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
+			dr_action = mlx5_hws_global_action_tag_get(priv, type, is_root);
+			if (dr_action == NULL) {
+				DRV_LOG(ERR, "port %u failed to allocate flag action",
+					priv->dev_data->port_id);
+				rte_flow_error_set(&sub_error, ENOMEM,
+						   RTE_FLOW_ERROR_TYPE_STATE, NULL,
+						   "failed to allocate flag action");
+				goto err;
+			}
 			acts->mark = true;
 			acts->rule_acts[dr_pos].tag.value =
 				mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
-			acts->rule_acts[dr_pos].action =
-				priv->hw_tag[!!attr->group];
+			acts->rule_acts[dr_pos].action = dr_action;
 			rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
 					rte_memory_order_relaxed);
 			mlx5_flow_hw_rxq_flag_set(dev, true);
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
+			dr_action = mlx5_hws_global_action_tag_get(priv, type, is_root);
+			if (dr_action == NULL) {
+				DRV_LOG(ERR, "port %u failed to allocate mark action",
+					priv->dev_data->port_id);
+				rte_flow_error_set(&sub_error, ENOMEM,
+						   RTE_FLOW_ERROR_TYPE_STATE, NULL,
+						   "failed to allocate mark action");
+				goto err;
+			}
 			acts->mark = true;
 			if (masks->conf &&
 			    ((const struct rte_flow_action_mark *)
@@ -2714,8 +2731,7 @@ __flow_hw_translate_actions_template(struct rte_eth_dev *dev,
 								   actions->type,
 								   src_pos, dr_pos))
 				goto err;
-			acts->rule_acts[dr_pos].action =
-				priv->hw_tag[!!attr->group];
+			acts->rule_acts[dr_pos].action = dr_action;
 			rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
 					rte_memory_order_relaxed);
 			mlx5_flow_hw_rxq_flag_set(dev, true);
@@ -11973,10 +11989,6 @@ __mlx5_flow_hw_resource_release(struct rte_eth_dev *dev, bool ctx_close)
 		claim_zero(flow_hw_actions_template_destroy(dev, at, NULL));
 		at = temp_at;
 	}
-	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
-		if (priv->hw_tag[i])
-			mlx5dr_action_destroy(priv->hw_tag[i]);
-	}
 	if (priv->hw_def_miss)
 		mlx5dr_action_destroy(priv->hw_def_miss);
 	flow_hw_destroy_nat64_actions(priv);
@@ -12351,25 +12363,6 @@ __flow_hw_configure(struct rte_eth_dev *dev,
 	if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
 		if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 0, 0, nb_q_updated))
 			goto err;
-	/* Add global actions. */
-	for (i = 0; i < MLX5_HW_ACTION_FLAG_MAX; i++) {
-		uint32_t tag_flags = mlx5_hw_act_flag[i][0];
-		bool tag_fdb_rx = !!priv->sh->cdev->config.hca_attr.fdb_rx_set_flow_tag_stc;
-
-		if (is_proxy) {
-			if (unified_fdb) {
-				if (i == MLX5_HW_ACTION_FLAG_NONE_ROOT && tag_fdb_rx)
-					tag_flags |= mlx5_hw_act_flag[i][MLX5DR_TABLE_TYPE_FDB_RX];
-			} else {
-				if (i == MLX5_HW_ACTION_FLAG_NONE_ROOT && tag_fdb_rx)
-					tag_flags |= mlx5_hw_act_flag[i][MLX5DR_TABLE_TYPE_FDB];
-			}
-		}
-		priv->hw_tag[i] = mlx5dr_action_create_tag
-			(priv->dr_ctx, tag_flags);
-		if (!priv->hw_tag[i])
-			goto err;
-	}
 	if (priv->sh->config.dv_esw_en) {
 		ret = flow_hw_setup_tx_repr_tagging(dev, error);
 		if (ret)
diff --git a/drivers/net/mlx5/mlx5_hws_global_actions.c b/drivers/net/mlx5/mlx5_hws_global_actions.c
index 6af5497123..1ca444ce98 100644
--- a/drivers/net/mlx5/mlx5_hws_global_actions.c
+++ b/drivers/net/mlx5/mlx5_hws_global_actions.c
@@ -12,33 +12,63 @@ mlx5_hws_global_actions_init(struct mlx5_priv *priv)
 	rte_spinlock_init(&priv->hw_global_actions.lock);
 }
 
-void
-mlx5_hws_global_actions_cleanup(struct mlx5_priv *priv)
+static void
+global_actions_array_cleanup(struct mlx5_priv *priv,
+			     struct mlx5_hws_global_actions_array *array,
+			     const char *name)
 {
-	rte_spinlock_lock(&priv->hw_global_actions.lock);
-
 	for (int i = 0; i < MLX5_HWS_GLOBAL_ACTION_MAX; ++i) {
 		for (int j = 0; j < MLX5DR_TABLE_TYPE_MAX; ++j) {
 			int ret;
 
-			if (priv->hw_global_actions.drop.arr[i][j] == NULL)
+			if (array->arr[i][j] == NULL)
 				continue;
 
-			ret = mlx5dr_action_destroy(priv->hw_global_actions.drop.arr[i][j]);
+			ret = mlx5dr_action_destroy(array->arr[i][j]);
 			if (ret != 0)
-				DRV_LOG(ERR, "port %u failed to free HWS action",
-					priv->dev_data->port_id);
-			priv->hw_global_actions.drop.arr[i][j] = NULL;
+				DRV_LOG(ERR, "port %u failed to free %s HWS action",
+					priv->dev_data->port_id,
+					name);
+			array->arr[i][j] = NULL;
 		}
 	}
+}
+
+void
+mlx5_hws_global_actions_cleanup(struct mlx5_priv *priv)
+{
+	rte_spinlock_lock(&priv->hw_global_actions.lock);
+
+	global_actions_array_cleanup(priv, &priv->hw_global_actions.drop, "drop");
+	global_actions_array_cleanup(priv, &priv->hw_global_actions.tag, "tag");
 
 	rte_spinlock_unlock(&priv->hw_global_actions.lock);
 }
 
-struct mlx5dr_action *
-mlx5_hws_global_action_drop_get(struct mlx5_priv *priv,
-				enum mlx5dr_table_type table_type,
-				bool is_root)
+typedef struct mlx5dr_action *(*global_action_create_t)(struct mlx5dr_context *ctx,
+							uint32_t action_flags);
+
+static struct mlx5dr_action *
+action_create_drop_cb(struct mlx5dr_context *ctx,
+		      uint32_t action_flags)
+{
+	return mlx5dr_action_create_dest_drop(ctx, action_flags);
+}
+
+static struct mlx5dr_action *
+action_create_tag_cb(struct mlx5dr_context *ctx,
+		     uint32_t action_flags)
+{
+	return mlx5dr_action_create_tag(ctx, action_flags);
+}
+
+static struct mlx5dr_action *
+global_action_get(struct mlx5_priv *priv,
+		  struct mlx5_hws_global_actions_array *array,
+		  const char *name,
+		  enum mlx5dr_table_type table_type,
+		  bool is_root,
+		  global_action_create_t create_cb)
 {
 	enum mlx5dr_action_flags action_flags;
 	struct mlx5dr_action *action = NULL;
@@ -50,19 +80,47 @@ mlx5_hws_global_action_drop_get(struct mlx5_priv *priv,
 
 	rte_spinlock_lock(&priv->hw_global_actions.lock);
 
-	action = priv->hw_global_actions.drop.arr[!is_root][table_type];
+	action = array->arr[!is_root][table_type];
 	if (action != NULL)
 		goto unlock_ret;
 
-	action = mlx5dr_action_create_dest_drop(priv->dr_ctx, action_flags);
+	action = create_cb(priv->dr_ctx, action_flags);
 	if (action == NULL) {
-		DRV_LOG(ERR, "port %u failed to create drop HWS action", priv->dev_data->port_id);
+		DRV_LOG(ERR, "port %u failed to create %s HWS action",
+			priv->dev_data->port_id,
+			name);
 		goto unlock_ret;
 	}
 
-	priv->hw_global_actions.drop.arr[!is_root][table_type] = action;
+	array->arr[!is_root][table_type] = action;
 
 unlock_ret:
 	rte_spinlock_unlock(&priv->hw_global_actions.lock);
 	return action;
 }
+
+struct mlx5dr_action *
+mlx5_hws_global_action_drop_get(struct mlx5_priv *priv,
+				enum mlx5dr_table_type table_type,
+				bool is_root)
+{
+	return global_action_get(priv,
+				 &priv->hw_global_actions.drop,
+				 "drop",
+				 table_type,
+				 is_root,
+				 action_create_drop_cb);
+}
+
+struct mlx5dr_action *
+mlx5_hws_global_action_tag_get(struct mlx5_priv *priv,
+			       enum mlx5dr_table_type table_type,
+			       bool is_root)
+{
+	return global_action_get(priv,
+				 &priv->hw_global_actions.tag,
+				 "tag",
+				 table_type,
+				 is_root,
+				 action_create_tag_cb);
+}
diff --git a/drivers/net/mlx5/mlx5_hws_global_actions.h b/drivers/net/mlx5/mlx5_hws_global_actions.h
index 3921004102..bec9f3e0e8 100644
--- a/drivers/net/mlx5/mlx5_hws_global_actions.h
+++ b/drivers/net/mlx5/mlx5_hws_global_actions.h
@@ -25,6 +25,7 @@ struct mlx5_hws_global_actions_array {
 
 struct mlx5_hws_global_actions {
 	struct mlx5_hws_global_actions_array drop;
+	struct mlx5_hws_global_actions_array tag;
 	rte_spinlock_t lock;
 };
 
@@ -36,4 +37,8 @@ struct mlx5dr_action *mlx5_hws_global_action_drop_get(struct mlx5_priv *priv,
 						      enum mlx5dr_table_type table_type,
 						      bool is_root);
 
+struct mlx5dr_action *mlx5_hws_global_action_tag_get(struct mlx5_priv *priv,
+						     enum mlx5dr_table_type table_type,
+						     bool is_root);
+
 #endif /* !RTE_PMD_MLX5_HWS_GLOBAL_ACTIONS_H_ */
-- 
2.47.3



More information about the dev mailing list