[PATCH 7/9] net/mlx5: lazily allocate HWS send to kernel action
Dariusz Sosnowski
dsosnowski at nvidia.com
Wed Feb 25 12:59:15 CET 2026
The HWS send_to_kernel action is used to implement
the SEND_TO_KERNEL rte_flow action.
It was allocated either on port start or on rte_flow_configure().
This could cause unnecessary FW resource usage
if the user did not use any SEND_TO_KERNEL action.
This patch extends the global actions internal API,
introduced in previous commits, to allow lazy allocation
of the HWS send_to_kernel action. It will be allocated on first use
and will be allocated per domain to minimize FW resource usage.
Signed-off-by: Dariusz Sosnowski <dsosnowski at nvidia.com>
Acked-by: Ori Kam <orika at nvidia.com>
---
drivers/net/mlx5/mlx5.h | 2 -
drivers/net/mlx5/mlx5_flow_hw.c | 90 ++++------------------
drivers/net/mlx5/mlx5_hws_global_actions.c | 59 +++++++++++---
drivers/net/mlx5/mlx5_hws_global_actions.h | 5 ++
4 files changed, 66 insertions(+), 90 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 94b4cb0d7b..739b414faf 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2115,8 +2115,6 @@ struct mlx5_priv {
struct mlx5_hws_global_actions hw_global_actions;
/* HW steering global default miss action. */
struct mlx5dr_action *hw_def_miss;
- /* HW steering global send to kernel action. */
- struct mlx5dr_action *hw_send_to_kernel[MLX5DR_TABLE_TYPE_MAX];
/* HW steering create ongoing rte flow table list header. */
LIST_HEAD(flow_hw_tbl_ongo, rte_flow_template_table) flow_hw_tbl_ongo;
struct mlx5_indexed_pool *acts_ipool; /* Action data indexed pool. */
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 2ecae1b7e7..7fafe3fe6a 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -2913,14 +2913,24 @@ __flow_hw_translate_actions_template(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
if (is_root) {
- __flow_hw_action_template_destroy(dev, acts);
rte_flow_error_set(&sub_error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"Send to kernel action on root table is not supported in HW steering mode");
goto err;
}
- acts->rule_acts[dr_pos].action = priv->hw_send_to_kernel[type];
+ dr_action = mlx5_hws_global_action_send_to_kernel_get(priv,
+ type,
+ MLX5_HW_LOWEST_PRIO_ROOT);
+ if (dr_action == NULL) {
+ DRV_LOG(ERR, "port %u failed to allocate send to kernel action",
+ priv->dev_data->port_id);
+ rte_flow_error_set(&sub_error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_STATE, NULL,
+ "failed to allocate send to kernel action");
+ goto err;
+ }
+ acts->rule_acts[dr_pos].action = dr_action;
break;
case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
err = flow_hw_modify_field_compile(dev, attr, actions,
@@ -7366,36 +7376,14 @@ mlx5_flow_hw_actions_validate(struct rte_eth_dev *dev,
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
#ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
- case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL: {
- bool res;
-
+ case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
if (priv->shared_host)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
action,
"action not supported in guest port");
- if (attr->ingress) {
- res = priv->hw_send_to_kernel[MLX5DR_TABLE_TYPE_NIC_RX];
- } else if (attr->egress) {
- res = priv->hw_send_to_kernel[MLX5DR_TABLE_TYPE_NIC_TX];
- } else {
- if (!is_unified_fdb(priv))
- res = priv->hw_send_to_kernel[MLX5DR_TABLE_TYPE_FDB];
- else
- res =
- priv->hw_send_to_kernel[MLX5DR_TABLE_TYPE_FDB_RX] &&
- priv->hw_send_to_kernel[MLX5DR_TABLE_TYPE_FDB_TX] &&
- priv->hw_send_to_kernel[MLX5DR_TABLE_TYPE_FDB_UNIFIED];
- }
- if (!res)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- action,
- "action is not available");
-
action_flags |= MLX5_FLOW_ACTION_SEND_TO_KERNEL;
break;
- }
#endif
case RTE_FLOW_ACTION_TYPE_QUEUE:
ret = mlx5_hw_validate_action_queue(dev, action, mask,
@@ -9891,55 +9879,6 @@ flow_hw_free_vport_actions(struct mlx5_priv *priv)
priv->hw_vport = NULL;
}
-#ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
-static __rte_always_inline void
-_create_send_to_kernel_actions(struct mlx5_priv *priv, int type)
-{
- int action_flag;
-
- action_flag = mlx5_hw_act_flag[1][type];
- priv->hw_send_to_kernel[type] =
- mlx5dr_action_create_dest_root(priv->dr_ctx,
- MLX5_HW_LOWEST_PRIO_ROOT,
- action_flag);
- if (!priv->hw_send_to_kernel[type])
- DRV_LOG(WARNING, "Unable to create HWS send to kernel action");
-}
-#endif
-
-static void
-flow_hw_create_send_to_kernel_actions(__rte_unused struct mlx5_priv *priv,
- __rte_unused bool is_proxy)
-{
-#ifdef HAVE_MLX5DV_DR_ACTION_CREATE_DEST_ROOT_TABLE
- int i, from, to;
- bool unified_fdb = is_unified_fdb(priv);
-
- for (i = MLX5DR_TABLE_TYPE_NIC_RX; i <= MLX5DR_TABLE_TYPE_NIC_TX; i++)
- _create_send_to_kernel_actions(priv, i);
-
- if (is_proxy) {
- from = unified_fdb ? MLX5DR_TABLE_TYPE_FDB_RX : MLX5DR_TABLE_TYPE_FDB;
- to = unified_fdb ? MLX5DR_TABLE_TYPE_FDB_UNIFIED : MLX5DR_TABLE_TYPE_FDB;
- for (i = from; i <= to; i++)
- _create_send_to_kernel_actions(priv, i);
- }
-#endif
-}
-
-static void
-flow_hw_destroy_send_to_kernel_action(struct mlx5_priv *priv)
-{
- int i;
-
- for (i = MLX5DR_TABLE_TYPE_NIC_RX; i < MLX5DR_TABLE_TYPE_MAX; i++) {
- if (priv->hw_send_to_kernel[i]) {
- mlx5dr_action_destroy(priv->hw_send_to_kernel[i]);
- priv->hw_send_to_kernel[i] = NULL;
- }
- }
-}
-
static bool
flow_hw_should_create_nat64_actions(struct mlx5_priv *priv)
{
@@ -11941,7 +11880,6 @@ __mlx5_flow_hw_resource_release(struct rte_eth_dev *dev, bool ctx_close)
if (priv->hw_def_miss)
mlx5dr_action_destroy(priv->hw_def_miss);
flow_hw_destroy_nat64_actions(priv);
- flow_hw_destroy_send_to_kernel_action(priv);
flow_hw_free_vport_actions(priv);
if (priv->acts_ipool) {
mlx5_ipool_destroy(priv->acts_ipool);
@@ -12360,8 +12298,6 @@ __flow_hw_configure(struct rte_eth_dev *dev,
goto err;
}
}
- if (!priv->shared_host)
- flow_hw_create_send_to_kernel_actions(priv, is_proxy);
if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
if (mlx5_flow_ct_init(dev, port_attr->nb_conn_tracks, nb_q_updated))
goto err;
diff --git a/drivers/net/mlx5/mlx5_hws_global_actions.c b/drivers/net/mlx5/mlx5_hws_global_actions.c
index 2bbfa5a24c..d8b21a67f1 100644
--- a/drivers/net/mlx5/mlx5_hws_global_actions.c
+++ b/drivers/net/mlx5/mlx5_hws_global_actions.c
@@ -43,48 +43,67 @@ mlx5_hws_global_actions_cleanup(struct mlx5_priv *priv)
global_actions_array_cleanup(priv, &priv->hw_global_actions.tag, "tag");
global_actions_array_cleanup(priv, &priv->hw_global_actions.pop_vlan, "pop_vlan");
global_actions_array_cleanup(priv, &priv->hw_global_actions.push_vlan, "push_vlan");
+ global_actions_array_cleanup(priv,
+ &priv->hw_global_actions.send_to_kernel,
+ "send_to_kernel");
rte_spinlock_unlock(&priv->hw_global_actions.lock);
}
typedef struct mlx5dr_action *(*global_action_create_t)(struct mlx5dr_context *ctx,
- uint32_t action_flags);
+ uint32_t action_flags,
+ void *user_data);
static struct mlx5dr_action *
action_create_drop_cb(struct mlx5dr_context *ctx,
- uint32_t action_flags)
+ uint32_t action_flags,
+ void *user_data __rte_unused)
{
return mlx5dr_action_create_dest_drop(ctx, action_flags);
}
static struct mlx5dr_action *
action_create_tag_cb(struct mlx5dr_context *ctx,
- uint32_t action_flags)
+ uint32_t action_flags,
+ void *user_data __rte_unused)
{
return mlx5dr_action_create_tag(ctx, action_flags);
}
static struct mlx5dr_action *
action_create_pop_vlan_cb(struct mlx5dr_context *ctx,
- uint32_t action_flags)
+ uint32_t action_flags,
+ void *user_data __rte_unused)
{
return mlx5dr_action_create_pop_vlan(ctx, action_flags);
}
static struct mlx5dr_action *
action_create_push_vlan_cb(struct mlx5dr_context *ctx,
- uint32_t action_flags)
+ uint32_t action_flags,
+ void *user_data __rte_unused)
{
return mlx5dr_action_create_push_vlan(ctx, action_flags);
}
+static struct mlx5dr_action *
+action_create_send_to_kernel_cb(struct mlx5dr_context *ctx,
+ uint32_t action_flags,
+ void *user_data)
+{
+ uint16_t priority = (uint16_t)(uintptr_t)user_data;
+
+ return mlx5dr_action_create_dest_root(ctx, priority, action_flags);
+}
+
static struct mlx5dr_action *
global_action_get(struct mlx5_priv *priv,
struct mlx5_hws_global_actions_array *array,
const char *name,
enum mlx5dr_table_type table_type,
bool is_root,
- global_action_create_t create_cb)
+ global_action_create_t create_cb,
+ void *user_data)
{
enum mlx5dr_action_flags action_flags;
struct mlx5dr_action *action = NULL;
@@ -100,7 +119,7 @@ global_action_get(struct mlx5_priv *priv,
if (action != NULL)
goto unlock_ret;
- action = create_cb(priv->dr_ctx, action_flags);
+ action = create_cb(priv->dr_ctx, action_flags, user_data);
if (action == NULL) {
DRV_LOG(ERR, "port %u failed to create %s HWS action",
priv->dev_data->port_id,
@@ -125,7 +144,8 @@ mlx5_hws_global_action_drop_get(struct mlx5_priv *priv,
"drop",
table_type,
is_root,
- action_create_drop_cb);
+ action_create_drop_cb,
+ NULL);
}
struct mlx5dr_action *
@@ -138,7 +158,8 @@ mlx5_hws_global_action_tag_get(struct mlx5_priv *priv,
"tag",
table_type,
is_root,
- action_create_tag_cb);
+ action_create_tag_cb,
+ NULL);
}
struct mlx5dr_action *
@@ -151,7 +172,8 @@ mlx5_hws_global_action_pop_vlan_get(struct mlx5_priv *priv,
"pop_vlan",
table_type,
is_root,
- action_create_pop_vlan_cb);
+ action_create_pop_vlan_cb,
+ NULL);
}
struct mlx5dr_action *
@@ -164,5 +186,20 @@ mlx5_hws_global_action_push_vlan_get(struct mlx5_priv *priv,
"push_vlan",
table_type,
is_root,
- action_create_push_vlan_cb);
+ action_create_push_vlan_cb,
+ NULL);
+}
+
+struct mlx5dr_action *
+mlx5_hws_global_action_send_to_kernel_get(struct mlx5_priv *priv,
+ enum mlx5dr_table_type table_type,
+ uint16_t priority)
+{
+ return global_action_get(priv,
+ &priv->hw_global_actions.send_to_kernel,
+ "send_to_kernel",
+ table_type,
+ false, /* send-to-kernel is non-root only */
+ action_create_send_to_kernel_cb,
+ (void *)(uintptr_t)priority);
}
diff --git a/drivers/net/mlx5/mlx5_hws_global_actions.h b/drivers/net/mlx5/mlx5_hws_global_actions.h
index 4281ba701c..7fbca9fc96 100644
--- a/drivers/net/mlx5/mlx5_hws_global_actions.h
+++ b/drivers/net/mlx5/mlx5_hws_global_actions.h
@@ -28,6 +28,7 @@ struct mlx5_hws_global_actions {
struct mlx5_hws_global_actions_array tag;
struct mlx5_hws_global_actions_array pop_vlan;
struct mlx5_hws_global_actions_array push_vlan;
+ struct mlx5_hws_global_actions_array send_to_kernel;
rte_spinlock_t lock;
};
@@ -51,4 +52,8 @@ struct mlx5dr_action *mlx5_hws_global_action_push_vlan_get(struct mlx5_priv *pri
enum mlx5dr_table_type table_type,
bool is_root);
+struct mlx5dr_action *mlx5_hws_global_action_send_to_kernel_get(struct mlx5_priv *priv,
+ enum mlx5dr_table_type table_type,
+ uint16_t priority);
+
#endif /* !RTE_PMD_MLX5_HWS_GLOBAL_ACTIONS_H_ */
--
2.47.3
More information about the dev
mailing list