[dpdk-dev] [PATCH 11/17] net/mlx5: add translation for CT action
Bing Zhao
bingz at nvidia.com
Tue Apr 27 17:38:05 CEST 2021
When creating a flow that uses a conntrack (CT) action context, the
context needs to be translated in two stages.
First, the indirect action handle is retrieved and translated into an
RTE_FLOW conntrack action.
Second, that action is translated into the correct DR action according
to the traffic direction.
Before the DR action can be used in a flow rule, the CT context must
already be synchronized to the hardware (i.e., its ASO WQE completed).
Signed-off-by: Bing Zhao <bingz at nvidia.com>
---
drivers/net/mlx5/mlx5.h | 3 ++-
drivers/net/mlx5/mlx5_flow.c | 9 +++++++++
drivers/net/mlx5/mlx5_flow.h | 1 +
drivers/net/mlx5/mlx5_flow_aso.c | 40 ++++++++++++++++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow_dv.c | 18 ++++++++++++++++++
5 files changed, 70 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index f999828..3b67706 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1716,6 +1716,7 @@ int mlx5_aso_ct_wait_ready(struct mlx5_dev_ctx_shared *sh,
int mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,
struct mlx5_aso_ct_action *ct,
struct rte_flow_action_conntrack *profile);
-
+int mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_aso_ct_action *ct);
#endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 1c28b63..7b9f055 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3552,6 +3552,15 @@ flow_action_handles_translate(struct rte_eth_dev *dev,
break;
}
/* Fall-through */
+ case MLX5_INDIRECT_ACTION_TYPE_CT:
+ if (priv->sh->ct_aso_en) {
+ translated[handle->index].type =
+ RTE_FLOW_ACTION_TYPE_CONNTRACK;
+ translated[handle->index].conf =
+ (void *)(uintptr_t)idx;
+ break;
+ }
+ /* Fall-through */
default:
mlx5_free(translated);
return rte_flow_error_set
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 988b171..ddfc517 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -226,6 +226,7 @@ enum mlx5_feature_name {
#define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
#define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
#define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
+#define MLX5_FLOW_ACTION_CT (1ull << 41)
#define MLX5_FLOW_FATE_ACTIONS \
(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index 12e8dc7..21de855 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -1394,3 +1394,43 @@ mlx5_aso_ct_query_by_wqe(struct mlx5_dev_ctx_shared *sh,
mlx5_aso_ct_obj_analyze(profile, out_data);
return ret;
}
+
+/*
+ * Make sure the conntrack context is synchronized with hardware before
+ * creating a flow rule that uses it.
+ *
+ * @param[in] sh
+ * Pointer to shared device context.
+ * @param[in] ct
+ * Pointer to connection tracking offload object.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_aso_ct_available(struct mlx5_dev_ctx_shared *sh,
+ struct mlx5_aso_ct_action *ct)
+{
+ struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng;
+ uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
+ uint8_t state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+
+ if (state == ASO_CONNTRACK_FREE) {
+ rte_errno = ENXIO;
+ return -rte_errno;
+ } else if (state == ASO_CONNTRACK_READY ||
+ state == ASO_CONNTRACK_QUERY) {
+ return 0;
+ }
+ do {
+ mlx5_aso_ct_completion_handle(mng);
+ state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+ if (state == ASO_CONNTRACK_READY ||
+ state == ASO_CONNTRACK_QUERY)
+ return 0;
+ /* Waiting for CQE ready, consider should block or sleep. */
+ rte_delay_us_sleep(MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
+ } while (--poll_cqe_times);
+ rte_errno = EBUSY;
+ return -rte_errno;
+}
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f4fa3a0..3ebeb58 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -11373,6 +11373,7 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"Failed to update CT");
+ ct->is_original = !!pro->is_original_dir;
return idx;
}
@@ -11529,6 +11530,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
const struct rte_flow_action *found_action = NULL;
uint32_t jump_group = 0;
struct mlx5_flow_counter *cnt;
+ uint32_t ct_idx;
+ struct mlx5_aso_ct_action *ct;
if (!mlx5_flow_os_action_supported(action_type))
return rte_flow_error_set(error, ENOTSUP,
@@ -12002,6 +12005,21 @@ flow_dv_translate(struct rte_eth_dev *dev,
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ ct_idx = (uint32_t)(uintptr_t)action->conf;
+ ct = flow_aso_ct_get_by_idx(dev, ct_idx);
+ if (mlx5_aso_ct_available(priv->sh, ct))
+ return -rte_errno;
+ if (ct->is_original)
+ dev_flow->dv.actions[actions_n] =
+ ct->dr_action_orig;
+ else
+ dev_flow->dv.actions[actions_n] =
+ ct->dr_action_rply;
+ __atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
+ actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_CT;
+ break;
case RTE_FLOW_ACTION_TYPE_END:
actions_end = true;
if (mhdr_res->actions_num) {
--
2.5.5
More information about the dev
mailing list