[dpdk-dev] [PATCH v2 3/4] net/mlx5: prepare sub-policy for a flow with meter

Li Zhang lizh at nvidia.com
Fri Apr 2 17:56:12 CEST 2021


When a flow has an RSS action, the MLX5 PMD splits the flow
into several sub-flows according to the flow RSS hash fields;
each sub-flow requests a different RSS TIR.

The new meter introduces the policy. For a meter flow with RSS
in the policy action, each RSS TIR in the meter policy maintains
its own sub-policy table resource.

This patch adds a function that finds the correct sub-policy
table resource according to the policy id and the per-color
RSS descriptors.
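
As an illustration, a caller splitting a metered RSS flow could use
the new API roughly as below. This is a minimal sketch: the
surrounding caller and the green/yellow descriptor variables are
hypothetical, and it assumes both colors carry RSS; only the
mlx5_flow_meter_sub_policy_prepare() wrapper declared in this patch
is real:

	/* Per-color RSS descriptors; colors without RSS stay NULL. */
	struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = { NULL };
	struct mlx5_flow_meter_sub_policy *sub_policy;

	rss_desc[RTE_COLOR_GREEN] = &green_rss;   /* hypothetical */
	rss_desc[RTE_COLOR_YELLOW] = &yellow_rss; /* hypothetical */
	sub_policy = mlx5_flow_meter_sub_policy_prepare(dev, policy_id,
							rss_desc);
	if (!sub_policy)
		return -rte_errno;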

Signed-off-by: Li Zhang <lizh at nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.h    |  11 +++
 drivers/net/mlx5/mlx5_flow_dv.c | 136 ++++++++++++++++++++++++++++++++
 2 files changed, 147 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 5672df983e..3024bd9b60 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1075,6 +1075,11 @@ typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,
 typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
 				struct mlx5_flow_meter_info *fm);
 typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
+typedef struct mlx5_flow_meter_sub_policy *
+	(*mlx5_flow_meter_sub_policy_prepare_t)
+		(struct rte_eth_dev *dev,
+		uint32_t policy_id,
+		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
 typedef uint32_t (*mlx5_flow_mtr_alloc_t)
 					    (struct rte_eth_dev *dev);
 typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
@@ -1167,6 +1172,7 @@ struct mlx5_flow_driver_ops {
 	mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
 	mlx5_flow_create_def_policy_t create_def_policy;
 	mlx5_flow_destroy_def_policy_t destroy_def_policy;
+	mlx5_flow_meter_sub_policy_prepare_t meter_sub_policy_prepare;
 	mlx5_flow_counter_alloc_t counter_alloc;
 	mlx5_flow_counter_free_t counter_free;
 	mlx5_flow_counter_query_t counter_query;
@@ -1398,6 +1404,11 @@ int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
 void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
 			       struct mlx5_flow_meter_info *fm);
 void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
+
+struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_prepare
+		(struct rte_eth_dev *dev,
+		uint32_t policy_id,
+		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
 int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
 			  struct rte_mtr_error *error);
 int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index adf7a8f1e7..b646b330ac 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -14713,6 +14713,141 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
 	return -1;
 }
 
+/**
+ * Find or create the sub-policy table for a prefix table with RSS.
+ * The policy spinlock is taken inside, no caller lock is required.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] policy_id
+ *   Policy index.
+ * @param[in] rss_desc
+ *   Per-color array of RSS descriptors; NULL entries are skipped.
+ * @return
+ *   Pointer to the sub-policy table on success, NULL otherwise and
+ *   rte_errno is set.
+ */
+static struct mlx5_flow_meter_sub_policy *
+flow_dv_meter_sub_policy_prepare(struct rte_eth_dev *dev,
+		uint32_t policy_id,
+		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_meter_policy *mtr_policy;
+	struct mlx5_flow_meter_sub_policy *sub_policy;
+	uint32_t sub_policy_idx = 0;
+	uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
+	uint32_t i, j;
+	struct mlx5_hrxq *hrxq;
+	struct mlx5_flow_handle dh;
+	struct mlx5_meter_policy_action_container *act_cnt;
+	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+	uint16_t sub_policy_num;
+
+	mtr_policy = mlx5_flow_meter_policy_find(dev, policy_id, NULL);
+	MLX5_ASSERT(mtr_policy && mtr_policy->is_rss);
+	rte_spinlock_lock(&mtr_policy->sl);
+	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+		if (!rss_desc[i])
+			continue;
+		hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
+		if (!hrxq_idx[i]) {
+			rte_spinlock_unlock(&mtr_policy->sl);
+			return NULL;
+		}
+	}
+	sub_policy_num = (mtr_policy->sub_policy_num >>
+			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+			MLX5_MTR_SUB_POLICY_NUM_MARK;
+	for (i = 0; i < sub_policy_num; i++) {
+		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
+			if (rss_desc[j] && hrxq_idx[j] !=
+			    mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
+				break;
+		}
+		if (j >= MLX5_MTR_RTE_COLORS) {
+			/*
+			 * Found the sub-policy table with
+			 * the same queue per color.
+			 */
+			rte_spinlock_unlock(&mtr_policy->sl);
+			for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
+				mlx5_hrxq_release(dev, hrxq_idx[j]);
+			return mtr_policy->sub_policys[domain][i];
+		}
+	}
+	/* Create sub policy. */
+	if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
+		/* Reuse the first dummy sub_policy. */
+		sub_policy = mtr_policy->sub_policys[domain][0];
+		sub_policy_idx = sub_policy->idx;
+	} else {
+		sub_policy = mlx5_ipool_zmalloc
+				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+				&sub_policy_idx);
+		if (!sub_policy ||
+			sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM)
+			goto rss_sub_policy_error;
+		sub_policy->idx = sub_policy_idx;
+		sub_policy->main_policy = mtr_policy;
+	}
+	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+		if (!rss_desc[i])
+			continue;
+		sub_policy->rix_hrxq[i] = hrxq_idx[i];
+		/*
+		 * Overwrite the last action from
+		 * RSS action to Queue action.
+		 */
+		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+			      hrxq_idx[i]);
+		if (!hrxq) {
+			DRV_LOG(ERR, "Failed to create policy hrxq");
+			goto rss_sub_policy_error;
+		}
+		act_cnt = &mtr_policy->act_cnt[i];
+		if (act_cnt->rix_mark || act_cnt->modify_hdr) {
+			memset(&dh, 0, sizeof(struct mlx5_flow_handle));
+			if (act_cnt->rix_mark)
+				dh.mark = 1;
+			dh.fate_action = MLX5_FLOW_FATE_QUEUE;
+			dh.rix_hrxq = hrxq_idx[i];
+			flow_drv_rxq_flags_set(dev, &dh);
+		}
+	}
+	if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
+		sub_policy, domain)) {
+		DRV_LOG(ERR, "Failed to create policy rules per domain.");
+		goto rss_sub_policy_error;
+	}
+	if (sub_policy != mtr_policy->sub_policys[domain][0]) {
+		i = (mtr_policy->sub_policy_num >>
+			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+			MLX5_MTR_SUB_POLICY_NUM_MARK;
+		mtr_policy->sub_policys[domain][i] = sub_policy;
+		i++;
+		if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
+			goto rss_sub_policy_error;
+		mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MARK <<
+			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+		mtr_policy->sub_policy_num |=
+			(i & MLX5_MTR_SUB_POLICY_NUM_MARK) <<
+			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+	}
+	rte_spinlock_unlock(&mtr_policy->sl);
+	return sub_policy;
+rss_sub_policy_error:
+	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
+		mlx5_hrxq_release(dev, hrxq_idx[i]);
+	if (sub_policy_idx)
+		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+			sub_policy_idx);
+	rte_spinlock_unlock(&mtr_policy->sl);
+	return NULL;
+}
+
 /**
  * Validate the batch counter support in root table.
  *
@@ -15298,6 +15433,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.destroy_policy_rules = flow_dv_destroy_policy_rules,
 	.create_def_policy = flow_dv_create_def_policy,
 	.destroy_def_policy = flow_dv_destroy_def_policy,
+	.meter_sub_policy_prepare = flow_dv_meter_sub_policy_prepare,
 	.counter_alloc = flow_dv_counter_allocate,
 	.counter_free = flow_dv_counter_free,
 	.counter_query = flow_dv_counter_query,
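
For reviewers, a note on the sub_policy_num encoding used above: the
per-domain sub-policy counts share a single 32-bit word, each domain's
count positioned by MLX5_MTR_SUB_POLICY_NUM_SHIFT bits and masked by
MLX5_MTR_SUB_POLICY_NUM_MARK. A standalone sketch of the pack/unpack
logic (helper names are illustrative only and not part of the patch;
the constants come from mlx5.h):

	#include <stdint.h>
	#include "mlx5.h" /* MLX5_MTR_SUB_POLICY_NUM_SHIFT/_MARK */

	/* Illustrative helpers mirroring the shift/mask code above. */
	static inline uint16_t
	mtr_sub_policy_num_get(uint32_t packed, uint32_t domain)
	{
		return (packed >> (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
		       MLX5_MTR_SUB_POLICY_NUM_MARK;
	}

	static inline uint32_t
	mtr_sub_policy_num_set(uint32_t packed, uint32_t domain, uint16_t num)
	{
		packed &= ~(MLX5_MTR_SUB_POLICY_NUM_MARK <<
			    (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
		packed |= ((uint32_t)num & MLX5_MTR_SUB_POLICY_NUM_MARK) <<
			  (MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
		return packed;
	}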
-- 
2.27.0


