[dpdk-dev] [PATCH v3 4/7] net/mlx5: split policies handling of colors

Bing Zhao bingz at nvidia.com
Wed Jul 21 10:54:18 CEST 2021


If the fate action of a meter policy is either RSS or Queue, the
action will only be created in the flow splitting stage. With Queue
as the fate action, only one sub-policy is needed, while RSS will
have more than one sub-policy if there is an expansion.

Since the RSS parameters are the same for the green and yellow
colors except for the queues, the expansion result will be unique.
Even if only one color has the RSS action, the check and possible
expansion are done at that point. For each sub-policy, the action
rules need to be created separately in its own policy table.
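
For reference, the case this split covers can be seen from the
application side. The snippet below is only a sketch, not part of the
patch, using the public rte_mtr / rte_flow API of this release; the
function name, port / policy IDs, queue indexes and RSS types are
arbitrary placeholders.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_mtr.h>

/*
 * Hypothetical helper: a meter policy with RSS as the green fate
 * action and a single Queue as the yellow fate action, i.e. the
 * combination handled by the splitting logic in this patch.
 */
static int
create_split_policy(uint16_t port_id, uint32_t policy_id)
{
	static uint16_t green_queues[] = {0, 1};
	static const struct rte_flow_action_rss green_rss = {
		.types = ETH_RSS_IP,
		.queue_num = RTE_DIM(green_queues),
		.queue = green_queues,
	};
	static const struct rte_flow_action_queue yellow_queue = {
		.index = 2,
	};
	static const struct rte_flow_action green_acts[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &green_rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	static const struct rte_flow_action yellow_acts[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &yellow_queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_mtr_meter_policy_params policy = {
		.actions = {
			[RTE_COLOR_GREEN] = green_acts,
			[RTE_COLOR_YELLOW] = yellow_acts,
			[RTE_COLOR_RED] = NULL,
		},
	};
	struct rte_mtr_error mtr_err;

	/* Error handling and meter creation / attachment are omitted. */
	return rte_mtr_meter_policy_add(port_id, policy_id, &policy,
					&mtr_err);
}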

Signed-off-by: Bing Zhao <bingz at nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c    | 40 ++++++++++----------
 drivers/net/mlx5/mlx5_flow_dv.c | 67 +++++++++++++++++----------------
 2 files changed, 55 insertions(+), 52 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 347e8c1a09..d90c8cd314 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4687,7 +4687,7 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS] = {0};
 		uint32_t i;
 
-		/**
+		/*
 		 * This is a tmp dev_flow,
 		 * no need to register any matcher for it in translate.
 		 */
@@ -4695,18 +4695,19 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
 		for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
 			struct mlx5_flow dev_flow = {0};
 			struct mlx5_flow_handle dev_handle = { {0} };
+			uint8_t fate = final_policy->act_cnt[i].fate_action;
 
-			if (final_policy->is_rss) {
+			if (fate == MLX5_FLOW_FATE_SHARED_RSS) {
 				const void *rss_act =
 					final_policy->act_cnt[i].rss->conf;
 				struct rte_flow_action rss_actions[2] = {
 					[0] = {
 					.type = RTE_FLOW_ACTION_TYPE_RSS,
-					.conf = rss_act
+					.conf = rss_act,
 					},
 					[1] = {
 					.type = RTE_FLOW_ACTION_TYPE_END,
-					.conf = NULL
+					.conf = NULL,
 					}
 				};
 
@@ -4731,9 +4732,10 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
 						rss_desc_v[i].hash_fields ?
 						rss_desc_v[i].queue_num : 1;
 				rss_desc_v[i].tunnel =
-					!!(dev_flow.handle->layers &
-					MLX5_FLOW_LAYER_TUNNEL);
-			} else {
+						!!(dev_flow.handle->layers &
+						   MLX5_FLOW_LAYER_TUNNEL);
+				rss_desc[i] = &rss_desc_v[i];
+			} else if (fate == MLX5_FLOW_FATE_QUEUE) {
 				/* This is queue action. */
 				rss_desc_v[i] = wks->rss_desc;
 				rss_desc_v[i].key_len = 0;
@@ -4741,24 +4743,24 @@ get_meter_sub_policy(struct rte_eth_dev *dev,
 				rss_desc_v[i].queue =
 					&final_policy->act_cnt[i].queue;
 				rss_desc_v[i].queue_num = 1;
+				rss_desc[i] = &rss_desc_v[i];
+			} else {
+				rss_desc[i] = NULL;
 			}
-			rss_desc[i] = &rss_desc_v[i];
 		}
 		sub_policy = flow_drv_meter_sub_policy_rss_prepare(dev,
 						flow, policy, rss_desc);
 	} else {
 		enum mlx5_meter_domain mtr_domain =
 			attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
-				attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
-					MLX5_MTR_DOMAIN_INGRESS;
+				(attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+						MLX5_MTR_DOMAIN_INGRESS);
 		sub_policy = policy->sub_policys[mtr_domain][0];
 	}
-	if (!sub_policy) {
+	if (!sub_policy)
 		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-			"Failed to get meter sub-policy.");
-		goto exit;
-	}
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "Failed to get meter sub-policy.");
 exit:
 	return sub_policy;
 }
@@ -4956,8 +4958,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 		} else {
 			enum mlx5_meter_domain mtr_domain =
 			attr->transfer ? MLX5_MTR_DOMAIN_TRANSFER :
-				attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
-					MLX5_MTR_DOMAIN_INGRESS;
+				(attr->egress ? MLX5_MTR_DOMAIN_EGRESS :
+						MLX5_MTR_DOMAIN_INGRESS);
 
 			sub_policy =
 			&priv->sh->mtrmng->def_policy[mtr_domain]->sub_policy;
@@ -4973,8 +4975,8 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
 	actions_pre++;
 	if (!tag_action)
 		return rte_flow_error_set(error, ENOMEM,
-					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-					"No tag action space.");
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL, "No tag action space.");
 	if (!mtr_flow_id) {
 		tag_action->type = RTE_FLOW_ACTION_TYPE_VOID;
 		goto exit;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2400565232..ee593a7001 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -15070,11 +15070,11 @@ __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
 				   next_port, tmp) {
 			claim_zero(mlx5_flow_os_destroy_flow(color_rule->rule));
 			tbl = container_of(color_rule->matcher->tbl,
-					typeof(*tbl), tbl);
+					   typeof(*tbl), tbl);
 			mlx5_list_unregister(tbl->matchers,
-						&color_rule->matcher->entry);
+					     &color_rule->matcher->entry);
 			TAILQ_REMOVE(&sub_policy->color_rules[i],
-					color_rule, next_port);
+				     color_rule, next_port);
 			mlx5_free(color_rule);
 			if (next_fm)
 				mlx5_flow_meter_detach(priv, next_fm);
@@ -15088,13 +15088,13 @@ __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
 		}
 		if (sub_policy->jump_tbl[i]) {
 			flow_dv_tbl_resource_release(MLX5_SH(dev),
-			sub_policy->jump_tbl[i]);
+						     sub_policy->jump_tbl[i]);
 			sub_policy->jump_tbl[i] = NULL;
 		}
 	}
 	if (sub_policy->tbl_rsc) {
 		flow_dv_tbl_resource_release(MLX5_SH(dev),
-			sub_policy->tbl_rsc);
+					     sub_policy->tbl_rsc);
 		sub_policy->tbl_rsc = NULL;
 	}
 }
@@ -15111,7 +15111,7 @@ __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
  */
 static void
 flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
-		      struct mlx5_flow_meter_policy *mtr_policy)
+			     struct mlx5_flow_meter_policy *mtr_policy)
 {
 	uint32_t i, j;
 	struct mlx5_flow_meter_sub_policy *sub_policy;
@@ -15124,8 +15124,8 @@ flow_dv_destroy_policy_rules(struct rte_eth_dev *dev,
 		for (j = 0; j < sub_policy_num; j++) {
 			sub_policy = mtr_policy->sub_policys[i][j];
 			if (sub_policy)
-				__flow_dv_destroy_sub_policy_rules
-						(dev, sub_policy);
+				__flow_dv_destroy_sub_policy_rules(dev,
+								   sub_policy);
 		}
 	}
 }
@@ -16158,6 +16158,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
 	bool match_src_port = false;
 	int i;
 
+	/* If RSS or Queue, no previous actions / rules are created. */
 	for (i = 0; i < RTE_COLORS; i++) {
 		acts[i].actions_n = 0;
 		if (i == RTE_COLOR_RED) {
@@ -16657,37 +16658,36 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
 	sub_policy_num = (mtr_policy->sub_policy_num >>
 			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 			MLX5_MTR_SUB_POLICY_NUM_MASK;
-	for (i = 0; i < sub_policy_num;
-		i++) {
-		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
-			if (rss_desc[j] &&
-				hrxq_idx[j] !=
-			mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
+	for (j = 0; j < sub_policy_num; j++) {
+		for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+			if (rss_desc[i] &&
+			    hrxq_idx[i] !=
+			    mtr_policy->sub_policys[domain][j]->rix_hrxq[i])
 				break;
 		}
-		if (j >= MLX5_MTR_RTE_COLORS) {
+		if (i >= MLX5_MTR_RTE_COLORS) {
 			/*
 			 * Found the sub policy table with
-			 * the same queue per color
+			 * the same queue per color.
 			 */
 			rte_spinlock_unlock(&mtr_policy->sl);
-			for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
-				mlx5_hrxq_release(dev, hrxq_idx[j]);
+			for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
+				mlx5_hrxq_release(dev, hrxq_idx[i]);
 			*is_reuse = true;
-			return mtr_policy->sub_policys[domain][i];
+			return mtr_policy->sub_policys[domain][j];
 		}
 	}
 	/* Create sub policy. */
 	if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
-		/* Reuse the first dummy sub_policy*/
+		/* Reuse the first pre-allocated sub_policy. */
 		sub_policy = mtr_policy->sub_policys[domain][0];
 		sub_policy_idx = sub_policy->idx;
 	} else {
 		sub_policy = mlx5_ipool_zmalloc
 				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
-				&sub_policy_idx);
+				 &sub_policy_idx);
 		if (!sub_policy ||
-			sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
+		    sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM) {
 			for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
 				mlx5_hrxq_release(dev, hrxq_idx[i]);
 			goto rss_sub_policy_error;
@@ -16709,9 +16709,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
 			 * RSS action to Queue action.
 			 */
 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-				hrxq_idx[i]);
+					      hrxq_idx[i]);
 			if (!hrxq) {
-				DRV_LOG(ERR, "Failed to create policy hrxq");
+				DRV_LOG(ERR, "Failed to get policy hrxq");
 				goto rss_sub_policy_error;
 			}
 			act_cnt = &mtr_policy->act_cnt[i];
@@ -16726,19 +16726,21 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
 		}
 	}
 	if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
-		sub_policy, domain)) {
+					       sub_policy, domain)) {
 		DRV_LOG(ERR, "Failed to create policy "
-			"rules per domain.");
+			"rules for ingress domain.");
 		goto rss_sub_policy_error;
 	}
 	if (sub_policy != mtr_policy->sub_policys[domain][0]) {
 		i = (mtr_policy->sub_policy_num >>
 			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 			MLX5_MTR_SUB_POLICY_NUM_MASK;
+		if (i >= MLX5_MTR_RSS_MAX_SUB_POLICY) {
+			DRV_LOG(ERR, "No free sub-policy slot.");
+			goto rss_sub_policy_error;
+		}
 		mtr_policy->sub_policys[domain][i] = sub_policy;
 		i++;
-		if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
-			goto rss_sub_policy_error;
 		mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
 			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
 		mtr_policy->sub_policy_num |=
@@ -16756,8 +16758,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
 			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
 			MLX5_MTR_SUB_POLICY_NUM_MASK;
 			mtr_policy->sub_policys[domain][i] = NULL;
-			mlx5_ipool_free
-			(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+			mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
 					sub_policy->idx);
 		}
 	}
@@ -16818,7 +16819,7 @@ flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
 	while (i) {
 		/**
 		 * From last policy to the first one in hierarchy,
-		 * create/get the sub policy for each of them.
+		 * create / get the sub policy for each of them.
 		 */
 		sub_policy = __flow_dv_meter_get_rss_sub_policy(dev,
 							policies[--i],
@@ -17022,7 +17023,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
  */
 static void
 flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
-		struct mlx5_flow_meter_policy *mtr_policy)
+				    struct mlx5_flow_meter_policy *mtr_policy)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
@@ -17068,7 +17069,7 @@ flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
 		case MLX5_FLOW_FATE_QUEUE:
 			sub_policy = mtr_policy->sub_policys[domain][0];
 			__flow_dv_destroy_sub_policy_rules(dev,
-						sub_policy);
+							   sub_policy);
 			break;
 		default:
 			/*Other actions without queue and do nothing*/
-- 
2.27.0


