[dpdk-dev] [PATCH v1] net/mlx5: support RSS hash configuration in generic flow action

Nelio Laranjeiro nelio.laranjeiro at 6wind.com
Wed Aug 2 16:10:17 CEST 2017


This also brings back RSS support on all flows created by default from
the control plane.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro at 6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 437 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 346 insertions(+), 91 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 8316255..fe21dac 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -87,12 +87,89 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 		       const void *default_mask,
 		       void *data);
 
+/* Hash RX queue types. */
+enum hash_rxq_type {
+	HASH_RXQ_TCPV4,
+	HASH_RXQ_UDPV4,
+	HASH_RXQ_IPV4,
+	HASH_RXQ_TCPV6,
+	HASH_RXQ_UDPV6,
+	HASH_RXQ_IPV6,
+	HASH_RXQ_ETH,
+};
+
+/* Initialization data for hash RX queue. */
+struct hash_rxq_init {
+	uint64_t hash_fields; /* Fields that participate in the hash. */
+	uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
+	unsigned int flow_priority; /* Flow priority to use. */
+};
+
+/* Initialization data for hash RX queues. */
+const struct hash_rxq_init hash_rxq_init[] = {
+	[HASH_RXQ_TCPV4] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+				IBV_EXP_RX_HASH_DST_IPV4 |
+				IBV_EXP_RX_HASH_SRC_PORT_TCP |
+				IBV_EXP_RX_HASH_DST_PORT_TCP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
+		.flow_priority = 4,
+	},
+	[HASH_RXQ_UDPV4] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+				IBV_EXP_RX_HASH_DST_IPV4 |
+				IBV_EXP_RX_HASH_SRC_PORT_UDP |
+				IBV_EXP_RX_HASH_DST_PORT_UDP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
+		.flow_priority = 4,
+	},
+	[HASH_RXQ_IPV4] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+				IBV_EXP_RX_HASH_DST_IPV4),
+		.dpdk_rss_hf = (ETH_RSS_IPV4 |
+				ETH_RSS_FRAG_IPV4),
+		.flow_priority = 5,
+	},
+	[HASH_RXQ_TCPV6] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+				IBV_EXP_RX_HASH_DST_IPV6 |
+				IBV_EXP_RX_HASH_SRC_PORT_TCP |
+				IBV_EXP_RX_HASH_DST_PORT_TCP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
+		.flow_priority = 4,
+	},
+	[HASH_RXQ_UDPV6] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+				IBV_EXP_RX_HASH_DST_IPV6 |
+				IBV_EXP_RX_HASH_SRC_PORT_UDP |
+				IBV_EXP_RX_HASH_DST_PORT_UDP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
+		.flow_priority = 4,
+	},
+	[HASH_RXQ_IPV6] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+				IBV_EXP_RX_HASH_DST_IPV6),
+		.dpdk_rss_hf = (ETH_RSS_IPV6 |
+				ETH_RSS_FRAG_IPV6),
+		.flow_priority = 5,
+	},
+	[HASH_RXQ_ETH] = {
+		.hash_fields = 0,
+		.dpdk_rss_hf = 0,
+		.flow_priority = 6,
+	},
+};
+
+/* Number of entries in hash_rxq_init[]. */
+const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
+
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
-	struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
-	struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
-	struct mlx5_hrxq *hrxq; /**< Hash Rx queue. */
 	uint32_t mark:1; /**< Set if the flow is marked. */
+	struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+	struct mlx5_hrxq *hrxqs[RTE_DIM(hash_rxq_init)]; /**< Hash Rx queues. */
+	struct ibv_exp_flow *ibv_flows[RTE_DIM(hash_rxq_init)];
+		/**< Verbs flows. */
 };
 
 /** Static initializer for items. */
@@ -271,6 +348,7 @@ struct mlx5_flow_action {
 	uint32_t mark_id; /**< Mark identifier. */
 	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
 	uint16_t queues_n; /**< Number of entries in queue[]. */
+	const struct rte_eth_rss_conf *rss_conf; /**< User RSS configuration. */
 };
 
 /** Structure to pass to the conversion function. */
@@ -278,7 +356,6 @@ struct mlx5_flow_parse {
 	struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
 	unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
 	uint32_t inner; /**< Set once VXLAN is encountered. */
-	uint64_t hash_fields; /**< Fields that participate in the hash. */
 	struct mlx5_flow_action actions; /**< Parsed action result. */
 };
 
@@ -526,6 +603,7 @@ priv_flow_validate(struct priv *priv,
 				(const struct rte_flow_action_rss *)
 				actions->conf;
 			uint16_t n;
+			int rxq_n;
 
 			if (!rss || !rss->num) {
 				rte_flow_error_set(error, EINVAL,
@@ -534,6 +612,9 @@ priv_flow_validate(struct priv *priv,
 						   "no valid queues");
 				return -rte_errno;
 			}
+			rxq_n = rss->num;
+			if (rss->rss_conf && !rss->rss_conf->rss_hf)
+				rxq_n = 1;
 			if (flow->actions.queues_n == 1) {
 				uint16_t found = 0;
 
@@ -554,7 +635,7 @@ priv_flow_validate(struct priv *priv,
 					return -rte_errno;
 				}
 			}
-			for (n = 0; n < rss->num; ++n) {
+			for (n = 0; n < rxq_n; ++n) {
 				if (rss->queue[n] >= priv->rxqs_n) {
 					rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ACTION,
@@ -565,9 +646,10 @@ priv_flow_validate(struct priv *priv,
 				}
 			}
 			flow->actions.queue = 1;
-			for (n = 0; n < rss->num; ++n)
+			for (n = 0; n < rxq_n; ++n)
 				flow->actions.queues[n] = rss->queue[n];
-			flow->actions.queues_n = rss->num;
+			flow->actions.queues_n = rxq_n;
+			/* NULL rss_conf means the default configuration. */
+			flow->actions.rss_conf = rss->rss_conf ?
+				rss->rss_conf : &priv->rss_conf;
 		} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
 			const struct rte_flow_action_mark *mark =
 				(const struct rte_flow_action_mark *)
@@ -666,7 +748,6 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 2;
-	flow->hash_fields = 0;
 	eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*eth = (struct ibv_exp_flow_spec_eth) {
 		.type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
@@ -746,8 +827,6 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 1;
-	flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
-			     IBV_EXP_RX_HASH_DST_IPV4);
 	ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {
 		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
@@ -801,8 +880,6 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 1;
-	flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
-			     IBV_EXP_RX_HASH_DST_IPV6);
 	ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*ipv6 = (struct ibv_exp_flow_spec_ipv6_ext) {
 		.type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6_EXT,
@@ -857,8 +934,6 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 0;
-	flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_UDP |
-			      IBV_EXP_RX_HASH_DST_PORT_UDP);
 	udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*udp = (struct ibv_exp_flow_spec_tcp_udp) {
 		.type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
@@ -901,8 +976,6 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
 
 	++flow->ibv_attr->num_of_specs;
 	flow->ibv_attr->priority = 0;
-	flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_TCP |
-			      IBV_EXP_RX_HASH_DST_PORT_TCP);
 	tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
 	*tcp = (struct ibv_exp_flow_spec_tcp_udp) {
 		.type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
@@ -994,6 +1067,118 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *flow, uint32_t mark_id)
 }
 
 /**
+ * Create hash Rx queues when RSS is disabled.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param flow
+ *   MLX5 flow attributes (filled by mlx5_flow_validate()).
+ * @param rte_flow
+ *   Pointer to rte flow structure.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, an errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_create_action_queue_no_rss(struct priv *priv,
+				     struct mlx5_flow_parse *flow,
+				     struct rte_flow *rte_flow,
+				     struct rte_flow_error *error)
+{
+	rte_flow->hrxqs[HASH_RXQ_ETH] =
+		mlx5_priv_hrxq_get(priv, rss_hash_default_key,
+				   rss_hash_default_key_len,
+				   0,
+				   flow->actions.queues,
+				   flow->actions.queues_n);
+	if (rte_flow->hrxqs[HASH_RXQ_ETH])
+		return 0;
+	rte_flow->hrxqs[HASH_RXQ_ETH] =
+		mlx5_priv_hrxq_new(priv, rss_hash_default_key,
+				   rss_hash_default_key_len,
+				   0,
+				   flow->actions.queues,
+				   flow->actions.queues_n);
+	if (!rte_flow->hrxqs[HASH_RXQ_ETH]) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "cannot create hash rxq");
+		return ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * Create hash Rx queues when RSS is enabled.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param flow
+ *   MLX5 flow attributes (filled by mlx5_flow_validate()).
+ * @param rte_flow
+ *   Pointer to rte flow structure.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, an errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_create_action_queue_rss(struct priv *priv,
+				  struct mlx5_flow_parse *flow,
+				  struct rte_flow *rte_flow,
+				  struct rte_flow_error *error)
+{
+	unsigned int i;
+
+	/*
+	 * 6 types of hash Rx queues can be created to support RSS on
+	 * the different kinds of packets (HASH_RXQ_ETH is excluded):
+	 *  - IPv4 UDP
+	 *  - IPv4 TCP
+	 *  - IPv6 UDP
+	 *  - IPv6 TCP
+	 *  - IPv4
+	 *  - IPv6
+	 * All of them are created when the DPDK rss_conf.rss_hf covers
+	 * the corresponding hash fields.
+	 */
+	for (i = 0; i != (hash_rxq_init_n - 1); ++i) {
+		uint64_t hash_fields;
+
+		if ((flow->actions.rss_conf->rss_hf &
+		     hash_rxq_init[i].dpdk_rss_hf) !=
+		    hash_rxq_init[i].dpdk_rss_hf)
+			continue;
+		hash_fields = hash_rxq_init[i].hash_fields;
+		rte_flow->hrxqs[i] =
+			mlx5_priv_hrxq_get(priv,
+					   flow->actions.rss_conf->rss_key,
+					   flow->actions.rss_conf->rss_key_len,
+					   hash_fields,
+					   flow->actions.queues,
+					   flow->actions.queues_n);
+		if (rte_flow->hrxqs[i])
+			continue;
+		rte_flow->hrxqs[i] =
+			mlx5_priv_hrxq_new(priv,
+					   flow->actions.rss_conf->rss_key,
+					   flow->actions.rss_conf->rss_key_len,
+					   hash_fields,
+					   flow->actions.queues,
+					   flow->actions.queues_n);
+		if (!rte_flow->hrxqs[i]) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL, "cannot create hash rxq");
+			return ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/**
  * Complete flow rule creation.
  *
  * @param priv
@@ -1024,23 +1209,20 @@ priv_flow_create_action_queue(struct priv *priv,
 	}
 	rte_flow->mark = flow->actions.mark;
 	rte_flow->ibv_attr = flow->ibv_attr;
-	rte_flow->hrxq = mlx5_priv_hrxq_get(priv, rss_hash_default_key,
-					    rss_hash_default_key_len,
-					    flow->hash_fields,
-					    flow->actions.queues,
-					    flow->actions.queues_n);
-	if (!rte_flow->hrxq) {
-		rte_flow->hrxq = mlx5_priv_hrxq_new(priv, rss_hash_default_key,
-						    rss_hash_default_key_len,
-						    flow->hash_fields,
-						    flow->actions.queues,
-						    flow->actions.queues_n);
-		if (!rte_flow->hrxq) {
-			rte_flow_error_set(error, ENOMEM,
-					   RTE_FLOW_ERROR_TYPE_HANDLE,
-					   NULL, "cannot create hash rxq");
+	if (flow->actions.queues_n == 1) {
+		int ret;
+
+		ret = priv_flow_create_action_queue_no_rss(priv, flow, rte_flow,
+							   error);
+		if (ret)
+			goto error;
+	} else {
+		int ret;
+
+		ret = priv_flow_create_action_queue_rss(priv, flow, rte_flow,
+							error);
+		if (ret)
+			goto error;
-		}
 	}
 	for (i = 0; i != flow->actions.queues_n; ++i) {
 		struct mlx5_rxq_data *q = (*priv->rxqs)[flow->actions.queues[i]];
@@ -1049,18 +1231,31 @@ priv_flow_create_action_queue(struct priv *priv,
 	}
 	if (!priv->dev->data->dev_started)
 		return rte_flow;
-	rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->hrxq->qp,
-						 rte_flow->ibv_attr);
-	if (!rte_flow->ibv_flow) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "flow rule creation failure");
-		goto error;
+	for (i = 0; i != hash_rxq_init_n; ++i) {
+		if (!rte_flow->hrxqs[i])
+			continue;
+		rte_flow->ibv_flows[i] =
+			ibv_exp_create_flow(rte_flow->hrxqs[i]->qp,
+					    rte_flow->ibv_attr);
+		if (!rte_flow->ibv_flows[i]) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL, "flow rule creation failure");
+			goto error;
+		}
+		DEBUG("%p type %u QP %p ibv_flow %p",
+		      (void *)rte_flow, i, (void *)rte_flow->hrxqs[i],
+		      (void *)rte_flow->ibv_flows[i]);
 	}
 	return rte_flow;
 error:
 	assert(rte_flow);
-	if (rte_flow->hrxq)
-		mlx5_priv_hrxq_release(priv, rte_flow->hrxq);
+	for (i = 0; i != hash_rxq_init_n; ++i) {
+		if (rte_flow->ibv_flows[i])
+			claim_zero(ibv_exp_destroy_flow(rte_flow->ibv_flows[i]));
+		if (rte_flow->hrxqs[i])
+			mlx5_priv_hrxq_release(priv, rte_flow->hrxqs[i]);
+	}
 	rte_free(rte_flow);
 	return NULL;
 }
@@ -1120,7 +1315,6 @@ priv_flow_create(struct priv *priv,
 		.reserved = 0,
 	};
 	flow.inner = 0;
-	flow.hash_fields = 0;
 	claim_zero(priv_flow_validate(priv, attr, items, actions,
 				      error, &flow));
 	if (flow.actions.mark) {
@@ -1178,41 +1372,53 @@ priv_flow_destroy(struct priv *priv,
 		  struct rte_flow *flow)
 {
 	unsigned int i;
+	unsigned int j;
 	uint16_t *queues;
 	uint16_t queues_n;
 
-	queues = flow->hrxq->ind_table->queues;
-	queues_n = flow->hrxq->ind_table->queues_n;
-	if (!flow->mark)
-		goto out;
-	for (i = 0; i != queues_n; ++i) {
-		struct rte_flow *tmp;
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
-		int mark = 0;
-
-		/*
-		 * To remove the mark from the queue, the queue must not be
-		 * present in any other marked flow (RSS or not).
-		 */
-		TAILQ_FOREACH(tmp, list, next) {
-			unsigned int j;
-
-			if (!tmp->mark)
-				continue;
-			for (j = 0;
-			     (j != tmp->hrxq->ind_table->queues_n) && !mark;
-			     j++)
-				if (tmp->hrxq->ind_table->queues[j] ==
-				    queues[i])
-					mark = 1;
+	for (i = 0; i != hash_rxq_init_n; ++i) {
+		if (!flow->hrxqs[i])
+			continue;
+		queues = flow->hrxqs[i]->ind_table->queues;
+		queues_n = flow->hrxqs[i]->ind_table->queues_n;
+		if (!flow->mark)
+			goto out;
+		for (j = 0; j != queues_n; ++j) {
+			struct rte_flow *tmp;
+			struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[j]];
+			int mark = 0;
+
+			/*
+			 * To remove the mark from the queue, the queue must not be
+			 * present in any other marked flow (RSS or not).
+			 */
+			TAILQ_FOREACH(tmp, list, next) {
+				unsigned int k;
+				unsigned int l;
+
+				if (!tmp->mark)
+					continue;
+				/*
+				 * Iterate the other flow's hash Rx queue
+				 * slots; indexing them with the queue index
+				 * would be wrong and may dereference NULL.
+				 */
+				for (l = 0; (l != hash_rxq_init_n) && !mark;
+				     ++l) {
+					uint16_t *tqueues;
+					uint16_t tqueues_n;
+
+					if (!tmp->hrxqs[l])
+						continue;
+					tqueues =
+					    tmp->hrxqs[l]->ind_table->queues;
+					tqueues_n =
+					    tmp->hrxqs[l]->ind_table->queues_n;
+					for (k = 0;
+					     (k != tqueues_n) && !mark; k++)
+						if (tqueues[k] == queues[j])
+							mark = 1;
+				}
+			}
+			rxq->mark = mark;
 		}
-		rxq->mark = mark;
 	}
 out:
 	TAILQ_REMOVE(list, flow, next);
-	if (flow->ibv_flow)
-		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
-	mlx5_priv_hrxq_release(priv, flow->hrxq);
+	for (i = 0; i != hash_rxq_init_n; ++i) {
+		if (flow->ibv_flows[i]) {
+			DEBUG("%p type %u QP %p ibv_flow %p",
+			      (void *)flow, i, (void *)flow->hrxqs[i],
+			      (void *)flow->ibv_flows[i]);
+			claim_zero(ibv_exp_destroy_flow(flow->ibv_flows[i]));
+			mlx5_priv_hrxq_release(priv, flow->hrxqs[i]);
+		}
+	}
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
@@ -1294,8 +1500,12 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
 	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
 		unsigned int i;
 
-		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
-		flow->ibv_flow = NULL;
+		for (i = 0; i != hash_rxq_init_n; ++i) {
+			if (!flow->ibv_flows[i])
+				continue;
+			claim_zero(ibv_exp_destroy_flow(flow->ibv_flows[i]));
+			flow->ibv_flows[i] = NULL;
+		}
 		/* Disable mark on all queues. */
 		for (i = 0; i != priv->rxqs_n; ++i)
 			(*priv->rxqs)[i]->mark = 0;
@@ -1320,20 +1530,41 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list)
 	struct rte_flow *flow;
 
 	TAILQ_FOREACH(flow, list, next) {
-		flow->ibv_flow = ibv_exp_create_flow(flow->hrxq->qp,
-						     flow->ibv_attr);
-		if (!flow->ibv_flow) {
-			DEBUG("Flow %p cannot be applied", (void *)flow);
-			rte_errno = EINVAL;
-			return rte_errno;
+		unsigned int i;
+
+		for (i = 0; i != hash_rxq_init_n; ++i) {
+			if (!flow->hrxqs[i])
+				continue;
+			flow->ibv_flows[i] =
+				ibv_exp_create_flow(flow->hrxqs[i]->qp,
+						    flow->ibv_attr);
+			if (!flow->ibv_flows[i]) {
+				DEBUG("Flow %p cannot be applied",
+				      (void *)flow);
+				rte_errno = EINVAL;
+				return rte_errno;
+			}
 		}
 		DEBUG("Flow %p applied", (void *)flow);
 		if (flow->mark) {
 			unsigned int n;
 
-			for (n = 0; n < flow->hrxq->ind_table->queues_n; ++n) {
-				uint16_t idx = flow->hrxq->ind_table->queues[n];
-				(*priv->rxqs)[idx]->mark = 1;
+			/*
+			 * Inside the same flow, queue list will remain the
+			 * same.
+			 */
+			for (i = 0; i != hash_rxq_init_n; ++i) {
+				uint16_t *queues;
+				uint16_t queues_n;
+
+				if (!flow->hrxqs[i])
+					continue;
+				queues_n = flow->hrxqs[i]->ind_table->queues_n;
+				queues = flow->hrxqs[i]->ind_table->queues;
+				for (n = 0; n < queues_n; ++n) {
+					(*priv->rxqs)[queues[n]]->mark = 1;
+				}
+				break;
 			}
 		}
 	}
@@ -1431,18 +1662,35 @@ mlx5_flow_ctrl(struct rte_eth_dev *dev,
 	};
 	struct rte_flow_action actions[] = {
 		{
-			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
-			.conf = &(struct rte_flow_action_queue){
-				.index = 0,
-			},
+			.type = RTE_FLOW_ACTION_TYPE_RSS,
 		},
 		{
 			.type = RTE_FLOW_ACTION_TYPE_END,
 		},
 	};
+	struct rte_flow_action_rss *conf;
 	struct rte_flow *flow;
 	struct rte_flow_error error;
+	unsigned int i;
+	unsigned int j;
 
+	/* Zeroed allocation: conf->num is incremented below. */
+	conf = rte_zmalloc(__func__, sizeof(*conf) +
+			   priv->rxqs_n * sizeof(uint16_t), 0);
+	if (!conf)
+		goto out;
+	for (i = 0, j = 0; i != priv->rxqs_n; ++i) {
+		if ((*priv->rxqs)[i]) {
+			conf->queue[j] = i;
+			++j;
+			++conf->num;
+		}
+	}
+	if (!conf->num) {
+		rte_free(conf);
+		goto out;
+	}
+	conf->rss_conf = &priv->rss_conf;
+	actions[0].conf = conf;
 	if (enable) {
 		flow = priv_flow_create(priv, &attr, items, actions, &error);
 		if (!flow) {
@@ -1461,6 +1709,7 @@ mlx5_flow_ctrl(struct rte_eth_dev *dev,
 		};
 		struct ibv_exp_flow_spec_eth *eth;
 		const unsigned int attr_size = sizeof(struct ibv_exp_flow_attr);
+		unsigned int i;
 
 		claim_zero(mlx5_flow_create_eth(&items[0], NULL, &parser));
 		TAILQ_FOREACH(flow, &priv->ctrl_flows, next) {
@@ -1469,14 +1718,20 @@ mlx5_flow_ctrl(struct rte_eth_dev *dev,
 			if (!memcmp(eth, &spec.eth, sizeof(*eth)))
 				break;
 		}
-		if (flow) {
-			claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
-			mlx5_priv_hrxq_release(priv, flow->hrxq);
-			rte_free(flow->ibv_attr);
-			DEBUG("Control flow destroyed %p", (void *)flow);
-			TAILQ_REMOVE(&priv->ctrl_flows, flow, next);
-			rte_free(flow);
+		if (!flow) {
+			rte_free(conf);
+			goto out;
+		}
+		for (i = 0; i != hash_rxq_init_n; ++i) {
+			if (!flow->ibv_flows[i])
+				continue;
+			claim_zero(ibv_exp_destroy_flow(flow->ibv_flows[i]));
+			mlx5_priv_hrxq_release(priv, flow->hrxqs[i]);
 		}
+		rte_free(flow->ibv_attr);
+		DEBUG("Control flow destroyed %p", (void *)flow);
+		TAILQ_REMOVE(&priv->ctrl_flows, flow, next);
+		rte_free(flow);
 	}
+	rte_free(conf);
+out:
 	return 0;
 }
-- 
2.1.4



More information about the dev mailing list