[dpdk-dev] [PATCH v2 08/10] net/mlx5: optimize mlx5 flow RSS struct

Suanming Mou suanmingm at mellanox.com
Thu Apr 16 10:34:29 CEST 2020


When destroying a flow with RSS, the queue information can be
retrieved from the hrxq indirection table object, since both the
queue number and the queue list are saved there. There is no need
to keep a duplicated copy in rte_flow.

Saving the RSS description information to the intermediate private
data while creating a flow with the RSS action saves memory in
rte_flow.

Signed-off-by: Suanming Mou <suanmingm at mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo at mellanox.com>
---
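Illustration only, not part of the patch: two minimal sketches, assuming the
structures touched below. rxq_flags_update_sketch() and mark_rxq() are
hypothetical names standing in for flow_drv_rxq_flags_set()/_trim() and their
rxq_ctrl bookkeeping.

/* 1. Destroy/flag-update path: the queue list now comes from the hrxq
 *    indirection table instead of a copy stored in rte_flow. */
static void
rxq_flags_update_sketch(struct rte_eth_dev *dev,
			struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq;
	unsigned int i;

	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
		return;
	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
	if (!hrxq)
		return;
	for (i = 0; i != hrxq->ind_table->queues_n; ++i)
		mark_rxq(priv, hrxq->ind_table->queues[i]); /* hypothetical */
}

/* 2. Create path: a single per-port scratch descriptor (priv->rss_desc),
 *    placed right after the MLX5_NUM_MAX_DEV_FLOWS intermediate device
 *    flows, is reused for every flow creation, so rte_flow itself no
 *    longer embeds struct mlx5_flow_rss. */
priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows)
		 [MLX5_NUM_MAX_DEV_FLOWS];
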
 drivers/net/mlx5/mlx5.h            |  1 +
 drivers/net/mlx5/mlx5_flow.c       | 72 ++++++++++++++++++++++----------------
 drivers/net/mlx5/mlx5_flow.h       | 11 +++---
 drivers/net/mlx5/mlx5_flow_dv.c    | 59 ++++++++++++++++---------------
 drivers/net/mlx5/mlx5_flow_verbs.c | 59 +++++++++++++++----------------
 5 files changed, 108 insertions(+), 94 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index bfb6c26..164ca3a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -515,6 +515,7 @@ struct mlx5_priv {
 	struct mlx5_flows flows; /* RTE Flow rules. */
 	struct mlx5_flows ctrl_flows; /* Control flow rules. */
 	void *inter_flows; /* Intermediate resources for flow creation. */
+	void *rss_desc; /* Intermediate rss description resources. */
 	int flow_idx; /* Intermediate device flow index. */
 	int flow_nested_idx; /* Intermediate device flow index, nested. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 387a06d..4f74e6b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -658,13 +658,12 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *   The hash fields that should be used.
  */
 uint64_t
-mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
+mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
 			    int tunnel __rte_unused, uint64_t layer_types,
 			    uint64_t hash_fields)
 {
-	struct rte_flow *flow = dev_flow->flow;
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	int rss_request_inner = flow->rss.level >= 2;
+	int rss_request_inner = rss_desc->level >= 2;
 
 	/* Check RSS hash level for tunnel. */
 	if (tunnel && rss_request_inner)
@@ -673,7 +672,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		return 0;
 #endif
 	/* Check if requested layer matches RSS hash fields. */
-	if (!(flow->rss.types & layer_types))
+	if (!(rss_desc->types & layer_types))
 		return 0;
 	return hash_fields;
 }
@@ -712,22 +711,27 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] flow
- *   Pointer to flow structure.
  * @param[in] dev_handle
  *   Pointer to device flow handle structure.
  */
 static void
-flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow,
+flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
 		       struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const int mark = dev_handle->mark;
 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+	struct mlx5_hrxq *hrxq;
 	unsigned int i;
 
-	for (i = 0; i != flow->rss.queue_num; ++i) {
-		int idx = (*flow->rss.queue)[i];
+	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+		return;
+	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+			      dev_handle->rix_hrxq);
+	if (!hrxq)
+		return;
+	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+		int idx = hrxq->ind_table->queues[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
@@ -780,7 +784,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
 		       handle_idx, dev_handle, next)
-		flow_drv_rxq_flags_set(dev, flow, dev_handle);
+		flow_drv_rxq_flags_set(dev, dev_handle);
 }
 
 /**
@@ -789,23 +793,28 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param[in] flow
- *   Pointer to flow structure.
  * @param[in] dev_handle
  *   Pointer to the device flow handle structure.
  */
 static void
-flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow,
+flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
 			struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const int mark = dev_handle->mark;
 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
+	struct mlx5_hrxq *hrxq;
 	unsigned int i;
 
+	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
+		return;
+	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+			      dev_handle->rix_hrxq);
+	if (!hrxq)
+		return;
 	MLX5_ASSERT(dev->data->dev_started);
-	for (i = 0; i != flow->rss.queue_num; ++i) {
-		int idx = (*flow->rss.queue)[i];
+	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
+		int idx = hrxq->ind_table->queues[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
 			container_of((*priv->rxqs)[idx],
 				     struct mlx5_rxq_ctrl, rxq);
@@ -854,7 +863,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
 		       handle_idx, dev_handle, next)
-		flow_drv_rxq_flags_trim(dev, flow, dev_handle);
+		flow_drv_rxq_flags_trim(dev, dev_handle);
 }
 
 /**
@@ -4239,9 +4248,9 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		uint8_t buffer[2048];
 	} items_tx;
 	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+	struct mlx5_flow_rss_desc *rss_desc = priv->rss_desc;
 	const struct rte_flow_action *p_actions_rx = actions;
 	uint32_t i;
-	uint32_t flow_size;
 	int hairpin_flow = 0;
 	uint32_t hairpin_id = 0;
 	struct rte_flow_attr attr_tx = { .priority = 0 };
@@ -4261,14 +4270,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 				   &hairpin_id);
 		p_actions_rx = actions_rx.actions;
 	}
-	flow_size = sizeof(struct rte_flow);
-	rss = flow_get_rss_action(p_actions_rx);
-	if (rss)
-		flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
-					    sizeof(void *));
-	else
-		flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
-	flow = rte_calloc(__func__, 1, flow_size, 0);
+	flow = rte_calloc(__func__, 1, sizeof(struct rte_flow), 0);
 	if (!flow) {
 		rte_errno = ENOMEM;
 		goto error_before_flow;
@@ -4278,15 +4280,16 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		flow->hairpin_flow_id = hairpin_id;
 	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
 		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
-	flow->rss.queue = (void *)(flow + 1);
+	memset(rss_desc, 0, sizeof(*rss_desc));
+	rss = flow_get_rss_action(p_actions_rx);
 	if (rss) {
 		/*
 		 * The following information is required by
 		 * mlx5_flow_hashfields_adjust() in advance.
 		 */
-		flow->rss.level = rss->level;
+		rss_desc->level = rss->level;
 		/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
-		flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
+		rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
 	}
 	flow->dev_handles = 0;
 	if (rss && rss->types) {
@@ -4650,9 +4653,18 @@ struct rte_flow *
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (!priv->inter_flows)
+	if (!priv->inter_flows) {
 		priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS,
-					       sizeof(struct mlx5_flow), 0);
+				    sizeof(struct mlx5_flow) +
+				    sizeof(struct mlx5_flow_rss_desc) +
+				    sizeof(uint16_t) * UINT16_MAX, 0);
+		if (!priv->inter_flows) {
+			DRV_LOG(ERR, "can't allocate intermediate memory.");
+			return;
+		}
+	}
+	priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows)
+			 [MLX5_NUM_MAX_DEV_FLOWS];
 	/* Reset the index. */
 	priv->flow_idx = 0;
 	priv->flow_nested_idx = 0;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8a4f7cb..759b0ce 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -483,14 +483,16 @@ struct ibv_spec_header {
 	uint16_t size;
 };
 
-struct mlx5_flow_rss {
+/* RSS description. */
+struct mlx5_flow_rss_desc {
 	uint32_t level;
 	uint32_t queue_num; /**< Number of entries in @p queue. */
 	uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
-	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+	uint16_t queue[]; /**< Destination queues to redirect traffic to. */
 };
 
+
 /** Device flow handle structure for DV mode only. */
 struct mlx5_flow_handle_dv {
 	/* Flow DV api: */
@@ -762,7 +764,6 @@ struct mlx5_fdir_flow {
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
 	enum mlx5_flow_drv_type drv_type; /**< Driver type. */
-	struct mlx5_flow_rss rss; /**< RSS context. */
 	uint32_t counter; /**< Holds flow counter. */
 	uint32_t rix_mreg_copy;
 	/**< Index to metadata register copy table resource. */
@@ -855,8 +856,8 @@ uint32_t mlx5_flow_id_release(struct mlx5_flow_id_pool *pool,
 int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes,
 			     bool external, uint32_t group, bool fdb_def_rule,
 			     uint32_t *table, struct rte_flow_error *error);
-uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel,
-				     uint64_t layer_types,
+uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
+				     int tunnel, uint64_t layer_types,
 				     uint64_t hash_fields);
 uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 				   uint32_t subpriority);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 23b69ba..cfc911c 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7279,18 +7279,20 @@ struct field_modify_info modify_tcp[] = {
  *
  * @param[in] dev_flow
  *   Pointer to the mlx5_flow.
+ * @param[in] rss_desc
+ *   Pointer to the mlx5_flow_rss_desc.
  */
 static void
-flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
+flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
+		       struct mlx5_flow_rss_desc *rss_desc)
 {
-	struct rte_flow *flow = dev_flow->flow;
 	uint64_t items = dev_flow->handle->layers;
 	int rss_inner = 0;
-	uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
+	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
 
 	dev_flow->hash_fields = 0;
 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-	if (flow->rss.level >= 2) {
+	if (rss_desc->level >= 2) {
 		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
 		rss_inner = 1;
 	}
@@ -7375,6 +7377,8 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_dev_config *dev_conf = &priv->config;
 	struct rte_flow *flow = dev_flow->flow;
 	struct mlx5_flow_handle *handle = dev_flow->handle;
+	struct mlx5_flow_rss_desc *rss_desc = (struct mlx5_flow_rss_desc *)
+					      priv->rss_desc;
 	uint64_t item_flags = 0;
 	uint64_t last_item = 0;
 	uint64_t action_flags = 0;
@@ -7529,23 +7533,20 @@ struct field_modify_info modify_tcp[] = {
 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
-			MLX5_ASSERT(flow->rss.queue);
 			queue = actions->conf;
-			flow->rss.queue_num = 1;
-			(*flow->rss.queue)[0] = queue->index;
+			rss_desc->queue_num = 1;
+			rss_desc->queue[0] = queue->index;
 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RSS:
-			MLX5_ASSERT(flow->rss.queue);
 			rss = actions->conf;
-			if (flow->rss.queue)
-				memcpy((*flow->rss.queue), rss->queue,
-				       rss->queue_num * sizeof(uint16_t));
-			flow->rss.queue_num = rss->queue_num;
+			memcpy(rss_desc->queue, rss->queue,
+			       rss->queue_num * sizeof(uint16_t));
+			rss_desc->queue_num = rss->queue_num;
 			/* NULL RSS key indicates default RSS key. */
 			rss_key = !rss->key ? rss_hash_default_key : rss->key;
-			memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+			memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
 			/*
 			 * rss->level and rss.types should be set in advance
 			 * when expanding items for RSS.
@@ -7955,7 +7956,7 @@ struct field_modify_info modify_tcp[] = {
 		case RTE_FLOW_ITEM_TYPE_GRE:
 			flow_dv_translate_item_gre(match_mask, match_value,
 						   items, tunnel);
-			matcher.priority = flow->rss.level >= 2 ?
+			matcher.priority = rss_desc->level >= 2 ?
 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
@@ -7967,14 +7968,14 @@ struct field_modify_info modify_tcp[] = {
 		case RTE_FLOW_ITEM_TYPE_NVGRE:
 			flow_dv_translate_item_nvgre(match_mask, match_value,
 						     items, tunnel);
-			matcher.priority = flow->rss.level >= 2 ?
+			matcher.priority = rss_desc->level >= 2 ?
 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GRE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
 			flow_dv_translate_item_vxlan(match_mask, match_value,
 						     items, tunnel);
-			matcher.priority = flow->rss.level >= 2 ?
+			matcher.priority = rss_desc->level >= 2 ?
 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_VXLAN;
 			break;
@@ -7982,21 +7983,21 @@ struct field_modify_info modify_tcp[] = {
 			flow_dv_translate_item_vxlan_gpe(match_mask,
 							 match_value, items,
 							 tunnel);
-			matcher.priority = flow->rss.level >= 2 ?
+			matcher.priority = rss_desc->level >= 2 ?
 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_GENEVE:
 			flow_dv_translate_item_geneve(match_mask, match_value,
 						      items, tunnel);
-			matcher.priority = flow->rss.level >= 2 ?
+			matcher.priority = rss_desc->level >= 2 ?
 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GENEVE;
 			break;
 		case RTE_FLOW_ITEM_TYPE_MPLS:
 			flow_dv_translate_item_mpls(match_mask, match_value,
 						    items, last_item, tunnel);
-			matcher.priority = flow->rss.level >= 2 ?
+			matcher.priority = rss_desc->level >= 2 ?
 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_MPLS;
 			break;
@@ -8039,7 +8040,7 @@ struct field_modify_info modify_tcp[] = {
 		case RTE_FLOW_ITEM_TYPE_GTP:
 			flow_dv_translate_item_gtp(match_mask, match_value,
 						   items, tunnel);
-			matcher.priority = flow->rss.level >= 2 ?
+			matcher.priority = rss_desc->level >= 2 ?
 				    MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
 			last_item = MLX5_FLOW_LAYER_GTP;
 			break;
@@ -8071,7 +8072,7 @@ struct field_modify_info modify_tcp[] = {
 	 */
 	handle->layers |= item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
-		flow_dv_hashfields_set(dev_flow);
+		flow_dv_hashfields_set(dev_flow, rss_desc);
 	/* Register matcher. */
 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
 				    matcher.mask.size);
@@ -8146,20 +8147,22 @@ struct field_modify_info modify_tcp[] = {
 		} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
 			struct mlx5_hrxq *hrxq;
 			uint32_t hrxq_idx;
+			struct mlx5_flow_rss_desc *rss_desc =
+				(struct mlx5_flow_rss_desc *)priv->rss_desc;
 
-			MLX5_ASSERT(flow->rss.queue);
-			hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
+			MLX5_ASSERT(rss_desc->queue_num);
+			hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
 						 MLX5_RSS_HASH_KEY_LEN,
 						 dev_flow->hash_fields,
-						 (*flow->rss.queue),
-						 flow->rss.queue_num);
+						 rss_desc->queue,
+						 rss_desc->queue_num);
 			if (!hrxq_idx) {
 				hrxq_idx = mlx5_hrxq_new
-						(dev, flow->rss.key,
+						(dev, rss_desc->key,
 						MLX5_RSS_HASH_KEY_LEN,
 						dev_flow->hash_fields,
-						(*flow->rss.queue),
-						flow->rss.queue_num,
+						rss_desc->queue,
+						rss_desc->queue_num,
 						!!(dh->layers &
 						MLX5_FLOW_LAYER_TUNNEL));
 			}
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index f70b879..a246917 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -941,21 +941,19 @@
  * the input is valid and that there is space to insert the requested action
  * into the flow.
  *
- * @param[in] dev_flow
- *   Pointer to mlx5_flow.
+ * @param[in] rss_desc
+ *   Pointer to mlx5_flow_rss_desc.
  * @param[in] action
  *   Action configuration.
  */
 static void
-flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow,
+flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
 				  const struct rte_flow_action *action)
 {
 	const struct rte_flow_action_queue *queue = action->conf;
-	struct rte_flow *flow = dev_flow->flow;
 
-	if (flow->rss.queue)
-		(*flow->rss.queue)[0] = queue->index;
-	flow->rss.queue_num = 1;
+	rss_desc->queue[0] = queue->index;
+	rss_desc->queue_num = 1;
 }
 
 /**
@@ -963,28 +961,23 @@
  * the input is valid and that there is space to insert the requested action
  * into the flow.
  *
+ * @param[in] rss_desc
+ *   Pointer to mlx5_flow_rss_desc.
  * @param[in] action
  *   Action configuration.
- * @param[in, out] action_flags
- *   Pointer to the detected actions.
- * @param[in] dev_flow
- *   Pointer to mlx5_flow.
  */
 static void
-flow_verbs_translate_action_rss(struct mlx5_flow *dev_flow,
+flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
 				const struct rte_flow_action *action)
 {
 	const struct rte_flow_action_rss *rss = action->conf;
 	const uint8_t *rss_key;
-	struct rte_flow *flow = dev_flow->flow;
 
-	if (flow->rss.queue)
-		memcpy((*flow->rss.queue), rss->queue,
-		       rss->queue_num * sizeof(uint16_t));
-	flow->rss.queue_num = rss->queue_num;
+	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
+	rss_desc->queue_num = rss->queue_num;
 	/* NULL RSS key indicates default RSS key. */
 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
-	memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
 	/*
 	 * rss->level and rss.types should be set in advance when expanding
 	 * items for RSS.
@@ -1577,6 +1570,8 @@
 	uint64_t priority = attr->priority;
 	uint32_t subpriority = 0;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_rss_desc *rss_desc = (struct mlx5_flow_rss_desc *)
+					      priv->rss_desc;
 
 	if (priority == MLX5_FLOW_PRIO_RSVD)
 		priority = priv->config.flow_prio - 1;
@@ -1602,12 +1597,12 @@
 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
-			flow_verbs_translate_action_queue(dev_flow, actions);
+			flow_verbs_translate_action_queue(rss_desc, actions);
 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RSS:
-			flow_verbs_translate_action_rss(dev_flow, actions);
+			flow_verbs_translate_action_rss(rss_desc, actions);
 			action_flags |= MLX5_FLOW_ACTION_RSS;
 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
 			break;
@@ -1655,7 +1650,7 @@
 			subpriority = MLX5_PRIORITY_MAP_L3;
 			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
-					(dev_flow, tunnel,
+					(rss_desc, tunnel,
 					 MLX5_IPV4_LAYER_TYPES,
 					 MLX5_IPV4_IBV_RX_HASH);
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
@@ -1667,7 +1662,7 @@
 			subpriority = MLX5_PRIORITY_MAP_L3;
 			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
-					(dev_flow, tunnel,
+					(rss_desc, tunnel,
 					 MLX5_IPV6_LAYER_TYPES,
 					 MLX5_IPV6_IBV_RX_HASH);
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
@@ -1679,7 +1674,7 @@
 			subpriority = MLX5_PRIORITY_MAP_L4;
 			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
-					(dev_flow, tunnel, ETH_RSS_TCP,
+					(rss_desc, tunnel, ETH_RSS_TCP,
 					 (IBV_RX_HASH_SRC_PORT_TCP |
 					  IBV_RX_HASH_DST_PORT_TCP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
@@ -1691,7 +1686,7 @@
 			subpriority = MLX5_PRIORITY_MAP_L4;
 			dev_flow->hash_fields |=
 				mlx5_flow_hashfields_adjust
-					(dev_flow, tunnel, ETH_RSS_UDP,
+					(rss_desc, tunnel, ETH_RSS_UDP,
 					 (IBV_RX_HASH_SRC_PORT_UDP |
 					  IBV_RX_HASH_DST_PORT_UDP));
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
@@ -1848,19 +1843,21 @@
 			}
 		} else {
 			uint32_t hrxq_idx;
+			struct mlx5_flow_rss_desc *rss_desc =
+				(struct mlx5_flow_rss_desc *)priv->rss_desc;
 
-			MLX5_ASSERT(flow->rss.queue);
-			hrxq_idx = mlx5_hrxq_get(dev, flow->rss.key,
+			MLX5_ASSERT(rss_desc->queue_num);
+			hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
 					     MLX5_RSS_HASH_KEY_LEN,
 					     dev_flow->hash_fields,
-					     (*flow->rss.queue),
-					     flow->rss.queue_num);
+					     rss_desc->queue,
+					     rss_desc->queue_num);
 			if (!hrxq_idx)
-				hrxq_idx = mlx5_hrxq_new(dev, flow->rss.key,
+				hrxq_idx = mlx5_hrxq_new(dev, rss_desc->key,
 						MLX5_RSS_HASH_KEY_LEN,
 						dev_flow->hash_fields,
-						(*flow->rss.queue),
-						flow->rss.queue_num,
+						rss_desc->queue,
+						rss_desc->queue_num,
 						!!(handle->layers &
 						MLX5_FLOW_LAYER_TUNNEL));
 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
-- 
1.8.3.1


