[dpdk-dev] [PATCH v4 22/34] net/mlx5: optimize shared RSS list operation

Suanming Mou suanmingm at nvidia.com
Wed Oct 28 00:47:33 CET 2020


When a shared RSS hrxq is created, the hrxq is created directly; no
existing hrxq is reused.

In this case, adding the shared RSS hrxq to the queue list is
redundant, and it also slows down the generic queue lookup.

This commit avoids adding the shared RSS hrxq to the queue list.

Signed-off-by: Suanming Mou <suanmingm at nvidia.com>
Acked-by: Matan Azrad <matan at nvidia.com>
---
 drivers/net/mlx5/mlx5.h      |  2 +-
 drivers/net/mlx5/mlx5_rxq.c  | 57 +++++++++++++++++++++++++++-----------------
 drivers/net/mlx5/mlx5_rxtx.h |  5 ++--
 3 files changed, 39 insertions(+), 25 deletions(-)
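
Note: for context, below is a minimal sketch of the pattern this patch
applies, in plain C. The `obj`/`obj_list` names are illustrative only,
not the actual driver types. Standalone objects skip the list insertion
on create and the list removal on release, so the generic lookup only
ever walks reusable objects:

    #include <stdbool.h>
    #include <sys/queue.h>

    struct obj {
            LIST_ENTRY(obj) next; /* Linkage in the lookup list. */
            bool standalone;      /* Created for a shared action; never reused. */
    };

    LIST_HEAD(obj_list, obj);

    /* On create: only reusable objects enter the lookup list. */
    static void
    obj_register(struct obj_list *list, struct obj *o)
    {
            if (!o->standalone)
                    LIST_INSERT_HEAD(list, o, next);
    }

    /* On release: a standalone object was never linked, so skip removal. */
    static void
    obj_unregister(struct obj *o)
    {
            if (!o->standalone)
                    LIST_REMOVE(o, next);
    }

With this split, a lookup loop over the list no longer needs a
per-entry "is this shared?" check, which is exactly the check removed
from the hrxq lookup in the diff below.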

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index debb862..9f96bd0 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -725,7 +725,7 @@ struct mlx5_ind_table_obj {
 struct mlx5_hrxq {
 	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
 	uint32_t refcnt; /* Reference counter. */
-	uint32_t shared:1; /* This object used in shared action. */
+	uint32_t standalone:1; /* This object is used in shared action. */
 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
 	RTE_STD_C11
 	union {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 8d05315..7579407 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1929,13 +1929,16 @@ struct mlx5_ind_table_obj *
  *   Pointer to Ethernet device.
  * @param ind_table
  *   Indirection table to release.
+ * @param standalone
+ *   Indirection table for a standalone queue.
  *
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
 int
 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
-			   struct mlx5_ind_table_obj *ind_tbl)
+			   struct mlx5_ind_table_obj *ind_tbl,
+			   bool standalone)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
@@ -1945,7 +1948,8 @@ struct mlx5_ind_table_obj *
 	for (i = 0; i != ind_tbl->queues_n; ++i)
 		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
 	if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
-		LIST_REMOVE(ind_tbl, next);
+		if (!standalone)
+			LIST_REMOVE(ind_tbl, next);
 		mlx5_free(ind_tbl);
 		return 0;
 	}
@@ -1986,13 +1990,15 @@ struct mlx5_ind_table_obj *
  *   Queues entering in the indirection table.
  * @param queues_n
  *   Number of queues in the array.
+ * @param standalone
+ *   Indirection table for a standalone queue.
  *
  * @return
  *   The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
  */
 static struct mlx5_ind_table_obj *
 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
-		       uint32_t queues_n)
+		       uint32_t queues_n, bool standalone)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ind_table_obj *ind_tbl;
@@ -2019,7 +2025,8 @@ struct mlx5_ind_table_obj *
 	if (ret < 0)
 		goto error;
 	__atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
-	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+	if (!standalone)
+		LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
 	return ind_tbl;
 error:
 	ret = rte_errno;
@@ -2062,8 +2069,6 @@ struct mlx5_ind_table_obj *
 		      hrxq, next) {
 		struct mlx5_ind_table_obj *ind_tbl;
 
-		if (hrxq->shared)
-			continue;
 		if (hrxq->rss_key_len != rss_key_len)
 			continue;
 		if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
@@ -2074,7 +2079,8 @@ struct mlx5_ind_table_obj *
 		if (!ind_tbl)
 			continue;
 		if (ind_tbl != hrxq->ind_table) {
-			mlx5_ind_table_obj_release(dev, ind_tbl);
+			mlx5_ind_table_obj_release(dev, ind_tbl,
+						   hrxq->standalone);
 			continue;
 		}
 		__atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
@@ -2135,7 +2141,8 @@ struct mlx5_ind_table_obj *
 	} else {
 		ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 		if (!ind_tbl)
-			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+			ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+							 hrxq->standalone);
 	}
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
@@ -2149,7 +2156,8 @@ struct mlx5_ind_table_obj *
 		goto error;
 	}
 	if (ind_tbl != hrxq->ind_table) {
-		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
+		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+					   hrxq->standalone);
 		hrxq->ind_table = ind_tbl;
 	}
 	hrxq->hash_fields = hash_fields;
@@ -2158,7 +2166,7 @@ struct mlx5_ind_table_obj *
 error:
 	err = rte_errno;
 	if (ind_tbl != hrxq->ind_table)
-		mlx5_ind_table_obj_release(dev, ind_tbl);
+		mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
 	rte_errno = err;
 	return -rte_errno;
 }
@@ -2188,13 +2196,16 @@ struct mlx5_ind_table_obj *
 		mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
 		priv->obj_ops.hrxq_destroy(hrxq);
-		mlx5_ind_table_obj_release(dev, hrxq->ind_table);
-		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
-			     hrxq_idx, hrxq, next);
+		mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+					   hrxq->standalone);
+		if (!hrxq->standalone)
+			ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+				     &priv->hrxqs, hrxq_idx, hrxq, next);
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 		return 0;
 	}
-	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
+	claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+						 hrxq->standalone));
 	return 1;
 }
 
@@ -2216,8 +2227,8 @@ struct mlx5_ind_table_obj *
  *   Number of queues.
  * @param tunnel
  *   Tunnel type.
- * @param shared
- *   If true new object of Rx Hash queue will be used in shared action.
+ * @param standalone
+ *   Whether the Rx Hash queue object will be used in a standalone shared action.
  *
  * @return
  *   The DevX object initialized index, 0 otherwise and rte_errno is set.
@@ -2227,7 +2238,7 @@ struct mlx5_ind_table_obj *
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
 	      const uint16_t *queues, uint32_t queues_n,
-	      int tunnel, bool shared)
+	      int tunnel, bool standalone)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq = NULL;
@@ -2238,7 +2249,8 @@ struct mlx5_ind_table_obj *
 	queues_n = hash_fields ? queues_n : 1;
 	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
-		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+						 standalone);
 	if (!ind_tbl) {
 		rte_errno = ENOMEM;
 		return 0;
@@ -2246,7 +2258,7 @@ struct mlx5_ind_table_obj *
 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
 	if (!hrxq)
 		goto error;
-	hrxq->shared = !!shared;
+	hrxq->standalone = !!standalone;
 	hrxq->ind_table = ind_tbl;
 	hrxq->rss_key_len = rss_key_len;
 	hrxq->hash_fields = hash_fields;
@@ -2257,12 +2269,13 @@ struct mlx5_ind_table_obj *
 		goto error;
 	}
 	__atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
-	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
-		     hrxq, next);
+	if (!hrxq->standalone)
+		ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
+			     hrxq_idx, hrxq, next);
 	return hrxq_idx;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_ind_table_obj_release(dev, ind_tbl);
+	mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
 	if (hrxq)
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 	rte_errno = ret; /* Restore rte_errno. */
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 1b5fba4..8fe0473 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -347,12 +347,13 @@ struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
 						  const uint16_t *queues,
 						  uint32_t queues_n);
 int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
-			       struct mlx5_ind_table_obj *ind_tbl);
+			       struct mlx5_ind_table_obj *ind_tbl,
+			       bool standalone);
 uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
 		       const uint8_t *rss_key, uint32_t rss_key_len,
 		       uint64_t hash_fields,
 		       const uint16_t *queues, uint32_t queues_n,
-		       int tunnel, bool shared);
+		       int tunnel, bool standalone);
 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 		       const uint8_t *rss_key, uint32_t rss_key_len,
 		       uint64_t hash_fields,
-- 
1.8.3.1


