[dpdk-dev] [PATCH 3/3] net/mlx5: optimize single counter pool search

Suanming Mou suanmingm at mellanox.com
Thu Jun 18 09:24:44 CEST 2020


For a single counter, when allocating a new counter, it needs to find the
pool it belongs to in order to do the query together.

Once there are millions of counters allocated, the pool array in the
counter container will become very large. In this case, the pool search
from the pool array will become extremely slow.

Save the minimum and maximum counter IDs to allow a quick check of the
current counter ID range. Also, start searching the pools from the last
pool in the container, which will mostly find the needed pool directly,
since counter IDs increase sequentially.

Signed-off-by: Suanming Mou <suanmingm at mellanox.com>
Acked-by: Matan Azrad <matan at mellanox.com>
---
 drivers/net/mlx5/mlx5.c         |  3 +++
 drivers/net/mlx5/mlx5.h         |  9 +++++++
 drivers/net/mlx5/mlx5_flow_dv.c | 60 +++++++++++++++++++++++++++++++----------
 3 files changed, 58 insertions(+), 14 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 4c0c26e..670a59e 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -457,6 +457,9 @@ struct mlx5_flow_id_pool *
 	memset(&sh->cmng, 0, sizeof(sh->cmng));
 	TAILQ_INIT(&sh->cmng.flow_counters);
 	for (i = 0; i < MLX5_CCONT_TYPE_MAX; ++i) {
+		sh->cmng.ccont[i].min_id = MLX5_CNT_BATCH_OFFSET;
+		sh->cmng.ccont[i].max_id = -1;
+		sh->cmng.ccont[i].last_pool_idx = POOL_IDX_INVALID;
 		TAILQ_INIT(&sh->cmng.ccont[i].pool_list);
 		rte_spinlock_init(&sh->cmng.ccont[i].resize_sl);
 	}
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 1ee9da7..3ddae17 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -312,6 +312,12 @@ struct mlx5_drop {
 	MLX5_CNT_TO_CNT_EXT(pool, MLX5_POOL_GET_CNT((pool), (offset)))
 #define MLX5_CNT_TO_AGE(cnt) \
 	((struct mlx5_age_param *)((cnt) + 1))
+/*
+ * The maximum single counter ID is 0x800000 as MLX5_CNT_BATCH_OFFSET
+ * defines. With a pool size of 512, the pool index can never reach
+ * UINT16_MAX, so UINT16_MAX is safe as the invalid-index sentinel.
+ */
+#define POOL_IDX_INVALID UINT16_MAX
 
 struct mlx5_flow_counter_pool;
 
@@ -420,6 +426,9 @@ struct mlx5_counter_stats_raw {
 struct mlx5_pools_container {
 	rte_atomic16_t n_valid; /* Number of valid pools. */
 	uint16_t n; /* Number of pools. */
+	uint16_t last_pool_idx; /* Last used pool index */
+	int min_id; /* The minimum counter ID in the pools. */
+	int max_id; /* The maximum counter ID in the pools. */
 	rte_spinlock_t resize_sl; /* The resize lock. */
 	struct mlx5_counter_pools pool_list; /* Counter pool list. */
 	struct mlx5_flow_counter_pool **pools; /* Counter pool array. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 6e4e10c..9fa8568 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -4051,6 +4051,28 @@ struct field_modify_info modify_tcp[] = {
 }
 
 /**
+ * Check the devx counter belongs to the pool.
+ *
+ * @param[in] pool
+ *   Pointer to the counter pool.
+ * @param[in] id
+ *   The counter devx ID.
+ *
+ * @return
+ *   True if counter belongs to the pool, false otherwise.
+ */
+static bool
+flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
+{
+	int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
+		   MLX5_COUNTERS_PER_POOL;
+
+	if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
+		return true;
+	return false;
+}
+
+/**
  * Get a pool by devx counter ID.
  *
  * @param[in] cont
@@ -4065,24 +4087,25 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
 {
 	uint32_t i;
-	uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
 
-	for (i = 0; i < n_valid; i++) {
+	/* Check last used pool. */
+	if (cont->last_pool_idx != POOL_IDX_INVALID &&
+	    flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
+		return cont->pools[cont->last_pool_idx];
+	/* ID out of range means no suitable pool in the container. */
+	if (id > cont->max_id || id < cont->min_id)
+		return NULL;
+	/*
+	 * Find the pool from the end of the container, since mostly counter
+	 * ID is sequence increasing, and the last pool should be the needed
+	 * one.
+	 */
+	i = rte_atomic16_read(&cont->n_valid);
+	while (i--) {
 		struct mlx5_flow_counter_pool *pool = cont->pools[i];
-		int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
-			   MLX5_COUNTERS_PER_POOL;
 
-		if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) {
-			/*
-			 * Move the pool to the head, as counter allocate
-			 * always gets the first pool in the container.
-			 */
-			if (pool != TAILQ_FIRST(&cont->pool_list)) {
-				TAILQ_REMOVE(&cont->pool_list, pool, next);
-				TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
-			}
+		if (flow_dv_is_counter_in_pool(pool, id))
 			return pool;
-		}
 	}
 	return NULL;
 }
@@ -4337,6 +4360,15 @@ struct field_modify_info modify_tcp[] = {
 	TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
 	pool->index = n_valid;
 	cont->pools[n_valid] = pool;
+	if (!batch) {
+		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
+
+		if (base < cont->min_id)
+			cont->min_id = base;
+		if (base > cont->max_id)
+			cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
+		cont->last_pool_idx = pool->index;
+	}
 	/* Pool initialization must be updated before host thread access. */
 	rte_cio_wmb();
 	rte_atomic16_add(&cont->n_valid, 1);
-- 
1.8.3.1



More information about the dev mailing list