[dpdk-dev] [PATCH v2 02/10] net/mlx5: add trunk dynamic grow for indexed pool

Suanming Mou suanmingm at mellanox.com
Thu Apr 16 04:42:00 CEST 2020


This commit adds dynamic trunk grow support for the indexed pool.

For pools where the number of entries needed is not known in advance, the
pool can be configured in progressive-growth mode. This means the trunk
size will be increased dynamically, trunk after trunk, until it reaches a
stable value. It saves memory by avoiding the allocation of one very big
trunk at the beginning.

Users should set both grow_shift and grow_trunk to make the trunk growth
work. Keeping either or both of grow_shift and grow_trunk at 0 makes the
trunks work with a fixed size.

Signed-off-by: Suanming Mou <suanmingm at mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo at mellanox.com>
---
 drivers/net/mlx5/mlx5_utils.c | 105 +++++++++++++++++++++++++++++++++++-------
 drivers/net/mlx5/mlx5_utils.h |  23 +++++++--
 2 files changed, 108 insertions(+), 20 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 4cab7f0..e63921d 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -132,16 +132,69 @@ struct mlx5_hlist_entry *
 		rte_spinlock_unlock(&pool->lock);
 }
 
+static inline uint32_t
+mlx5_trunk_idx_get(struct mlx5_indexed_pool *pool, uint32_t entry_idx)
+{
+	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
+	uint32_t trunk_idx = 0;
+	uint32_t i;
+
+	if (!cfg->grow_trunk)
+		return entry_idx / cfg->trunk_size;
+	if (entry_idx >= pool->grow_tbl[cfg->grow_trunk - 1]) {
+		trunk_idx = (entry_idx - pool->grow_tbl[cfg->grow_trunk - 1]) /
+			    (cfg->trunk_size << (cfg->grow_shift *
+			    cfg->grow_trunk)) + cfg->grow_trunk;
+	} else {
+		for (i = 0; i < cfg->grow_trunk; i++) {
+			if (entry_idx < pool->grow_tbl[i])
+				break;
+		}
+		trunk_idx = i;
+	}
+	return trunk_idx;
+}
+
+static inline uint32_t
+mlx5_trunk_size_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
+{
+	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
+
+	return cfg->trunk_size << (cfg->grow_shift *
+	       (trunk_idx > cfg->grow_trunk ? cfg->grow_trunk : trunk_idx));
+}
+
+static inline uint32_t
+mlx5_trunk_idx_offset_get(struct mlx5_indexed_pool *pool, uint32_t trunk_idx)
+{
+	struct mlx5_indexed_pool_config *cfg = &pool->cfg;
+	uint32_t offset = 0;
+
+	if (!trunk_idx)
+		return 0;
+	if (!cfg->grow_trunk)
+		return cfg->trunk_size * trunk_idx;
+	if (trunk_idx < cfg->grow_trunk)
+		offset = pool->grow_tbl[trunk_idx - 1];
+	else
+		offset = pool->grow_tbl[cfg->grow_trunk - 1] +
+			 (cfg->trunk_size << (cfg->grow_shift *
+			 cfg->grow_trunk)) * (trunk_idx - cfg->grow_trunk);
+	return offset;
+}
+
 struct mlx5_indexed_pool *
 mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg)
 {
 	struct mlx5_indexed_pool *pool;
+	uint32_t i;
 
 	if (!cfg || !cfg->size || (!cfg->malloc ^ !cfg->free) ||
 	    (cfg->trunk_size && ((cfg->trunk_size & (cfg->trunk_size - 1)) ||
 	    ((__builtin_ffs(cfg->trunk_size) + TRUNK_IDX_BITS) > 32))))
 		return NULL;
-	pool = rte_zmalloc("mlx5_ipool", sizeof(*pool), RTE_CACHE_LINE_SIZE);
+	pool = rte_zmalloc("mlx5_ipool", sizeof(*pool) + cfg->grow_trunk *
+				sizeof(pool->grow_tbl[0]), RTE_CACHE_LINE_SIZE);
 	if (!pool)
 		return NULL;
 	pool->cfg = *cfg;
@@ -154,6 +207,15 @@ struct mlx5_indexed_pool *
 	pool->free_list = TRUNK_INVALID;
 	if (pool->cfg.need_lock)
 		rte_spinlock_init(&pool->lock);
+	/*
+	 * Initialize the dynamic grow trunk size lookup table for a
+	 * quick lookup of the trunk entry index offset.
+	 */
+	for (i = 0; i < cfg->grow_trunk; i++) {
+		pool->grow_tbl[i] = cfg->trunk_size << (cfg->grow_shift * i);
+		if (i > 0)
+			pool->grow_tbl[i] += pool->grow_tbl[i - 1];
+	}
 	return pool;
 }
 
@@ -164,6 +226,7 @@ struct mlx5_indexed_pool *
 	struct mlx5_indexed_trunk **trunk_tmp;
 	struct mlx5_indexed_trunk **p;
 	size_t trunk_size = 0;
+	size_t data_size;
 	size_t bmp_size;
 	uint32_t idx;
 
@@ -193,23 +256,23 @@ struct mlx5_indexed_pool *
 	}
 	idx = pool->n_trunk_valid;
 	trunk_size += sizeof(*trunk);
-	bmp_size = rte_bitmap_get_memory_footprint(pool->cfg.trunk_size);
-	trunk_size += pool->cfg.trunk_size * pool->cfg.size + bmp_size;
+	data_size = mlx5_trunk_size_get(pool, idx);
+	bmp_size = rte_bitmap_get_memory_footprint(data_size);
+	trunk_size += data_size * pool->cfg.size + bmp_size;
 	trunk = pool->cfg.malloc(pool->cfg.type, trunk_size,
 				 RTE_CACHE_LINE_SIZE, rte_socket_id());
 	if (!trunk)
 		return -ENOMEM;
 	pool->trunks[idx] = trunk;
 	trunk->idx = idx;
-	trunk->free = pool->cfg.trunk_size;
+	trunk->free = data_size;
 	trunk->prev = TRUNK_INVALID;
 	trunk->next = TRUNK_INVALID;
 	MLX5_ASSERT(pool->free_list == TRUNK_INVALID);
 	pool->free_list = idx;
 	/* Mark all entries as available. */
-	trunk->bmp = rte_bitmap_init_with_all_set(pool->cfg.trunk_size,
-		     &trunk->data[pool->cfg.trunk_size  * pool->cfg.size],
-		     bmp_size);
+	trunk->bmp = rte_bitmap_init_with_all_set(data_size,
+		     &trunk->data[data_size * pool->cfg.size], bmp_size);
 	pool->n_trunk_valid++;
 #ifdef POOL_DEBUG
 	pool->trunk_new++;
@@ -244,10 +307,10 @@ struct mlx5_indexed_pool *
 	MLX5_ASSERT(slab);
 	iidx += __builtin_ctzll(slab);
 	MLX5_ASSERT(iidx != UINT32_MAX);
-	MLX5_ASSERT(iidx < pool->cfg.trunk_size);
+	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
 	rte_bitmap_clear(trunk->bmp, iidx);
 	p = &trunk->data[iidx * pool->cfg.size];
-	iidx += trunk->idx * pool->cfg.trunk_size;
+	iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
 	iidx += 1; /* non-zero index. */
 	trunk->free--;
 #ifdef POOL_DEBUG
@@ -286,19 +349,23 @@ struct mlx5_indexed_pool *
 {
 	struct mlx5_indexed_trunk *trunk;
 	uint32_t trunk_idx;
+	uint32_t entry_idx;
 
 	if (!idx)
 		return;
 	idx -= 1;
 	mlx5_ipool_lock(pool);
-	trunk_idx = idx / pool->cfg.trunk_size;
+	trunk_idx = mlx5_trunk_idx_get(pool, idx);
 	if (trunk_idx >= pool->n_trunk_valid)
 		goto out;
 	trunk = pool->trunks[trunk_idx];
-	if (!trunk || trunk_idx != trunk->idx ||
-	    rte_bitmap_get(trunk->bmp, idx % pool->cfg.trunk_size))
+	if (!trunk)
+		goto out;
+	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
+	if (trunk_idx != trunk->idx ||
+	    rte_bitmap_get(trunk->bmp, entry_idx))
 		goto out;
-	rte_bitmap_set(trunk->bmp, idx % pool->cfg.trunk_size);
+	rte_bitmap_set(trunk->bmp, entry_idx);
 	trunk->free++;
 	if (trunk->free == 1) {
 		/* Put into free trunk list head. */
@@ -326,19 +393,23 @@ struct mlx5_indexed_pool *
 	struct mlx5_indexed_trunk *trunk;
 	void *p = NULL;
 	uint32_t trunk_idx;
+	uint32_t entry_idx;
 
 	if (!idx)
 		return NULL;
 	idx -= 1;
 	mlx5_ipool_lock(pool);
-	trunk_idx = idx / pool->cfg.trunk_size;
+	trunk_idx = mlx5_trunk_idx_get(pool, idx);
 	if (trunk_idx >= pool->n_trunk_valid)
 		goto out;
 	trunk = pool->trunks[trunk_idx];
-	if (!trunk || trunk_idx != trunk->idx ||
-	    rte_bitmap_get(trunk->bmp, idx % pool->cfg.trunk_size))
+	if (!trunk)
+		goto out;
+	entry_idx = idx - mlx5_trunk_idx_offset_get(pool, trunk->idx);
+	if (trunk_idx != trunk->idx ||
+	    rte_bitmap_get(trunk->bmp, entry_idx))
 		goto out;
-	p = &trunk->data[(idx % pool->cfg.trunk_size) * pool->cfg.size];
+	p = &trunk->data[entry_idx * pool->cfg.size];
 out:
 	mlx5_ipool_unlock(pool);
 	return p;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index e404a5c..af96a87 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -81,9 +81,25 @@
 
 struct mlx5_indexed_pool_config {
 	uint32_t size; /* Pool entry size. */
-	uint32_t trunk_size;
-	/* Trunk entry number. Must be power of 2. */
-	uint32_t need_lock;
+	uint32_t trunk_size:22;
+	/*
+	 * Trunk entry number. Must be power of 2. It can be increased
+	 * if grow_trunk is enabled. The trunk entry number increases
+	 * by left-shifting with grow_shift. Trunks with an index beyond
+	 * grow_trunk keep the same entry number as the last grow trunk.
+	 */
+	uint32_t grow_trunk:4;
+	/*
+	 * Number of trunks with a growing entry number in the pool.
+	 * Set it to 0 to make the pool work with a fixed trunk entry
+	 * number. It takes effect only if grow_shift is not 0.
+	 */
+	uint32_t grow_shift:4;
+	/*
+	 * Trunk entry number increase shift value; growth stops after
+	 * grow_trunk trunks. It takes effect only if grow_trunk is not 0.
+	 */
+	uint32_t need_lock:1;
 	/* Lock is needed for multiple thread usage. */
 	const char *type; /* Memory allocate type name. */
 	void *(*malloc)(const char *type, size_t size, unsigned int align,
@@ -116,6 +132,7 @@ struct mlx5_indexed_pool {
 	int64_t trunk_empty;
 	int64_t trunk_free;
 #endif
+	uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
 };
 
 /**
-- 
1.8.3.1



More information about the dev mailing list