[dpdk-dev] [PATCH 10/13] net/mlx5: add id generation function

Slava Ovsiienko viacheslavo at mellanox.com
Thu Sep 26 11:34:00 CEST 2019


> -----Original Message-----
> From: Ori Kam <orika at mellanox.com>
> Sent: Thursday, September 26, 2019 9:29
> To: Matan Azrad <matan at mellanox.com>; Shahaf Shuler
> <shahafs at mellanox.com>; Slava Ovsiienko <viacheslavo at mellanox.com>
> Cc: dev at dpdk.org; Ori Kam <orika at mellanox.com>; jingjing.wu at intel.com;
> stephen at networkplumber.org
> Subject: [PATCH 10/13] net/mlx5: add id generation function
> 
> When splitting flows for example in hairpin / metering, there is a need to
> combine the flows. This is done using an ID.
> This commit introduces a simple way to generate such IDs.
> 
> The reason why a bitmap was not used is due to the fact that the release and
> allocation are O(n), while in the chosen approach the allocation and release
> are O(1).
> 
> Signed-off-by: Ori Kam <orika at mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo at mellanox.com>

> ---
>  drivers/net/mlx5/mlx5.c      | 120
> ++++++++++++++++++++++++++++++++++++++++++-
>  drivers/net/mlx5/mlx5_flow.h |  14 +++++
>  2 files changed, 133 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index
> ad36743..940503d 100644
> --- a/drivers/net/mlx5/mlx5.c
> +++ b/drivers/net/mlx5/mlx5.c
> @@ -179,6 +179,124 @@ struct mlx5_dev_spawn_data {  static LIST_HEAD(,
> mlx5_ibv_shared) mlx5_ibv_list = LIST_HEAD_INITIALIZER();  static
> pthread_mutex_t mlx5_ibv_list_mutex = PTHREAD_MUTEX_INITIALIZER;
> 
> +#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
> +#define MLX5_ID_GENERATION_ARRAY_FACTOR 16
> +
> +/**
> + * Allocate ID pool structure.
> + *
> + * @return
> + *   Pointer to pool object, NULL value otherwise.
> + */
> +struct mlx5_flow_id_pool *
> +mlx5_flow_id_pool_alloc(void)
> +{
> +	struct mlx5_flow_id_pool *pool;
> +	void *mem;
> +
> +	pool = rte_zmalloc("id pool allocation", sizeof(*pool),
> +			   RTE_CACHE_LINE_SIZE);
> +	if (!pool) {
> +		DRV_LOG(ERR, "can't allocate id pool");
> +		rte_errno  = ENOMEM;
> +		return NULL;
> +	}
> +	mem = rte_zmalloc("", MLX5_FLOW_MIN_ID_POOL_SIZE *
> sizeof(uint32_t),
> +			  RTE_CACHE_LINE_SIZE);
> +	if (!mem) {
> +		DRV_LOG(ERR, "can't allocate mem for id pool");
> +		rte_errno  = ENOMEM;
> +		goto error;
> +	}
> +	pool->free_arr = mem;
> +	pool->curr = pool->free_arr;
> +	pool->last = pool->free_arr + MLX5_FLOW_MIN_ID_POOL_SIZE;
> +	pool->base_index = 0;
> +	return pool;
> +error:
> +	rte_free(pool);
> +	return NULL;
> +}
> +
> +/**
> + * Release ID pool structure.
> + *
> + * @param[in] pool
> + *   Pointer to flow id pool object to free.
> + */
> +void
> +mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool) {
> +	rte_free(pool->free_arr);
> +	rte_free(pool);
> +}
> +
> +/**
> + * Generate ID.
> + *
> + * @param[in] pool
> + *   Pointer to flow id pool.
> + * @param[out] id
> + *   The generated ID.
> + *
> + * @return
> + *   0 on success, error value otherwise.
> + */
> +uint32_t
> +mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id) {
> +	if (pool->curr == pool->free_arr) {
> +		if (pool->base_index == UINT32_MAX) {
> +			rte_errno  = ENOMEM;
> +			DRV_LOG(ERR, "no free id");
> +			return -rte_errno;
> +		}
> +		*id = ++pool->base_index;
> +		return 0;
> +	}
> +	*id = *(--pool->curr);
> +	return 0;
> +}
> +
> +/**
> + * Release ID.
> + *
> + * @param[in] pool
> + *   Pointer to flow id pool.
> + * @param[in] id
> + *   The ID to release.
> + *
> + * @return
> + *   0 on success, error value otherwise.
> + */
> +uint32_t
> +mlx5_flow_id_release(struct mlx5_flow_id_pool *pool, uint32_t id) {
> +	uint32_t size;
> +	uint32_t size2;
> +	void *mem;
> +
> +	if (pool->curr == pool->last) {
> +		size = pool->curr - pool->free_arr;
> +		size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
> +		assert(size2 > size);
> +		mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
> +		if (!mem) {
> +			DRV_LOG(ERR, "can't allocate mem for id pool");
> +			rte_errno  = ENOMEM;
> +			return -rte_errno;
> +		}
> +		memcpy(mem, pool->free_arr, size * sizeof(uint32_t));
> +		rte_free(pool->free_arr);
> +		pool->free_arr = mem;
> +		pool->curr = pool->free_arr + size;
> +		pool->last = pool->free_arr + size2;
> +	}
> +	*pool->curr = id;
> +	pool->curr++;
> +	return 0;
> +}
> +
>  /**
>   * Initialize the counters management structure.
>   *
> @@ -329,7 +447,7 @@ struct mlx5_dev_spawn_data {
>  	struct mlx5_devx_tis_attr tis_attr = { 0 };  #endif
> 
> -	assert(spawn);
> +	assert(spawn);
>  	/* Secondary process should not create the shared context. */
>  	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
>  	pthread_mutex_lock(&mlx5_ibv_list_mutex);
> diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
> index 0148c1b..1b14fb7 100644
> --- a/drivers/net/mlx5/mlx5_flow.h
> +++ b/drivers/net/mlx5/mlx5_flow.h
> @@ -495,8 +495,22 @@ struct mlx5_flow_driver_ops {  #define
> MLX5_CNT_CONTAINER_UNUSED(sh, batch, thread) (&(sh)->cmng.ccont \
>  	[(~((sh)->cmng.mhi[batch] >> (thread)) & 0x1) * 2 + (batch)])
> 
> +/* ID generation structure. */
> +struct mlx5_flow_id_pool {
> +	uint32_t *free_arr; /**< Pointer to an array of free values. */
> +	uint32_t base_index;
> +	/**< The next index that can be used without any free elements. */
> +	uint32_t *curr; /**< Pointer to the index to pop. */
> +	uint32_t *last; /**< Pointer to the last element in the empty array. */
> +};
> +
>  /* mlx5_flow.c */
> 
> +struct mlx5_flow_id_pool *mlx5_flow_id_pool_alloc(void); void
> +mlx5_flow_id_pool_release(struct mlx5_flow_id_pool *pool); uint32_t
> +mlx5_flow_id_get(struct mlx5_flow_id_pool *pool, uint32_t *id);
> +uint32_t mlx5_flow_id_release(struct mlx5_flow_id_pool *pool,
> +			      uint32_t id);
>  int mlx5_flow_group_to_table(const struct rte_flow_attr *attributes,
>  			     bool external, uint32_t group, uint32_t *table,
>  			     struct rte_flow_error *error);
> --
> 1.8.3.1



More information about the dev mailing list