[dpdk-dev] [PATCH v6 13/26] common/mlx5: move list utility to common

Suanming Mou suanmingm at nvidia.com
Tue Jul 13 10:44:47 CEST 2021


The hash list is planned to be implemented using the cache list code.

This commit moves the list utility to the common directory.
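
For reference, a minimal usage sketch of the moved API follows. It is not
part of this patch: the my_entry structure and the my_* callbacks are
hypothetical placeholders, only the mlx5_list_* calls and the callback
signatures come from the moved utility, and the includes assume the common
mlx5 headers.

#include "mlx5_common_utils.h"
#include "mlx5_malloc.h"

/* User entry embedding the list entry as its first member. */
struct my_entry {
	struct mlx5_list_entry le;
	uint32_t key;
};

static struct mlx5_list_entry *
my_create(struct mlx5_list *list, struct mlx5_list_entry *entry, void *ctx)
{
	struct my_entry *e = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*e), 0,
					 SOCKET_ID_ANY);

	(void)list;
	(void)entry;
	if (!e)
		return NULL;
	e->key = *(uint32_t *)ctx;
	return &e->le;
}

static int
my_match(struct mlx5_list *list, struct mlx5_list_entry *entry, void *ctx)
{
	(void)list;
	/* Return 0 on match, non-zero otherwise. */
	return ((struct my_entry *)entry)->key != *(uint32_t *)ctx;
}

static void
my_remove(struct mlx5_list *list, struct mlx5_list_entry *entry)
{
	(void)list;
	mlx5_free(entry);
}

/* The per-lcore copies simply reuse the create/remove logic in this sketch. */
static struct mlx5_list_entry *
my_clone(struct mlx5_list *list, struct mlx5_list_entry *entry, void *ctx)
{
	return my_create(list, entry, ctx);
}

static void
my_clone_free(struct mlx5_list *list, struct mlx5_list_entry *entry)
{
	my_remove(list, entry);
}

static void
my_list_example(void)
{
	uint32_t key = 42;
	struct mlx5_list *l;
	struct mlx5_list_entry *e;

	l = mlx5_list_create("example", NULL, my_create, my_match,
			     my_remove, my_clone, my_clone_free);
	if (!l)
		return;
	/* Create the entry, or reuse it if a matching one already exists. */
	e = mlx5_list_register(l, &key);
	if (e)
		mlx5_list_unregister(l, e); /* Drop the reference. */
	mlx5_list_destroy(l);
}

Note that mlx5_list_register() uses rte_lcore_index() for the per-lcore
cache, so it is expected to run on an EAL thread; on a non-EAL thread it
fails with ENOTSUP.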

Signed-off-by: Suanming Mou <suanmingm at nvidia.com>
Acked-by: Matan Azrad <matan at nvidia.com>
---
 drivers/common/mlx5/mlx5_common.h       |   2 +
 drivers/common/mlx5/mlx5_common_utils.c | 250 +++++++++++++++++++++++
 drivers/common/mlx5/mlx5_common_utils.h | 205 +++++++++++++++++++
 drivers/common/mlx5/version.map         |   7 +
 drivers/net/mlx5/mlx5_utils.c           | 251 ------------------------
 drivers/net/mlx5/mlx5_utils.h           | 197 -------------------
 6 files changed, 464 insertions(+), 448 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 306f2f1ab7..7fb7d40b38 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -14,6 +14,8 @@
 #include <rte_kvargs.h>
 #include <rte_devargs.h>
 #include <rte_bitops.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
 #include <rte_os_shim.h>
 
 #include "mlx5_prm.h"
diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
index ad2011e858..8bb8a6016d 100644
--- a/drivers/common/mlx5/mlx5_common_utils.c
+++ b/drivers/common/mlx5/mlx5_common_utils.c
@@ -11,6 +11,256 @@
 #include "mlx5_common_utils.h"
 #include "mlx5_common_log.h"
 
+/********************* mlx5 list ************************/
+
+struct mlx5_list *
+mlx5_list_create(const char *name, void *ctx,
+		 mlx5_list_create_cb cb_create,
+		 mlx5_list_match_cb cb_match,
+		 mlx5_list_remove_cb cb_remove,
+		 mlx5_list_clone_cb cb_clone,
+		 mlx5_list_clone_free_cb cb_clone_free)
+{
+	struct mlx5_list *list;
+	int i;
+
+	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
+	    !cb_clone_free) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	list = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*list), 0, SOCKET_ID_ANY);
+	if (!list)
+		return NULL;
+	if (name)
+		snprintf(list->name, sizeof(list->name), "%s", name);
+	list->ctx = ctx;
+	list->cb_create = cb_create;
+	list->cb_match = cb_match;
+	list->cb_remove = cb_remove;
+	list->cb_clone = cb_clone;
+	list->cb_clone_free = cb_clone_free;
+	rte_rwlock_init(&list->lock);
+	DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
+	for (i = 0; i <= RTE_MAX_LCORE; i++)
+		LIST_INIT(&list->cache[i].h);
+	return list;
+}
+
+static struct mlx5_list_entry *
+__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
+{
+	struct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h);
+	uint32_t ret;
+
+	while (entry != NULL) {
+		if (list->cb_match(list, entry, ctx) == 0) {
+			if (reuse) {
+				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
+							 __ATOMIC_RELAXED) - 1;
+				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
+					list->name, (void *)entry,
+					entry->ref_cnt);
+			} else if (lcore_index < RTE_MAX_LCORE) {
+				ret = __atomic_load_n(&entry->ref_cnt,
+						      __ATOMIC_RELAXED);
+			}
+			if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
+				return entry;
+			if (reuse && ret == 0)
+				entry->ref_cnt--; /* Invalid entry. */
+		}
+		entry = LIST_NEXT(entry, next);
+	}
+	return NULL;
+}
+
+struct mlx5_list_entry *
+mlx5_list_lookup(struct mlx5_list *list, void *ctx)
+{
+	struct mlx5_list_entry *entry = NULL;
+	int i;
+
+	rte_rwlock_read_lock(&list->lock);
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		entry = __list_lookup(list, i, ctx, false);
+		if (entry)
+			break;
+	}
+	rte_rwlock_read_unlock(&list->lock);
+	return entry;
+}
+
+static struct mlx5_list_entry *
+mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,
+		       struct mlx5_list_entry *gentry, void *ctx)
+{
+	struct mlx5_list_entry *lentry = list->cb_clone(list, gentry, ctx);
+
+	if (unlikely(!lentry))
+		return NULL;
+	lentry->ref_cnt = 1u;
+	lentry->gentry = gentry;
+	lentry->lcore_idx = (uint32_t)lcore_index;
+	LIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);
+	return lentry;
+}
+
+static void
+__list_cache_clean(struct mlx5_list *list, int lcore_index)
+{
+	struct mlx5_list_cache *c = &list->cache[lcore_index];
+	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
+	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
+					       __ATOMIC_RELAXED);
+
+	while (inv_cnt != 0 && entry != NULL) {
+		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
+
+		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
+			LIST_REMOVE(entry, next);
+			list->cb_clone_free(list, entry);
+			inv_cnt--;
+		}
+		entry = nentry;
+	}
+}
+
+struct mlx5_list_entry *
+mlx5_list_register(struct mlx5_list *list, void *ctx)
+{
+	struct mlx5_list_entry *entry, *local_entry;
+	volatile uint32_t prev_gen_cnt = 0;
+	int lcore_index = rte_lcore_index(rte_lcore_id());
+
+	MLX5_ASSERT(list);
+	MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
+	if (unlikely(lcore_index == -1)) {
+		rte_errno = ENOTSUP;
+		return NULL;
+	}
+	/* 0. Free entries that were invalidated by other lcores. */
+	__list_cache_clean(list, lcore_index);
+	/* 1. Lookup in local cache. */
+	local_entry = __list_lookup(list, lcore_index, ctx, true);
+	if (local_entry)
+		return local_entry;
+	/* 2. Lookup with read lock on global list, reuse if found. */
+	rte_rwlock_read_lock(&list->lock);
+	entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
+	if (likely(entry)) {
+		rte_rwlock_read_unlock(&list->lock);
+		return mlx5_list_cache_insert(list, lcore_index, entry, ctx);
+	}
+	prev_gen_cnt = list->gen_cnt;
+	rte_rwlock_read_unlock(&list->lock);
+	/* 3. Prepare new entry for global list and for cache. */
+	entry = list->cb_create(list, entry, ctx);
+	if (unlikely(!entry))
+		return NULL;
+	local_entry = list->cb_clone(list, entry, ctx);
+	if (unlikely(!local_entry)) {
+		list->cb_remove(list, entry);
+		return NULL;
+	}
+	entry->ref_cnt = 1u;
+	local_entry->ref_cnt = 1u;
+	local_entry->gentry = entry;
+	local_entry->lcore_idx = (uint32_t)lcore_index;
+	rte_rwlock_write_lock(&list->lock);
+	/* 4. Make sure the same entry was not created before the write lock. */
+	if (unlikely(prev_gen_cnt != list->gen_cnt)) {
+		struct mlx5_list_entry *oentry = __list_lookup(list,
+							       RTE_MAX_LCORE,
+							       ctx, true);
+
+		if (unlikely(oentry)) {
+			/* 4.5. Found real race!!, reuse the old entry. */
+			rte_rwlock_write_unlock(&list->lock);
+			list->cb_remove(list, entry);
+			list->cb_clone_free(list, local_entry);
+			return mlx5_list_cache_insert(list, lcore_index, oentry,
+						      ctx);
+		}
+	}
+	/* 5. Update lists. */
+	LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next);
+	list->gen_cnt++;
+	rte_rwlock_write_unlock(&list->lock);
+	LIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);
+	__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
+	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name,
+		(void *)entry, entry->ref_cnt);
+	return local_entry;
+}
+
+int
+mlx5_list_unregister(struct mlx5_list *list,
+		      struct mlx5_list_entry *entry)
+{
+	struct mlx5_list_entry *gentry = entry->gentry;
+	int lcore_idx;
+
+	if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
+		return 1;
+	lcore_idx = rte_lcore_index(rte_lcore_id());
+	MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
+	if (entry->lcore_idx == (uint32_t)lcore_idx) {
+		LIST_REMOVE(entry, next);
+		list->cb_clone_free(list, entry);
+	} else if (likely(lcore_idx != -1)) {
+		__atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,
+				   __ATOMIC_RELAXED);
+	} else {
+		return 0;
+	}
+	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
+		return 1;
+	rte_rwlock_write_lock(&list->lock);
+	if (likely(gentry->ref_cnt == 0)) {
+		LIST_REMOVE(gentry, next);
+		rte_rwlock_write_unlock(&list->lock);
+		list->cb_remove(list, gentry);
+		__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
+		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
+			list->name, (void *)gentry);
+		return 0;
+	}
+	rte_rwlock_write_unlock(&list->lock);
+	return 1;
+}
+
+void
+mlx5_list_destroy(struct mlx5_list *list)
+{
+	struct mlx5_list_entry *entry;
+	int i;
+
+	MLX5_ASSERT(list);
+	for (i = 0; i <= RTE_MAX_LCORE; i++) {
+		while (!LIST_EMPTY(&list->cache[i].h)) {
+			entry = LIST_FIRST(&list->cache[i].h);
+			LIST_REMOVE(entry, next);
+			if (i == RTE_MAX_LCORE) {
+				list->cb_remove(list, entry);
+				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
+					"destroyed.", list->name,
+					(void *)entry);
+			} else {
+				list->cb_clone_free(list, entry);
+			}
+		}
+	}
+	mlx5_free(list);
+}
+
+uint32_t
+mlx5_list_get_entry_num(struct mlx5_list *list)
+{
+	MLX5_ASSERT(list);
+	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
+}
+
 /********************* Hash List **********************/
 
 static struct mlx5_hlist_entry *
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index ed378ce9bd..96add6d003 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -7,6 +7,211 @@
 
 #include "mlx5_common.h"
 
+/************************ mlx5 list *****************************/
+
+/** Maximum size of string for naming. */
+#define MLX5_NAME_SIZE			32
+
+struct mlx5_list;
+
+/**
+ * Structure of an entry in the mlx5 list; the user should define its own
+ * struct that contains this one in order to store the data.
+ */
+struct mlx5_list_entry {
+	LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
+	uint32_t ref_cnt; /* 0 means the entry is invalid. */
+	uint32_t lcore_idx;
+	struct mlx5_list_entry *gentry;
+};
+
+struct mlx5_list_cache {
+	LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
+	uint32_t inv_cnt; /* Invalid entries counter. */
+} __rte_cache_aligned;
+
+/**
+ * Type of callback function for entry removal.
+ *
+ * @param list
+ *   The mlx5 list.
+ * @param entry
+ *   The entry in the list.
+ */
+typedef void (*mlx5_list_remove_cb)(struct mlx5_list *list,
+				     struct mlx5_list_entry *entry);
+
+/**
+ * Type of function for user defined matching.
+ *
+ * @param list
+ *   The mlx5 list.
+ * @param entry
+ *   The entry in the list.
+ * @param ctx
+ *   The pointer to new entry context.
+ *
+ * @return
+ *   0 if matching, non-zero number otherwise.
+ */
+typedef int (*mlx5_list_match_cb)(struct mlx5_list *list,
+				   struct mlx5_list_entry *entry, void *ctx);
+
+typedef struct mlx5_list_entry *(*mlx5_list_clone_cb)
+				 (struct mlx5_list *list,
+				  struct mlx5_list_entry *entry, void *ctx);
+
+typedef void (*mlx5_list_clone_free_cb)(struct mlx5_list *list,
+					 struct mlx5_list_entry *entry);
+
+/**
+ * Type of function for user defined mlx5 list entry creation.
+ *
+ * @param list
+ *   The mlx5 list.
+ * @param entry
+ *   The newly allocated entry, or NULL if the list entry size is unspecified;
+ *   in that case the new entry has to be allocated by the callback and returned.
+ * @param ctx
+ *   The pointer to new entry context.
+ *
+ * @return
+ *   Pointer of entry on success, NULL otherwise.
+ */
+typedef struct mlx5_list_entry *(*mlx5_list_create_cb)
+				 (struct mlx5_list *list,
+				  struct mlx5_list_entry *entry,
+				  void *ctx);
+
+/**
+ * Linked mlx5 list structure.
+ *
+ * An entry in the mlx5 list can be reused if it already exists: its
+ * reference count is increased and the existing entry is returned.
+ *
+ * When an entry is removed from the list, its reference count is decreased
+ * and it is only destroyed once no reference remains.
+ *
+ * The linked list is designed for a limited number of entries that are
+ * mostly read and rarely modified.
+ *
+ * For a huge number of entries, please consider the hash list.
+ *
+ */
+struct mlx5_list {
+	char name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */
+	volatile uint32_t gen_cnt;
+	/* List modification will update generation count. */
+	volatile uint32_t count; /* number of entries in list. */
+	void *ctx; /* User context passed to the callbacks. */
+	rte_rwlock_t lock; /* read/write lock. */
+	mlx5_list_create_cb cb_create; /**< entry create callback. */
+	mlx5_list_match_cb cb_match; /**< entry match callback. */
+	mlx5_list_remove_cb cb_remove; /**< entry remove callback. */
+	mlx5_list_clone_cb cb_clone; /**< entry clone callback. */
+	mlx5_list_clone_free_cb cb_clone_free;
+	struct mlx5_list_cache cache[RTE_MAX_LCORE + 1];
+	/* Lcore cache, last index is the global cache. */
+};
+
+/**
+ * Create a mlx5 list.
+ *
+ * @param name
+ *   Name of the mlx5 list.
+ * @param ctx
+ *   Pointer to the list context data.
+ * @param cb_create
+ *   Callback function for entry creation.
+ * @param cb_match
+ *   Callback function for entry matching.
+ * @param cb_remove
+ *   Callback function for entry removal.
+ * @param cb_clone, cb_clone_free
+ *   Callback functions for entry clone and clone release.
+ * @return
+ *   List pointer on success, otherwise NULL.
+ */
+__rte_internal
+struct mlx5_list *mlx5_list_create(const char *name, void *ctx,
+				   mlx5_list_create_cb cb_create,
+				   mlx5_list_match_cb cb_match,
+				   mlx5_list_remove_cb cb_remove,
+				   mlx5_list_clone_cb cb_clone,
+				   mlx5_list_clone_free_cb cb_clone_free);
+
+/**
+ * Search an entry matching the key.
+ *
+ * The returned result might be destroyed by another thread, so this
+ * function must only be used in the main thread.
+ *
+ * @param list
+ *   Pointer to the mlx5 list.
+ * @param ctx
+ *   Common context parameter used by entry callback function.
+ *
+ * @return
+ *   Pointer of the list entry if found, NULL otherwise.
+ */
+__rte_internal
+struct mlx5_list_entry *mlx5_list_lookup(struct mlx5_list *list,
+					   void *ctx);
+
+/**
+ * Reuse or create an entry to the mlx5 list.
+ *
+ * @param list
+ *   Pointer to the mlx5 list.
+ * @param ctx
+ *   Common context parameter used by callback function.
+ *
+ * @return
+ *   Registered entry on success, NULL otherwise.
+ */
+__rte_internal
+struct mlx5_list_entry *mlx5_list_register(struct mlx5_list *list,
+					     void *ctx);
+
+/**
+ * Remove an entry from the mlx5 list.
+ *
+ * User should guarantee the validity of the entry.
+ *
+ * @param list
+ *   Pointer to the mlx5 list.
+ * @param entry
+ *   Entry to be removed from the mlx5 list.
+ * @return
+ *   0 on entry removed, 1 on entry still referenced.
+ */
+__rte_internal
+int mlx5_list_unregister(struct mlx5_list *list,
+			  struct mlx5_list_entry *entry);
+
+/**
+ * Destroy the mlx5 list.
+ *
+ * @param list
+ *   Pointer to the mlx5 list.
+ */
+__rte_internal
+void mlx5_list_destroy(struct mlx5_list *list);
+
+/**
+ * Get the number of entries in the mlx5 list.
+ *
+ * @param list
+ *   Pointer to the mlx5 list.
+ * @return
+ *   Number of entries in the mlx5 list.
+ */
+__rte_internal
+uint32_t
+mlx5_list_get_entry_num(struct mlx5_list *list);
+
+/************************ Hash list *****************************/
+
 #define MLX5_HLIST_DIRECT_KEY 0x0001 /* Use the key directly as hash index. */
 #define MLX5_HLIST_WRITE_MOST 0x0002 /* List mostly used for append new. */
 
diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map
index b8be73a77b..e6586d6f6f 100644
--- a/drivers/common/mlx5/version.map
+++ b/drivers/common/mlx5/version.map
@@ -73,6 +73,13 @@ INTERNAL {
 
 	mlx5_glue;
 
+	mlx5_list_create;
+	mlx5_list_register;
+	mlx5_list_unregister;
+	mlx5_list_lookup;
+	mlx5_list_get_entry_num;
+	mlx5_list_destroy;
+
 	mlx5_hlist_create;
 	mlx5_hlist_lookup;
 	mlx5_hlist_register;
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 0be778935f..e4e66ae4c5 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -8,257 +8,6 @@
 
 #include "mlx5_utils.h"
 
-
-/********************* mlx5 list ************************/
-
-struct mlx5_list *
-mlx5_list_create(const char *name, void *ctx,
-		 mlx5_list_create_cb cb_create,
-		 mlx5_list_match_cb cb_match,
-		 mlx5_list_remove_cb cb_remove,
-		 mlx5_list_clone_cb cb_clone,
-		 mlx5_list_clone_free_cb cb_clone_free)
-{
-	struct mlx5_list *list;
-	int i;
-
-	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
-	    !cb_clone_free) {
-		rte_errno = EINVAL;
-		return NULL;
-	}
-	list = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*list), 0, SOCKET_ID_ANY);
-	if (!list)
-		return NULL;
-	if (name)
-		snprintf(list->name, sizeof(list->name), "%s", name);
-	list->ctx = ctx;
-	list->cb_create = cb_create;
-	list->cb_match = cb_match;
-	list->cb_remove = cb_remove;
-	list->cb_clone = cb_clone;
-	list->cb_clone_free = cb_clone_free;
-	rte_rwlock_init(&list->lock);
-	DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
-	for (i = 0; i <= RTE_MAX_LCORE; i++)
-		LIST_INIT(&list->cache[i].h);
-	return list;
-}
-
-static struct mlx5_list_entry *
-__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
-{
-	struct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h);
-	uint32_t ret;
-
-	while (entry != NULL) {
-		if (list->cb_match(list, entry, ctx) == 0) {
-			if (reuse) {
-				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
-							 __ATOMIC_RELAXED) - 1;
-				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
-					list->name, (void *)entry,
-					entry->ref_cnt);
-			} else if (lcore_index < RTE_MAX_LCORE) {
-				ret = __atomic_load_n(&entry->ref_cnt,
-						      __ATOMIC_RELAXED);
-			}
-			if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
-				return entry;
-			if (reuse && ret == 0)
-				entry->ref_cnt--; /* Invalid entry. */
-		}
-		entry = LIST_NEXT(entry, next);
-	}
-	return NULL;
-}
-
-struct mlx5_list_entry *
-mlx5_list_lookup(struct mlx5_list *list, void *ctx)
-{
-	struct mlx5_list_entry *entry = NULL;
-	int i;
-
-	rte_rwlock_read_lock(&list->lock);
-	for (i = 0; i < RTE_MAX_LCORE; i++) {
-		entry = __list_lookup(list, i, ctx, false);
-		if (entry)
-			break;
-	}
-	rte_rwlock_read_unlock(&list->lock);
-	return entry;
-}
-
-static struct mlx5_list_entry *
-mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,
-		       struct mlx5_list_entry *gentry, void *ctx)
-{
-	struct mlx5_list_entry *lentry = list->cb_clone(list, gentry, ctx);
-
-	if (unlikely(!lentry))
-		return NULL;
-	lentry->ref_cnt = 1u;
-	lentry->gentry = gentry;
-	lentry->lcore_idx = (uint32_t)lcore_index;
-	LIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);
-	return lentry;
-}
-
-static void
-__list_cache_clean(struct mlx5_list *list, int lcore_index)
-{
-	struct mlx5_list_cache *c = &list->cache[lcore_index];
-	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
-	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
-					       __ATOMIC_RELAXED);
-
-	while (inv_cnt != 0 && entry != NULL) {
-		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
-
-		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
-			LIST_REMOVE(entry, next);
-			list->cb_clone_free(list, entry);
-			inv_cnt--;
-		}
-		entry = nentry;
-	}
-}
-
-struct mlx5_list_entry *
-mlx5_list_register(struct mlx5_list *list, void *ctx)
-{
-	struct mlx5_list_entry *entry, *local_entry;
-	volatile uint32_t prev_gen_cnt = 0;
-	int lcore_index = rte_lcore_index(rte_lcore_id());
-
-	MLX5_ASSERT(list);
-	MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
-	if (unlikely(lcore_index == -1)) {
-		rte_errno = ENOTSUP;
-		return NULL;
-	}
-	/* 0. Free entries that was invalidated by other lcores. */
-	__list_cache_clean(list, lcore_index);
-	/* 1. Lookup in local cache. */
-	local_entry = __list_lookup(list, lcore_index, ctx, true);
-	if (local_entry)
-		return local_entry;
-	/* 2. Lookup with read lock on global list, reuse if found. */
-	rte_rwlock_read_lock(&list->lock);
-	entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
-	if (likely(entry)) {
-		rte_rwlock_read_unlock(&list->lock);
-		return mlx5_list_cache_insert(list, lcore_index, entry, ctx);
-	}
-	prev_gen_cnt = list->gen_cnt;
-	rte_rwlock_read_unlock(&list->lock);
-	/* 3. Prepare new entry for global list and for cache. */
-	entry = list->cb_create(list, entry, ctx);
-	if (unlikely(!entry))
-		return NULL;
-	local_entry = list->cb_clone(list, entry, ctx);
-	if (unlikely(!local_entry)) {
-		list->cb_remove(list, entry);
-		return NULL;
-	}
-	entry->ref_cnt = 1u;
-	local_entry->ref_cnt = 1u;
-	local_entry->gentry = entry;
-	local_entry->lcore_idx = (uint32_t)lcore_index;
-	rte_rwlock_write_lock(&list->lock);
-	/* 4. Make sure the same entry was not created before the write lock. */
-	if (unlikely(prev_gen_cnt != list->gen_cnt)) {
-		struct mlx5_list_entry *oentry = __list_lookup(list,
-							       RTE_MAX_LCORE,
-							       ctx, true);
-
-		if (unlikely(oentry)) {
-			/* 4.5. Found real race!!, reuse the old entry. */
-			rte_rwlock_write_unlock(&list->lock);
-			list->cb_remove(list, entry);
-			list->cb_clone_free(list, local_entry);
-			return mlx5_list_cache_insert(list, lcore_index, oentry,
-						      ctx);
-		}
-	}
-	/* 5. Update lists. */
-	LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next);
-	list->gen_cnt++;
-	rte_rwlock_write_unlock(&list->lock);
-	LIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);
-	__atomic_add_fetch(&list->count, 1, __ATOMIC_RELAXED);
-	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name,
-		(void *)entry, entry->ref_cnt);
-	return local_entry;
-}
-
-int
-mlx5_list_unregister(struct mlx5_list *list,
-		      struct mlx5_list_entry *entry)
-{
-	struct mlx5_list_entry *gentry = entry->gentry;
-	int lcore_idx;
-
-	if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
-		return 1;
-	lcore_idx = rte_lcore_index(rte_lcore_id());
-	MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
-	if (entry->lcore_idx == (uint32_t)lcore_idx) {
-		LIST_REMOVE(entry, next);
-		list->cb_clone_free(list, entry);
-	} else if (likely(lcore_idx != -1)) {
-		__atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,
-				   __ATOMIC_RELAXED);
-	} else {
-		return 0;
-	}
-	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
-		return 1;
-	rte_rwlock_write_lock(&list->lock);
-	if (likely(gentry->ref_cnt == 0)) {
-		LIST_REMOVE(gentry, next);
-		rte_rwlock_write_unlock(&list->lock);
-		list->cb_remove(list, gentry);
-		__atomic_sub_fetch(&list->count, 1, __ATOMIC_RELAXED);
-		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
-			list->name, (void *)gentry);
-		return 0;
-	}
-	rte_rwlock_write_unlock(&list->lock);
-	return 1;
-}
-
-void
-mlx5_list_destroy(struct mlx5_list *list)
-{
-	struct mlx5_list_entry *entry;
-	int i;
-
-	MLX5_ASSERT(list);
-	for (i = 0; i <= RTE_MAX_LCORE; i++) {
-		while (!LIST_EMPTY(&list->cache[i].h)) {
-			entry = LIST_FIRST(&list->cache[i].h);
-			LIST_REMOVE(entry, next);
-			if (i == RTE_MAX_LCORE) {
-				list->cb_remove(list, entry);
-				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
-					"destroyed.", list->name,
-					(void *)entry);
-			} else {
-				list->cb_clone_free(list, entry);
-			}
-		}
-	}
-	mlx5_free(list);
-}
-
-uint32_t
-mlx5_list_get_entry_num(struct mlx5_list *list)
-{
-	MLX5_ASSERT(list);
-	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
-}
-
 /********************* Indexed pool **********************/
 
 static inline void
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index ea64bb75c9..cf3db89403 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -297,203 +297,6 @@ log2above(unsigned int v)
 	return l + r;
 }
 
-/************************ mlx5 list *****************************/
-
-/** Maximum size of string for naming. */
-#define MLX5_NAME_SIZE			32
-
-struct mlx5_list;
-
-/**
- * Structure of the entry in the mlx5 list, user should define its own struct
- * that contains this in order to store the data.
- */
-struct mlx5_list_entry {
-	LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
-	uint32_t ref_cnt; /* 0 means, entry is invalid. */
-	uint32_t lcore_idx;
-	struct mlx5_list_entry *gentry;
-};
-
-struct mlx5_list_cache {
-	LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
-	uint32_t inv_cnt; /* Invalid entries counter. */
-} __rte_cache_aligned;
-
-/**
- * Type of callback function for entry removal.
- *
- * @param list
- *   The mlx5 list.
- * @param entry
- *   The entry in the list.
- */
-typedef void (*mlx5_list_remove_cb)(struct mlx5_list *list,
-				     struct mlx5_list_entry *entry);
-
-/**
- * Type of function for user defined matching.
- *
- * @param list
- *   The mlx5 list.
- * @param entry
- *   The entry in the list.
- * @param ctx
- *   The pointer to new entry context.
- *
- * @return
- *   0 if matching, non-zero number otherwise.
- */
-typedef int (*mlx5_list_match_cb)(struct mlx5_list *list,
-				   struct mlx5_list_entry *entry, void *ctx);
-
-typedef struct mlx5_list_entry *(*mlx5_list_clone_cb)
-				 (struct mlx5_list *list,
-				  struct mlx5_list_entry *entry, void *ctx);
-
-typedef void (*mlx5_list_clone_free_cb)(struct mlx5_list *list,
-					 struct mlx5_list_entry *entry);
-
-/**
- * Type of function for user defined mlx5 list entry creation.
- *
- * @param list
- *   The mlx5 list.
- * @param entry
- *   The new allocated entry, NULL if list entry size unspecified,
- *   New entry has to be allocated in callback and return.
- * @param ctx
- *   The pointer to new entry context.
- *
- * @return
- *   Pointer of entry on success, NULL otherwise.
- */
-typedef struct mlx5_list_entry *(*mlx5_list_create_cb)
-				 (struct mlx5_list *list,
-				  struct mlx5_list_entry *entry,
-				  void *ctx);
-
-/**
- * Linked mlx5 list structure.
- *
- * Entry in mlx5 list could be reused if entry already exists,
- * reference count will increase and the existing entry returns.
- *
- * When destroy an entry from list, decrease reference count and only
- * destroy when no further reference.
- *
- * Linked list is designed for limited number of entries,
- * read mostly, less modification.
- *
- * For huge amount of entries, please consider hash list.
- *
- */
-struct mlx5_list {
-	char name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */
-	volatile uint32_t gen_cnt;
-	/* List modification will update generation count. */
-	volatile uint32_t count; /* number of entries in list. */
-	void *ctx; /* user objects target to callback. */
-	rte_rwlock_t lock; /* read/write lock. */
-	mlx5_list_create_cb cb_create; /**< entry create callback. */
-	mlx5_list_match_cb cb_match; /**< entry match callback. */
-	mlx5_list_remove_cb cb_remove; /**< entry remove callback. */
-	mlx5_list_clone_cb cb_clone; /**< entry clone callback. */
-	mlx5_list_clone_free_cb cb_clone_free;
-	struct mlx5_list_cache cache[RTE_MAX_LCORE + 1];
-	/* Lcore cache, last index is the global cache. */
-};
-
-/**
- * Create a mlx5 list.
- *
- * @param list
- *   Pointer to the hast list table.
- * @param name
- *   Name of the mlx5 list.
- * @param ctx
- *   Pointer to the list context data.
- * @param cb_create
- *   Callback function for entry create.
- * @param cb_match
- *   Callback function for entry match.
- * @param cb_remove
- *   Callback function for entry remove.
- * @return
- *   List pointer on success, otherwise NULL.
- */
-struct mlx5_list *mlx5_list_create(const char *name, void *ctx,
-				   mlx5_list_create_cb cb_create,
-				   mlx5_list_match_cb cb_match,
-				   mlx5_list_remove_cb cb_remove,
-				   mlx5_list_clone_cb cb_clone,
-				   mlx5_list_clone_free_cb cb_clone_free);
-
-/**
- * Search an entry matching the key.
- *
- * Result returned might be destroyed by other thread, must use
- * this function only in main thread.
- *
- * @param list
- *   Pointer to the mlx5 list.
- * @param ctx
- *   Common context parameter used by entry callback function.
- *
- * @return
- *   Pointer of the list entry if found, NULL otherwise.
- */
-struct mlx5_list_entry *mlx5_list_lookup(struct mlx5_list *list,
-					   void *ctx);
-
-/**
- * Reuse or create an entry to the mlx5 list.
- *
- * @param list
- *   Pointer to the hast list table.
- * @param ctx
- *   Common context parameter used by callback function.
- *
- * @return
- *   registered entry on success, NULL otherwise
- */
-struct mlx5_list_entry *mlx5_list_register(struct mlx5_list *list,
-					     void *ctx);
-
-/**
- * Remove an entry from the mlx5 list.
- *
- * User should guarantee the validity of the entry.
- *
- * @param list
- *   Pointer to the hast list.
- * @param entry
- *   Entry to be removed from the mlx5 list table.
- * @return
- *   0 on entry removed, 1 on entry still referenced.
- */
-int mlx5_list_unregister(struct mlx5_list *list,
-			  struct mlx5_list_entry *entry);
-
-/**
- * Destroy the mlx5 list.
- *
- * @param list
- *   Pointer to the mlx5 list.
- */
-void mlx5_list_destroy(struct mlx5_list *list);
-
-/**
- * Get entry number from the mlx5 list.
- *
- * @param list
- *   Pointer to the hast list.
- * @return
- *   mlx5 list entry number.
- */
-uint32_t
-mlx5_list_get_entry_num(struct mlx5_list *list);
-
 /********************************* indexed pool *************************/
 
 /**
-- 
2.25.1


