[dpdk-dev] [PATCH 15/25] net/mlx5: introduce thread safe linked list cache

Suanming Mou suanmingm at nvidia.com
Tue Oct 6 13:48:58 CEST 2020


From: Xueming Li <xuemingl at nvidia.com>

New linked-list API for caching:
- optimized for cache lists with a small number of entries
- optimized for read-mostly lists
- thread safe
- since the number of entries is limited, entries are allocated by the API
- for dynamic entry size, pass 0 as the entry size, then the creation
callback allocates the entry
- since the number of entries is limited, there is no need to use an
indexed pool to allocate memory; the API removes entries and frees them
with mlx5_free
- the search API is not supposed to be used from multiple threads

An illustrative usage sketch is included after the diffstat below.

Signed-off-by: Xueming Li <xuemingl at nvidia.com>
---
 drivers/net/mlx5/mlx5_utils.c | 170 +++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_utils.h | 172 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 342 insertions(+)
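
Illustrative usage sketch (not part of the patch): the my_* names, the
"my_cache" list name and the key value below are made up for this example.
It assumes the caller passes entry size 0 so that the create callback owns
the allocation, and embeds struct mlx5_cache_entry as the first member of
its own object. It also assumes the mlx5 PMD headers (mlx5_utils.h,
mlx5_malloc.h) are included.

/* Hypothetical user object embedding the cache list entry. */
struct my_cached_obj {
	struct mlx5_cache_entry entry; /* First member, so entry casts back. */
	uint32_t key; /* User data consumed by the match callback. */
};

/* Return 0 on match, non-zero otherwise, as the match callback requires. */
static int
my_match_cb(struct mlx5_cache_list *list __rte_unused,
	    struct mlx5_cache_entry *entry, void *ctx)
{
	struct my_cached_obj *obj = (struct my_cached_obj *)entry;

	return obj->key != *(uint32_t *)ctx;
}

/* Allocate and initialize a new entry: entry size 0 means allocate here. */
static struct mlx5_cache_entry *
my_create_cb(struct mlx5_cache_list *list __rte_unused,
	     struct mlx5_cache_entry *entry __rte_unused, void *ctx)
{
	struct my_cached_obj *obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*obj),
						0, SOCKET_ID_ANY);

	if (!obj)
		return NULL;
	obj->key = *(uint32_t *)ctx;
	return &obj->entry;
}

/* Free the memory allocated by the create callback. */
static void
my_remove_cb(struct mlx5_cache_list *list __rte_unused,
	     struct mlx5_cache_entry *entry)
{
	mlx5_free(entry);
}

static void
my_cache_usage(void)
{
	struct mlx5_cache_list list;
	uint32_t key = 42;
	struct mlx5_cache_entry *e;

	/* Entry size 0: the create callback allocates each entry. */
	mlx5_cache_list_init(&list, "my_cache", 0, NULL,
			     my_create_cb, my_match_cb, my_remove_cb);
	e = mlx5_cache_register(&list, &key); /* Create or reuse, ref_cnt++. */
	if (e)
		mlx5_cache_unregister(&list, e); /* ref_cnt--, freed at 0. */
	mlx5_cache_list_destroy(&list);
}

Every successful mlx5_cache_register() call must be balanced by one
mlx5_cache_unregister() call; the entry is removed from the list and
released only when the last reference is dropped.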

diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 387a988..c0b4ae5 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -223,6 +223,176 @@ struct mlx5_hlist_entry*
 	mlx5_free(h);
 }
 
+/********************* Cache list ************************/
+
+static struct mlx5_cache_entry *
+mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
+			     struct mlx5_cache_entry *entry __rte_unused,
+			     void *ctx __rte_unused)
+{
+	return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
+}
+
+static void
+mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
+			     struct mlx5_cache_entry *entry)
+{
+	mlx5_free(entry);
+}
+
+int
+mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
+		     uint32_t entry_size, void *ctx,
+		     mlx5_cache_create_cb cb_create,
+		     mlx5_cache_match_cb cb_match,
+		     mlx5_cache_remove_cb cb_remove)
+{
+	MLX5_ASSERT(list);
+	if (!cb_match || (!cb_create ^ !cb_remove))
+		return -1;
+	if (name)
+		snprintf(list->name, sizeof(list->name), "%s", name);
+	list->entry_sz = entry_size;
+	list->ctx = ctx;
+	list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
+	list->cb_match = cb_match;
+	list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
+	rte_rwlock_init(&list->lock);
+	DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
+	LIST_INIT(&list->head);
+	return 0;
+}
+
+static struct mlx5_cache_entry *
+__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+{
+	struct mlx5_cache_entry *entry;
+
+	LIST_FOREACH(entry, &list->head, next) {
+		if (!__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED))
+			/* Ignore entry in the middle of removal. */
+			continue;
+		if (list->cb_match(list, entry, ctx))
+			continue;
+		if (reuse) {
+			__atomic_add_fetch(&entry->ref_cnt, 1,
+					   __ATOMIC_RELAXED);
+			DRV_LOG(DEBUG, "cache list %s entry %p ref++: %u",
+				list->name, (void *)entry, entry->ref_cnt);
+		}
+		break;
+	}
+	return entry;
+}
+
+static struct mlx5_cache_entry *
+cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+{
+	struct mlx5_cache_entry *entry;
+
+	rte_rwlock_read_lock(&list->lock);
+	entry = __cache_lookup(list, ctx, reuse);
+	rte_rwlock_read_unlock(&list->lock);
+	return entry;
+}
+
+struct mlx5_cache_entry *
+mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
+{
+	return __cache_lookup(list, ctx, false);
+}
+
+struct mlx5_cache_entry *
+mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
+{
+	struct mlx5_cache_entry *entry;
+	uint32_t prev_gen_cnt = 0;
+
+	MLX5_ASSERT(list);
+	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
+	/* Lookup with read lock, reuse if found. */
+	entry = cache_lookup(list, ctx, true);
+	if (entry)
+		return entry;
+	/* Not found, append with write lock - block read from other threads. */
+	rte_rwlock_write_lock(&list->lock);
+	/* If list changed by other threads before lock, search again. */
+	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
+		/* Lookup and reuse w/o read lock */
+		entry = __cache_lookup(list, ctx, true);
+		if (entry)
+			goto done;
+	}
+	entry = list->cb_create(list, entry, ctx);
+	if (!entry) {
+		/*
+		 * The creation callback failed and allocated nothing on
+		 * behalf of the list, so there is nothing to release
+		 * here.
+		 */
+		DRV_LOG(ERR, "Failed to create cache list %s entry.",
+			list->name);
+		goto done;
+	}
+	entry->ref_cnt = 1;
+	LIST_INSERT_HEAD(&list->head, entry, next);
+	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
+	DRV_LOG(DEBUG, "cache list %s entry %p new: %u",
+		list->name, (void *)entry, entry->ref_cnt);
+done:
+	rte_rwlock_write_unlock(&list->lock);
+	return entry;
+}
+
+int
+mlx5_cache_unregister(struct mlx5_cache_list *list,
+		      struct mlx5_cache_entry *entry)
+{
+	uint32_t ref_cnt;
+
+	MLX5_ASSERT(entry && entry->next.le_prev);
+	MLX5_ASSERT(__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED));
+
+	ref_cnt = __atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_ACQ_REL);
+	DRV_LOG(DEBUG, "cache list %s entry %p ref--: %u",
+		list->name, (void *)entry, ref_cnt);
+	if (ref_cnt)
+		return 1;
+	rte_rwlock_write_lock(&list->lock);
+	if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED)) {
+		rte_rwlock_write_unlock(&list->lock);
+		return 1;
+	}
+	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
+	LIST_REMOVE(entry, next);
+	list->cb_remove(list, entry);
+	rte_rwlock_write_unlock(&list->lock);
+	DRV_LOG(DEBUG, "cache list %s entry %p removed",
+		list->name, (void *)entry);
+	return 0;
+}
+
+void
+mlx5_cache_list_destroy(struct mlx5_cache_list *list)
+{
+	struct mlx5_cache_entry *entry;
+
+	MLX5_ASSERT(list);
+	if (__atomic_load_n(&list->count, __ATOMIC_RELAXED)) {
+		/* No LIST_FOREACH_SAFE, using while instead. */
+		while (!LIST_EMPTY(&list->head)) {
+			entry = LIST_FIRST(&list->head);
+			LIST_REMOVE(entry, next);
+			list->cb_remove(list, entry);
+			DRV_LOG(DEBUG, "cache list %s entry %p destroyed",
+				list->name, (void *)entry);
+		}
+	}
+	memset(list, 0, sizeof(*list));
+}
+
 /********************* Indexed pool **********************/
 
 static inline void
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 479dd10..5c39f98 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -422,6 +422,178 @@ struct mlx5_hlist_entry *mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key,
  */
 void mlx5_hlist_destroy(struct mlx5_hlist *h);
 
+/************************ cache list *****************************/
+
+/** Maximum size of string for naming. */
+#define MLX5_NAME_SIZE			32
+
+struct mlx5_cache_list;
+
+/**
+ * Structure of the entry in the cache list. The user should define its own
+ * structure that embeds this one in order to store the data.
+ */
+struct mlx5_cache_entry {
+	LIST_ENTRY(mlx5_cache_entry) next; /* entry pointers in the list. */
+	uint32_t ref_cnt; /* reference count. */
+};
+
+/**
+ * Type of callback function for entry removal.
+ *
+ * @param list
+ *   The cache list.
+ * @param entry
+ *   The entry in the list.
+ */
+typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list,
+				     struct mlx5_cache_entry *entry);
+
+/**
+ * Type of function for user defined matching.
+ *
+ * @param list
+ *   The cache list.
+ * @param entry
+ *   The entry in the list.
+ * @param ctx
+ *   The pointer to new entry context.
+ *
+ * @return
+ *   0 if matching, non-zero number otherwise.
+ */
+typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list,
+				   struct mlx5_cache_entry *entry, void *ctx);
+
+/**
+ * Type of function for user defined cache list entry creation.
+ *
+ * @param list
+ *   The cache list.
+ * @param entry
+ *   The newly allocated entry, NULL if the list entry size is unspecified;
+ *   in that case the new entry has to be allocated by the callback.
+ * @param ctx
+ *   The pointer to new entry context.
+ *
+ * @return
+ *   Pointer of entry on success, NULL otherwise.
+ */
+typedef struct mlx5_cache_entry *(*mlx5_cache_create_cb)
+				 (struct mlx5_cache_list *list,
+				  struct mlx5_cache_entry *entry,
+				  void *ctx);
+
+/**
+ * Linked cache list structure.
+ *
+ * An entry in the cache list is reused if it already exists: its
+ * reference count is increased and the existing entry is returned.
+ *
+ * When an entry is released, its reference count is decreased and the
+ * entry is destroyed only when no references remain.
+ *
+ * The linked list cache is designed for a limited number of entries
+ * that are mostly read and rarely modified.
+ *
+ * For caches with a huge number of entries, consider the hash list
+ * instead.
+ */
+struct mlx5_cache_list {
+	char name[MLX5_NAME_SIZE]; /**< Name of the cache list. */
+	uint32_t entry_sz; /**< Entry size, 0: use create callback. */
+	rte_rwlock_t lock; /**< Read/write lock. */
+	uint32_t gen_cnt; /**< List modification generation count. */
+	uint32_t count; /**< Number of entries in the list. */
+	void *ctx; /**< User context passed to the callbacks. */
+	mlx5_cache_create_cb cb_create; /**< Entry create callback. */
+	mlx5_cache_match_cb cb_match; /**< Entry match callback. */
+	mlx5_cache_remove_cb cb_remove; /**< Entry remove callback. */
+	LIST_HEAD(mlx5_cache_head, mlx5_cache_entry) head;
+};
+
+/**
+ * Initialize a cache list.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ * @param name
+ *   Name of the cache list.
+ * @param entry_size
+ *   Entry size to allocate, 0 to allocate by creation callback.
+ * @param ctx
+ *   Pointer to the list context data.
+ * @param cb_create
+ *   Callback function for entry create.
+ * @param cb_match
+ *   Callback function for entry match.
+ * @param cb_remove
+ *   Callback function for entry remove.
+ * @return
+ *   0 on success, otherwise failure.
+ */
+int mlx5_cache_list_init(struct mlx5_cache_list *list,
+			 const char *name, uint32_t entry_size, void *ctx,
+			 mlx5_cache_create_cb cb_create,
+			 mlx5_cache_match_cb cb_match,
+			 mlx5_cache_remove_cb cb_remove);
+
+/**
+ * Search for an entry that matches the given context.
+ *
+ * The returned entry might be destroyed by other threads, so this
+ * function must only be used from the main thread.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ * @param ctx
+ *   Common context parameter used by entry callback function.
+ *
+ * @return
+ *   Pointer of the cache entry if found, NULL otherwise.
+ */
+struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list,
+					   void *ctx);
+
+/**
+ * Reuse or create an entry to the cache list.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ * @param ctx
+ *   Common context parameter used by callback function.
+ *
+ * @return
+ *   Registered entry on success, NULL otherwise.
+ */
+struct mlx5_cache_entry *mlx5_cache_register(struct mlx5_cache_list *list,
+					     void *ctx);
+
+/**
+ * Remove an entry from the cache list.
+ *
+ * User should guarantee the validity of the entry.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ * @param entry
+ *   Entry to be removed from the cache list.
+ * @return
+ *   0 on entry removed, 1 on entry still referenced.
+ */
+int mlx5_cache_unregister(struct mlx5_cache_list *list,
+			  struct mlx5_cache_entry *entry);
+
+/**
+ * Destroy the cache list.
+ *
+ * @param list
+ *   Pointer to the cache list.
+ */
+void mlx5_cache_list_destroy(struct mlx5_cache_list *list);
+
+/********************************* indexed pool *************************/
+
 /**
  * This function allocates non-initialized memory entry from pool.
  * In NUMA systems, the memory entry allocated resides on the same
-- 
1.8.3.1


