[dpdk-dev] [PATCH v4 07/26] net/mlx5: remove cache term from the list utility

Suanming Mou suanmingm at nvidia.com
Tue Jul 6 15:32:38 CEST 2021


From: Matan Azrad <matan at nvidia.com>

The internal mlx5 list tool is used mainly when the list objects need to
be synchronized between multiple threads.

The "cache" term is used in the internal mlx5 list API.

Coming enhancements to this tool will use the "cache" term for
per-thread cache management.

To prevent confusion, remove the current "cache" term from the API
names.
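
For illustration only (not part of the patch), a minimal sketch of the
renamed API as used in the hunks below; the "my_*" callback names and
their bodies are placeholders, only the mlx5_list_* calls and callback
signatures come from this patch:

    /* Callbacks keep the same roles, only the types are renamed. */
    static int
    my_match_cb(struct mlx5_list *list, struct mlx5_list_entry *entry,
                void *cb_ctx)
    {
        /* Return 0 when the entry matches the data in cb_ctx. */
        return 0;
    }

    static struct mlx5_list_entry *
    my_create_cb(struct mlx5_list *list, struct mlx5_list_entry *entry,
                 void *cb_ctx)
    {
        /* Allocate an object embedding mlx5_list_entry, return &obj->entry. */
        return NULL;
    }

    static void
    my_remove_cb(struct mlx5_list *list, struct mlx5_list_entry *entry)
    {
        /* Free the object that embeds the entry. */
    }

    /* mlx5_cache_list_init() becomes mlx5_list_create(). */
    mlx5_list_create(&some_list, "example_list", 0, ctx,
                     my_create_cb, my_match_cb, my_remove_cb);
    /* mlx5_cache_register()/mlx5_cache_unregister() become
     * mlx5_list_register()/mlx5_list_unregister(). */
    entry = mlx5_list_register(&some_list, &cb_ctx);
    mlx5_list_unregister(&some_list, entry);
    /* mlx5_cache_list_destroy() becomes mlx5_list_destroy(). */
    mlx5_list_destroy(&some_list);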

Signed-off-by: Matan Azrad <matan at nvidia.com>
Acked-by: Suanming Mou <suanmingm at nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c   |  32 +-
 drivers/net/mlx5/mlx5.c            |   2 +-
 drivers/net/mlx5/mlx5.h            |  15 +-
 drivers/net/mlx5/mlx5_flow.h       |  88 ++---
 drivers/net/mlx5/mlx5_flow_dv.c    | 558 ++++++++++++++---------------
 drivers/net/mlx5/mlx5_rx.h         |  12 +-
 drivers/net/mlx5/mlx5_rxq.c        |  28 +-
 drivers/net/mlx5/mlx5_utils.c      |  78 ++--
 drivers/net/mlx5/mlx5_utils.h      |  94 ++---
 drivers/net/mlx5/windows/mlx5_os.c |   7 +-
 10 files changed, 454 insertions(+), 460 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 4b050b4f4a..57b0a1c57f 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -272,27 +272,27 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 		goto error;
 	/* The resources below are only valid with DV support. */
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	/* Init port id action cache list. */
-	snprintf(s, sizeof(s), "%s_port_id_action_cache", sh->ibdev_name);
-	mlx5_cache_list_init(&sh->port_id_action_list, s, 0, sh,
+	/* Init port id action mlx5 list. */
+	snprintf(s, sizeof(s), "%s_port_id_action_list", sh->ibdev_name);
+	mlx5_list_create(&sh->port_id_action_list, s, 0, sh,
 			     flow_dv_port_id_create_cb,
 			     flow_dv_port_id_match_cb,
 			     flow_dv_port_id_remove_cb);
-	/* Init push vlan action cache list. */
-	snprintf(s, sizeof(s), "%s_push_vlan_action_cache", sh->ibdev_name);
-	mlx5_cache_list_init(&sh->push_vlan_action_list, s, 0, sh,
+	/* Init push vlan action mlx5 list. */
+	snprintf(s, sizeof(s), "%s_push_vlan_action_list", sh->ibdev_name);
+	mlx5_list_create(&sh->push_vlan_action_list, s, 0, sh,
 			     flow_dv_push_vlan_create_cb,
 			     flow_dv_push_vlan_match_cb,
 			     flow_dv_push_vlan_remove_cb);
-	/* Init sample action cache list. */
-	snprintf(s, sizeof(s), "%s_sample_action_cache", sh->ibdev_name);
-	mlx5_cache_list_init(&sh->sample_action_list, s, 0, sh,
+	/* Init sample action mlx5 list. */
+	snprintf(s, sizeof(s), "%s_sample_action_list", sh->ibdev_name);
+	mlx5_list_create(&sh->sample_action_list, s, 0, sh,
 			     flow_dv_sample_create_cb,
 			     flow_dv_sample_match_cb,
 			     flow_dv_sample_remove_cb);
-	/* Init dest array action cache list. */
-	snprintf(s, sizeof(s), "%s_dest_array_cache", sh->ibdev_name);
-	mlx5_cache_list_init(&sh->dest_array_list, s, 0, sh,
+	/* Init dest array action mlx5 list. */
+	snprintf(s, sizeof(s), "%s_dest_array_list", sh->ibdev_name);
+	mlx5_list_create(&sh->dest_array_list, s, 0, sh,
 			     flow_dv_dest_array_create_cb,
 			     flow_dv_dest_array_match_cb,
 			     flow_dv_dest_array_remove_cb);
@@ -500,8 +500,8 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)
 		mlx5_release_tunnel_hub(sh, priv->dev_port);
 		sh->tunnel_hub = NULL;
 	}
-	mlx5_cache_list_destroy(&sh->port_id_action_list);
-	mlx5_cache_list_destroy(&sh->push_vlan_action_list);
+	mlx5_list_destroy(&sh->port_id_action_list);
+	mlx5_list_destroy(&sh->push_vlan_action_list);
 	mlx5_free_table_hash_list(priv);
 }
 
@@ -1704,7 +1704,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			err = ENOTSUP;
 			goto error;
 	}
-	mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
+	mlx5_list_create(&priv->hrxqs, "hrxq", 0, eth_dev,
 			     mlx5_hrxq_create_cb,
 			     mlx5_hrxq_match_cb,
 			     mlx5_hrxq_remove_cb);
@@ -1765,7 +1765,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			mlx5_l3t_destroy(priv->mtr_profile_tbl);
 		if (own_domain_id)
 			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
-		mlx5_cache_list_destroy(&priv->hrxqs);
+		mlx5_list_destroy(&priv->hrxqs);
 		mlx5_free(priv);
 		if (eth_dev != NULL)
 			eth_dev->data->dev_private = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 2c99da0fac..f51be5ace6 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1609,7 +1609,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	if (ret)
 		DRV_LOG(WARNING, "port %u some flows still remain",
 			dev->data->port_id);
-	mlx5_cache_list_destroy(&priv->hrxqs);
+	mlx5_list_destroy(&priv->hrxqs);
 	/*
 	 * Free the shared context in last turn, because the cleanup
 	 * routines above may use some shared fields, like
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index b196fd365f..bf1fbb530b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -79,7 +79,7 @@ enum mlx5_flow_type {
 	MLX5_FLOW_TYPE_MAXI,
 };
 
-/* Hash and cache list callback context. */
+/* Hlist and list callback context. */
 struct mlx5_flow_cb_ctx {
 	struct rte_eth_dev *dev;
 	struct rte_flow_error *error;
@@ -1114,10 +1114,10 @@ struct mlx5_dev_ctx_shared {
 	struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
 	struct mlx5_hlist *modify_cmds;
 	struct mlx5_hlist *tag_table;
-	struct mlx5_cache_list port_id_action_list; /* Port ID action cache. */
-	struct mlx5_cache_list push_vlan_action_list; /* Push VLAN actions. */
-	struct mlx5_cache_list sample_action_list; /* List of sample actions. */
-	struct mlx5_cache_list dest_array_list;
+	struct mlx5_list port_id_action_list; /* Port ID action list. */
+	struct mlx5_list push_vlan_action_list; /* Push VLAN actions. */
+	struct mlx5_list sample_action_list; /* List of sample actions. */
+	struct mlx5_list dest_array_list;
 	/* List of destination array actions. */
 	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
 	void *default_miss_action; /* Default miss action. */
@@ -1221,7 +1221,7 @@ struct mlx5_ind_table_obj {
 /* Hash Rx queue. */
 __extension__
 struct mlx5_hrxq {
-	struct mlx5_cache_entry entry; /* Cache entry. */
+	struct mlx5_list_entry entry; /* List entry. */
 	uint32_t standalone:1; /* This object used in shared action. */
 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
 	RTE_STD_C11
@@ -1359,7 +1359,7 @@ struct mlx5_priv {
 	struct mlx5_obj_ops obj_ops; /* HW objects operations. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqobj, mlx5_rxq_obj) rxqsobj; /* Verbs/DevX Rx queues. */
-	struct mlx5_cache_list hrxqs; /* Hash Rx queues. */
+	struct mlx5_list hrxqs; /* Hash Rx queues. */
 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
 	LIST_HEAD(txqobj, mlx5_txq_obj) txqsobj; /* Verbs/DevX Tx queues. */
 	/* Indirection tables. */
@@ -1369,7 +1369,6 @@ struct mlx5_priv {
 	/**< Verbs modify header action object. */
 	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
 	uint8_t max_lro_msg_size;
-	/* Tags resources cache. */
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
 	struct mlx5_stats_ctrl stats_ctrl; /* Stats control. */
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 81c95e0beb..4dec703366 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -467,7 +467,7 @@ struct mlx5_flow_dv_match_params {
 
 /* Matcher structure. */
 struct mlx5_flow_dv_matcher {
-	struct mlx5_cache_entry entry; /**< Pointer to the next element. */
+	struct mlx5_list_entry entry; /**< Pointer to the next element. */
 	struct mlx5_flow_tbl_resource *tbl;
 	/**< Pointer to the table(group) the matcher associated with. */
 	void *matcher_object; /**< Pointer to DV matcher */
@@ -547,7 +547,7 @@ struct mlx5_flow_dv_jump_tbl_resource {
 
 /* Port ID resource structure. */
 struct mlx5_flow_dv_port_id_action_resource {
-	struct mlx5_cache_entry entry;
+	struct mlx5_list_entry entry;
 	void *action; /**< Action object. */
 	uint32_t port_id; /**< Port ID value. */
 	uint32_t idx; /**< Indexed pool memory index. */
@@ -555,7 +555,7 @@ struct mlx5_flow_dv_port_id_action_resource {
 
 /* Push VLAN action resource structure */
 struct mlx5_flow_dv_push_vlan_action_resource {
-	struct mlx5_cache_entry entry; /* Cache entry. */
+	struct mlx5_list_entry entry; /* Cache entry. */
 	void *action; /**< Action object. */
 	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
 	rte_be32_t vlan_tag; /**< VLAN tag value. */
@@ -590,7 +590,7 @@ struct mlx5_flow_tbl_data_entry {
 	/**< hash list entry, 64-bits key inside. */
 	struct mlx5_flow_tbl_resource tbl;
 	/**< flow table resource. */
-	struct mlx5_cache_list matchers;
+	struct mlx5_list matchers;
 	/**< matchers' header associated with the flow table. */
 	struct mlx5_flow_dv_jump_tbl_resource jump;
 	/**< jump resource, at most one for each table created. */
@@ -631,7 +631,7 @@ struct mlx5_flow_sub_actions_idx {
 
 /* Sample action resource structure. */
 struct mlx5_flow_dv_sample_resource {
-	struct mlx5_cache_entry entry; /**< Cache entry. */
+	struct mlx5_list_entry entry; /**< Cache entry. */
 	union {
 		void *verbs_action; /**< Verbs sample action object. */
 		void **sub_actions; /**< Sample sub-action array. */
@@ -653,7 +653,7 @@ struct mlx5_flow_dv_sample_resource {
 
 /* Destination array action resource structure. */
 struct mlx5_flow_dv_dest_array_resource {
-	struct mlx5_cache_entry entry; /**< Cache entry. */
+	struct mlx5_list_entry entry; /**< Cache entry. */
 	uint32_t idx; /** Destination array action object index. */
 	uint8_t ft_type; /** Flow Table Type */
 	uint8_t num_of_dest; /**< Number of destination actions. */
@@ -1619,43 +1619,45 @@ struct mlx5_hlist_entry *flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
 void flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
 				   struct mlx5_hlist_entry *entry);
 
-int flow_dv_matcher_match_cb(struct mlx5_cache_list *list,
-			     struct mlx5_cache_entry *entry, void *ctx);
-struct mlx5_cache_entry *flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
-		struct mlx5_cache_entry *entry, void *ctx);
-void flow_dv_matcher_remove_cb(struct mlx5_cache_list *list,
-			       struct mlx5_cache_entry *entry);
-
-int flow_dv_port_id_match_cb(struct mlx5_cache_list *list,
-			     struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
-		struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
-			       struct mlx5_cache_entry *entry);
-
-int flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list,
-			       struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_push_vlan_create_cb
-				(struct mlx5_cache_list *list,
-				 struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
-				 struct mlx5_cache_entry *entry);
-
-int flow_dv_sample_match_cb(struct mlx5_cache_list *list,
-			    struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_sample_create_cb
-				(struct mlx5_cache_list *list,
-				 struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
-			      struct mlx5_cache_entry *entry);
-
-int flow_dv_dest_array_match_cb(struct mlx5_cache_list *list,
-				struct mlx5_cache_entry *entry, void *cb_ctx);
-struct mlx5_cache_entry *flow_dv_dest_array_create_cb
-				(struct mlx5_cache_list *list,
-				 struct mlx5_cache_entry *entry, void *cb_ctx);
-void flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
-				  struct mlx5_cache_entry *entry);
+int flow_dv_matcher_match_cb(struct mlx5_list *list,
+			     struct mlx5_list_entry *entry, void *ctx);
+struct mlx5_list_entry *flow_dv_matcher_create_cb(struct mlx5_list *list,
+						  struct mlx5_list_entry *entry,
+						  void *ctx);
+void flow_dv_matcher_remove_cb(struct mlx5_list *list,
+			       struct mlx5_list_entry *entry);
+
+int flow_dv_port_id_match_cb(struct mlx5_list *list,
+			     struct mlx5_list_entry *entry, void *cb_ctx);
+struct mlx5_list_entry *flow_dv_port_id_create_cb(struct mlx5_list *list,
+						  struct mlx5_list_entry *entry,
+						  void *cb_ctx);
+void flow_dv_port_id_remove_cb(struct mlx5_list *list,
+			       struct mlx5_list_entry *entry);
+
+int flow_dv_push_vlan_match_cb(struct mlx5_list *list,
+			       struct mlx5_list_entry *entry, void *cb_ctx);
+struct mlx5_list_entry *flow_dv_push_vlan_create_cb(struct mlx5_list *list,
+						  struct mlx5_list_entry *entry,
+						  void *cb_ctx);
+void flow_dv_push_vlan_remove_cb(struct mlx5_list *list,
+				 struct mlx5_list_entry *entry);
+
+int flow_dv_sample_match_cb(struct mlx5_list *list,
+			    struct mlx5_list_entry *entry, void *cb_ctx);
+struct mlx5_list_entry *flow_dv_sample_create_cb(struct mlx5_list *list,
+						 struct mlx5_list_entry *entry,
+						 void *cb_ctx);
+void flow_dv_sample_remove_cb(struct mlx5_list *list,
+			      struct mlx5_list_entry *entry);
+
+int flow_dv_dest_array_match_cb(struct mlx5_list *list,
+				struct mlx5_list_entry *entry, void *cb_ctx);
+struct mlx5_list_entry *flow_dv_dest_array_create_cb(struct mlx5_list *list,
+						  struct mlx5_list_entry *entry,
+						  void *cb_ctx);
+void flow_dv_dest_array_remove_cb(struct mlx5_list *list,
+				  struct mlx5_list_entry *entry);
 struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
 						    uint32_t age_idx);
 int flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index bca8339361..d19b41c20a 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3601,18 +3601,17 @@ flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
 			     uint64_t key __rte_unused, void *cb_ctx)
 {
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
-	struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
-	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
-
-	cache_resource = container_of(entry,
-				      struct mlx5_flow_dv_encap_decap_resource,
-				      entry);
-	if (resource->reformat_type == cache_resource->reformat_type &&
-	    resource->ft_type == cache_resource->ft_type &&
-	    resource->flags == cache_resource->flags &&
-	    resource->size == cache_resource->size &&
+	struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
+	struct mlx5_flow_dv_encap_decap_resource *resource;
+
+	resource = container_of(entry, struct mlx5_flow_dv_encap_decap_resource,
+				entry);
+	if (resource->reformat_type == ctx_resource->reformat_type &&
+	    resource->ft_type == ctx_resource->ft_type &&
+	    resource->flags == ctx_resource->flags &&
+	    resource->size == ctx_resource->size &&
 	    !memcmp((const void *)resource->buf,
-		    (const void *)cache_resource->buf,
+		    (const void *)ctx_resource->buf,
 		    resource->size))
 		return 0;
 	return -1;
@@ -3639,31 +3638,30 @@ flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
 	struct mlx5_dev_ctx_shared *sh = list->ctx;
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct mlx5dv_dr_domain *domain;
-	struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
-	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+	struct mlx5_flow_dv_encap_decap_resource *ctx_resource = ctx->data;
+	struct mlx5_flow_dv_encap_decap_resource *resource;
 	uint32_t idx;
 	int ret;
 
-	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
 		domain = sh->fdb_domain;
-	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
+	else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
 		domain = sh->rx_domain;
 	else
 		domain = sh->tx_domain;
 	/* Register new encap/decap resource. */
-	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
-				       &idx);
-	if (!cache_resource) {
+	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &idx);
+	if (!resource) {
 		rte_flow_error_set(ctx->error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				   "cannot allocate resource memory");
 		return NULL;
 	}
-	*cache_resource = *resource;
-	cache_resource->idx = idx;
-	ret = mlx5_flow_os_create_flow_action_packet_reformat
-					(sh->ctx, domain, cache_resource,
-					 &cache_resource->action);
+	*resource = *ctx_resource;
+	resource->idx = idx;
+	ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
+							      resource,
+							     &resource->action);
 	if (ret) {
 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
 		rte_flow_error_set(ctx->error, ENOMEM,
@@ -3672,7 +3670,7 @@ flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
 		return NULL;
 	}
 
-	return &cache_resource->entry;
+	return &resource->entry;
 }
 
 /**
@@ -3776,8 +3774,8 @@ flow_dv_jump_tbl_resource_register
 }
 
 int
-flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
-			 struct mlx5_cache_entry *entry, void *cb_ctx)
+flow_dv_port_id_match_cb(struct mlx5_list *list __rte_unused,
+			 struct mlx5_list_entry *entry, void *cb_ctx)
 {
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
@@ -3787,30 +3785,30 @@ flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
 	return ref->port_id != res->port_id;
 }
 
-struct mlx5_cache_entry *
-flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
-			  struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+flow_dv_port_id_create_cb(struct mlx5_list *list,
+			  struct mlx5_list_entry *entry __rte_unused,
 			  void *cb_ctx)
 {
 	struct mlx5_dev_ctx_shared *sh = list->ctx;
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
-	struct mlx5_flow_dv_port_id_action_resource *cache;
+	struct mlx5_flow_dv_port_id_action_resource *resource;
 	uint32_t idx;
 	int ret;
 
 	/* Register new port id action resource. */
-	cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
-	if (!cache) {
+	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
+	if (!resource) {
 		rte_flow_error_set(ctx->error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "cannot allocate port_id action cache memory");
+				   "cannot allocate port_id action memory");
 		return NULL;
 	}
-	*cache = *ref;
+	*resource = *ref;
 	ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
 							ref->port_id,
-							&cache->action);
+							&resource->action);
 	if (ret) {
 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
 		rte_flow_error_set(ctx->error, ENOMEM,
@@ -3818,8 +3816,8 @@ flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
 				   "cannot create action");
 		return NULL;
 	}
-	cache->idx = idx;
-	return &cache->entry;
+	resource->idx = idx;
+	return &resource->entry;
 }
 
 /**
@@ -3827,8 +3825,8 @@ flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
  *
  * @param[in, out] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- *   Pointer to port ID action resource.
+ * @param[in, out] ref
+ *   Pointer to port ID action resource reference.
  * @parm[in, out] dev_flow
  *   Pointer to the dev_flow.
  * @param[out] error
@@ -3840,30 +3838,30 @@ flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
 static int
 flow_dv_port_id_action_resource_register
 			(struct rte_eth_dev *dev,
-			 struct mlx5_flow_dv_port_id_action_resource *resource,
+			 struct mlx5_flow_dv_port_id_action_resource *ref,
 			 struct mlx5_flow *dev_flow,
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_cache_entry *entry;
-	struct mlx5_flow_dv_port_id_action_resource *cache;
+	struct mlx5_list_entry *entry;
+	struct mlx5_flow_dv_port_id_action_resource *resource;
 	struct mlx5_flow_cb_ctx ctx = {
 		.error = error,
-		.data = resource,
+		.data = ref,
 	};
 
-	entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
+	entry = mlx5_list_register(&priv->sh->port_id_action_list, &ctx);
 	if (!entry)
 		return -rte_errno;
-	cache = container_of(entry, typeof(*cache), entry);
-	dev_flow->dv.port_id_action = cache;
-	dev_flow->handle->rix_port_id_action = cache->idx;
+	resource = container_of(entry, typeof(*resource), entry);
+	dev_flow->dv.port_id_action = resource;
+	dev_flow->handle->rix_port_id_action = resource->idx;
 	return 0;
 }
 
 int
-flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
-			 struct mlx5_cache_entry *entry, void *cb_ctx)
+flow_dv_push_vlan_match_cb(struct mlx5_list *list __rte_unused,
+			 struct mlx5_list_entry *entry, void *cb_ctx)
 {
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
@@ -3873,28 +3871,28 @@ flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
 	return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
 }
 
-struct mlx5_cache_entry *
-flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
-			  struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+flow_dv_push_vlan_create_cb(struct mlx5_list *list,
+			  struct mlx5_list_entry *entry __rte_unused,
 			  void *cb_ctx)
 {
 	struct mlx5_dev_ctx_shared *sh = list->ctx;
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
-	struct mlx5_flow_dv_push_vlan_action_resource *cache;
+	struct mlx5_flow_dv_push_vlan_action_resource *resource;
 	struct mlx5dv_dr_domain *domain;
 	uint32_t idx;
 	int ret;
 
 	/* Register new port id action resource. */
-	cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
-	if (!cache) {
+	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
+	if (!resource) {
 		rte_flow_error_set(ctx->error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-				   "cannot allocate push_vlan action cache memory");
+				   "cannot allocate push_vlan action memory");
 		return NULL;
 	}
-	*cache = *ref;
+	*resource = *ref;
 	if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
 		domain = sh->fdb_domain;
 	else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
@@ -3902,7 +3900,7 @@ flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
 	else
 		domain = sh->tx_domain;
 	ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
-							&cache->action);
+							&resource->action);
 	if (ret) {
 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
 		rte_flow_error_set(ctx->error, ENOMEM,
@@ -3910,8 +3908,8 @@ flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
 				   "cannot create push vlan action");
 		return NULL;
 	}
-	cache->idx = idx;
-	return &cache->entry;
+	resource->idx = idx;
+	return &resource->entry;
 }
 
 /**
@@ -3919,8 +3917,8 @@ flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
  *
  * @param [in, out] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in, out] resource
- *   Pointer to port ID action resource.
+ * @param[in, out] ref
+ *   Pointer to port ID action resource reference.
  * @parm[in, out] dev_flow
  *   Pointer to the dev_flow.
  * @param[out] error
@@ -3932,25 +3930,25 @@ flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
 static int
 flow_dv_push_vlan_action_resource_register
 		       (struct rte_eth_dev *dev,
-			struct mlx5_flow_dv_push_vlan_action_resource *resource,
+			struct mlx5_flow_dv_push_vlan_action_resource *ref,
 			struct mlx5_flow *dev_flow,
 			struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_push_vlan_action_resource *cache;
-	struct mlx5_cache_entry *entry;
+	struct mlx5_flow_dv_push_vlan_action_resource *resource;
+	struct mlx5_list_entry *entry;
 	struct mlx5_flow_cb_ctx ctx = {
 		.error = error,
-		.data = resource,
+		.data = ref,
 	};
 
-	entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
+	entry = mlx5_list_register(&priv->sh->push_vlan_action_list, &ctx);
 	if (!entry)
 		return -rte_errno;
-	cache = container_of(entry, typeof(*cache), entry);
+	resource = container_of(entry, typeof(*resource), entry);
 
-	dev_flow->handle->dvh.rix_push_vlan = cache->idx;
-	dev_flow->dv.push_vlan_res = cache;
+	dev_flow->handle->dvh.rix_push_vlan = resource->idx;
+	dev_flow->dv.push_vlan_res = resource;
 	return 0;
 }
 
@@ -9950,13 +9948,13 @@ flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
 			return NULL;
 		}
 	}
-	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_cache",
+	MKSTR(matcher_name, "%s_%s_%u_%u_matcher_list",
 	      key.is_fdb ? "FDB" : "NIC", key.is_egress ? "egress" : "ingress",
 	      key.level, key.id);
-	mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
-			     flow_dv_matcher_create_cb,
-			     flow_dv_matcher_match_cb,
-			     flow_dv_matcher_remove_cb);
+	mlx5_list_create(&tbl_data->matchers, matcher_name, 0, sh,
+			 flow_dv_matcher_create_cb,
+			 flow_dv_matcher_match_cb,
+			 flow_dv_matcher_remove_cb);
 	return &tbl_data->entry;
 }
 
@@ -10084,7 +10082,7 @@ flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
 			tbl_data->tunnel->tunnel_id : 0,
 			tbl_data->group_id);
 	}
-	mlx5_cache_list_destroy(&tbl_data->matchers);
+	mlx5_list_destroy(&tbl_data->matchers);
 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
 }
 
@@ -10112,8 +10110,8 @@ flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
 }
 
 int
-flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
-			 struct mlx5_cache_entry *entry, void *cb_ctx)
+flow_dv_matcher_match_cb(struct mlx5_list *list __rte_unused,
+			 struct mlx5_list_entry *entry, void *cb_ctx)
 {
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct mlx5_flow_dv_matcher *ref = ctx->data;
@@ -10126,15 +10124,15 @@ flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
 		      (const void *)ref->mask.buf, ref->mask.size);
 }
 
-struct mlx5_cache_entry *
-flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
-			  struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+flow_dv_matcher_create_cb(struct mlx5_list *list,
+			  struct mlx5_list_entry *entry __rte_unused,
 			  void *cb_ctx)
 {
 	struct mlx5_dev_ctx_shared *sh = list->ctx;
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct mlx5_flow_dv_matcher *ref = ctx->data;
-	struct mlx5_flow_dv_matcher *cache;
+	struct mlx5_flow_dv_matcher *resource;
 	struct mlx5dv_flow_matcher_attr dv_attr = {
 		.type = IBV_FLOW_ATTR_NORMAL,
 		.match_mask = (void *)&ref->mask,
@@ -10143,29 +10141,30 @@ flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
 							    typeof(*tbl), tbl);
 	int ret;
 
-	cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
-	if (!cache) {
+	resource = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*resource), 0,
+			       SOCKET_ID_ANY);
+	if (!resource) {
 		rte_flow_error_set(ctx->error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				   "cannot create matcher");
 		return NULL;
 	}
-	*cache = *ref;
+	*resource = *ref;
 	dv_attr.match_criteria_enable =
-		flow_dv_matcher_enable(cache->mask.buf);
+		flow_dv_matcher_enable(resource->mask.buf);
 	dv_attr.priority = ref->priority;
 	if (tbl->is_egress)
 		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
-					       &cache->matcher_object);
+					       &resource->matcher_object);
 	if (ret) {
-		mlx5_free(cache);
+		mlx5_free(resource);
 		rte_flow_error_set(ctx->error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 				   "cannot create matcher");
 		return NULL;
 	}
-	return &cache->entry;
+	return &resource->entry;
 }
 
 /**
@@ -10194,8 +10193,8 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 			 uint32_t group_id,
 			 struct rte_flow_error *error)
 {
-	struct mlx5_cache_entry *entry;
-	struct mlx5_flow_dv_matcher *cache;
+	struct mlx5_list_entry *entry;
+	struct mlx5_flow_dv_matcher *resource;
 	struct mlx5_flow_tbl_resource *tbl;
 	struct mlx5_flow_tbl_data_entry *tbl_data;
 	struct mlx5_flow_cb_ctx ctx = {
@@ -10215,15 +10214,15 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 		return -rte_errno;	/* No need to refill the error info */
 	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
 	ref->tbl = tbl;
-	entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+	entry = mlx5_list_register(&tbl_data->matchers, &ctx);
 	if (!entry) {
 		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "cannot allocate ref memory");
 	}
-	cache = container_of(entry, typeof(*cache), entry);
-	dev_flow->handle->dvh.matcher = cache;
+	resource = container_of(entry, typeof(*resource), entry);
+	dev_flow->handle->dvh.matcher = resource;
 	return 0;
 }
 
@@ -10291,15 +10290,15 @@ flow_dv_tag_resource_register
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_tag_resource *cache_resource;
+	struct mlx5_flow_dv_tag_resource *resource;
 	struct mlx5_hlist_entry *entry;
 
 	entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
 	if (entry) {
-		cache_resource = container_of
-			(entry, struct mlx5_flow_dv_tag_resource, entry);
-		dev_flow->handle->dvh.rix_tag = cache_resource->idx;
-		dev_flow->dv.tag_resource = cache_resource;
+		resource = container_of(entry, struct mlx5_flow_dv_tag_resource,
+					entry);
+		dev_flow->handle->dvh.rix_tag = resource->idx;
+		dev_flow->dv.tag_resource = resource;
 		return 0;
 	}
 	return -rte_errno;
@@ -10626,68 +10625,69 @@ flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
 }
 
 int
-flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
-			struct mlx5_cache_entry *entry, void *cb_ctx)
+flow_dv_sample_match_cb(struct mlx5_list *list __rte_unused,
+			struct mlx5_list_entry *entry, void *cb_ctx)
 {
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct rte_eth_dev *dev = ctx->dev;
-	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
-	struct mlx5_flow_dv_sample_resource *cache_resource =
-			container_of(entry, typeof(*cache_resource), entry);
-
-	if (resource->ratio == cache_resource->ratio &&
-	    resource->ft_type == cache_resource->ft_type &&
-	    resource->ft_id == cache_resource->ft_id &&
-	    resource->set_action == cache_resource->set_action &&
-	    !memcmp((void *)&resource->sample_act,
-		    (void *)&cache_resource->sample_act,
+	struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
+	struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
+							      typeof(*resource),
+							      entry);
+
+	if (ctx_resource->ratio == resource->ratio &&
+	    ctx_resource->ft_type == resource->ft_type &&
+	    ctx_resource->ft_id == resource->ft_id &&
+	    ctx_resource->set_action == resource->set_action &&
+	    !memcmp((void *)&ctx_resource->sample_act,
+		    (void *)&resource->sample_act,
 		    sizeof(struct mlx5_flow_sub_actions_list))) {
 		/*
 		 * Existing sample action should release the prepared
 		 * sub-actions reference counter.
 		 */
 		flow_dv_sample_sub_actions_release(dev,
-						&resource->sample_idx);
+						   &ctx_resource->sample_idx);
 		return 0;
 	}
 	return 1;
 }
 
-struct mlx5_cache_entry *
-flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
-			 struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+flow_dv_sample_create_cb(struct mlx5_list *list __rte_unused,
+			 struct mlx5_list_entry *entry __rte_unused,
 			 void *cb_ctx)
 {
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct rte_eth_dev *dev = ctx->dev;
-	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
-	void **sample_dv_actions = resource->sub_actions;
-	struct mlx5_flow_dv_sample_resource *cache_resource;
+	struct mlx5_flow_dv_sample_resource *ctx_resource = ctx->data;
+	void **sample_dv_actions = ctx_resource->sub_actions;
+	struct mlx5_flow_dv_sample_resource *resource;
 	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_tbl_resource *tbl;
 	uint32_t idx = 0;
 	const uint32_t next_ft_step = 1;
-	uint32_t next_ft_id = resource->ft_id +	next_ft_step;
+	uint32_t next_ft_id = ctx_resource->ft_id + next_ft_step;
 	uint8_t is_egress = 0;
 	uint8_t is_transfer = 0;
 	struct rte_flow_error *error = ctx->error;
 
 	/* Register new sample resource. */
-	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
-	if (!cache_resource) {
+	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
+	if (!resource) {
 		rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
 					  "cannot allocate resource memory");
 		return NULL;
 	}
-	*cache_resource = *resource;
+	*resource = *ctx_resource;
 	/* Create normal path table level */
-	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
+	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
 		is_transfer = 1;
-	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
+	else if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
 		is_egress = 1;
 	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
 					is_egress, is_transfer,
@@ -10700,8 +10700,8 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
 					  "for sample");
 		goto error;
 	}
-	cache_resource->normal_path_tbl = tbl;
-	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
+	resource->normal_path_tbl = tbl;
+	if (ctx_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
 		if (!sh->default_miss_action) {
 			rte_flow_error_set(error, ENOMEM,
 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -10710,33 +10710,33 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
 						"created");
 			goto error;
 		}
-		sample_dv_actions[resource->sample_act.actions_num++] =
+		sample_dv_actions[ctx_resource->sample_act.actions_num++] =
 						sh->default_miss_action;
 	}
 	/* Create a DR sample action */
-	sampler_attr.sample_ratio = cache_resource->ratio;
+	sampler_attr.sample_ratio = resource->ratio;
 	sampler_attr.default_next_table = tbl->obj;
-	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
+	sampler_attr.num_sample_actions = ctx_resource->sample_act.actions_num;
 	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
 							&sample_dv_actions[0];
-	sampler_attr.action = cache_resource->set_action;
+	sampler_attr.action = resource->set_action;
 	if (mlx5_os_flow_dr_create_flow_action_sampler
-			(&sampler_attr, &cache_resource->verbs_action)) {
+			(&sampler_attr, &resource->verbs_action)) {
 		rte_flow_error_set(error, ENOMEM,
 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					NULL, "cannot create sample action");
 		goto error;
 	}
-	cache_resource->idx = idx;
-	cache_resource->dev = dev;
-	return &cache_resource->entry;
+	resource->idx = idx;
+	resource->dev = dev;
+	return &resource->entry;
 error:
-	if (cache_resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
+	if (resource->ft_type != MLX5DV_FLOW_TABLE_TYPE_FDB)
 		flow_dv_sample_sub_actions_release(dev,
-						   &cache_resource->sample_idx);
-	if (cache_resource->normal_path_tbl)
+						   &resource->sample_idx);
+	if (resource->normal_path_tbl)
 		flow_dv_tbl_resource_release(MLX5_SH(dev),
-				cache_resource->normal_path_tbl);
+				resource->normal_path_tbl);
 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
 	return NULL;
 
@@ -10747,8 +10747,8 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
  *
  * @param[in, out] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in] resource
- *   Pointer to sample resource.
+ * @param[in] ref
+ *   Pointer to sample resource reference.
  * @parm[in, out] dev_flow
  *   Pointer to the dev_flow.
  * @param[out] error
@@ -10759,66 +10759,66 @@ flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
  */
 static int
 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
-			 struct mlx5_flow_dv_sample_resource *resource,
+			 struct mlx5_flow_dv_sample_resource *ref,
 			 struct mlx5_flow *dev_flow,
 			 struct rte_flow_error *error)
 {
-	struct mlx5_flow_dv_sample_resource *cache_resource;
-	struct mlx5_cache_entry *entry;
+	struct mlx5_flow_dv_sample_resource *resource;
+	struct mlx5_list_entry *entry;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_cb_ctx ctx = {
 		.dev = dev,
 		.error = error,
-		.data = resource,
+		.data = ref,
 	};
 
-	entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
+	entry = mlx5_list_register(&priv->sh->sample_action_list, &ctx);
 	if (!entry)
 		return -rte_errno;
-	cache_resource = container_of(entry, typeof(*cache_resource), entry);
-	dev_flow->handle->dvh.rix_sample = cache_resource->idx;
-	dev_flow->dv.sample_res = cache_resource;
+	resource = container_of(entry, typeof(*resource), entry);
+	dev_flow->handle->dvh.rix_sample = resource->idx;
+	dev_flow->dv.sample_res = resource;
 	return 0;
 }
 
 int
-flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
-			    struct mlx5_cache_entry *entry, void *cb_ctx)
+flow_dv_dest_array_match_cb(struct mlx5_list *list __rte_unused,
+			    struct mlx5_list_entry *entry, void *cb_ctx)
 {
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
-	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
+	struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
 	struct rte_eth_dev *dev = ctx->dev;
-	struct mlx5_flow_dv_dest_array_resource *cache_resource =
-			container_of(entry, typeof(*cache_resource), entry);
+	struct mlx5_flow_dv_dest_array_resource *resource =
+			container_of(entry, typeof(*resource), entry);
 	uint32_t idx = 0;
 
-	if (resource->num_of_dest == cache_resource->num_of_dest &&
-	    resource->ft_type == cache_resource->ft_type &&
-	    !memcmp((void *)cache_resource->sample_act,
-		    (void *)resource->sample_act,
-		   (resource->num_of_dest *
+	if (ctx_resource->num_of_dest == resource->num_of_dest &&
+	    ctx_resource->ft_type == resource->ft_type &&
+	    !memcmp((void *)resource->sample_act,
+		    (void *)ctx_resource->sample_act,
+		   (ctx_resource->num_of_dest *
 		   sizeof(struct mlx5_flow_sub_actions_list)))) {
 		/*
 		 * Existing sample action should release the prepared
 		 * sub-actions reference counter.
 		 */
-		for (idx = 0; idx < resource->num_of_dest; idx++)
+		for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
 			flow_dv_sample_sub_actions_release(dev,
-					&resource->sample_idx[idx]);
+					&ctx_resource->sample_idx[idx]);
 		return 0;
 	}
 	return 1;
 }
 
-struct mlx5_cache_entry *
-flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
-			 struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+flow_dv_dest_array_create_cb(struct mlx5_list *list __rte_unused,
+			 struct mlx5_list_entry *entry __rte_unused,
 			 void *cb_ctx)
 {
 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
 	struct rte_eth_dev *dev = ctx->dev;
-	struct mlx5_flow_dv_dest_array_resource *cache_resource;
-	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
+	struct mlx5_flow_dv_dest_array_resource *resource;
+	struct mlx5_flow_dv_dest_array_resource *ctx_resource = ctx->data;
 	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
 	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -10831,23 +10831,23 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
 	int ret;
 
 	/* Register new destination array resource. */
-	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+	resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
 					    &res_idx);
-	if (!cache_resource) {
+	if (!resource) {
 		rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL,
 					  "cannot allocate resource memory");
 		return NULL;
 	}
-	*cache_resource = *resource;
+	*resource = *ctx_resource;
 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
 		domain = sh->fdb_domain;
 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
 		domain = sh->rx_domain;
 	else
 		domain = sh->tx_domain;
-	for (idx = 0; idx < resource->num_of_dest; idx++) {
+	for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
 		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
 				 mlx5_malloc(MLX5_MEM_ZERO,
 				 sizeof(struct mlx5dv_dr_action_dest_attr),
@@ -10860,7 +10860,7 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
 			goto error;
 		}
 		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
-		sample_act = &resource->sample_act[idx];
+		sample_act = &ctx_resource->sample_act[idx];
 		action_flags = sample_act->action_flags;
 		switch (action_flags) {
 		case MLX5_FLOW_ACTION_QUEUE:
@@ -10891,9 +10891,9 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
 	/* create a dest array actioin */
 	ret = mlx5_os_flow_dr_create_flow_action_dest_array
 						(domain,
-						 cache_resource->num_of_dest,
+						 resource->num_of_dest,
 						 dest_attr,
-						 &cache_resource->action);
+						 &resource->action);
 	if (ret) {
 		rte_flow_error_set(error, ENOMEM,
 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -10901,19 +10901,18 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
 				   "cannot create destination array action");
 		goto error;
 	}
-	cache_resource->idx = res_idx;
-	cache_resource->dev = dev;
-	for (idx = 0; idx < resource->num_of_dest; idx++)
+	resource->idx = res_idx;
+	resource->dev = dev;
+	for (idx = 0; idx < ctx_resource->num_of_dest; idx++)
 		mlx5_free(dest_attr[idx]);
-	return &cache_resource->entry;
+	return &resource->entry;
 error:
-	for (idx = 0; idx < resource->num_of_dest; idx++) {
+	for (idx = 0; idx < ctx_resource->num_of_dest; idx++) {
 		flow_dv_sample_sub_actions_release(dev,
-				&cache_resource->sample_idx[idx]);
+						   &resource->sample_idx[idx]);
 		if (dest_attr[idx])
 			mlx5_free(dest_attr[idx]);
 	}
-
 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
 	return NULL;
 }
@@ -10923,8 +10922,8 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
  *
  * @param[in, out] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in] resource
- *   Pointer to destination array resource.
+ * @param[in] ref
+ *   Pointer to destination array resource reference.
  * @parm[in, out] dev_flow
  *   Pointer to the dev_flow.
  * @param[out] error
@@ -10935,25 +10934,25 @@ flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
  */
 static int
 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
-			 struct mlx5_flow_dv_dest_array_resource *resource,
+			 struct mlx5_flow_dv_dest_array_resource *ref,
 			 struct mlx5_flow *dev_flow,
 			 struct rte_flow_error *error)
 {
-	struct mlx5_flow_dv_dest_array_resource *cache_resource;
+	struct mlx5_flow_dv_dest_array_resource *resource;
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_cache_entry *entry;
+	struct mlx5_list_entry *entry;
 	struct mlx5_flow_cb_ctx ctx = {
 		.dev = dev,
 		.error = error,
-		.data = resource,
+		.data = ref,
 	};
 
-	entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
+	entry = mlx5_list_register(&priv->sh->dest_array_list, &ctx);
 	if (!entry)
 		return -rte_errno;
-	cache_resource = container_of(entry, typeof(*cache_resource), entry);
-	dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
-	dev_flow->dv.dest_array_res = cache_resource;
+	resource = container_of(entry, typeof(*resource), entry);
+	dev_flow->handle->dvh.rix_dest_array = resource->idx;
+	dev_flow->dv.dest_array_res = resource;
 	return 0;
 }
 
@@ -13382,14 +13381,15 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 }
 
 void
-flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
-			  struct mlx5_cache_entry *entry)
+flow_dv_matcher_remove_cb(struct mlx5_list *list __rte_unused,
+			  struct mlx5_list_entry *entry)
 {
-	struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
-							  entry);
+	struct mlx5_flow_dv_matcher *resource = container_of(entry,
+							     typeof(*resource),
+							     entry);
 
-	claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
-	mlx5_free(cache);
+	claim_zero(mlx5_flow_os_destroy_flow_matcher(resource->matcher_object));
+	mlx5_free(resource);
 }
 
 /**
@@ -13413,7 +13413,7 @@ flow_dv_matcher_release(struct rte_eth_dev *dev,
 	int ret;
 
 	MLX5_ASSERT(matcher->matcher_object);
-	ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
+	ret = mlx5_list_unregister(&tbl->matchers, &matcher->entry);
 	flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
 	return ret;
 }
@@ -13432,7 +13432,7 @@ flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
 {
 	struct mlx5_dev_ctx_shared *sh = list->ctx;
 	struct mlx5_flow_dv_encap_decap_resource *res =
-		container_of(entry, typeof(*res), entry);
+				       container_of(entry, typeof(*res), entry);
 
 	claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
@@ -13454,15 +13454,14 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
 				     uint32_t encap_decap_idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+	struct mlx5_flow_dv_encap_decap_resource *resource;
 
-	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
-					encap_decap_idx);
-	if (!cache_resource)
+	resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
+				  encap_decap_idx);
+	if (!resource)
 		return 0;
-	MLX5_ASSERT(cache_resource->action);
-	return mlx5_hlist_unregister(priv->sh->encaps_decaps,
-				     &cache_resource->entry);
+	MLX5_ASSERT(resource->action);
+	return mlx5_hlist_unregister(priv->sh->encaps_decaps, &resource->entry);
 }
 
 /**
@@ -13524,15 +13523,15 @@ flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
 }
 
 void
-flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
-			  struct mlx5_cache_entry *entry)
+flow_dv_port_id_remove_cb(struct mlx5_list *list,
+			  struct mlx5_list_entry *entry)
 {
 	struct mlx5_dev_ctx_shared *sh = list->ctx;
-	struct mlx5_flow_dv_port_id_action_resource *cache =
-			container_of(entry, typeof(*cache), entry);
+	struct mlx5_flow_dv_port_id_action_resource *resource =
+				  container_of(entry, typeof(*resource), entry);
 
-	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
-	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
+	claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], resource->idx);
 }
 
 /**
@@ -13551,14 +13550,14 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
 					uint32_t port_id)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_port_id_action_resource *cache;
+	struct mlx5_flow_dv_port_id_action_resource *resource;
 
-	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
-	if (!cache)
+	resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
+	if (!resource)
 		return 0;
-	MLX5_ASSERT(cache->action);
-	return mlx5_cache_unregister(&priv->sh->port_id_action_list,
-				     &cache->entry);
+	MLX5_ASSERT(resource->action);
+	return mlx5_list_unregister(&priv->sh->port_id_action_list,
+				    &resource->entry);
 }
 
 /**
@@ -13581,15 +13580,15 @@ flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
 }
 
 void
-flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
-			    struct mlx5_cache_entry *entry)
+flow_dv_push_vlan_remove_cb(struct mlx5_list *list,
+			    struct mlx5_list_entry *entry)
 {
 	struct mlx5_dev_ctx_shared *sh = list->ctx;
-	struct mlx5_flow_dv_push_vlan_action_resource *cache =
-			container_of(entry, typeof(*cache), entry);
+	struct mlx5_flow_dv_push_vlan_action_resource *resource =
+			container_of(entry, typeof(*resource), entry);
 
-	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
-	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
+	claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
+	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], resource->idx);
 }
 
 /**
@@ -13608,15 +13607,15 @@ flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
 					  struct mlx5_flow_handle *handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_push_vlan_action_resource *cache;
+	struct mlx5_flow_dv_push_vlan_action_resource *resource;
 	uint32_t idx = handle->dvh.rix_push_vlan;
 
-	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
-	if (!cache)
+	resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
+	if (!resource)
 		return 0;
-	MLX5_ASSERT(cache->action);
-	return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
-				     &cache->entry);
+	MLX5_ASSERT(resource->action);
+	return mlx5_list_unregister(&priv->sh->push_vlan_action_list,
+				    &resource->entry);
 }
 
 /**
@@ -13653,26 +13652,24 @@ flow_dv_fate_resource_release(struct rte_eth_dev *dev,
 }
 
 void
-flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
-			 struct mlx5_cache_entry *entry)
+flow_dv_sample_remove_cb(struct mlx5_list *list __rte_unused,
+			 struct mlx5_list_entry *entry)
 {
-	struct mlx5_flow_dv_sample_resource *cache_resource =
-			container_of(entry, typeof(*cache_resource), entry);
-	struct rte_eth_dev *dev = cache_resource->dev;
+	struct mlx5_flow_dv_sample_resource *resource = container_of(entry,
+							      typeof(*resource),
+							      entry);
+	struct rte_eth_dev *dev = resource->dev;
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	if (cache_resource->verbs_action)
+	if (resource->verbs_action)
 		claim_zero(mlx5_flow_os_destroy_flow_action
-				(cache_resource->verbs_action));
-	if (cache_resource->normal_path_tbl)
+						      (resource->verbs_action));
+	if (resource->normal_path_tbl)
 		flow_dv_tbl_resource_release(MLX5_SH(dev),
-			cache_resource->normal_path_tbl);
-	flow_dv_sample_sub_actions_release(dev,
-				&cache_resource->sample_idx);
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
-			cache_resource->idx);
-	DRV_LOG(DEBUG, "sample resource %p: removed",
-		(void *)cache_resource);
+					     resource->normal_path_tbl);
+	flow_dv_sample_sub_actions_release(dev, &resource->sample_idx);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], resource->idx);
+	DRV_LOG(DEBUG, "sample resource %p: removed", (void *)resource);
 }
 
 /**
@@ -13691,38 +13688,36 @@ flow_dv_sample_resource_release(struct rte_eth_dev *dev,
 				     struct mlx5_flow_handle *handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_sample_resource *cache_resource;
+	struct mlx5_flow_dv_sample_resource *resource;
 
-	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
-			 handle->dvh.rix_sample);
-	if (!cache_resource)
+	resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
+				  handle->dvh.rix_sample);
+	if (!resource)
 		return 0;
-	MLX5_ASSERT(cache_resource->verbs_action);
-	return mlx5_cache_unregister(&priv->sh->sample_action_list,
-				     &cache_resource->entry);
+	MLX5_ASSERT(resource->verbs_action);
+	return mlx5_list_unregister(&priv->sh->sample_action_list,
+				    &resource->entry);
 }
 
 void
-flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
-			     struct mlx5_cache_entry *entry)
+flow_dv_dest_array_remove_cb(struct mlx5_list *list __rte_unused,
+			     struct mlx5_list_entry *entry)
 {
-	struct mlx5_flow_dv_dest_array_resource *cache_resource =
-			container_of(entry, typeof(*cache_resource), entry);
-	struct rte_eth_dev *dev = cache_resource->dev;
+	struct mlx5_flow_dv_dest_array_resource *resource =
+			container_of(entry, typeof(*resource), entry);
+	struct rte_eth_dev *dev = resource->dev;
 	struct mlx5_priv *priv = dev->data->dev_private;
 	uint32_t i = 0;
 
-	MLX5_ASSERT(cache_resource->action);
-	if (cache_resource->action)
-		claim_zero(mlx5_flow_os_destroy_flow_action
-					(cache_resource->action));
-	for (; i < cache_resource->num_of_dest; i++)
+	MLX5_ASSERT(resource->action);
+	if (resource->action)
+		claim_zero(mlx5_flow_os_destroy_flow_action(resource->action));
+	for (; i < resource->num_of_dest; i++)
 		flow_dv_sample_sub_actions_release(dev,
-				&cache_resource->sample_idx[i]);
-	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
-			cache_resource->idx);
+						   &resource->sample_idx[i]);
+	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], resource->idx);
 	DRV_LOG(DEBUG, "destination array resource %p: removed",
-		(void *)cache_resource);
+		(void *)resource);
 }
 
 /**
@@ -13741,15 +13736,15 @@ flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
 				    struct mlx5_flow_handle *handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_dv_dest_array_resource *cache;
+	struct mlx5_flow_dv_dest_array_resource *resource;
 
-	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
-			       handle->dvh.rix_dest_array);
-	if (!cache)
+	resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
+				  handle->dvh.rix_dest_array);
+	if (!resource)
 		return 0;
-	MLX5_ASSERT(cache->action);
-	return mlx5_cache_unregister(&priv->sh->dest_array_list,
-				     &cache->entry);
+	MLX5_ASSERT(resource->action);
+	return mlx5_list_unregister(&priv->sh->dest_array_list,
+				    &resource->entry);
 }
 
 static void
@@ -14592,7 +14587,7 @@ __flow_dv_destroy_sub_policy_rules(struct rte_eth_dev *dev,
 		if (sub_policy->color_matcher[i]) {
 			tbl = container_of(sub_policy->color_matcher[i]->tbl,
 				typeof(*tbl), tbl);
-			mlx5_cache_unregister(&tbl->matchers,
+			mlx5_list_unregister(&tbl->matchers,
 				      &sub_policy->color_matcher[i]->entry);
 			sub_policy->color_matcher[i] = NULL;
 		}
@@ -15326,8 +15321,8 @@ flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
 		if (mtrmng->def_matcher[i]) {
 			tbl = container_of(mtrmng->def_matcher[i]->tbl,
 				struct mlx5_flow_tbl_data_entry, tbl);
-			mlx5_cache_unregister(&tbl->matchers,
-				      &mtrmng->def_matcher[i]->entry);
+			mlx5_list_unregister(&tbl->matchers,
+					     &mtrmng->def_matcher[i]->entry);
 			mtrmng->def_matcher[i] = NULL;
 		}
 		for (j = 0; j < MLX5_REG_BITS; j++) {
@@ -15336,8 +15331,8 @@ flow_dv_destroy_mtr_drop_tbls(struct rte_eth_dev *dev)
 				container_of(mtrmng->drop_matcher[i][j]->tbl,
 					     struct mlx5_flow_tbl_data_entry,
 					     tbl);
-				mlx5_cache_unregister(&tbl->matchers,
-					&mtrmng->drop_matcher[i][j]->entry);
+				mlx5_list_unregister(&tbl->matchers,
+					    &mtrmng->drop_matcher[i][j]->entry);
 				mtrmng->drop_matcher[i][j] = NULL;
 			}
 		}
@@ -15433,7 +15428,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
 			bool match_src_port,
 			struct rte_flow_error *error)
 {
-	struct mlx5_cache_entry *entry;
+	struct mlx5_list_entry *entry;
 	struct mlx5_flow_tbl_resource *tbl_rsc = sub_policy->tbl_rsc;
 	struct mlx5_flow_dv_matcher matcher = {
 		.mask = {
@@ -15469,7 +15464,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
 	matcher.priority = priority;
 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
 					matcher.mask.size);
-	entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+	entry = mlx5_list_register(&tbl_data->matchers, &ctx);
 	if (!entry) {
 		DRV_LOG(ERR, "Failed to register meter drop matcher.");
 		return -1;
@@ -15835,7 +15830,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
 						     0, &error);
 	uint32_t mtr_id_mask = (UINT32_C(1) << mtrmng->max_mtr_bits) - 1;
 	uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
-	struct mlx5_cache_entry *entry;
+	struct mlx5_list_entry *entry;
 	struct mlx5_flow_dv_matcher matcher = {
 		.mask = {
 			.size = sizeof(matcher.mask.buf) -
@@ -15881,7 +15876,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
 			matcher.crc = rte_raw_cksum
 					((const void *)matcher.mask.buf,
 					matcher.mask.size);
-			entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+			entry = mlx5_list_register(&tbl_data->matchers, &ctx);
 			if (!entry) {
 				DRV_LOG(ERR, "Failed to register meter "
 				"drop default matcher.");
@@ -15918,7 +15913,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
 			matcher.crc = rte_raw_cksum
 					((const void *)matcher.mask.buf,
 					matcher.mask.size);
-			entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
+			entry = mlx5_list_register(&tbl_data->matchers, &ctx);
 			if (!entry) {
 				DRV_LOG(ERR,
 				"Failed to register meter drop matcher.");
@@ -16104,7 +16099,6 @@ flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
 	return NULL;
 }
 
-
 /**
  * Destroy the sub policy table with RX queue.
  *
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 1b264e5994..3dcc71d51d 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -222,13 +222,13 @@ int mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
 			      struct mlx5_ind_table_obj *ind_tbl,
 			      uint16_t *queues, const uint32_t queues_n,
 			      bool standalone);
-struct mlx5_cache_entry *mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
-		struct mlx5_cache_entry *entry __rte_unused, void *cb_ctx);
-int mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
-		       struct mlx5_cache_entry *entry,
+struct mlx5_list_entry *mlx5_hrxq_create_cb(struct mlx5_list *list,
+		struct mlx5_list_entry *entry __rte_unused, void *cb_ctx);
+int mlx5_hrxq_match_cb(struct mlx5_list *list,
+		       struct mlx5_list_entry *entry,
 		       void *cb_ctx);
-void mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
-			 struct mlx5_cache_entry *entry);
+void mlx5_hrxq_remove_cb(struct mlx5_list *list,
+			 struct mlx5_list_entry *entry);
 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 		       struct mlx5_flow_rss_desc *rss_desc);
 int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hxrq_idx);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index bb9a908087..8395332507 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2093,7 +2093,7 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
  * Match an Rx Hash queue.
  *
  * @param list
- *   Cache list pointer.
+ *   mlx5 list pointer.
  * @param entry
  *   Hash queue entry pointer.
  * @param cb_ctx
@@ -2103,8 +2103,8 @@ mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
  *   0 if match, non-zero otherwise.
  */
 int
-mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
-		   struct mlx5_cache_entry *entry,
+mlx5_hrxq_match_cb(struct mlx5_list *list,
+		   struct mlx5_list_entry *entry,
 		   void *cb_ctx)
 {
 	struct rte_eth_dev *dev = list->ctx;
@@ -2242,13 +2242,13 @@ __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
  *   Index to Hash Rx queue to release.
  *
  * @param list
- *   Cache list pointer.
+ *   mlx5 list pointer.
  * @param entry
  *   Hash queue entry pointer.
  */
 void
-mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
-		    struct mlx5_cache_entry *entry)
+mlx5_hrxq_remove_cb(struct mlx5_list *list,
+		    struct mlx5_list_entry *entry)
 {
 	struct rte_eth_dev *dev = list->ctx;
 	struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
@@ -2305,7 +2305,7 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
  * Create an Rx Hash queue.
  *
  * @param list
- *   Cache list pointer.
+ *   mlx5 list pointer.
  * @param entry
  *   Hash queue entry pointer.
  * @param cb_ctx
@@ -2314,9 +2314,9 @@ __mlx5_hrxq_create(struct rte_eth_dev *dev,
  * @return
  *   queue entry on success, NULL otherwise.
  */
-struct mlx5_cache_entry *
-mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
-		    struct mlx5_cache_entry *entry __rte_unused,
+struct mlx5_list_entry *
+mlx5_hrxq_create_cb(struct mlx5_list *list,
+		    struct mlx5_list_entry *entry __rte_unused,
 		    void *cb_ctx)
 {
 	struct rte_eth_dev *dev = list->ctx;
@@ -2344,7 +2344,7 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
-	struct mlx5_cache_entry *entry;
+	struct mlx5_list_entry *entry;
 	struct mlx5_flow_cb_ctx ctx = {
 		.data = rss_desc,
 	};
@@ -2352,7 +2352,7 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 	if (rss_desc->shared_rss) {
 		hrxq = __mlx5_hrxq_create(dev, rss_desc);
 	} else {
-		entry = mlx5_cache_register(&priv->hrxqs, &ctx);
+		entry = mlx5_list_register(&priv->hrxqs, &ctx);
 		if (!entry)
 			return 0;
 		hrxq = container_of(entry, typeof(*hrxq), entry);
@@ -2382,7 +2382,7 @@ int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
 	if (!hrxq)
 		return 0;
 	if (!hrxq->standalone)
-		return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
+		return mlx5_list_unregister(&priv->hrxqs, &hrxq->entry);
 	__mlx5_hrxq_remove(dev, hrxq);
 	return 0;
 }
@@ -2470,7 +2470,7 @@ mlx5_hrxq_verify(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
-	return mlx5_cache_list_get_entry_num(&priv->hrxqs);
+	return mlx5_list_get_entry_num(&priv->hrxqs);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index f9557c09ff..4536ca807d 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -9,29 +9,29 @@
 #include "mlx5_utils.h"
 
 
-/********************* Cache list ************************/
+/********************* MLX5 list ************************/
 
-static struct mlx5_cache_entry *
-mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
-			     struct mlx5_cache_entry *entry __rte_unused,
+static struct mlx5_list_entry *
+mlx5_list_default_create_cb(struct mlx5_list *list,
+			     struct mlx5_list_entry *entry __rte_unused,
 			     void *ctx __rte_unused)
 {
 	return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
 }
 
 static void
-mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
-			     struct mlx5_cache_entry *entry)
+mlx5_list_default_remove_cb(struct mlx5_list *list __rte_unused,
+			     struct mlx5_list_entry *entry)
 {
 	mlx5_free(entry);
 }
 
 int
-mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
+mlx5_list_create(struct mlx5_list *list, const char *name,
 		     uint32_t entry_size, void *ctx,
-		     mlx5_cache_create_cb cb_create,
-		     mlx5_cache_match_cb cb_match,
-		     mlx5_cache_remove_cb cb_remove)
+		     mlx5_list_create_cb cb_create,
+		     mlx5_list_match_cb cb_match,
+		     mlx5_list_remove_cb cb_remove)
 {
 	MLX5_ASSERT(list);
 	if (!cb_match || (!cb_create ^ !cb_remove))
@@ -40,19 +40,19 @@ mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
 		snprintf(list->name, sizeof(list->name), "%s", name);
 	list->entry_sz = entry_size;
 	list->ctx = ctx;
-	list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
+	list->cb_create = cb_create ? cb_create : mlx5_list_default_create_cb;
 	list->cb_match = cb_match;
-	list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
+	list->cb_remove = cb_remove ? cb_remove : mlx5_list_default_remove_cb;
 	rte_rwlock_init(&list->lock);
-	DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
+	DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
 	LIST_INIT(&list->head);
 	return 0;
 }
 
-static struct mlx5_cache_entry *
-__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+static struct mlx5_list_entry *
+__list_lookup(struct mlx5_list *list, void *ctx, bool reuse)
 {
-	struct mlx5_cache_entry *entry;
+	struct mlx5_list_entry *entry;
 
 	LIST_FOREACH(entry, &list->head, next) {
 		if (list->cb_match(list, entry, ctx))
@@ -60,7 +60,7 @@ __cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
 		if (reuse) {
 			__atomic_add_fetch(&entry->ref_cnt, 1,
 					   __ATOMIC_RELAXED);
-			DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
+			DRV_LOG(DEBUG, "mlx5 list %s entry %p ref++: %u.",
 				list->name, (void *)entry, entry->ref_cnt);
 		}
 		break;
@@ -68,33 +68,33 @@ __cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
 	return entry;
 }
 
-static struct mlx5_cache_entry *
-cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+static struct mlx5_list_entry *
+list_lookup(struct mlx5_list *list, void *ctx, bool reuse)
 {
-	struct mlx5_cache_entry *entry;
+	struct mlx5_list_entry *entry;
 
 	rte_rwlock_read_lock(&list->lock);
-	entry = __cache_lookup(list, ctx, reuse);
+	entry = __list_lookup(list, ctx, reuse);
 	rte_rwlock_read_unlock(&list->lock);
 	return entry;
 }
 
-struct mlx5_cache_entry *
-mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
+struct mlx5_list_entry *
+mlx5_list_lookup(struct mlx5_list *list, void *ctx)
 {
-	return cache_lookup(list, ctx, false);
+	return list_lookup(list, ctx, false);
 }
 
-struct mlx5_cache_entry *
-mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
+struct mlx5_list_entry *
+mlx5_list_register(struct mlx5_list *list, void *ctx)
 {
-	struct mlx5_cache_entry *entry;
+	struct mlx5_list_entry *entry;
 	uint32_t prev_gen_cnt = 0;
 
 	MLX5_ASSERT(list);
 	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
 	/* Lookup with read lock, reuse if found. */
-	entry = cache_lookup(list, ctx, true);
+	entry = list_lookup(list, ctx, true);
 	if (entry)
 		return entry;
 	/* Not found, append with write lock - block read from other threads. */
@@ -102,13 +102,13 @@ mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
 	/* If list changed by other threads before lock, search again. */
 	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
 		/* Lookup and reuse w/o read lock. */
-		entry = __cache_lookup(list, ctx, true);
+		entry = __list_lookup(list, ctx, true);
 		if (entry)
 			goto done;
 	}
 	entry = list->cb_create(list, entry, ctx);
 	if (!entry) {
-		DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
+		DRV_LOG(ERR, "Failed to init mlx5 list %s entry %p.",
 			list->name, (void *)entry);
 		goto done;
 	}
@@ -116,7 +116,7 @@ mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
 	LIST_INSERT_HEAD(&list->head, entry, next);
 	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
 	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
-	DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
+	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.",
 		list->name, (void *)entry, entry->ref_cnt);
 done:
 	rte_rwlock_write_unlock(&list->lock);
@@ -124,12 +124,12 @@ mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
 }
 
 int
-mlx5_cache_unregister(struct mlx5_cache_list *list,
-		      struct mlx5_cache_entry *entry)
+mlx5_list_unregister(struct mlx5_list *list,
+		      struct mlx5_list_entry *entry)
 {
 	rte_rwlock_write_lock(&list->lock);
 	MLX5_ASSERT(entry && entry->next.le_prev);
-	DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
+	DRV_LOG(DEBUG, "mlx5 list %s entry %p ref--: %u.",
 		list->name, (void *)entry, entry->ref_cnt);
 	if (--entry->ref_cnt) {
 		rte_rwlock_write_unlock(&list->lock);
@@ -140,15 +140,15 @@ mlx5_cache_unregister(struct mlx5_cache_list *list,
 	LIST_REMOVE(entry, next);
 	list->cb_remove(list, entry);
 	rte_rwlock_write_unlock(&list->lock);
-	DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
+	DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
 		list->name, (void *)entry);
 	return 0;
 }
 
 void
-mlx5_cache_list_destroy(struct mlx5_cache_list *list)
+mlx5_list_destroy(struct mlx5_list *list)
 {
-	struct mlx5_cache_entry *entry;
+	struct mlx5_list_entry *entry;
 
 	MLX5_ASSERT(list);
 	/* no LIST_FOREACH_SAFE, using while instead */
@@ -156,14 +156,14 @@ mlx5_cache_list_destroy(struct mlx5_cache_list *list)
 		entry = LIST_FIRST(&list->head);
 		LIST_REMOVE(entry, next);
 		list->cb_remove(list, entry);
-		DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
+		DRV_LOG(DEBUG, "mlx5 list %s entry %p destroyed.",
 			list->name, (void *)entry);
 	}
 	memset(list, 0, sizeof(*list));
 }
 
 uint32_t
-mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
+mlx5_list_get_entry_num(struct mlx5_list *list)
 {
 	MLX5_ASSERT(list);
 	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index a509b0a4eb..cfb3cb6180 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -297,19 +297,19 @@ log2above(unsigned int v)
 	return l + r;
 }
 
-/************************ cache list *****************************/
+/************************ mlx5 list *****************************/
 
 /** Maximum size of string for naming. */
 #define MLX5_NAME_SIZE			32
 
-struct mlx5_cache_list;
+struct mlx5_list;
 
 /**
- * Structure of the entry in the cache list, user should define its own struct
+ * Structure of the entry in the mlx5 list; the user should define its own struct
  * that contains this in order to store the data.
  */
-struct mlx5_cache_entry {
-	LIST_ENTRY(mlx5_cache_entry) next; /* Entry pointers in the list. */
+struct mlx5_list_entry {
+	LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
 	uint32_t ref_cnt; /* Reference count. */
 };
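
For illustration (a minimal sketch, not part of the diff): the embedding described
in the comment above means a user structure contains the entry and recovers the
containing object with container_of(), the same pattern the mlx5_hrxq callbacks
use; the my_action names below are hypothetical.

	/* Hypothetical user object embedding the generic mlx5 list entry. */
	struct my_action {
		struct mlx5_list_entry entry;	/* List linkage + reference count. */
		uint32_t id;			/* User payload. */
	};

	/* Recover the containing object from a list entry, as the callbacks do. */
	static inline struct my_action *
	my_action_from_entry(struct mlx5_list_entry *e)
	{
		return container_of(e, struct my_action, entry); /* rte_common.h */
	}
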
 
@@ -317,18 +317,18 @@ struct mlx5_cache_entry {
  * Type of callback function for entry removal.
  *
  * @param list
- *   The cache list.
+ *   The mlx5 list.
  * @param entry
  *   The entry in the list.
  */
-typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list,
-				     struct mlx5_cache_entry *entry);
+typedef void (*mlx5_list_remove_cb)(struct mlx5_list *list,
+				     struct mlx5_list_entry *entry);
 
 /**
  * Type of function for user defined matching.
  *
  * @param list
- *   The cache list.
+ *   The mlx5 list.
  * @param entry
  *   The entry in the list.
  * @param ctx
@@ -337,14 +337,14 @@ typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list,
  * @return
  *   0 if matching, non-zero number otherwise.
  */
-typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list,
-				   struct mlx5_cache_entry *entry, void *ctx);
+typedef int (*mlx5_list_match_cb)(struct mlx5_list *list,
+				   struct mlx5_list_entry *entry, void *ctx);
 
 /**
- * Type of function for user defined cache list entry creation.
+ * Type of function for user-defined mlx5 list entry creation.
  *
  * @param list
- *   The cache list.
+ *   The mlx5 list.
  * @param entry
  *   The newly allocated entry, or NULL if the list entry size is unspecified;
  *   in that case the new entry has to be allocated by the callback and returned.
@@ -354,46 +354,46 @@ typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list,
  * @return
  *   Pointer of entry on success, NULL otherwise.
  */
-typedef struct mlx5_cache_entry *(*mlx5_cache_create_cb)
-				 (struct mlx5_cache_list *list,
-				  struct mlx5_cache_entry *entry,
+typedef struct mlx5_list_entry *(*mlx5_list_create_cb)
+				 (struct mlx5_list *list,
+				  struct mlx5_list_entry *entry,
 				  void *ctx);
 
 /**
- * Linked cache list structure.
+ * Linked mlx5 list structure.
  *
- * Entry in cache list could be reused if entry already exists,
+ * An entry in the mlx5 list can be reused if it already exists;
  * the reference count is increased and the existing entry is returned.
  *
  * When an entry is destroyed, its reference count is decreased and the
  * entry is only freed once no reference remains.
  *
- * Linked list cache is designed for limited number of entries cache,
+ * The linked list is designed for a limited number of entries that are
  * mostly read and rarely modified.
  *
- * For huge amount of entries cache, please consider hash list cache.
+ * For a huge number of entries, please consider the hash list instead.
  *
  */
-struct mlx5_cache_list {
-	char name[MLX5_NAME_SIZE]; /**< Name of the cache list. */
+struct mlx5_list {
+	char name[MLX5_NAME_SIZE]; /**< Name of the mlx5 list. */
 	uint32_t entry_sz; /**< Entry size, 0: use create callback. */
 	rte_rwlock_t lock; /* read/write lock. */
 	uint32_t gen_cnt; /* List modification will update generation count. */
 	uint32_t count; /* number of entries in list. */
 	void *ctx; /* user objects target to callback. */
-	mlx5_cache_create_cb cb_create; /**< entry create callback. */
-	mlx5_cache_match_cb cb_match; /**< entry match callback. */
-	mlx5_cache_remove_cb cb_remove; /**< entry remove callback. */
-	LIST_HEAD(mlx5_cache_head, mlx5_cache_entry) head;
+	mlx5_list_create_cb cb_create; /**< entry create callback. */
+	mlx5_list_match_cb cb_match; /**< entry match callback. */
+	mlx5_list_remove_cb cb_remove; /**< entry remove callback. */
+	LIST_HEAD(mlx5_list_head, mlx5_list_entry) head;
 };
 
 /**
- * Initialize a cache list.
+ * Create an mlx5 list.
  *
  * @param list
  *   Pointer to the mlx5 list.
  * @param name
- *   Name of the cache list.
+ *   Name of the mlx5 list.
  * @param entry_size
  *   Entry size to allocate, 0 to allocate by creation callback.
  * @param ctx
@@ -407,11 +407,11 @@ struct mlx5_cache_list {
  * @return
  *   0 on success, otherwise failure.
  */
-int mlx5_cache_list_init(struct mlx5_cache_list *list,
+int mlx5_list_create(struct mlx5_list *list,
 			 const char *name, uint32_t entry_size, void *ctx,
-			 mlx5_cache_create_cb cb_create,
-			 mlx5_cache_match_cb cb_match,
-			 mlx5_cache_remove_cb cb_remove);
+			 mlx5_list_create_cb cb_create,
+			 mlx5_list_match_cb cb_match,
+			 mlx5_list_remove_cb cb_remove);
 
 /**
  * Search an entry matching the key.
@@ -420,18 +420,18 @@ int mlx5_cache_list_init(struct mlx5_cache_list *list,
  * this function only in main thread.
  *
  * @param list
- *   Pointer to the cache list.
+ *   Pointer to the mlx5 list.
  * @param ctx
  *   Common context parameter used by entry callback function.
  *
  * @return
- *   Pointer of the cache entry if found, NULL otherwise.
+ *   Pointer to the list entry if found, NULL otherwise.
  */
-struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list,
+struct mlx5_list_entry *mlx5_list_lookup(struct mlx5_list *list,
 					   void *ctx);
 
 /**
- * Reuse or create an entry to the cache list.
+ * Reuse or create an entry in the mlx5 list.
  *
  * @param list
  *   Pointer to the mlx5 list.
@@ -441,42 +441,42 @@ struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list,
  * @return
  *   Registered entry on success, NULL otherwise.
  */
-struct mlx5_cache_entry *mlx5_cache_register(struct mlx5_cache_list *list,
+struct mlx5_list_entry *mlx5_list_register(struct mlx5_list *list,
 					     void *ctx);
 
 /**
- * Remove an entry from the cache list.
+ * Remove an entry from the mlx5 list.
  *
  * User should guarantee the validity of the entry.
  *
  * @param list
  *   Pointer to the mlx5 list.
  * @param entry
- *   Entry to be removed from the cache list table.
+ *   Entry to be removed from the mlx5 list.
  * @return
  *   0 if the entry was removed, 1 if the entry is still referenced.
  */
-int mlx5_cache_unregister(struct mlx5_cache_list *list,
-			  struct mlx5_cache_entry *entry);
+int mlx5_list_unregister(struct mlx5_list *list,
+			  struct mlx5_list_entry *entry);
 
 /**
- * Destroy the cache list.
+ * Destroy the mlx5 list.
  *
  * @param list
- *   Pointer to the cache list.
+ *   Pointer to the mlx5 list.
  */
-void mlx5_cache_list_destroy(struct mlx5_cache_list *list);
+void mlx5_list_destroy(struct mlx5_list *list);
 
 /**
- * Get entry number from the cache list.
+ * Get the number of entries in the mlx5 list.
  *
  * @param list
  *   Pointer to the mlx5 list.
  * @return
- *   Cache list entry number.
+ *   Number of entries in the mlx5 list.
  */
 uint32_t
-mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list);
+mlx5_list_get_entry_num(struct mlx5_list *list);
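
To make the renamed API concrete, a minimal usage sketch under the contract above
(match callback mandatory, create and remove callbacks supplied together); the
my_obj/my_* names are hypothetical and only illustrate the flow, and the usual
driver headers (mlx5_utils.h, mlx5_malloc.h, rte_common.h) are assumed for
mlx5_malloc(), mlx5_free(), container_of(), SOCKET_ID_ANY and __rte_unused.

	struct my_obj {
		struct mlx5_list_entry entry;	/* Embedded linkage + ref count. */
		uint32_t key;
	};

	static int
	my_match_cb(struct mlx5_list *list __rte_unused,
		    struct mlx5_list_entry *entry, void *ctx)
	{
		/* Return 0 on match, non-zero otherwise. */
		return container_of(entry, struct my_obj, entry)->key !=
		       *(uint32_t *)ctx;
	}

	static struct mlx5_list_entry *
	my_create_cb(struct mlx5_list *list __rte_unused,
		     struct mlx5_list_entry *entry __rte_unused, void *ctx)
	{
		struct my_obj *obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*obj),
						 0, SOCKET_ID_ANY);

		if (!obj)
			return NULL;
		obj->key = *(uint32_t *)ctx;
		return &obj->entry;
	}

	static void
	my_remove_cb(struct mlx5_list *list __rte_unused,
		     struct mlx5_list_entry *entry)
	{
		mlx5_free(container_of(entry, struct my_obj, entry));
	}

	static void
	my_obj_example(void *user_ctx)
	{
		struct mlx5_list objs;
		struct mlx5_list_entry *e;
		uint32_t key = 42;

		/* entry_size is 0 because the create callback allocates. */
		mlx5_list_create(&objs, "my_objs", 0, user_ctx,
				 my_create_cb, my_match_cb, my_remove_cb);
		e = mlx5_list_register(&objs, &key);	/* create or reuse (ref++) */
		if (e)
			mlx5_list_unregister(&objs, e);	/* ref--, freed at zero */
		mlx5_list_destroy(&objs);
	}

Registering the same key a second time, from this or another thread, returns the
same entry with its reference count bumped while mlx5_list_get_entry_num() still
reports a single entry; every reference must be dropped through
mlx5_list_unregister() before the entry is actually removed.
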
 
 /********************************* indexed pool *************************/
 
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 17716b66c9..bcf72dc6db 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -610,10 +610,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 			err = ENOTSUP;
 			goto error;
 	}
-	mlx5_cache_list_init(&priv->hrxqs, "hrxq", 0, eth_dev,
-			     mlx5_hrxq_create_cb,
-			     mlx5_hrxq_match_cb,
-			     mlx5_hrxq_remove_cb);
+	mlx5_list_create(&priv->hrxqs, "hrxq", 0, eth_dev,
+		mlx5_hrxq_create_cb, mlx5_hrxq_match_cb,
+		mlx5_hrxq_remove_cb);
 	/* Query availability of metadata reg_c's. */
 	err = mlx5_flow_discover_mreg_c(eth_dev);
 	if (err < 0) {
-- 
2.25.1


