[dpdk-dev] [PATCH 7/8] net/mlx5: split the counter struct

Suanming Mou suanmingm at mellanox.com
Tue Apr 7 05:59:46 CEST 2020


Currently, the counter struct holds both the members used by batch
counters and those used only by non-batch counters. The members that
only non-batch counters need cost 16 bytes of extra memory per batch
counter. Since there are normally only a few non-batch counters,
mixing the non-batch and batch counter members becomes quite expensive
for batch counters: if 1 million batch counters are created, 16 MB of
memory is allocated that the batch counters never use.

Splitting the mlx5_flow_counter struct between batch and non-batch
counters saves this memory.
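
For reference, here is a minimal standalone C sketch of the resulting
layout (illustrative, simplified names and field sets, not the driver's
actual definitions): the extension records for non-batch counters live
in the same allocation, directly after the pool structure, and are
looked up by the counter's offset within the pool, mirroring what the
new MLX5_GET_POOL_CNT_EXT and MLX5_CNT_TO_CNT_EXT macros do.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define COUNTERS_PER_POOL 512 /* stand-in for MLX5_COUNTERS_PER_POOL */

/* Members every counter needs (kept inline for both flavors). */
struct flow_counter {
	uint64_t hits;
	uint64_t bytes;
	void *action;
};

/* Members only non-batch counters need (16 bytes on 64-bit). */
struct flow_counter_ext {
	uint32_t shared:1;
	uint32_t batch:1;
	uint32_t ref_cnt:30;
	uint32_t id;
	void *dcs;
};

struct flow_counter_pool {
	struct flow_counter counters_raw[COUNTERS_PER_POOL];
	/* For non-batch pools, COUNTERS_PER_POOL extension records
	 * immediately follow this structure in the same allocation.
	 */
};

/* Same idea as MLX5_GET_POOL_CNT_EXT: index the trailing array. */
#define GET_POOL_CNT_EXT(pool, offset) \
	(&((struct flow_counter_ext *)((pool) + 1))[offset])

/* Same idea as MLX5_CNT_TO_CNT_EXT: derive the offset from the pointer. */
#define CNT_TO_CNT_EXT(pool, cnt) \
	GET_POOL_CNT_EXT(pool, (cnt) - (pool)->counters_raw)

int main(void)
{
	/* Batch pools allocate only the pool itself; non-batch pools
	 * append the extension array, so batch counters no longer pay
	 * the extra 16 bytes per counter.
	 */
	size_t batch_sz = sizeof(struct flow_counter_pool);
	size_t nonbatch_sz = batch_sz +
		COUNTERS_PER_POOL * sizeof(struct flow_counter_ext);
	struct flow_counter_pool *pool = calloc(1, nonbatch_sz);

	if (pool == NULL)
		return 1;
	struct flow_counter *cnt = &pool->counters_raw[7];

	CNT_TO_CNT_EXT(pool, cnt)->id = 42;
	printf("batch pool %zu bytes, non-batch pool %zu bytes, ext id %u\n",
	       batch_sz, nonbatch_sz,
	       (unsigned)GET_POOL_CNT_EXT(pool, 7)->id);
	free(pool);
	return 0;
}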

Signed-off-by: Suanming Mou <suanmingm at mellanox.com>
Acked-by: Matan Azrad <matan at mellanox.com>
---
 drivers/net/mlx5/mlx5.c            |   6 +-
 drivers/net/mlx5/mlx5.h            |  32 ++++---
 drivers/net/mlx5/mlx5_flow_dv.c    | 173 ++++++++++++++++++-------------------
 drivers/net/mlx5/mlx5_flow_verbs.c |  58 ++++++++-----
 4 files changed, 145 insertions(+), 124 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 6a11b14..efdd53c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -391,9 +391,11 @@ struct mlx5_flow_id_pool *
 					claim_zero
 					(mlx5_glue->destroy_flow_action
 					       (pool->counters_raw[j].action));
-				if (!batch && pool->counters_raw[j].dcs)
+				if (!batch && MLX5_GET_POOL_CNT_EXT
+				    (pool, j)->dcs)
 					claim_zero(mlx5_devx_cmd_destroy
-						  (pool->counters_raw[j].dcs));
+						  (MLX5_GET_POOL_CNT_EXT
+						  (pool, j)->dcs));
 			}
 			TAILQ_REMOVE(&sh->cmng.ccont[i].pool_list, pool,
 				     next);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 1501e61..6bbb5dd 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -247,6 +247,11 @@ struct mlx5_drop {
  */
 #define MLX5_MAKE_CNT_IDX(pi, offset) \
 	((pi) * MLX5_COUNTERS_PER_POOL + (offset) + 1)
+#define MLX5_CNT_TO_CNT_EXT(pool, cnt) (&((struct mlx5_flow_counter_ext *) \
+			    ((pool) + 1))[((cnt) - (pool)->counters_raw)])
+#define MLX5_GET_POOL_CNT_EXT(pool, offset) \
+			      (&((struct mlx5_flow_counter_ext *) \
+			      ((pool) + 1))[offset])
 
 struct mlx5_flow_counter_pool;
 
@@ -255,15 +260,25 @@ struct flow_counter_stats {
 	uint64_t bytes;
 };
 
-/* Counters information. */
+/* Generic counters information. */
 struct mlx5_flow_counter {
 	TAILQ_ENTRY(mlx5_flow_counter) next;
 	/**< Pointer to the next flow counter structure. */
+	union {
+		uint64_t hits; /**< Reset value of hits packets. */
+		int64_t query_gen; /**< Generation of the last release. */
+	};
+	uint64_t bytes; /**< Reset value of bytes. */
+	void *action; /**< Pointer to the dv action. */
+};
+
+/* Extended counter information for non-batch counters. */
+struct mlx5_flow_counter_ext {
 	uint32_t shared:1; /**< Share counter ID with other flow rules. */
 	uint32_t batch: 1;
 	/**< Whether the counter was allocated by batch command. */
 	uint32_t ref_cnt:30; /**< Reference counter. */
-	uint32_t id; /**< Counter ID. */
+	uint32_t id; /**< User counter ID. */
 	union {  /**< Holds the counters for the rule. */
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
 		struct ibv_counter_set *cs;
@@ -271,19 +286,13 @@ struct mlx5_flow_counter {
 		struct ibv_counters *cs;
 #endif
 		struct mlx5_devx_obj *dcs; /**< Counter Devx object. */
-		struct mlx5_flow_counter_pool *pool; /**< The counter pool. */
 	};
-	union {
-		uint64_t hits; /**< Reset value of hits packets. */
-		int64_t query_gen; /**< Generation of the last release. */
-	};
-	uint64_t bytes; /**< Reset value of bytes. */
-	void *action; /**< Pointer to the dv action. */
 };
 
+
 TAILQ_HEAD(mlx5_counters, mlx5_flow_counter);
 
-/* Counter pool structure - query is in pool resolution. */
+/* Generic counter pool structure - query is in pool resolution. */
 struct mlx5_flow_counter_pool {
 	TAILQ_ENTRY(mlx5_flow_counter_pool) next;
 	struct mlx5_counters counters; /* Free counter list. */
@@ -299,7 +308,8 @@ struct mlx5_flow_counter_pool {
 	rte_spinlock_t sl; /* The pool lock. */
 	struct mlx5_counter_stats_raw *raw;
 	struct mlx5_counter_stats_raw *raw_hw; /* The raw on HW working. */
-	struct mlx5_flow_counter counters_raw[]; /* The pool counters memory. */
+	struct mlx5_flow_counter counters_raw[MLX5_COUNTERS_PER_POOL];
+	/* The pool counters memory. */
 };
 
 struct mlx5_counter_stats_raw;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index b7daa8f..e051f8b 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3847,7 +3847,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0);
 	struct mlx5_flow_counter_pool *pool;
-	struct mlx5_flow_counter *cnt = NULL;
+	struct mlx5_flow_counter_ext *cnt_ext;
 	struct mlx5_devx_obj *dcs = NULL;
 	uint32_t offset;
 
@@ -3869,20 +3869,18 @@ struct field_modify_info modify_tcp[] = {
 		pool = TAILQ_FIRST(&cont->pool_list);
 	}
 	offset = dcs->id % MLX5_COUNTERS_PER_POOL;
-	cnt = &pool->counters_raw[offset];
-	struct mlx5_flow_counter tmpl = {
-		.shared = shared,
-		.ref_cnt = 1,
-		.id = id,
-		.dcs = dcs,
-	};
-	tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
-	if (!tmpl.action) {
-		claim_zero(mlx5_devx_cmd_destroy(cnt->dcs));
+	cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, offset);
+	cnt_ext->shared = shared;
+	cnt_ext->ref_cnt = 1;
+	cnt_ext->id = id;
+	cnt_ext->dcs = dcs;
+	pool->counters_raw[offset].action =
+	      mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
+	if (!pool->counters_raw[offset].action) {
+		claim_zero(mlx5_devx_cmd_destroy(dcs));
 		rte_errno = errno;
 		return 0;
 	}
-	*cnt = tmpl;
 	return MLX5_MAKE_CNT_IDX(pool->index, offset);
 }
 
@@ -3892,20 +3890,16 @@ struct field_modify_info modify_tcp[] = {
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
  * @param[in] counter
- *   Index to the counter handler.
+ *   Pointer to the extended counter.
  */
 static void
 flow_dv_counter_release_fallback(struct rte_eth_dev *dev __rte_unused,
-				 struct mlx5_flow_counter *counter)
+				 struct mlx5_flow_counter_ext *counter)
 {
 	if (!counter)
 		return;
-	if (--counter->ref_cnt == 0) {
-		claim_zero(mlx5_glue->destroy_flow_action(counter->action));
-		claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
-		counter->action = NULL;
-		counter->dcs = NULL;
-	}
+	claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
+	counter->dcs = NULL;
 }
 
 /**
@@ -3925,7 +3919,7 @@ struct field_modify_info modify_tcp[] = {
  */
 static inline int
 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
-		     struct mlx5_flow_counter *cnt, uint64_t *pkts,
+		     struct mlx5_flow_counter_ext *cnt, uint64_t *pkts,
 		     uint64_t *bytes)
 {
 	return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
@@ -3933,25 +3927,6 @@ struct field_modify_info modify_tcp[] = {
 }
 
 /**
- * Get a pool by a counter.
- *
- * @param[in] cnt
- *   Pointer to the counter.
- *
- * @return
- *   The counter pool.
- */
-static struct mlx5_flow_counter_pool *
-flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
-{
-	if (!cnt->batch) {
-		cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
-		return (struct mlx5_flow_counter_pool *)cnt - 1;
-	}
-	return cnt->pool;
-}
-
-/**
  * Get DV flow counter by index.
  *
  * @param[in] dev
@@ -4159,7 +4134,7 @@ struct field_modify_info modify_tcp[] = {
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
  * @param[in] cnt
- *   Pointer to the flow counter.
+ *   Index to the flow counter.
  * @param[out] pkts
  *   The statistics value of packets.
  * @param[out] bytes
@@ -4169,17 +4144,23 @@ struct field_modify_info modify_tcp[] = {
  *   0 on success, otherwise a negative errno value and rte_errno is set.
  */
 static inline int
-_flow_dv_query_count(struct rte_eth_dev *dev,
-		     struct mlx5_flow_counter *cnt, uint64_t *pkts,
+_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
 		     uint64_t *bytes)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_counter_pool *pool =
-			flow_dv_counter_pool_get(cnt);
-	int offset = cnt - &pool->counters_raw[0];
+	struct mlx5_flow_counter_pool *pool = NULL;
+	struct mlx5_flow_counter *cnt;
+	struct mlx5_flow_counter_ext *cnt_ext = NULL;
+	int offset;
 
-	if (priv->counter_fallback)
-		return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
+	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
+	MLX5_ASSERT(pool);
+	if (counter < MLX5_CNT_BATCH_OFFSET) {
+		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+		if (priv->counter_fallback)
+			return _flow_dv_query_count_fallback(dev, cnt_ext,
+							     pkts, bytes);
+	}
 
 	rte_spinlock_lock(&pool->sl);
 	/*
@@ -4187,10 +4168,11 @@ struct field_modify_info modify_tcp[] = {
 	 * current allocated in parallel to the host reading.
 	 * In this case the new counter values must be reported as 0.
 	 */
-	if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
+	if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
 		*pkts = 0;
 		*bytes = 0;
 	} else {
+		offset = cnt - &pool->counters_raw[0];
 		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
 		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
 	}
@@ -4229,8 +4211,10 @@ struct field_modify_info modify_tcp[] = {
 		if (!cont)
 			return NULL;
 	}
-	size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
-			sizeof(struct mlx5_flow_counter);
+	size = sizeof(*pool);
+	if (!batch)
+		size += MLX5_COUNTERS_PER_POOL *
+			sizeof(struct mlx5_flow_counter_ext);
 	pool = rte_calloc(__func__, 1, size, 0);
 	if (!pool) {
 		rte_errno = ENOMEM;
@@ -4307,9 +4291,10 @@ struct field_modify_info modify_tcp[] = {
 			rte_atomic64_set(&pool->a64_dcs,
 					 (int64_t)(uintptr_t)dcs);
 		}
-		cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
+		i = dcs->id % MLX5_COUNTERS_PER_POOL;
+		cnt = &pool->counters_raw[i];
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
-		cnt->dcs = dcs;
+		MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
 		*cnt_free = cnt;
 		return cont;
 	}
@@ -4328,7 +4313,6 @@ struct field_modify_info modify_tcp[] = {
 	pool = TAILQ_FIRST(&cont->pool_list);
 	for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
 		cnt = &pool->counters_raw[i];
-		cnt->pool = pool;
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
 	}
 	*cnt_free = &pool->counters_raw[0];
@@ -4346,13 +4330,13 @@ struct field_modify_info modify_tcp[] = {
  *   mlx5 flow counter pool in the container,
  *
  * @return
- *   NULL if not existed, otherwise pointer to the shared counter.
+ *   NULL if not found, otherwise pointer to the shared extended counter.
  */
-static struct mlx5_flow_counter *
+static struct mlx5_flow_counter_ext *
 flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
 			      struct mlx5_flow_counter_pool **ppool)
 {
-	static struct mlx5_flow_counter *cnt;
+	static struct mlx5_flow_counter_ext *cnt;
 	struct mlx5_flow_counter_pool *pool;
 	uint32_t i;
 	uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
@@ -4360,10 +4344,10 @@ struct field_modify_info modify_tcp[] = {
 	for (i = 0; i < n_valid; i++) {
 		pool = cont->pools[i];
 		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
-			cnt = &pool->counters_raw[i];
+			cnt = MLX5_GET_POOL_CNT_EXT(pool, i);
 			if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
 				if (ppool)
-					*ppool = pool;
+					*ppool = cont->pools[i];
 				return cnt;
 			}
 		}
@@ -4393,6 +4377,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_counter_pool *pool = NULL;
 	struct mlx5_flow_counter *cnt_free = NULL;
+	struct mlx5_flow_counter_ext *cnt_ext = NULL;
 	/*
 	 * Currently group 0 flow counter cannot be assigned to a flow if it is
 	 * not the first one in the batch counter allocation, so it is better
@@ -4411,15 +4396,16 @@ struct field_modify_info modify_tcp[] = {
 		return 0;
 	}
 	if (shared) {
-		cnt_free = flow_dv_counter_shared_search(cont, id, &pool);
-		if (cnt_free) {
-			if (cnt_free->ref_cnt + 1 == 0) {
+		cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
+		if (cnt_ext) {
+			if (cnt_ext->ref_cnt + 1 == 0) {
 				rte_errno = E2BIG;
 				return 0;
 			}
-			cnt_free->ref_cnt++;
+			cnt_ext->ref_cnt++;
 			cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
-				  (cnt_free - pool->counters_raw) + 1;
+				  (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
+				  + 1;
 			return cnt_idx;
 		}
 	}
@@ -4449,7 +4435,8 @@ struct field_modify_info modify_tcp[] = {
 			return 0;
 		pool = TAILQ_FIRST(&cont->pool_list);
 	}
-	cnt_free->batch = batch;
+	if (!batch)
+		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
 	/* Create a DV counter action only in the first time usage. */
 	if (!cnt_free->action) {
 		uint16_t offset;
@@ -4460,7 +4447,7 @@ struct field_modify_info modify_tcp[] = {
 			dcs = pool->min_dcs;
 		} else {
 			offset = 0;
-			dcs = cnt_free->dcs;
+			dcs = cnt_ext->dcs;
 		}
 		cnt_free->action = mlx5_glue->dv_create_flow_action_counter
 					(dcs->obj, offset);
@@ -4469,13 +4456,18 @@ struct field_modify_info modify_tcp[] = {
 			return 0;
 		}
 	}
+	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
+				    (cnt_free - pool->counters_raw));
+	cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
 	/* Update the counter reset values. */
-	if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
+	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
 				 &cnt_free->bytes))
 		return 0;
-	cnt_free->shared = shared;
-	cnt_free->ref_cnt = 1;
-	cnt_free->id = id;
+	if (cnt_ext) {
+		cnt_ext->shared = shared;
+		cnt_ext->ref_cnt = 1;
+		cnt_ext->id = id;
+	}
 	if (!priv->sh->cmng.query_thread_on)
 		/* Start the asynchronous batch query by the host thread. */
 		mlx5_set_query_alarm(priv->sh);
@@ -4485,9 +4477,6 @@ struct field_modify_info modify_tcp[] = {
 		TAILQ_REMOVE(&cont->pool_list, pool, next);
 		TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
 	}
-	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
-				    (cnt_free - pool->counters_raw));
-	cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
 	return cnt_idx;
 }
 
@@ -4503,27 +4492,33 @@ struct field_modify_info modify_tcp[] = {
 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_counter_pool *pool;
+	struct mlx5_flow_counter_pool *pool = NULL;
 	struct mlx5_flow_counter *cnt;
+	struct mlx5_flow_counter_ext *cnt_ext = NULL;
 
 	if (!counter)
 		return;
 	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
+	MLX5_ASSERT(pool);
+	if (counter < MLX5_CNT_BATCH_OFFSET)
+		cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+	if (cnt_ext && --cnt_ext->ref_cnt)
+		return;
 	if (priv->counter_fallback) {
-		flow_dv_counter_release_fallback(dev, cnt);
+		claim_zero(mlx5_glue->destroy_flow_action(cnt->action));
+		flow_dv_counter_release_fallback(dev, cnt_ext);
+		cnt->action = NULL;
 		return;
 	}
-	if (--cnt->ref_cnt == 0) {
-		/* Put the counter in the end - the last updated one. */
-		TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
-		/*
-		 * Counters released between query trigger and handler need
-		 * to wait the next round of query. Since the packets arrive
-		 * in the gap period will not be taken into account to the
-		 * old counter.
-		 */
-		cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
-	}
+	/* Put the counter in the end - the last updated one. */
+	TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
+	/*
+	 * Counters released between query trigger and handler need
+	 * to wait the next round of query. Since the packets arrive
+	 * in the gap period will not be taken into account to the
+	 * old counter.
+	 */
+	cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
 }
 
 /**
@@ -8525,7 +8520,7 @@ struct field_modify_info modify_tcp[] = {
 
 		cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
 						 NULL);
-		int err = _flow_dv_query_count(dev, cnt, &pkts,
+		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
 					       &bytes);
 
 		if (err)
@@ -9035,10 +9030,10 @@ struct field_modify_info modify_tcp[] = {
 	if (!priv->config.devx)
 		return -1;
 
-	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
-	ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes);
+	ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
 	if (ret)
 		return -1;
+	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
 	*pkts = inn_pkts - cnt->hits;
 	*bytes = inn_bytes - cnt->bytes;
 	if (clear) {
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 227f963..eb558fd 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -83,7 +83,7 @@
  */
 static int
 flow_verbs_counter_create(struct rte_eth_dev *dev,
-			  struct mlx5_flow_counter *counter)
+			  struct mlx5_flow_counter_ext *counter)
 {
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
 	struct mlx5_priv *priv = dev->data->dev_private;
@@ -153,6 +153,7 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, 0, 0);
 	struct mlx5_flow_counter_pool *pool = NULL;
+	struct mlx5_flow_counter_ext *cnt_ext = NULL;
 	struct mlx5_flow_counter *cnt = NULL;
 	uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
 	uint32_t pool_idx;
@@ -163,9 +164,9 @@
 		for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
 			pool = cont->pools[pool_idx];
 			for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
-				cnt = &pool->counters_raw[i];
-				if (cnt->shared && cnt->id == id) {
-					cnt->ref_cnt++;
+				cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
+				if (cnt_ext->shared && cnt_ext->id == id) {
+					cnt_ext->ref_cnt++;
 					return MLX5_MAKE_CNT_IDX(pool_idx, i);
 				}
 			}
@@ -200,7 +201,8 @@
 			cont->n += MLX5_CNT_CONTAINER_RESIZE;
 		}
 		/* Allocate memory for new pool*/
-		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
+		size = sizeof(*pool) + sizeof(*cnt_ext) *
+		       MLX5_COUNTERS_PER_POOL;
 		pool = rte_calloc(__func__, 1, size, 0);
 		if (!pool)
 			return 0;
@@ -214,16 +216,18 @@
 		rte_atomic16_add(&cont->n_valid, 1);
 		TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
 	}
-	cnt->id = id;
-	cnt->shared = shared;
-	cnt->ref_cnt = 1;
+	i = cnt - pool->counters_raw;
+	cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
+	cnt_ext->id = id;
+	cnt_ext->shared = shared;
+	cnt_ext->ref_cnt = 1;
 	cnt->hits = 0;
 	cnt->bytes = 0;
 	/* Create counter with Verbs. */
-	ret = flow_verbs_counter_create(dev, cnt);
+	ret = flow_verbs_counter_create(dev, cnt_ext);
 	if (!ret) {
 		TAILQ_REMOVE(&pool->counters, cnt, next);
-		return MLX5_MAKE_CNT_IDX(pool_idx, (cnt - pool->counters_raw));
+		return MLX5_MAKE_CNT_IDX(pool_idx, i);
 	}
 	/* Some error occurred in Verbs library. */
 	rte_errno = -ret;
@@ -243,16 +247,18 @@
 {
 	struct mlx5_flow_counter_pool *pool;
 	struct mlx5_flow_counter *cnt;
+	struct mlx5_flow_counter_ext *cnt_ext;
 
 	cnt = flow_verbs_counter_get_by_idx(dev, counter,
 					    &pool);
-	if (--cnt->ref_cnt == 0) {
+	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+	if (--cnt_ext->ref_cnt == 0) {
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
-		claim_zero(mlx5_glue->destroy_counter_set(cnt->cs));
-		cnt->cs = NULL;
+		claim_zero(mlx5_glue->destroy_counter_set(cnt_ext->cs));
+		cnt_ext->cs = NULL;
 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
-		claim_zero(mlx5_glue->destroy_counters(cnt->cs));
-		cnt->cs = NULL;
+		claim_zero(mlx5_glue->destroy_counters(cnt_ext->cs));
+		cnt_ext->cs = NULL;
 #endif
 		TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
 	}
@@ -272,13 +278,16 @@
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
 	if (flow->counter) {
+		struct mlx5_flow_counter_pool *pool;
 		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
-						(dev, flow->counter, NULL);
+						(dev, flow->counter, &pool);
+		struct mlx5_flow_counter_ext *cnt_ext = MLX5_CNT_TO_CNT_EXT
+							(pool, cnt);
 		struct rte_flow_query_count *qc = data;
 		uint64_t counters[2] = {0, 0};
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
 		struct ibv_query_counter_set_attr query_cs_attr = {
-			.cs = cnt->cs,
+			.cs = cnt_ext->cs,
 			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
 		};
 		struct ibv_counter_set_data query_out = {
@@ -289,7 +298,7 @@
 						       &query_out);
 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
 		int err = mlx5_glue->query_counters
-			       (cnt->cs, counters,
+			       (cnt_ext->cs, counters,
 				RTE_DIM(counters),
 				IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
 #endif
@@ -1057,9 +1066,11 @@
 {
 	const struct rte_flow_action_count *count = action->conf;
 	struct rte_flow *flow = dev_flow->flow;
-	struct mlx5_flow_counter *cnt = NULL;
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+	struct mlx5_flow_counter_pool *pool;
+	struct mlx5_flow_counter *cnt = NULL;
+	struct mlx5_flow_counter_ext *cnt_ext;
 	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
 	struct ibv_flow_spec_counter_action counter = {
 		.type = IBV_FLOW_SPEC_ACTION_COUNT,
@@ -1077,12 +1088,15 @@
 						  "cannot get counter"
 						  " context.");
 	}
-	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, NULL);
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
-	counter.counter_set_handle = cnt->cs->handle;
+	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
+	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+	counter.counter_set_handle = cnt_ext->cs->handle;
 	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
-	counter.counters = cnt->cs;
+	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
+	cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
+	counter.counters = cnt_ext->cs;
 	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
 #endif
 	return 0;
-- 
1.8.3.1


