[dpdk-dev] [PATCH 22/36] event/cnxk: add devargs to disable NPA

pbhagavatula at marvell.com
Sat Mar 6 17:29:27 CET 2021


From: Pavan Nikhilesh <pbhagavatula at marvell.com>

If the chunks are allocated from NPA, TIM can automatically free them
while traversing the list of chunks.
Add a devargs option to disable NPA and use a software mempool to manage
chunks instead.

Example:
	--dev "0002:0e:00.0,tim_disable_npa=1"

Signed-off-by: Shijith Thotton <sthotton at marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
---
 doc/guides/eventdevs/cnxk.rst       | 10 ++++
 drivers/event/cnxk/cn10k_eventdev.c |  3 +-
 drivers/event/cnxk/cn9k_eventdev.c  |  3 +-
 drivers/event/cnxk/cnxk_eventdev.h  |  9 +++
 drivers/event/cnxk/cnxk_tim_evdev.c | 86 +++++++++++++++++++++--------
 drivers/event/cnxk/cnxk_tim_evdev.h |  5 ++
 6 files changed, 92 insertions(+), 24 deletions(-)

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index 662df2971..9e14f99f2 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -93,6 +93,16 @@ Runtime Config Options
 
     -a 0002:0e:00.0,qos=[1-50-50-50]
 
+- ``TIM disable NPA``
+
+  By default, chunks are allocated from NPA, so TIM can automatically free
+  them while traversing the list of chunks. The ``tim_disable_npa`` devargs
+  parameter disables NPA and instead uses a software mempool to manage
+  chunks.
+
+  For example::
+
+    -a 0002:0e:00.0,tim_disable_npa=1
+
 Debugging Options
 -----------------
 
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 30ca0d901..807e666d3 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -502,4 +502,5 @@ RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
 RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
 			      CNXK_SSO_GGRP_QOS "=<string>"
-			      CN10K_SSO_GW_MODE "=<int>");
+			      CN10K_SSO_GW_MODE "=<int>"
+			      CNXK_TIM_DISABLE_NPA "=1");
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 773152e55..3e27fce4a 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -571,4 +571,5 @@ RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
 RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
 			      CNXK_SSO_GGRP_QOS "=<string>"
-			      CN9K_SSO_SINGLE_WS "=1");
+			      CN9K_SSO_SINGLE_WS "=1"
+			      CNXK_TIM_DISABLE_NPA "=1");
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index e4051a64b..487c7f822 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -159,6 +159,15 @@ struct cnxk_sso_hws_cookie {
 	bool configured;
 } __rte_cache_aligned;
 
+static inline int
+parse_kvargs_flag(const char *key, const char *value, void *opaque)
+{
+	RTE_SET_USED(key);
+
+	*(uint8_t *)opaque = !!atoi(value);
+	return 0;
+}
+
 static inline int
 parse_kvargs_value(const char *key, const char *value, void *opaque)
 {
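
The flag handler added above follows the usual rte_kvargs pattern; a
self-contained sketch of how such a key is consumed (the standalone main()
and printf are for illustration only):

/* Illustrative sketch: parse "tim_disable_npa=1" with rte_kvargs. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <rte_kvargs.h>

static int
flag_handler(const char *key, const char *value, void *opaque)
{
	(void)key;
	/* Any non-zero value sets the flag. */
	*(uint8_t *)opaque = !!atoi(value);
	return 0;
}

int
main(void)
{
	struct rte_kvargs *kvlist;
	uint8_t disable_npa = 0;

	kvlist = rte_kvargs_parse("tim_disable_npa=1", NULL);
	if (kvlist == NULL)
		return -1;

	rte_kvargs_process(kvlist, "tim_disable_npa", flag_handler,
			   &disable_npa);
	rte_kvargs_free(kvlist);

	printf("disable_npa = %u\n", disable_npa);
	return 0;
}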
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index 986ad8493..44bcad94d 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -31,30 +31,43 @@ cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
 		cache_sz = RTE_MEMPOOL_CACHE_MAX_SIZE;
 	cache_sz = cache_sz != 0 ? cache_sz : 2;
 	tim_ring->nb_chunks += (cache_sz * rte_lcore_count());
-	tim_ring->chunk_pool = rte_mempool_create_empty(
-		pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz, cache_sz, 0,
-		rte_socket_id(), mp_flags);
-
-	if (tim_ring->chunk_pool == NULL) {
-		plt_err("Unable to create chunkpool.");
-		return -ENOMEM;
-	}
+	if (!tim_ring->disable_npa) {
+		tim_ring->chunk_pool = rte_mempool_create_empty(
+			pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz,
+			cache_sz, 0, rte_socket_id(), mp_flags);
+
+		if (tim_ring->chunk_pool == NULL) {
+			plt_err("Unable to create chunkpool.");
+			return -ENOMEM;
+		}
 
-	rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
-					rte_mbuf_platform_mempool_ops(), NULL);
-	if (rc < 0) {
-		plt_err("Unable to set chunkpool ops");
-		goto free;
-	}
+		rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
+						rte_mbuf_platform_mempool_ops(),
+						NULL);
+		if (rc < 0) {
+			plt_err("Unable to set chunkpool ops");
+			goto free;
+		}
 
-	rc = rte_mempool_populate_default(tim_ring->chunk_pool);
-	if (rc < 0) {
-		plt_err("Unable to set populate chunkpool.");
-		goto free;
+		rc = rte_mempool_populate_default(tim_ring->chunk_pool);
+		if (rc < 0) {
+			plt_err("Unable to populate chunkpool.");
+			goto free;
+		}
+		tim_ring->aura = roc_npa_aura_handle_to_aura(
+			tim_ring->chunk_pool->pool_id);
+		tim_ring->ena_dfb = 0;
+	} else {
+		tim_ring->chunk_pool = rte_mempool_create(
+			pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz,
+			cache_sz, 0, NULL, NULL, NULL, NULL, rte_socket_id(),
+			mp_flags);
+		if (tim_ring->chunk_pool == NULL) {
+			plt_err("Unable to create chunkpool.");
+			return -ENOMEM;
+		}
+		tim_ring->ena_dfb = 1;
 	}
-	tim_ring->aura =
-		roc_npa_aura_handle_to_aura(tim_ring->chunk_pool->pool_id);
-	tim_ring->ena_dfb = 0;
 
 	return 0;
 
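The two branches above reduce to the following standalone sketch; the pool
name, sizes and error handling are illustrative, and
rte_mbuf_platform_mempool_ops() is expected to resolve to the NPA-backed
mempool driver on cnxk platforms:

/* Illustrative sketch of the two chunk-pool setups used above. */
#include <stdbool.h>
#include <rte_mempool.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_lcore.h>

static struct rte_mempool *
chunk_pool_create(bool use_npa, unsigned int nb_chunks,
		  unsigned int chunk_sz, unsigned int cache_sz)
{
	struct rte_mempool *mp;

	if (use_npa) {
		/* HW-managed path: empty pool + platform (NPA) ops. */
		mp = rte_mempool_create_empty("tim_chunk_pool", nb_chunks,
					      chunk_sz, cache_sz, 0,
					      rte_socket_id(), 0);
		if (mp == NULL)
			return NULL;
		if (rte_mempool_set_ops_byname(
			    mp, rte_mbuf_platform_mempool_ops(), NULL) < 0 ||
		    rte_mempool_populate_default(mp) < 0) {
			rte_mempool_free(mp);
			return NULL;
		}
	} else {
		/* Software path: default mempool ops, chunks are
		 * reclaimed by the driver instead of hardware. */
		mp = rte_mempool_create("tim_chunk_pool", nb_chunks, chunk_sz,
					cache_sz, 0, NULL, NULL, NULL, NULL,
					rte_socket_id(), 0);
	}
	return mp;
}
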
@@ -110,8 +123,17 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
 	tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
 	tim_ring->nb_timers = rcfg->nb_timers;
 	tim_ring->chunk_sz = dev->chunk_sz;
+	tim_ring->disable_npa = dev->disable_npa;
+
+	if (tim_ring->disable_npa) {
+		tim_ring->nb_chunks =
+			tim_ring->nb_timers /
+			CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
+		tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
+	} else {
+		tim_ring->nb_chunks = tim_ring->nb_timers;
+	}
 
-	tim_ring->nb_chunks = tim_ring->nb_timers;
 	tim_ring->nb_chunk_slots = CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
 	/* Create buckets. */
 	tim_ring->bkt =
@@ -199,6 +221,24 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
 	return 0;
 }
 
+static void
+cnxk_tim_parse_devargs(struct rte_devargs *devargs, struct cnxk_tim_evdev *dev)
+{
+	struct rte_kvargs *kvlist;
+
+	if (devargs == NULL)
+		return;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (kvlist == NULL)
+		return;
+
+	rte_kvargs_process(kvlist, CNXK_TIM_DISABLE_NPA, &parse_kvargs_flag,
+			   &dev->disable_npa);
+
+	rte_kvargs_free(kvlist);
+}
+
 void
 cnxk_tim_init(struct roc_sso *sso)
 {
@@ -217,6 +257,8 @@ cnxk_tim_init(struct roc_sso *sso)
 	}
 	dev = mz->addr;
 
+	cnxk_tim_parse_devargs(sso->pci_dev->device.devargs, dev);
+
 	dev->tim.roc_sso = sso;
 	rc = roc_tim_init(&dev->tim);
 	if (rc < 0) {
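
The chunk sizing added in cnxk_tim_ring_create() above works out as in the
following worked example; all numbers are illustrative, and the per-chunk
slot count is an assumption standing in for CNXK_TIM_NB_CHUNK_SLOTS():

/* Illustrative sizing: how many chunks each mode provisions. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	uint64_t nb_timers = 1 << 20;	/* example: 1M armed timers */
	uint64_t nb_bkts = 1024;	/* buckets = max_tout / tck_nsec */
	uint64_t slots_per_chunk = 255;	/* assumed value of
					 * CNXK_TIM_NB_CHUNK_SLOTS(chunk_sz) */
	uint64_t npa, sw;

	/* NPA enabled: hardware recycles chunks, so the driver
	 * provisions one chunk per timer. */
	npa = nb_timers;

	/* NPA disabled: chunks are reclaimed only in software, so the
	 * driver provisions a full worst-case chain per bucket. */
	sw = (nb_timers / slots_per_chunk) * nb_bkts;

	printf("npa=%" PRIu64 " sw=%" PRIu64 "\n", npa, sw);
	return 0;
}
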
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index 62bb2f1eb..8c21ab1fe 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -33,11 +33,15 @@
 
 #define CN9K_TIM_MIN_TMO_TKS (256)
 
+#define CNXK_TIM_DISABLE_NPA "tim_disable_npa"
+
 struct cnxk_tim_evdev {
 	struct roc_tim tim;
 	struct rte_eventdev *event_dev;
 	uint16_t nb_rings;
 	uint32_t chunk_sz;
+	/* Dev args */
+	uint8_t disable_npa;
 };
 
 enum cnxk_tim_clk_src {
@@ -75,6 +79,7 @@ struct cnxk_tim_ring {
 	struct rte_mempool *chunk_pool;
 	uint64_t arm_cnt;
 	uint8_t prod_type_sp;
+	uint8_t disable_npa;
 	uint8_t ena_dfb;
 	uint16_t ring_id;
 	uint32_t aura;
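
For completeness, a sketch of the adapter configuration that eventually
reaches cnxk_tim_ring_create(); the device ID, tick resolution and timer
count are examples only:

/* Illustrative: adapter config consumed by the TIM ring create path. */
#include <rte_event_timer_adapter.h>
#include <rte_lcore.h>

static struct rte_event_timer_adapter *
tim_adapter_create(uint8_t event_dev_id)
{
	struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = event_dev_id,
		.timer_adapter_id = 0,
		.socket_id = rte_socket_id(),
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = 10 * 1000 * 1000,	/* 10 ms tick */
		.max_tmo_ns = 10ULL * 1000 * 1000 * 1000, /* 10 s max */
		.nb_timers = 1 << 20,
		.flags = 0,
	};

	return rte_event_timer_adapter_create(&conf);
}
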
-- 
2.17.1


