[dpdk-dev] [PATCH v2 02/11] mempool/cnxk: add device probe/remove

Ashwin Sekhar T K asekhar at marvell.com
Sat Apr 3 16:17:42 CEST 2021


Add the implementation for CNXK mempool device
probe and remove.

Signed-off-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
Signed-off-by: Ashwin Sekhar T K <asekhar at marvell.com>
---
 doc/guides/mempool/cnxk.rst         |  23 +++++
 drivers/mempool/cnxk/cnxk_mempool.c | 131 +++++++++++++++++++++++++++-
 2 files changed, 150 insertions(+), 4 deletions(-)

diff --git a/doc/guides/mempool/cnxk.rst b/doc/guides/mempool/cnxk.rst
index e72a77c361..907c19c841 100644
--- a/doc/guides/mempool/cnxk.rst
+++ b/doc/guides/mempool/cnxk.rst
@@ -30,6 +30,29 @@ Pre-Installation Configuration
 ------------------------------
 
 
+Runtime Config Options
+~~~~~~~~~~~~~~~~~~~~~~
+
+- ``Maximum number of mempools per application`` (default ``128``)
+
+  The maximum number of mempools per application needs to be configured on
+  HW during mempool driver initialization. HW can support up to 1M mempools.
+  Since each mempool costs a set of HW resources, the ``max_pools``
+  ``devargs`` parameter is provided to configure the number of mempools
+  required for the application.
+  For example::
+
+    -a 0002:02:00.0,max_pools=512
+
+  With the above configuration, the driver will set up only 512 mempools for
+  the given application to save HW resources.
+
+.. note::
+
+   Since this configuration is per application, the end user needs to
+   provide the ``max_pools`` parameter to the first PCIe device probed by
+   the given application.
+
 Debugging Options
 ~~~~~~~~~~~~~~~~~
 
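For reference, the ``max_pools`` devargs above can also be supplied
programmatically through the EAL arguments. A minimal, hypothetical sketch
(the PCI address 0002:02:00.0 and the pool count are placeholders; error
handling is elided):

    /* Hypothetical launcher: the "-a" allow-list entry carries the
     * devargs, which npa_probe() receives during rte_eal_init().
     */
    #include <rte_eal.h>

    int
    main(void)
    {
    	char *argv[] = { "app", "-a", "0002:02:00.0,max_pools=512" };
    	int argc = sizeof(argv) / sizeof(argv[0]);

    	if (rte_eal_init(argc, argv) < 0)
    		return -1;

    	/* At most 512 mempools are now available on HW. */
    	return 0;
    }
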
diff --git a/drivers/mempool/cnxk/cnxk_mempool.c b/drivers/mempool/cnxk/cnxk_mempool.c
index 947078c052..703d15be42 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.c
+++ b/drivers/mempool/cnxk/cnxk_mempool.c
@@ -15,21 +15,142 @@
 
 #include "roc_api.h"
 
+#define CNXK_NPA_DEV_NAME	 RTE_STR(cnxk_npa_dev_)
+#define CNXK_NPA_DEV_NAME_LEN	 (sizeof(CNXK_NPA_DEV_NAME) + PCI_PRI_STR_SIZE)
+#define CNXK_NPA_MAX_POOLS_PARAM "max_pools"
+
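+/*
+ * Decode an NPA_AURA_SZ_* value into the pool count it represents:
+ * SZ_0 yields the 128-pool minimum, anything at or above SZ_MAX clamps
+ * to the 2^20 maximum, and other values map to 2^(val + 6).
+ */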
+static inline uint32_t
+npa_aura_size_to_u32(uint8_t val)
+{
+	if (val == NPA_AURA_SZ_0)
+		return 128;
+	if (val >= NPA_AURA_SZ_MAX)
+		return BIT_ULL(20);
+
+	return 1 << (val + 6);
+}
+
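+/*
+ * kvargs callback: clamp the requested pool count to the supported
+ * [128, 2^20] range and store it as the matching NPA_AURA_SZ_*
+ * encoding (rte_log2_u32(val) - 6).
+ */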
 static int
-npa_remove(struct rte_pci_device *pci_dev)
+parse_max_pools(const char *key, const char *value, void *extra_args)
 {
-	RTE_SET_USED(pci_dev);
+	uint32_t val;
+
+	RTE_SET_USED(key);
 
+	val = atoi(value);
+	if (val < npa_aura_size_to_u32(NPA_AURA_SZ_128))
+		val = 128;
+	if (val > npa_aura_size_to_u32(NPA_AURA_SZ_1M))
+		val = BIT_ULL(20);
+
+	*(uint8_t *)extra_args = rte_log2_u32(val) - 6;
 	return 0;
 }
 
+static inline uint8_t
+parse_aura_size(struct rte_devargs *devargs)
+{
+	uint8_t aura_sz = NPA_AURA_SZ_128;
+	struct rte_kvargs *kvlist;
+
+	if (devargs == NULL)
+		goto exit;
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (kvlist == NULL)
+		goto exit;
+
+	rte_kvargs_process(kvlist, CNXK_NPA_MAX_POOLS_PARAM, &parse_max_pools,
+			   &aura_sz);
+	rte_kvargs_free(kvlist);
+exit:
+	return aura_sz;
+}
+
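+/* Build the unique memzone name "cnxk_npa_dev_<BDF>" for this device. */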
+static inline char *
+npa_dev_to_name(struct rte_pci_device *pci_dev, char *name)
+{
+	snprintf(name, CNXK_NPA_DEV_NAME_LEN, CNXK_NPA_DEV_NAME PCI_PRI_FMT,
+		 pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+		 pci_dev->addr.function);
+
+	return name;
+}
+
+static int
+npa_init(struct rte_pci_device *pci_dev)
+{
+	char name[CNXK_NPA_DEV_NAME_LEN];
+	const struct rte_memzone *mz;
+	struct roc_npa *dev;
+	int rc;
+
+	rc = roc_plt_init();
+	if (rc < 0)
+		goto error;
+
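+	/* Keep the roc_npa device state in a memzone named after the
+	 * PCI address so that npa_fini() can find it again at remove
+	 * time.
+	 */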
+	rc = -ENOMEM;
+	mz = rte_memzone_reserve_aligned(npa_dev_to_name(pci_dev, name),
+					 sizeof(*dev), SOCKET_ID_ANY, 0,
+					 RTE_CACHE_LINE_SIZE);
+	if (mz == NULL)
+		goto error;
+
+	dev = mz->addr;
+	dev->pci_dev = pci_dev;
+
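+	/* Apply the max_pools devargs (if any) before device init. */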
+	roc_idev_npa_maxpools_set(parse_aura_size(pci_dev->device.devargs));
+	rc = roc_npa_dev_init(dev);
+	if (rc)
+		goto mz_free;
+
+	return 0;
+
+mz_free:
+	rte_memzone_free(mz);
+error:
+	plt_err("Failed to initialize npa device, rc=%d", rc);
+	return rc;
+}
+
+static int
+npa_fini(struct rte_pci_device *pci_dev)
+{
+	char name[CNXK_NPA_DEV_NAME_LEN];
+	const struct rte_memzone *mz;
+	int rc;
+
+	mz = rte_memzone_lookup(npa_dev_to_name(pci_dev, name));
+	if (mz == NULL)
+		return -EINVAL;
+
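+	/* A -EAGAIN return means the device is still in use; the memzone
+	 * is kept so the remove can be retried later.
+	 */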
+	rc = roc_npa_dev_fini(mz->addr);
+	if (rc) {
+		if (rc != -EAGAIN)
+			plt_err("Failed to remove npa dev, rc=%d", rc);
+		return rc;
+	}
+	rte_memzone_free(mz);
+
+	return 0;
+}
+
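+/* Device setup and teardown are done by the primary process only. */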
+static int
+npa_remove(struct rte_pci_device *pci_dev)
+{
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	return npa_fini(pci_dev);
+}
+
 static int
 npa_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 {
 	RTE_SET_USED(pci_drv);
-	RTE_SET_USED(pci_dev);
 
-	return 0;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	return npa_init(pci_dev);
 }
 
 static const struct rte_pci_id npa_pci_map[] = {
@@ -76,3 +197,5 @@ static struct rte_pci_driver npa_pci = {
 RTE_PMD_REGISTER_PCI(mempool_cnxk, npa_pci);
 RTE_PMD_REGISTER_PCI_TABLE(mempool_cnxk, npa_pci_map);
 RTE_PMD_REGISTER_KMOD_DEP(mempool_cnxk, "vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(mempool_cnxk,
+			      CNXK_NPA_MAX_POOLS_PARAM "=<128-1048576>");
-- 
2.31.0