[dpdk-dev] [PATCH v1 09/11] mempool/octeontx: implement pool update range

Santosh Shukla santosh.shukla at caviumnetworks.com
Thu Aug 24 15:29:01 CEST 2017


Add support for the update range op in the mempool driver.

Allow more than one HW pool when using the OcteonTx mempool driver:
store each pool's information in a list and find the appropriate
list element by matching rte_mempool pointers.
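
For reference, a minimal usage sketch (not part of this patch; the pool
name and sizes are illustrative) of how a pool backed by these ops would
be set up. The mempool library, as extended earlier in this series, is
expected to call the driver's .update_range for each memory area during
populate and before .alloc, which is how the per-pool descriptor added
below gets recorded before the alloc handler looks it up:

#include <rte_lcore.h>
#include <rte_mempool.h>

/* Illustrative sketch: create an empty pool, attach the "octeontx_fpavf"
 * ops and populate it.  Populating is expected to trigger .update_range
 * (records the memzone address) and then .alloc (consumes it).
 */
static struct rte_mempool *
example_fpavf_pool_create(void)
{
	struct rte_mempool *mp;

	/* 4096 elements of 2048 bytes, no per-lcore cache: example values */
	mp = rte_mempool_create_empty("ex_fpavf_pool", 4096, 2048,
				      0, 0, rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;

	if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) < 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	return mp;
}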

Signed-off-by: Santosh Shukla <santosh.shukla at caviumnetworks.com>
Signed-off-by: Jerin Jacob <jerin.jacob at caviumnetworks.com>
---
 drivers/mempool/octeontx/rte_mempool_octeontx.c | 73 ++++++++++++++++++++++++-
 1 file changed, 71 insertions(+), 2 deletions(-)

diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index cc1b101f4..7c16259ea 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -36,17 +36,49 @@
 
 #include "octeontx_fpavf.h"
 
+/*
+ * Per-pool descriptor.
+ * Links a mempool with the corresponding memzone
+ * that provides the memory backing the pool's elements.
+ */
+struct octeontx_pool_info {
+	const struct rte_mempool *mp;
+	uintptr_t mz_addr;
+
+	SLIST_ENTRY(octeontx_pool_info) link;
+};
+
+SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
+
+/* List of the allocated pools */
+static struct octeontx_pool_list octeontx_pool_head =
+				SLIST_HEAD_INITIALIZER(octeontx_pool_head);
+/* Spinlock to protect pool list */
+static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
+
 static int
 octeontx_fpavf_alloc(struct rte_mempool *mp)
 {
 	uintptr_t pool;
+	struct octeontx_pool_info *pool_info;
 	uint32_t memseg_count = mp->size;
 	uint32_t object_size;
 	uintptr_t va_start;
 	int rc = 0;
 
+	rte_spinlock_lock(&pool_list_lock);
+	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
+		if (pool_info->mp == mp)
+			break;
+	}
+	if (pool_info == NULL) {
+		rte_spinlock_unlock(&pool_list_lock);
+		return -ENXIO;
+	}
+
 	/* virtual hugepage mapped addr */
-	va_start = ~(uint64_t)0;
+	va_start = pool_info->mz_addr;
+	rte_spinlock_unlock(&pool_list_lock);
 
 	object_size = mp->elt_size + mp->header_size + mp->trailer_size;
 
@@ -77,10 +109,27 @@ octeontx_fpavf_alloc(struct rte_mempool *mp)
 static void
 octeontx_fpavf_free(struct rte_mempool *mp)
 {
+	struct octeontx_pool_info *pool_info;
 	uintptr_t pool;
 
 	pool = (uintptr_t)mp->pool_id;
 
+	rte_spinlock_lock(&pool_list_lock);
+	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
+		if (pool_info->mp == mp)
+			break;
+	}
+
+	if (pool_info == NULL) {
+		rte_spinlock_unlock(&pool_list_lock);
+		rte_panic("%s: trying to free pool with no valid metadata",
+		    __func__);
+	}
+
+	SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
+	rte_spinlock_unlock(&pool_list_lock);
+
+	rte_free(pool_info);
 	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
 }
 
@@ -163,6 +212,26 @@ octeontx_fpavf_get_capabilities(struct rte_mempool *mp)
 	return 0;
 }
 
+static void
+octeontx_fpavf_update_range(const struct rte_mempool *mp,
+			    char *vaddr, phys_addr_t paddr, size_t len)
+{
+	struct octeontx_pool_info *pool_info;
+
+	RTE_SET_USED(paddr);
+	RTE_SET_USED(len);
+
+	pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
+	if (pool_info == NULL)
+		return;
+
+	pool_info->mp = mp;
+	pool_info->mz_addr = (uintptr_t)vaddr;
+	rte_spinlock_lock(&pool_list_lock);
+	SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
+	rte_spinlock_unlock(&pool_list_lock);
+}
+
 static struct rte_mempool_ops octeontx_fpavf_ops = {
 	.name = "octeontx_fpavf",
 	.alloc = octeontx_fpavf_alloc,
@@ -171,7 +240,7 @@ static struct rte_mempool_ops octeontx_fpavf_ops = {
 	.dequeue = octeontx_fpavf_dequeue,
 	.get_count = octeontx_fpavf_get_count,
 	.get_capabilities = octeontx_fpavf_get_capabilities,
-	.update_range = NULL,
+	.update_range = octeontx_fpavf_update_range,
 };
 
 MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
-- 
2.11.0