[PATCH v14 2/6] mempool: add functions to get extra mempool info
Paul Szczepanek
paul.szczepanek at arm.com
Fri Jun 7 17:09:56 CEST 2024
Add two functions:
- rte_mempool_get_mem_range - get virtual memory range
of the objects in the mempool,
- rte_mempool_get_obj_alignment - get alignment of
objects in the mempool.
Add two tests that test these new functions.
Signed-off-by: Paul Szczepanek <paul.szczepanek at arm.com>
Reviewed-by: Jack Bond-Preston <jack.bond-preston at foss.arm.com>
Reviewed-by: Nathan Brown <nathan.brown at arm.com>
Reviewed-by: Morten Brørup <mb at smartsharesystems.com>
Acked-by: Morten Brørup <mb at smartsharesystems.com>
---
app/test/test_mempool.c | 70 +++++++++++++++++++++++++++++++++++++++
lib/mempool/rte_mempool.c | 45 +++++++++++++++++++++++++
lib/mempool/rte_mempool.h | 47 ++++++++++++++++++++++++++
lib/mempool/version.map | 3 ++
4 files changed, 165 insertions(+)
diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index ad7ebd6363..3f7ba5872d 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -843,16 +843,19 @@ test_mempool(void)
int ret = -1;
uint32_t nb_objs = 0;
uint32_t nb_mem_chunks = 0;
+ size_t alignment = 0;
struct rte_mempool *mp_cache = NULL;
struct rte_mempool *mp_nocache = NULL;
struct rte_mempool *mp_stack_anon = NULL;
struct rte_mempool *mp_stack_mempool_iter = NULL;
struct rte_mempool *mp_stack = NULL;
struct rte_mempool *default_pool = NULL;
+ struct rte_mempool *mp_alignment = NULL;
struct mp_data cb_arg = {
.ret = -1
};
const char *default_pool_ops = rte_mbuf_best_mempool_ops();
+ struct rte_mempool_mem_range_info mem_range = { 0 };
/* create a mempool (without cache) */
mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
@@ -967,6 +970,72 @@ test_mempool(void)
}
rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
+ if (rte_mempool_get_mem_range(default_pool, &mem_range)) {
+ printf("cannot get mem range from default mempool\n");
+ GOTO_ERR(ret, err);
+ }
+
+ if (rte_mempool_get_mem_range(NULL, NULL) != -EINVAL) {
+ printf("rte_mempool_get_mem_range failed to return -EINVAL "
+ "when passed invalid arguments\n");
+ GOTO_ERR(ret, err);
+ }
+
+ if (mem_range.start == NULL || mem_range.length <
+ (MEMPOOL_SIZE * MEMPOOL_ELT_SIZE)) {
+ printf("mem range of default mempool is invalid\n");
+ GOTO_ERR(ret, err);
+ }
+
+ /* by default mempool objects are aligned by RTE_MEMPOOL_ALIGN */
+ alignment = rte_mempool_get_obj_alignment(default_pool);
+ if (alignment != RTE_MEMPOOL_ALIGN) {
+ printf("rte_mempool_get_obj_alignment returned wrong value, "
+ "expected %zu, returned %zu\n",
+ (size_t)RTE_MEMPOOL_ALIGN, alignment);
+ GOTO_ERR(ret, err);
+ }
+
+ /* create a mempool with a RTE_MEMPOOL_F_NO_CACHE_ALIGN flag */
+ mp_alignment = rte_mempool_create("test_alignment",
+ 1, 8, /* the small size guarantees single memory chunk */
+ 0, 0, NULL, NULL, my_obj_init, NULL,
+ SOCKET_ID_ANY, RTE_MEMPOOL_F_NO_CACHE_ALIGN);
+
+ if (mp_alignment == NULL) {
+ printf("cannot allocate mempool with "
+ "RTE_MEMPOOL_F_NO_CACHE_ALIGN flag\n");
+ GOTO_ERR(ret, err);
+ }
+
+ /* mempool was created with RTE_MEMPOOL_F_NO_CACHE_ALIGN
+ * and minimum alignment is expected which is sizeof(uint64_t)
+ */
+ alignment = rte_mempool_get_obj_alignment(mp_alignment);
+ if (alignment != sizeof(uint64_t)) {
+ printf("rte_mempool_get_obj_alignment returned wrong value, "
+ "expected %zu, returned %zu\n",
+ (size_t)sizeof(uint64_t), alignment);
+ GOTO_ERR(ret, err);
+ }
+
+ alignment = rte_mempool_get_obj_alignment(NULL);
+ if (alignment != 0) {
+ printf("rte_mempool_get_obj_alignment failed to return 0 for "
+ " an invalid mempool\n");
+ GOTO_ERR(ret, err);
+ }
+
+ if (rte_mempool_get_mem_range(mp_alignment, &mem_range)) {
+ printf("cannot get mem range from mempool\n");
+ GOTO_ERR(ret, err);
+ }
+
+ if (!mem_range.is_contiguous) {
+ printf("mempool not contiguous\n");
+ GOTO_ERR(ret, err);
+ }
+
/* retrieve the mempool from its name */
if (rte_mempool_lookup("test_nocache") != mp_nocache) {
printf("Cannot lookup mempool from its name\n");
@@ -1039,6 +1108,7 @@ test_mempool(void)
rte_mempool_free(mp_stack_mempool_iter);
rte_mempool_free(mp_stack);
rte_mempool_free(default_pool);
+ rte_mempool_free(mp_alignment);
return ret;
}
diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c
index 12390a2c81..54d2804151 100644
--- a/lib/mempool/rte_mempool.c
+++ b/lib/mempool/rte_mempool.c
@@ -1386,6 +1386,51 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),
rte_mcfg_mempool_read_unlock();
}
+/*
+ * Compute the virtual address span covered by all memory chunks of the
+ * mempool: lowest chunk start, total span length (high - low), and whether
+ * the chunks tile that span with no gaps (is_contiguous).
+ * Returns 0 on success, -EINVAL if an argument is NULL or the mempool has
+ * not been populated with any memory chunk yet.
+ */
+int rte_mempool_get_mem_range(const struct rte_mempool *mp,
+ struct rte_mempool_mem_range_info *mem_range)
+{
+ if (mp == NULL || mem_range == NULL)
+ return -EINVAL;
+
+ /* sentinel start values: any real chunk address replaces them */
+ void *address_low = (void *)UINTPTR_MAX;
+ void *address_high = 0;
+ size_t address_diff = 0;
+ size_t total_size = 0;
+ struct rte_mempool_memhdr *hdr;
+
+ /* go through memory chunks and find the lowest and highest addresses */
+ STAILQ_FOREACH(hdr, &mp->mem_list, next) {
+ if (address_low > hdr->addr)
+ address_low = hdr->addr;
+ if (address_high < RTE_PTR_ADD(hdr->addr, hdr->len))
+ address_high = RTE_PTR_ADD(hdr->addr, hdr->len);
+ total_size += hdr->len;
+ }
+
+ /* check if mempool was not populated yet (no memory chunks) */
+ if (address_low == (void *)UINTPTR_MAX)
+ return -EINVAL;
+
+ address_diff = (size_t)RTE_PTR_DIFF(address_high, address_low);
+
+ mem_range->start = address_low;
+ mem_range->length = address_diff;
+ /* contiguous iff the summed chunk lengths exactly fill [low, high) */
+ mem_range->is_contiguous = (total_size == address_diff) ? true : false;
+
+ return 0;
+}
+
+/*
+ * Report the alignment applied to objects in this mempool: the minimum
+ * alignment sizeof(uint64_t) when the pool was created with
+ * RTE_MEMPOOL_F_NO_CACHE_ALIGN, otherwise the default RTE_MEMPOOL_ALIGN.
+ * Returns 0 if mp is NULL (callers use 0 as the invalid-pool marker).
+ */
+size_t rte_mempool_get_obj_alignment(const struct rte_mempool *mp)
+{
+ if (mp == NULL)
+ return 0;
+
+ if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
+ return sizeof(uint64_t);
+ else
+ return RTE_MEMPOOL_ALIGN;
+}
+
struct mempool_callback_data {
TAILQ_ENTRY(mempool_callback_data) callbacks;
rte_mempool_event_callback *func;
diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h
index 23fd5c8465..990491a7a7 100644
--- a/lib/mempool/rte_mempool.h
+++ b/lib/mempool/rte_mempool.h
@@ -1917,6 +1917,53 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
void *arg);
+/**
+ * A structure used to retrieve information about the memory range
+ * of the mempool.
+ */
+struct rte_mempool_mem_range_info {
+ /** Start of the memory range used by mempool objects */
+ void *start;
+ /** Length of the memory range used by mempool objects */
+ size_t length;
+ /* NOTE(review): length is the full span high - low; when is_contiguous
+ * is false it exceeds the sum of the individual chunk sizes.
+ */
+ /** Are all memory addresses used by mempool objects contiguous */
+ bool is_contiguous;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get information about the memory range used by the mempool.
+ *
+ * @param[in] mp
+ * Pointer to an initialized mempool.
+ * @param[out] mem_range
+ * Pointer to struct which is used to return lowest address,
+ * length of the memory range containing all the addresses,
+ * and whether these addresses are contiguous.
+ * @return
+ * 0 on success, -EINVAL if mempool is not valid or mem_range is NULL.
+ **/
+__rte_experimental
+int rte_mempool_get_mem_range(const struct rte_mempool *mp,
+ struct rte_mempool_mem_range_info *mem_range);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Return object alignment.
+ *
+ * @param[in] mp
+ * Pointer to a mempool.
+ * @return
+ * Object alignment if mp is valid. 0 if mp is NULL.
+ *
+ **/
+__rte_experimental
+size_t rte_mempool_get_obj_alignment(const struct rte_mempool *mp);
+
/**
* @internal Get page size used for mempool object allocation.
* This function is internal to mempool library and mempool drivers.
diff --git a/lib/mempool/version.map b/lib/mempool/version.map
index 473277400c..02df634b2a 100644
--- a/lib/mempool/version.map
+++ b/lib/mempool/version.map
@@ -50,6 +50,9 @@ EXPERIMENTAL {
__rte_mempool_trace_get_contig_blocks;
__rte_mempool_trace_default_cache;
__rte_mempool_trace_cache_flush;
+ # added in 24.07
+ rte_mempool_get_mem_range;
+ rte_mempool_get_obj_alignment;
};
INTERNAL {
--
2.25.1
More information about the dev
mailing list