[PATCH v1 2/5] eal/memory: allocate all VA space in one go
Anatoly Burakov
anatoly.burakov at intel.com
Wed Mar 11 11:58:50 CET 2026
Instead of allocating VA space separately for each memseg list in dynmem
mode, reserve it all in one go and then assign each memseg list a portion of
that space. Similarly, during dynmem initialization in secondary processes,
attach the entire VA space in a single reservation. Legacy and 32-bit code
paths are untouched.
Signed-off-by: Anatoly Burakov <anatoly.burakov at intel.com>
---
lib/eal/common/eal_common_dynmem.c | 56 ++++++++++++++++++++-----
lib/eal/common/eal_common_memory.c | 22 ++++++++++
lib/eal/common/eal_memcfg.h | 6 +++
lib/eal/common/eal_private.h | 13 ++++++
lib/eal/linux/eal_memory.c | 66 +++++++++++++++++++++++++++++-
5 files changed, 152 insertions(+), 11 deletions(-)
diff --git a/lib/eal/common/eal_common_dynmem.c b/lib/eal/common/eal_common_dynmem.c
index 86da2bd80b..bfc4de6698 100644
--- a/lib/eal/common/eal_common_dynmem.c
+++ b/lib/eal/common/eal_common_dynmem.c
@@ -24,11 +24,16 @@ eal_dynmem_memseg_lists_init(void)
struct memtype {
uint64_t page_sz;
int socket_id;
+ unsigned int n_segs;
+ size_t mem_sz;
+ size_t va_offset;
} *memtypes = NULL;
int i, hpi_idx, msl_idx, ret = -1; /* fail unless told to succeed */
struct rte_memseg_list *msl;
uint64_t max_mem, max_mem_per_type;
+ size_t mem_va_len, mem_va_page_sz;
unsigned int n_memtypes, cur_type;
+ void *mem_va_addr = NULL;
struct internal_config *internal_conf =
eal_get_internal_configuration();
@@ -108,18 +113,16 @@ eal_dynmem_memseg_lists_init(void)
max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
max_mem_per_type = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20,
max_mem / n_memtypes);
+ mem_va_len = 0;
+ mem_va_page_sz = 0;
- /* go through all mem types and create segment lists */
- msl_idx = 0;
+ /* calculate total VA space and offsets for all mem types */
for (cur_type = 0; cur_type < n_memtypes; cur_type++) {
- unsigned int n_segs;
unsigned int max_segs_per_type;
struct memtype *type = &memtypes[cur_type];
uint64_t pagesz;
- int socket_id;
pagesz = type->page_sz;
- socket_id = type->socket_id;
/*
* we need to create a segment list for this type. we must take
@@ -133,25 +136,58 @@ eal_dynmem_memseg_lists_init(void)
max_segs_per_type = max_mem_per_type / pagesz;
max_segs_per_type = RTE_MIN(max_segs_per_type,
(unsigned int)RTE_MAX_MEMSEG_PER_TYPE);
- n_segs = max_segs_per_type;
+ type->n_segs = max_segs_per_type;
+ type->mem_sz = (size_t)pagesz * type->n_segs;
+ mem_va_page_sz = RTE_MAX(mem_va_page_sz, (size_t)pagesz);
+ mem_va_len = RTE_ALIGN_CEIL(mem_va_len, pagesz);
+ type->va_offset = mem_va_len;
+ mem_va_len += type->mem_sz;
+ }
+
+ mem_va_addr = eal_get_virtual_area(NULL, &mem_va_len,
+ mem_va_page_sz, 0, 0);
+ if (mem_va_addr == NULL) {
+ EAL_LOG(ERR, "Cannot reserve VA space for memseg lists");
+ goto out;
+ }
+
+ /* go through all mem types and create segment lists */
+ msl_idx = 0;
+ for (cur_type = 0; cur_type < n_memtypes; cur_type++) {
+ struct memtype *type = &memtypes[cur_type];
+ uint64_t pagesz;
+ int socket_id;
+
+ pagesz = type->page_sz;
+ socket_id = type->socket_id;
EAL_LOG(DEBUG, "Creating segment list: "
"n_segs:%i socket_id:%i hugepage_sz:%" PRIu64,
- n_segs, socket_id, pagesz);
+ type->n_segs, socket_id, pagesz);
msl = &mcfg->memsegs[msl_idx++];
- if (eal_memseg_list_init(msl, pagesz, n_segs, socket_id, true))
+ if (eal_memseg_list_init(msl, pagesz, type->n_segs, socket_id, true))
goto out;
- if (eal_memseg_list_alloc(msl, 0)) {
- EAL_LOG(ERR, "Cannot allocate VA space for memseg list");
+ if (eal_memseg_list_assign(msl,
+ RTE_PTR_ADD(mem_va_addr, type->va_offset))) {
+ EAL_LOG(ERR, "Cannot assign VA space for memseg list");
goto out;
}
}
/* we're successful */
ret = 0;
out:
+ if (ret != 0) {
+ if (mem_va_addr != NULL)
+ eal_mem_free(mem_va_addr, mem_va_len);
+ } else {
+ /* store the VA space data in shared config */
+ mcfg->mem_va_addr = (uintptr_t)mem_va_addr;
+ mcfg->mem_va_len = mem_va_len;
+ mcfg->mem_va_page_sz = mem_va_page_sz;
+ }
free(memtypes);
return ret;
}
diff --git a/lib/eal/common/eal_common_memory.c b/lib/eal/common/eal_common_memory.c
index e8e41bb741..b91e765cbf 100644
--- a/lib/eal/common/eal_common_memory.c
+++ b/lib/eal/common/eal_common_memory.c
@@ -271,6 +271,28 @@ eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
return 0;
}
+int
+eal_memseg_list_assign(struct rte_memseg_list *msl, void *addr)
+{
+ size_t page_sz, mem_sz;
+
+ page_sz = msl->page_sz;
+ mem_sz = page_sz * msl->memseg_arr.len;
+
+ if (addr == NULL || addr != RTE_PTR_ALIGN(addr, page_sz)) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ msl->base_va = addr;
+ msl->len = mem_sz;
+
+ EAL_LOG(DEBUG, "VA assigned for memseg list at %p, size %zx",
+ addr, mem_sz);
+
+ return 0;
+}
+
void
eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs)
{
diff --git a/lib/eal/common/eal_memcfg.h b/lib/eal/common/eal_memcfg.h
index 60e2089797..2b3b3b62ba 100644
--- a/lib/eal/common/eal_memcfg.h
+++ b/lib/eal/common/eal_memcfg.h
@@ -49,6 +49,12 @@ struct rte_mem_config {
struct rte_memseg_list memsegs[RTE_MAX_MEMSEG_LISTS];
/**< List of dynamic arrays holding memsegs */
+ uintptr_t mem_va_addr;
+ /**< Base VA address reserved for dynamic memory memseg lists. */
+ size_t mem_va_len;
+ /**< Length of VA range reserved for dynamic memory memseg lists. */
+ size_t mem_va_page_sz;
+ /**< Page size alignment used for dynamic memory VA reservation. */
struct rte_tailq_head tailq_head[RTE_MAX_TAILQ];
/**< Tailqs for objects */
diff --git a/lib/eal/common/eal_private.h b/lib/eal/common/eal_private.h
index 40408d61b4..b62b71369a 100644
--- a/lib/eal/common/eal_private.h
+++ b/lib/eal/common/eal_private.h
@@ -322,6 +322,19 @@ eal_memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
int
eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
+/**
+ * Assign a pre-reserved VA range to a memory segment list.
+ *
+ * @param msl
+ * Initialized memory segment list with page size defined.
+ * @param addr
+ * Starting address of list VA range.
+ * @return
+ * 0 on success, (-1) on failure and rte_errno is set.
+ */
+int
+eal_memseg_list_assign(struct rte_memseg_list *msl, void *addr);
+
/**
* Populate MSL, each segment is one page long.
*
diff --git a/lib/eal/linux/eal_memory.c b/lib/eal/linux/eal_memory.c
index 568d5da124..3b2afee852 100644
--- a/lib/eal/linux/eal_memory.c
+++ b/lib/eal/linux/eal_memory.c
@@ -1883,7 +1883,59 @@ memseg_primary_init(void)
}
static int
-memseg_secondary_init(void)
+memseg_secondary_init_dynmem(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int msl_idx = 0;
+ struct rte_memseg_list *msl;
+ void *mem_va_addr;
+ size_t mem_va_len;
+
+ if (mcfg->mem_va_addr == 0 || mcfg->mem_va_len == 0 ||
+ mcfg->mem_va_page_sz == 0) {
+ EAL_LOG(ERR, "Missing shared dynamic memory VA range from primary process");
+ return -1;
+ }
+
+ mem_va_addr = (void *)(uintptr_t)mcfg->mem_va_addr;
+ mem_va_len = mcfg->mem_va_len;
+
+ if (eal_get_virtual_area(mem_va_addr, &mem_va_len,
+ mcfg->mem_va_page_sz, 0, 0) == NULL) {
+ EAL_LOG(ERR, "Cannot reserve VA space for hugepage memory");
+ return -1;
+ }
+
+ for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
+
+ msl = &mcfg->memsegs[msl_idx];
+
+ /* skip empty and external memseg lists */
+ if (msl->memseg_arr.len == 0 || msl->external)
+ continue;
+
+ if (rte_fbarray_attach(&msl->memseg_arr)) {
+ EAL_LOG(ERR, "Cannot attach to primary process memseg lists");
+ eal_mem_free(mem_va_addr, mem_va_len);
+ return -1;
+ }
+
+ if (eal_memseg_list_assign(msl, msl->base_va)) {
+ EAL_LOG(ERR, "Cannot assign VA space for hugepage memory");
+ eal_mem_free(mem_va_addr, mem_va_len);
+ return -1;
+ }
+
+ EAL_LOG(DEBUG, "Attaching segment list: "
+ "n_segs:%u socket_id:%d hugepage_sz:%" PRIu64,
+ msl->memseg_arr.len, msl->socket_id, msl->page_sz);
+ }
+
+ return 0;
+}
+
+static int
+memseg_secondary_init_legacy(void)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
int msl_idx = 0;
@@ -1912,6 +1964,18 @@ memseg_secondary_init(void)
return 0;
}
+static int
+memseg_secondary_init(void)
+{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ if (!internal_conf->legacy_mem)
+ return memseg_secondary_init_dynmem();
+
+ return memseg_secondary_init_legacy();
+}
+
int
rte_eal_memseg_init(void)
{
--
2.47.3
More information about the dev
mailing list