[dpdk-dev] [PATCH v7 1/2] mem: balanced allocation of hugepages
Ilya Maximets
i.maximets at samsung.com
Wed Jun 21 12:08:30 CEST 2017
Currently, EAL allocates hugepages one by one, not paying attention
to which NUMA node the allocation was made from.
Such behaviour leads to allocation failure if the number of hugepages
available to the application is limited by cgroups or hugetlbfs and
memory is requested not only from the first socket.
Example:
# 90 x 1GB hugepages available in a system
cgcreate -g hugetlb:/test
# Limit to 32GB of hugepages
cgset -r hugetlb.1GB.limit_in_bytes=34359738368 test
# Request 4GB from each of 2 sockets
cgexec -g hugetlb:test testpmd --socket-mem=4096,4096 ...
EAL: SIGBUS: Cannot mmap more hugepages of size 1024 MB
EAL: 32 not 90 hugepages of size 1024 MB allocated
EAL: Not enough memory available on socket 1!
Requested: 4096MB, available: 0MB
PANIC in rte_eal_init():
Cannot init memory
This happens because all allocated pages are
on socket 0.
Fix this issue by setting mempolicy MPOL_PREFERRED for each hugepage
to one of requested nodes using following schema:
1) Allocate essential hugepages:
1.1) Allocate from NUMA node N only as many hugepages as
needed to fit the memory requested for this node.
1.2) Repeat 1.1 for all NUMA nodes.
2) Try to map all remaining free hugepages in a round-robin
fashion.
3) Sort pages and choose the most suitable.
In this case all essential memory will be allocated and all remaining
pages will be fairly distributed between all requested nodes.
A new config option, RTE_EAL_NUMA_AWARE_HUGEPAGES, is introduced and
enabled by default for linuxapp on x86, ppc and thunderx.
Enabling this option adds libnuma as a dependency for EAL.
Fixes: 77988fc08dc5 ("mem: fix allocating all free hugepages")
Signed-off-by: Ilya Maximets <i.maximets at samsung.com>
---
config/common_base | 1 +
config/common_linuxapp | 2 +
config/defconfig_arm-armv7a-linuxapp-gcc | 3 +
config/defconfig_arm64-armv8a-linuxapp-gcc | 3 +
config/defconfig_arm64-thunderx-linuxapp-gcc | 3 +
lib/librte_eal/linuxapp/eal/Makefile | 3 +
lib/librte_eal/linuxapp/eal/eal_memory.c | 105 ++++++++++++++++++++++++++-
mk/rte.app.mk | 3 +
8 files changed, 119 insertions(+), 4 deletions(-)
diff --git a/config/common_base b/config/common_base
index f6aafd1..660588a 100644
--- a/config/common_base
+++ b/config/common_base
@@ -103,6 +103,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
diff --git a/config/common_linuxapp b/config/common_linuxapp
index b3cf41b..050526f 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -35,6 +35,8 @@
CONFIG_RTE_EXEC_ENV="linuxapp"
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index 19607eb..e06b1d4 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_ARCH_STRICT_ALIGN=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y
+# NUMA is not supported on ARM
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
# ARM doesn't have support for vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 9f32766..2c67cdc 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -47,6 +47,9 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
# to address minimum DMA alignment across all arm64 implementations.
CONFIG_RTE_CACHE_LINE_SIZE=128
+# Most ARMv8 systems doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_LIBRTE_FM10K_PMD=n
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index f64da4c..3e79fa8 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -37,6 +37,9 @@ CONFIG_RTE_CACHE_LINE_SIZE=128
CONFIG_RTE_MAX_NUMA_NODES=2
CONFIG_RTE_MAX_LCORE=96
+# ThunderX supports NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
+
#
# Compile PMD for octeontx sso event device
#
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 640afd0..8651e27 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -50,6 +50,9 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
+LDLIBS += -lnuma
+endif
# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index e17c9cb..ceadca7 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -54,6 +54,9 @@
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numaif.h>
+#endif
#include <rte_log.h>
#include <rte_memory.h>
@@ -348,6 +351,21 @@ static int huge_wrap_sigsetjmp(void)
return sigsetjmp(huge_jmpenv, 1);
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#ifndef ULONG_SIZE
+#define ULONG_SIZE sizeof(unsigned long)
+#endif
+#ifndef ULONG_BITS
+#define ULONG_BITS (ULONG_SIZE * CHAR_BIT)
+#endif
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, ULONG_SIZE)
+#endif
+#endif
+
/*
* Mmap all hugepages of hugepage table: it first open a file in
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
@@ -356,18 +374,82 @@ static int huge_wrap_sigsetjmp(void)
* map continguous physical blocks in contiguous virtual blocks.
*/
static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
- struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+ uint64_t *essential_memory __rte_unused, int orig)
{
int fd;
unsigned i;
void *virtaddr;
void *vma_addr = NULL;
size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ unsigned long nodemask[BITS_TO_LONGS(RTE_MAX_NUMA_NODES)] = {0UL};
+ unsigned long maxnode = 0;
+ int node_id = -1;
+ bool numa_available = true;
+
+ /* Check if kernel supports NUMA. */
+ if (get_mempolicy(NULL, NULL, 0, 0, 0) < 0 && errno == ENOSYS) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ numa_available = false;
+ }
+
+ if (orig && numa_available) {
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ if (internal_config.socket_mem[i])
+ maxnode = i + 1;
+ }
+#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode) {
+ unsigned int j;
+
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+ if (essential_memory[j])
+ break;
+
+ if (j == RTE_MAX_NUMA_NODES) {
+ node_id = (node_id + 1) % RTE_MAX_NUMA_NODES;
+ while (!internal_config.socket_mem[node_id]) {
+ node_id++;
+ node_id %= RTE_MAX_NUMA_NODES;
+ }
+ } else {
+ node_id = j;
+ if (essential_memory[j] < hugepage_sz)
+ essential_memory[j] = 0;
+ else
+ essential_memory[j] -= hugepage_sz;
+ }
+
+ nodemask[node_id / ULONG_BITS] =
+ 1UL << (node_id % ULONG_BITS);
+
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ node_id);
+ /*
+ * Due to old linux kernel bug (feature?) we have to
+ * increase maxnode by 1. It will be unconditionally
+ * decreased back to normal value inside the syscall
+ * handler.
+ */
+ if (set_mempolicy(MPOL_PREFERRED,
+ nodemask, maxnode + 1) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to set policy MPOL_PREFERRED: "
+ "%s\n", strerror(errno));
+ return i;
+ }
+
+ nodemask[node_id / ULONG_BITS] = 0UL;
+ }
+#endif
+
if (orig) {
hugepg_tbl[i].file_id = i;
hugepg_tbl[i].size = hugepage_sz;
@@ -478,6 +560,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
vma_len -= hugepage_sz;
}
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (maxnode && set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
+ RTE_LOG(ERR, EAL, "Failed to set mempolicy MPOL_DEFAULT\n");
+#endif
return i;
}
@@ -562,6 +648,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
if (hugepg_tbl[i].orig_va == va) {
hugepg_tbl[i].socket_id = socket_id;
hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ RTE_LOG(DEBUG, EAL,
+ "Hugepage %s is on socket %d\n",
+ hugepg_tbl[i].filepath, socket_id);
+#endif
}
}
}
@@ -1000,6 +1091,11 @@ rte_eal_hugepage_init(void)
huge_register_sigbus();
+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ memory[i] = internal_config.socket_mem[i];
+
+
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1017,7 +1113,8 @@ rte_eal_hugepage_init(void)
/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+ memory, 1);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1060,7 +1157,7 @@ rte_eal_hugepage_init(void)
sizeof(struct hugepage_file), cmp_physaddr);
/* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+ if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
hpi->num_pages[0]) {
RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
(unsigned)(hpi->hugepage_sz / 0x100000));
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index bcaf1b3..4fe22d1 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -186,6 +186,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# The static libraries do not know their dependencies.
# So linking with static library requires explicit dependencies.
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm
--
2.7.4
More information about the dev
mailing list