[dpdk-dev] [PATCH 14/41] eal: add support for mapping hugepages at runtime

Anatoly Burakov anatoly.burakov at intel.com
Sat Mar 3 14:46:02 CET 2018


Nothing uses this code yet. The bulk of it is copied from the old
memory allocation code (linuxapp eal_memory.c). We provide an
EAL-internal API to allocate either one page or multiple pages,
guaranteeing contiguous VA for all of the requested pages.
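
For illustration only (not part of this patch), here is a minimal sketch of
how an EAL-internal caller might use the bulk API declared in eal_memalloc.h;
the helper name, page count, page size and socket id below are made up for
the example:

#include <stdbool.h>
#include <rte_memory.h>
#include "eal_memalloc.h"

static int
example_alloc_contig_pages(void)
{
	struct rte_memseg *ms[4];

	/* ask for exactly four 2M pages on socket 0; with exact == true the
	 * call fails unless all four pages were allocated with contiguous VA
	 */
	if (eal_memalloc_alloc_page_bulk(ms, 4, RTE_PGSIZE_2M, 0, true) < 0)
		return -1;
	return 0;
}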

For single-file segments, we will use fallocate() to grow and
shrink memory segments. However, fallocate() is not supported on
all kernel versions, so on older kernels we fall back to using
ftruncate() to grow the file and disable shrinking, as there is
little we can do there. This will enable vhost use cases where
having single-file segments is of great value even without
support for hot-unplugging memory.
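
As a rough sketch of that fallback (the real logic lives in resize_hugefile()
in the patch below; the helper name here is made up), growing uses fallocate()
where available and ftruncate() otherwise, while shrinking punches a hole in
the file and is simply skipped when fallocate() is unavailable:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>	/* fallocate(), FALLOC_FL_* (or <linux/falloc.h>) */
#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* illustrative only - error handling and locking are omitted */
static int
grow_or_shrink(int fd, uint64_t offset, uint64_t page_sz, bool grow)
{
	if (grow) {
		if (fallocate(fd, 0, offset, page_sz) == 0)
			return 0;
		if (errno == ENOTSUP)
			/* old kernel - grow the file with ftruncate() instead */
			return ftruncate(fd, offset + page_sz);
		return -1;
	}
	/* shrink: punch a hole but keep file size; no ftruncate() fallback */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			offset, page_sz);
}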

Not supported on FreeBSD.

Locking is done via fcntl() because that way, when it comes to
taking out write locks or unlocking on deallocation, we don't
have to keep the original fd's around. Plus, fcntl() gives us the
ability to lock parts of a file, which is useful for single-file
segments.
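
For reference, a minimal sketch of an fcntl()-based range lock of the kind
the lock() helper in this patch implements; F_SETLK locks (or fails without
blocking on) only the requested byte range of the file:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* illustrative only - mirrors the lock() helper added in this patch */
static int
lock_range(int fd, off_t offset, off_t len, short type)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_type = type;	/* F_RDLCK, F_WRLCK or F_UNLCK */
	fl.l_whence = SEEK_SET;
	fl.l_start = offset;	/* lock only this part of the file */
	fl.l_len = len;

	/* non-blocking: fails with EAGAIN/EACCES if another process holds a
	 * conflicting lock on this range
	 */
	return fcntl(fd, F_SETLK, &fl);
}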

Signed-off-by: Anatoly Burakov <anatoly.burakov at intel.com>
---
 lib/librte_eal/bsdapp/eal/Makefile         |   1 +
 lib/librte_eal/bsdapp/eal/eal_memalloc.c   |  26 ++
 lib/librte_eal/bsdapp/eal/meson.build      |   1 +
 lib/librte_eal/common/eal_memalloc.h       |  19 +
 lib/librte_eal/linuxapp/eal/Makefile       |   2 +
 lib/librte_eal/linuxapp/eal/eal_memalloc.c | 609 +++++++++++++++++++++++++++++
 lib/librte_eal/linuxapp/eal/meson.build    |   1 +
 7 files changed, 659 insertions(+)
 create mode 100644 lib/librte_eal/bsdapp/eal/eal_memalloc.c
 create mode 100644 lib/librte_eal/common/eal_memalloc.h
 create mode 100644 lib/librte_eal/linuxapp/eal/eal_memalloc.c

diff --git a/lib/librte_eal/bsdapp/eal/Makefile b/lib/librte_eal/bsdapp/eal/Makefile
index 1b43d77..19f9322 100644
--- a/lib/librte_eal/bsdapp/eal/Makefile
+++ b/lib/librte_eal/bsdapp/eal/Makefile
@@ -29,6 +29,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_memory.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_hugepage_info.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_thread.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_debug.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_memalloc.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_lcore.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_timer.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_interrupts.c
diff --git a/lib/librte_eal/bsdapp/eal/eal_memalloc.c b/lib/librte_eal/bsdapp/eal/eal_memalloc.c
new file mode 100644
index 0000000..be8340b
--- /dev/null
+++ b/lib/librte_eal/bsdapp/eal/eal_memalloc.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+
+#include "eal_memalloc.h"
+
+int
+eal_memalloc_alloc_page_bulk(struct rte_memseg **ms __rte_unused,
+		int __rte_unused n, uint64_t __rte_unused size,
+		int __rte_unused socket, bool __rte_unused exact)
+{
+	RTE_LOG(ERR, EAL, "Memory hotplug not supported on FreeBSD\n");
+	return -1;
+}
+
+struct rte_memseg *
+eal_memalloc_alloc_page(uint64_t __rte_unused size, int __rte_unused socket)
+{
+	RTE_LOG(ERR, EAL, "Memory hotplug not supported on FreeBSD\n");
+	return NULL;
+}
diff --git a/lib/librte_eal/bsdapp/eal/meson.build b/lib/librte_eal/bsdapp/eal/meson.build
index e83fc91..4b40223 100644
--- a/lib/librte_eal/bsdapp/eal/meson.build
+++ b/lib/librte_eal/bsdapp/eal/meson.build
@@ -8,6 +8,7 @@ env_sources = files('eal_alarm.c',
 		'eal_hugepage_info.c',
 		'eal_interrupts.c',
 		'eal_lcore.c',
+		'eal_memalloc.c',
 		'eal_thread.c',
 		'eal_timer.c',
 		'eal.c',
diff --git a/lib/librte_eal/common/eal_memalloc.h b/lib/librte_eal/common/eal_memalloc.h
new file mode 100644
index 0000000..c1076cf
--- /dev/null
+++ b/lib/librte_eal/common/eal_memalloc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef EAL_MEMALLOC_H
+#define EAL_MEMALLOC_H
+
+#include <stdbool.h>
+
+#include <rte_memory.h>
+
+struct rte_memseg *
+eal_memalloc_alloc_page(uint64_t size, int socket);
+
+int
+eal_memalloc_alloc_page_bulk(struct rte_memseg **ms, int n, uint64_t size,
+		int socket, bool exact);
+
+#endif /* EAL_MEMALLOC_H */
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index c407a43..af6b9be 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -36,6 +36,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_thread.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_log.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio_mp_sync.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_memalloc.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_debug.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_lcore.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_timer.c
@@ -82,6 +83,7 @@ CFLAGS_eal_interrupts.o := -D_GNU_SOURCE
 CFLAGS_eal_vfio_mp_sync.o := -D_GNU_SOURCE
 CFLAGS_eal_timer.o := -D_GNU_SOURCE
 CFLAGS_eal_lcore.o := -D_GNU_SOURCE
+CFLAGS_eal_memalloc.o := -D_GNU_SOURCE
 CFLAGS_eal_thread.o := -D_GNU_SOURCE
 CFLAGS_eal_log.o := -D_GNU_SOURCE
 CFLAGS_eal_common_log.o := -D_GNU_SOURCE
diff --git a/lib/librte_eal/linuxapp/eal/eal_memalloc.c b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
new file mode 100644
index 0000000..1ba1201
--- /dev/null
+++ b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
@@ -0,0 +1,609 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#define _FILE_OFFSET_BITS 64
+#include <errno.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/queue.h>
+#include <sys/file.h>
+#include <unistd.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <signal.h>
+#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_eal_memconfig.h>
+#include <rte_eal.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+
+#include "eal_filesystem.h"
+#include "eal_internal_cfg.h"
+#include "eal_memalloc.h"
+
+/*
+ * not all kernel versions support fallocate() on hugetlbfs, so fall back
+ * to ftruncate() and disallow deallocation if fallocate() is not supported.
+ */
+static int fallocate_supported = -1; /* unknown */
+
+/*
+ * If each page is in a separate file, we can close each fd as soon as the
+ * page is mapped, since we need it only once. However, in single-file
+ * segments mode we reuse a single fd for the entire memseg list, so it needs
+ * to be stored somewhere. Fd's are per-process, so we'll store them in a
+ * local tailq.
+ */
+struct msl_entry {
+	TAILQ_ENTRY(msl_entry) next;
+	unsigned int msl_idx;
+	int fd;
+};
+
+/** Doubly-linked list of memseg list fd's. */
+TAILQ_HEAD(msl_entry_list, msl_entry);
+
+static struct msl_entry_list msl_entry_list =
+		TAILQ_HEAD_INITIALIZER(msl_entry_list);
+static rte_spinlock_t tailq_lock = RTE_SPINLOCK_INITIALIZER;
+
+static sigjmp_buf huge_jmpenv;
+
+static void __rte_unused huge_sigbus_handler(int signo __rte_unused)
+{
+	siglongjmp(huge_jmpenv, 1);
+}
+
+/* Wrap sigsetjmp in a helper function to avoid compiler issues: any
+ * non-volatile, non-static local variable in the stack frame calling
+ * sigsetjmp might be clobbered by a call to longjmp.
+ */
+static int __rte_unused huge_wrap_sigsetjmp(void)
+{
+	return sigsetjmp(huge_jmpenv, 1);
+}
+
+static struct sigaction huge_action_old;
+static int huge_need_recover;
+
+static void __rte_unused
+huge_register_sigbus(void)
+{
+	sigset_t mask;
+	struct sigaction action;
+
+	sigemptyset(&mask);
+	sigaddset(&mask, SIGBUS);
+	action.sa_flags = 0;
+	action.sa_mask = mask;
+	action.sa_handler = huge_sigbus_handler;
+
+	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
+}
+
+static void __rte_unused
+huge_recover_sigbus(void)
+{
+	if (huge_need_recover) {
+		sigaction(SIGBUS, &huge_action_old, NULL);
+		huge_need_recover = 0;
+	}
+}
+
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+static bool
+prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
+{
+	bool have_numa = true;
+
+	/* Check if kernel supports NUMA. */
+	if (numa_available() != 0) {
+		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+		have_numa = false;
+	}
+
+	if (have_numa) {
+		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+		if (get_mempolicy(oldpolicy, oldmask->maskp,
+				  oldmask->size + 1, 0, 0) < 0) {
+			RTE_LOG(ERR, EAL,
+				"Failed to get current mempolicy: %s. "
+				"Assuming MPOL_DEFAULT.\n", strerror(errno));
+			*oldpolicy = MPOL_DEFAULT;
+		}
+		RTE_LOG(DEBUG, EAL,
+			"Setting policy MPOL_PREFERRED for socket %d\n",
+			socket_id);
+		numa_set_preferred(socket_id);
+	}
+	return have_numa;
+}
+
+static void
+restore_numa(int *oldpolicy, struct bitmask *oldmask)
+{
+	RTE_LOG(DEBUG, EAL,
+		"Restoring previous memory policy: %d\n", *oldpolicy);
+	if (*oldpolicy == MPOL_DEFAULT) {
+		numa_set_localalloc();
+	} else if (set_mempolicy(*oldpolicy, oldmask->maskp,
+				 oldmask->size + 1) < 0) {
+		RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+			strerror(errno));
+		numa_set_localalloc();
+	}
+	numa_free_cpumask(oldmask);
+}
+#endif
+
+static struct msl_entry *
+get_msl_entry_by_idx(unsigned int list_idx)
+{
+	struct msl_entry *te;
+
+	rte_spinlock_lock(&tailq_lock);
+
+	TAILQ_FOREACH(te, &msl_entry_list, next) {
+		if (te->msl_idx == list_idx)
+			break;
+	}
+	if (te == NULL) {
+		/* doesn't exist, so create it and set fd to -1 */
+
+		te = malloc(sizeof(*te));
+		if (te == NULL) {
+			RTE_LOG(ERR, EAL, "%s(): cannot allocate tailq entry for memseg list\n",
+				__func__);
+			goto unlock;
+		}
+		te->msl_idx = list_idx;
+		te->fd = -1;
+		TAILQ_INSERT_TAIL(&msl_entry_list, te, next);
+	}
+unlock:
+	rte_spinlock_unlock(&tailq_lock);
+	return te;
+}
+
+/*
+ * uses fstat to report the size of a file on disk
+ */
+static off_t
+get_file_size(int fd)
+{
+	struct stat st;
+	if (fstat(fd, &st) < 0)
+		return 0;
+	return st.st_size;
+}
+
+/*
+ * uses fstat to check whether the file takes up any space on disk. We check
+ * allocated blocks rather than st_size, because st_size does not reflect
+ * space freed by fallocate() hole punching.
+ */
+static bool
+is_zero_length(int fd)
+{
+	struct stat st;
+	if (fstat(fd, &st) < 0)
+		return false;
+	return st.st_blocks == 0;
+}
+
+static int
+get_page_fd(char *path, int buflen, struct hugepage_info *hi,
+		unsigned int list_idx, unsigned int seg_idx)
+{
+	int fd;
+
+	if (internal_config.single_file_segments) {
+		/*
+		 * try to find a tailq entry, for this memseg list, or create
+		 * one if it doesn't exist.
+		 */
+		struct msl_entry *te = get_msl_entry_by_idx(list_idx);
+		if (te == NULL) {
+			RTE_LOG(ERR, EAL, "%s(): cannot allocate tailq entry for memseg list\n",
+				__func__);
+			return -1;
+		} else if (te->fd < 0) {
+			/* create a hugepage file */
+			eal_get_hugefile_path(path, buflen, hi->hugedir,
+					list_idx);
+			fd = open(path, O_CREAT | O_RDWR, 0600);
+			if (fd < 0) {
+				RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
+					__func__, strerror(errno));
+				return -1;
+			}
+			te->fd = fd;
+		} else {
+			fd = te->fd;
+		}
+	} else {
+		/* one file per page, just create it */
+		eal_get_hugefile_path(path, buflen, hi->hugedir,
+				list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
+		fd = open(path, O_CREAT | O_RDWR, 0600);
+		if (fd < 0) {
+			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
+					strerror(errno));
+			return -1;
+		}
+	}
+	return fd;
+}
+
+/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
+static int
+lock(int fd, uint64_t offset, uint64_t len, int type)
+{
+	struct flock lck = {0};
+	int ret;
+
+	lck.l_type = type;
+	lck.l_whence = SEEK_SET;
+	lck.l_start = offset;
+	lck.l_len = len;
+
+	ret = fcntl(fd, F_SETLK, &lck);
+
+	if (ret && (errno == EAGAIN || errno == EACCES)) {
+		/* locked by another process, not an error */
+		return 0;
+	} else if (ret) {
+		RTE_LOG(ERR, EAL, "%s(): error calling fcntl(): %s\n",
+			__func__, strerror(errno));
+		/* we've encountered an unexpected error */
+		return -1;
+	}
+	return 1;
+}
+
+static int
+resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz,
+		bool grow)
+{
+	bool again = false;
+	do {
+		if (fallocate_supported == 0) {
+			/* we cannot deallocate memory if fallocate() is not
+			 * supported, but locks are still needed to prevent
+			 * primary process' initialization from clearing out
+			 * huge pages used by this process.
+			 */
+
+			if (!grow) {
+				RTE_LOG(DEBUG, EAL, "%s(): fallocate not supported, not freeing page back to the system\n",
+					__func__);
+				return -1;
+			}
+			uint64_t new_size = fa_offset + page_sz;
+			uint64_t cur_size = get_file_size(fd);
+
+			/* fallocate isn't supported, fall back to ftruncate */
+			if (new_size > cur_size &&
+					ftruncate(fd, new_size) < 0) {
+				RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
+					__func__, strerror(errno));
+				return -1;
+			}
+			/* not being able to take out a read lock is an error */
+			if (lock(fd, fa_offset, page_sz, F_RDLCK) != 1)
+				return -1;
+		} else {
+			int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
+					FALLOC_FL_KEEP_SIZE;
+			int ret;
+
+			/* if fallocate() is supported, we need to take out a
+			 * read lock on allocate (to prevent other processes
+			 * from deallocating this page), and take out a write
+			 * lock on deallocate (to ensure nobody else is using
+			 * this page).
+			 *
+			 * we can't use flock() for this, as we actually need to
+			 * lock part of the file, not the entire file.
+			 */
+
+			if (!grow) {
+				ret = lock(fd, fa_offset, page_sz, F_WRLCK);
+
+				if (ret < 0)
+					return -1;
+				else if (ret == 0)
+					/* failed to lock, not an error */
+					return 0;
+			}
+			if (fallocate(fd, flags, fa_offset, page_sz) < 0) {
+				if (fallocate_supported == -1 &&
+						errno == ENOTSUP) {
+					RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
+						__func__);
+					again = true;
+					fallocate_supported = 0;
+				} else {
+					RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
+						__func__,
+						strerror(errno));
+					return -1;
+				}
+			} else {
+				fallocate_supported = 1;
+
+				if (grow) {
+					/* if can't read lock, it's an error */
+					if (lock(fd, fa_offset, page_sz,
+							F_RDLCK) != 1)
+						return -1;
+				} else {
+					/* if can't unlock, it's an error */
+					if (lock(fd, fa_offset, page_sz,
+							F_UNLCK) != 1)
+						return -1;
+				}
+			}
+		}
+	} while (again);
+	return 0;
+}
+
+static int
+alloc_page(struct rte_memseg *ms, void *addr, uint64_t size, int socket_id,
+		struct hugepage_info *hi, unsigned int list_idx,
+		unsigned int seg_idx)
+{
+	int cur_socket_id = 0;
+	uint64_t map_offset;
+	char path[PATH_MAX];
+	int ret = 0;
+	int fd;
+
+	fd = get_page_fd(path, sizeof(path), hi, list_idx, seg_idx);
+	if (fd < 0)
+		return -1;
+
+	if (internal_config.single_file_segments) {
+		map_offset = seg_idx * size;
+		ret = resize_hugefile(fd, map_offset, size, true);
+		if (ret < 1)
+			goto resized;
+	} else {
+		map_offset = 0;
+		if (ftruncate(fd, size) < 0) {
+			RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
+				__func__, strerror(errno));
+			goto resized;
+		}
+		/* we've allocated a page - take out a read lock. we're using
+		 * fcntl() locks rather than flock() here because doing that
+		 * gives us one huge advantage - fcntl() locks are per-process,
+		 * not per-file descriptor, which means that we don't have to
+		 * keep the original fd's around to keep a lock on the file.
+		 *
+		 * this is useful, because when it comes to unmapping pages, we
+		 * will have to take out a write lock (to figure out if another
+		 * process still has this page mapped), and to do it with
+		 * flock() we would have to use the original fd, as the lock is
+		 * associated with that particular fd. with fcntl(), this is
+		 * not necessary - we can open a new fd and use fcntl() on that.
+		 */
+		ret = lock(fd, map_offset, size, F_RDLCK);
+
+		/* this should not fail */
+		if (ret != 1) {
+			RTE_LOG(ERR, EAL, "%s(): error locking file: %s\n",
+				__func__,
+				strerror(errno));
+			goto resized;
+		}
+	}
+
+	/*
+	 * map the segment, and populate page tables, the kernel fills this
+	 * segment with zeros if it's a new page.
+	 */
+	void *va = mmap(addr, size, PROT_READ | PROT_WRITE,
+			MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, map_offset);
+	/* for non-single file segments, we can close fd here */
+	if (!internal_config.single_file_segments)
+		close(fd);
+
+	if (va == MAP_FAILED) {
+		RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
+			strerror(errno));
+		goto resized;
+	}
+	if (va != addr) {
+		RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
+		goto mapped;
+	}
+
+	rte_iova_t iova = rte_mem_virt2iova(addr);
+	if (iova == RTE_BAD_PHYS_ADDR) {
+		RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
+			__func__);
+		goto mapped;
+	}
+
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	move_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);
+
+	if (cur_socket_id != socket_id) {
+		RTE_LOG(DEBUG, EAL,
+				"%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
+			__func__, socket_id, cur_socket_id);
+		goto mapped;
+	}
+#endif
+
+	/* In Linux, hugetlb limitations, like cgroup, are
+	 * enforced at fault time instead of mmap(), even
+	 * with the MAP_POPULATE option. The kernel will send
+	 * a SIGBUS signal in that case. To avoid being
+	 * killed, save the stack environment here; if SIGBUS
+	 * happens, we can jump back to it.
+	 */
+	if (huge_wrap_sigsetjmp()) {
+		RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
+			(unsigned int)(size / 0x100000));
+		goto mapped;
+	}
+	*(int *)addr = *(int *) addr;
+
+	ms->addr = addr;
+	ms->hugepage_sz = size;
+	ms->len = size;
+	ms->nchannel = rte_memory_get_nchannel();
+	ms->nrank = rte_memory_get_nrank();
+	ms->iova = iova;
+	ms->socket_id = socket_id;
+
+	return 0;
+
+mapped:
+	munmap(addr, size);
+resized:
+	if (internal_config.single_file_segments) {
+		resize_hugefile(fd, map_offset, size, false);
+		if (is_zero_length(fd)) {
+			struct msl_entry *te = get_msl_entry_by_idx(list_idx);
+			if (te != NULL && te->fd >= 0) {
+				close(te->fd);
+				te->fd = -1;
+			}
+			/* ignore errors, can't make it any worse */
+			unlink(path);
+		}
+	} else {
+		close(fd);
+		unlink(path);
+	}
+	return -1;
+}
+
+int
+eal_memalloc_alloc_page_bulk(struct rte_memseg **ms, int n,
+		uint64_t size, int socket, bool exact)
+{
+	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+	struct rte_memseg_list *msl = NULL;
+	void *addr;
+	unsigned int msl_idx;
+	int cur_idx, end_idx, i, ret = -1;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	bool have_numa;
+	int oldpolicy;
+	struct bitmask *oldmask = numa_allocate_nodemask();
+#endif
+	struct hugepage_info *hi = NULL;
+
+	/* dynamic allocation not supported in legacy mode */
+	if (internal_config.legacy_mem)
+		goto restore_numa;
+
+	for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
+		if (size ==
+				internal_config.hugepage_info[i].hugepage_sz) {
+			hi = &internal_config.hugepage_info[i];
+			break;
+		}
+	}
+	if (!hi) {
+		RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
+			__func__);
+		goto restore_numa;
+	}
+
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	have_numa = prepare_numa(&oldpolicy, oldmask, socket);
+#endif
+
+	/* there may be several memseg lists for this page size and socket id,
+	 * so try allocating in all of them.
+	 */
+
+	/* find our memseg list */
+	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
+		struct rte_memseg_list *cur_msl = &mcfg->memsegs[msl_idx];
+
+		if (cur_msl->hugepage_sz != size)
+			continue;
+		if (cur_msl->socket_id != socket)
+			continue;
+		msl = cur_msl;
+
+		/* try finding space in memseg list */
+		cur_idx = rte_fbarray_find_next_n_free(&msl->memseg_arr, 0, n);
+
+		if (cur_idx < 0)
+			continue;
+
+		end_idx = cur_idx + n;
+
+		for (i = 0; cur_idx < end_idx; cur_idx++, i++) {
+			struct rte_memseg *cur;
+
+			cur = rte_fbarray_get(&msl->memseg_arr, cur_idx);
+			addr = RTE_PTR_ADD(msl->base_va,
+					cur_idx * msl->hugepage_sz);
+
+			if (alloc_page(cur, addr, size, socket, hi, msl_idx,
+					cur_idx)) {
+				RTE_LOG(DEBUG, EAL, "attempted to allocate %i pages, but only %i were allocated\n",
+					n, i);
+
+				/* if exact number wasn't requested, stop */
+				if (!exact)
+					ret = i;
+				goto restore_numa;
+			}
+			if (ms)
+				ms[i] = cur;
+
+			rte_fbarray_set_used(&msl->memseg_arr, cur_idx);
+		}
+		ret = n;
+
+		break;
+	}
+	/* we didn't break */
+	if (!msl) {
+		RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
+			__func__);
+	}
+
+restore_numa:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+	if (have_numa)
+		restore_numa(&oldpolicy, oldmask);
+#endif
+	return ret;
+}
+
+struct rte_memseg *
+eal_memalloc_alloc_page(uint64_t size, int socket)
+{
+	struct rte_memseg *ms;
+	if (eal_memalloc_alloc_page_bulk(&ms, 1, size, socket, true) < 0)
+		return NULL;
+	/* return pointer to newly allocated memseg */
+	return ms;
+}
diff --git a/lib/librte_eal/linuxapp/eal/meson.build b/lib/librte_eal/linuxapp/eal/meson.build
index 03974ff..5254c6c 100644
--- a/lib/librte_eal/linuxapp/eal/meson.build
+++ b/lib/librte_eal/linuxapp/eal/meson.build
@@ -10,6 +10,7 @@ env_sources = files('eal_alarm.c',
 		'eal_debug.c',
 		'eal_hugepage_info.c',
 		'eal_interrupts.c',
+		'eal_memalloc.c',
 		'eal_lcore.c',
 		'eal_log.c',
 		'eal_thread.c',
-- 
2.7.4
