[RFC PATCH 40/44] eal_cfg: add basic setters and getters
Bruce Richardson
bruce.richardson at intel.com
Wed Apr 29 18:58:32 CEST 2026
For simple fields in the EAL config struct we can add basic setters and
getters. For booleans, these can be auto-generated by macros. For the
other basic fields, we may need validation of the values, so add explicit
setter functions; the getter functions can still be auto-generated.
Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>
---
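An illustrative usage sketch (not part of the patch): it mirrors the new
subtest_eal_cfg_init_with_values() unit test, and rte_eal_init_from_cfg()
comes from an earlier patch in this series. An application would drive the
new setters before init roughly as follows:

	struct rte_eal_cfg *cfg = rte_eal_cfg_create();
	if (cfg == NULL)
		rte_panic("cannot allocate EAL config\n");
	/* booleans use the macro-generated accessors */
	rte_eal_cfg_set_no_pci(cfg, true);
	/* validated fields use explicit setters reporting errors via rte_errno */
	if (rte_eal_cfg_set_iova_mode(cfg, RTE_IOVA_VA) < 0)
		rte_panic("invalid IOVA mode: rte_errno=%d\n", rte_errno);
	if (rte_eal_init_from_cfg("my_app", cfg) < 0)
		rte_panic("cannot init EAL\n");
	rte_eal_cfg_free(cfg); /* cfg can be freed once EAL is initialised */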
app/test/test_eal_cfg.c | 372 ++++++++++++++++++++++++++++++++++++++
lib/eal_cfg/eal_cfg.c | 357 +++++++++++++++++++++++++++++++++++-
lib/eal_cfg/rte_eal_cfg.h | 309 +++++++++++++++++++++++++++++++
3 files changed, 1034 insertions(+), 4 deletions(-)
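For reference, each EAL_CFG_BOOL(name) invocation in eal_cfg.c expands to a
setter/getter pair along the following lines (shown hand-expanded here for the
no_pci field; CFG_REQUIRE_NOT_NULL is the NULL-guard macro defined alongside it):

	int
	rte_eal_cfg_set_no_pci(struct rte_eal_cfg *cfg, bool val)
	{
		CFG_REQUIRE_NOT_NULL(cfg);
		cfg->user_cfg.no_pci = val;
		return 0;
	}

	bool
	rte_eal_cfg_get_no_pci(const struct rte_eal_cfg *cfg)
	{
		if (cfg == NULL)
			return false;
		return cfg->user_cfg.no_pci;
	}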
diff --git a/app/test/test_eal_cfg.c b/app/test/test_eal_cfg.c
index f714be3fd4..4424c42533 100644
--- a/app/test/test_eal_cfg.c
+++ b/app/test/test_eal_cfg.c
@@ -3,10 +3,13 @@
*/
#include <errno.h>
+#include <inttypes.h>
#include <rte_eal.h>
#include <rte_debug.h>
#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_vect.h>
#include <rte_eal_cfg.h>
#include <stdlib.h>
@@ -40,6 +43,59 @@ test_eal_cfg_create_free(void)
int
test_eal_cfg_init(void) { return 0; }
#else
+/* Test that specific cfg values are visible through EAL query APIs post-init. */
+static int
+subtest_eal_cfg_init_with_values(void)
+{
+ struct rte_eal_cfg *cfg;
+ int ret;
+
+ cfg = rte_eal_cfg_create();
+ TEST_ASSERT_NOT_NULL(cfg, "rte_eal_cfg_create returned NULL");
+
+ TEST_ASSERT(rte_eal_cfg_set_no_pci(cfg, true) == 0,
+ "Failed to set no_pci");
+ TEST_ASSERT(rte_eal_cfg_set_iova_mode(cfg, RTE_IOVA_VA) == 0,
+ "Failed to set iova_mode");
+
+#ifdef RTE_ARCH_64
+ /*
+ * 0x200000000 (8 GiB) is above the 32-bit address space; only set and
+ * check this on 64-bit builds. rte_eal_get_baseaddr() returns
+ * eal_user_cfg.base_virtaddr directly when non-zero, so it is
+ * observable immediately after init.
+ */
+#define TEST_BASE_VIRTADDR ((uintptr_t)0x200000000ULL)
+ TEST_ASSERT(rte_eal_cfg_set_base_virtaddr(cfg, TEST_BASE_VIRTADDR) == 0,
+ "Failed to set base_virtaddr");
+#endif
+
+ ret = rte_eal_init_from_cfg("test_prog", cfg);
+ TEST_ASSERT(ret == 0,
+ "rte_eal_init_from_cfg failed: ret=%d rte_errno=%d", ret, rte_errno);
+
+ rte_eal_cfg_free(cfg);
+
+ /* no_pci=true means rte_eal_has_pci() must return 0 */
+ TEST_ASSERT(rte_eal_has_pci() == 0,
+ "Expected rte_eal_has_pci()==0 with no_pci=true, got %d",
+ rte_eal_has_pci());
+
+#ifdef RTE_ARCH_64
+ /* base_virtaddr was set non-zero so rte_eal_get_baseaddr() returns it */
+ TEST_ASSERT(rte_eal_get_baseaddr() == (uint64_t)TEST_BASE_VIRTADDR,
+ "Expected base addr 0x%" PRIx64 ", got 0x%" PRIx64,
+ (uint64_t)TEST_BASE_VIRTADDR, rte_eal_get_baseaddr());
+#endif
+
+ /* a non-DC iova_mode is stored directly in the runtime state at init */
+ TEST_ASSERT(rte_eal_iova_mode() == RTE_IOVA_VA,
+ "Expected RTE_IOVA_VA after init, got %d", rte_eal_iova_mode());
+
+ rte_eal_cleanup();
+ return TEST_SUCCESS;
+}
+
/* Test initialising EAL with a freshly created (empty/default) config. */
static int
subtest_eal_cfg_init_empty(void)
@@ -97,6 +153,7 @@ test_eal_cfg_init(void)
#define TEST_CFG_FN(X) { #X, X }
TEST_CFG_FN(subtest_eal_cfg_init_null),
TEST_CFG_FN(subtest_eal_cfg_init_empty),
+ TEST_CFG_FN(subtest_eal_cfg_init_with_values),
{ NULL, NULL }
};
@@ -132,6 +189,315 @@ test_eal_cfg_init(void)
}
#endif /* RTE_EXEC_ENV_WINDOWS */
+/* Test a representative boolean field (no_pci): NULL, default, roundtrip. */
+static int
+test_eal_cfg_bool(void)
+{
+ struct rte_eal_cfg *cfg;
+
+ /* NULL cfg */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_no_pci(NULL, true) == -1,
+ "Expected -1 for NULL cfg");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for NULL cfg, got %d", rte_errno);
+ TEST_ASSERT(rte_eal_cfg_get_no_pci(NULL) == false,
+ "Expected false from get with NULL cfg");
+
+ cfg = rte_eal_cfg_create();
+ TEST_ASSERT_NOT_NULL(cfg, "rte_eal_cfg_create returned NULL");
+
+ /* default is false */
+ TEST_ASSERT(rte_eal_cfg_get_no_pci(cfg) == false,
+ "Expected default no_pci == false");
+
+ /* set true, get true */
+ TEST_ASSERT(rte_eal_cfg_set_no_pci(cfg, true) == 0,
+ "Expected 0 setting no_pci = true");
+ TEST_ASSERT(rte_eal_cfg_get_no_pci(cfg) == true,
+ "Expected no_pci == true after set");
+
+ /* set false, get false */
+ TEST_ASSERT(rte_eal_cfg_set_no_pci(cfg, false) == 0,
+ "Expected 0 setting no_pci = false");
+ TEST_ASSERT(rte_eal_cfg_get_no_pci(cfg) == false,
+ "Expected no_pci == false after reset");
+
+ rte_eal_cfg_free(cfg);
+ return TEST_SUCCESS;
+}
+
+/* Test max_simd_bitwidth: NULL, valid values, boundary, and invalid inputs. */
+static int
+test_eal_cfg_max_simd_bitwidth(void)
+{
+ struct rte_eal_cfg *cfg;
+
+ /* NULL cfg */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_max_simd_bitwidth(NULL, 256) == -1,
+ "Expected -1 for NULL cfg");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for NULL cfg, got %d", rte_errno);
+ TEST_ASSERT(rte_eal_cfg_get_max_simd_bitwidth(NULL) == 0,
+ "Expected 0 from get with NULL cfg");
+
+ cfg = rte_eal_cfg_create();
+ TEST_ASSERT_NOT_NULL(cfg, "rte_eal_cfg_create returned NULL");
+
+ /* valid values: a selection of named powers of two */
+ static const uint16_t valid[] = {
+ RTE_VECT_SIMD_DISABLED,
+ RTE_VECT_SIMD_128,
+ RTE_VECT_SIMD_256,
+ RTE_VECT_SIMD_512,
+ RTE_VECT_SIMD_MAX, /* upper boundary */
+ };
+ for (size_t i = 0; i < RTE_DIM(valid); i++) {
+ TEST_ASSERT(rte_eal_cfg_set_max_simd_bitwidth(cfg, valid[i]) == 0,
+ "Expected 0 for bitwidth %u", valid[i]);
+ TEST_ASSERT(rte_eal_cfg_get_max_simd_bitwidth(cfg) == valid[i],
+ "get returned wrong value for bitwidth %u", valid[i]);
+ }
+
+ /* RTE_VECT_SIMD_DISABLED/2 (32): must fail (below the SIMD_DISABLED minimum) */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_max_simd_bitwidth(cfg, RTE_VECT_SIMD_DISABLED / 2) == -1,
+ "Expected -1 for bitwidth == SIMD_DISABLED/2");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for bitwidth == SIMD_DISABLED/2, got %d", rte_errno);
+
+ /* non-power-of-two (192 = 128 + 64) */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_max_simd_bitwidth(cfg, 192) == -1,
+ "Expected -1 for non-power-of-two bitwidth 192");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for bitwidth 192, got %d", rte_errno);
+
+ /* > RTE_VECT_SIMD_MAX */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_max_simd_bitwidth(cfg,
+ (uint16_t)(RTE_VECT_SIMD_MAX + 1)) == -1,
+ "Expected -1 for bitwidth > SIMD_MAX");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for bitwidth > SIMD_MAX, got %d", rte_errno);
+
+ rte_eal_cfg_free(cfg);
+ return TEST_SUCCESS;
+}
+
+/* Test iova_mode: NULL, valid modes, and an invalid value. */
+static int
+test_eal_cfg_iova_mode(void)
+{
+ struct rte_eal_cfg *cfg;
+
+ /* NULL cfg */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_iova_mode(NULL, RTE_IOVA_VA) == -1,
+ "Expected -1 for NULL cfg");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for NULL cfg, got %d", rte_errno);
+ TEST_ASSERT(rte_eal_cfg_get_iova_mode(NULL) == RTE_IOVA_DC,
+ "Expected RTE_IOVA_DC from get with NULL cfg");
+
+ cfg = rte_eal_cfg_create();
+ TEST_ASSERT_NOT_NULL(cfg, "rte_eal_cfg_create returned NULL");
+
+ /* valid modes */
+ static const enum rte_iova_mode valid[] = {
+ RTE_IOVA_DC, RTE_IOVA_PA, RTE_IOVA_VA,
+ };
+ for (size_t i = 0; i < RTE_DIM(valid); i++) {
+ TEST_ASSERT(rte_eal_cfg_set_iova_mode(cfg, valid[i]) == 0,
+ "Expected 0 for iova_mode %d", valid[i]);
+ TEST_ASSERT(rte_eal_cfg_get_iova_mode(cfg) == valid[i],
+ "get returned wrong value for iova_mode %d", valid[i]);
+ }
+
+ /* invalid: combination of flags that isn't a named mode */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_iova_mode(cfg, (enum rte_iova_mode)(RTE_IOVA_PA | RTE_IOVA_VA)) == -1,
+ "Expected -1 for invalid iova_mode");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for invalid iova_mode, got %d", rte_errno);
+
+ rte_eal_cfg_free(cfg);
+ return TEST_SUCCESS;
+}
+
+/* Test process_type: NULL, valid types, and RTE_PROC_INVALID. */
+static int
+test_eal_cfg_process_type(void)
+{
+ struct rte_eal_cfg *cfg;
+
+ /* NULL cfg */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_process_type(NULL, RTE_PROC_PRIMARY) == -1,
+ "Expected -1 for NULL cfg");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for NULL cfg, got %d", rte_errno);
+ TEST_ASSERT(rte_eal_cfg_get_process_type(NULL) == RTE_PROC_PRIMARY,
+ "Expected RTE_PROC_PRIMARY from get with NULL cfg");
+
+ cfg = rte_eal_cfg_create();
+ TEST_ASSERT_NOT_NULL(cfg, "rte_eal_cfg_create returned NULL");
+
+ /* valid types */
+ static const enum rte_proc_type_t valid[] = {
+ RTE_PROC_AUTO, RTE_PROC_PRIMARY, RTE_PROC_SECONDARY,
+ };
+ for (size_t i = 0; i < RTE_DIM(valid); i++) {
+ TEST_ASSERT(rte_eal_cfg_set_process_type(cfg, valid[i]) == 0,
+ "Expected 0 for process_type %d", valid[i]);
+ TEST_ASSERT(rte_eal_cfg_get_process_type(cfg) == valid[i],
+ "get returned wrong value for process_type %d", valid[i]);
+ }
+
+ /* RTE_PROC_INVALID must be rejected */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_process_type(cfg, RTE_PROC_INVALID) == -1,
+ "Expected -1 for RTE_PROC_INVALID");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for RTE_PROC_INVALID, got %d", rte_errno);
+
+ rte_eal_cfg_free(cfg);
+ return TEST_SUCCESS;
+}
+
+/*
+ * Returns the first NUMA node ID in [0, RTE_MAX_NUMA_NODES) that does not
+ * exist on this system, or -1 if every ID in that range is occupied.
+ */
+static int
+find_absent_numa_node(void)
+{
+ unsigned int count = rte_socket_count();
+ bool present[RTE_MAX_NUMA_NODES] = {};
+
+ for (unsigned int i = 0; i < count; i++) {
+ int id = rte_socket_id_by_idx(i);
+ if (id >= 0 && (unsigned int)id < RTE_MAX_NUMA_NODES)
+ present[id] = true;
+ }
+ for (unsigned int id = 0; id < RTE_MAX_NUMA_NODES; id++) {
+ if (!present[id])
+ return (int)id;
+ }
+ return -1;
+}
+
+/* Test set/get numa_mem: valid node, ERANGE, ENODEV, NULL, roundtrip. */
+static int
+test_eal_cfg_numa_mem(void)
+{
+ struct rte_eal_cfg *cfg;
+ unsigned int valid_node;
+ int absent;
+
+ cfg = rte_eal_cfg_create();
+ TEST_ASSERT_NOT_NULL(cfg, "rte_eal_cfg_create returned NULL");
+
+ /* NULL cfg → EINVAL */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_numa_mem(NULL, 0, 1024) == -1,
+ "Expected -1 for NULL cfg");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for NULL cfg, got %d", rte_errno);
+ TEST_ASSERT(rte_eal_cfg_get_numa_mem(NULL, 0) == 0,
+ "Expected 0 from get with NULL cfg");
+
+ /* node >= RTE_MAX_NUMA_NODES → ERANGE */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_numa_mem(cfg, RTE_MAX_NUMA_NODES, 1024) == -1,
+ "Expected -1 for out-of-range node");
+ TEST_ASSERT(rte_errno == ERANGE,
+ "Expected ERANGE for node >= RTE_MAX_NUMA_NODES, got %d", rte_errno);
+
+ /* non-existent node → ENODEV */
+ absent = find_absent_numa_node();
+ if (absent >= 0) {
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_numa_mem(cfg, (unsigned int)absent, 1024) == -1,
+ "Expected -1 for absent NUMA node %d", absent);
+ TEST_ASSERT(rte_errno == ENODEV,
+ "Expected ENODEV for absent node %d, got %d", absent, rte_errno);
+ } else {
+ printf(" Skipping ENODEV test: all %u NUMA node IDs occupied\n",
+ RTE_MAX_NUMA_NODES);
+ }
+
+ /* valid node → roundtrip */
+ valid_node = (unsigned int)rte_socket_id_by_idx(0);
+ TEST_ASSERT(rte_eal_cfg_set_numa_mem(cfg, valid_node, 2048) == 0,
+ "Expected 0 for valid NUMA node %u", valid_node);
+ TEST_ASSERT(rte_eal_cfg_get_numa_mem(cfg, valid_node) == 2048,
+ "get_numa_mem returned wrong value for node %u", valid_node);
+
+ /* get with out-of-range node → 0 */
+ TEST_ASSERT(rte_eal_cfg_get_numa_mem(cfg, RTE_MAX_NUMA_NODES) == 0,
+ "Expected 0 from get with out-of-range node");
+
+ rte_eal_cfg_free(cfg);
+ return TEST_SUCCESS;
+}
+
+/* Test set/get numa_limit: valid node, ERANGE, ENODEV, NULL, roundtrip. */
+static int
+test_eal_cfg_numa_limit(void)
+{
+ struct rte_eal_cfg *cfg;
+ unsigned int valid_node;
+ int absent;
+
+ cfg = rte_eal_cfg_create();
+ TEST_ASSERT_NOT_NULL(cfg, "rte_eal_cfg_create returned NULL");
+
+ /* NULL cfg → EINVAL */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_numa_limit(NULL, 0, 1024) == -1,
+ "Expected -1 for NULL cfg");
+ TEST_ASSERT(rte_errno == EINVAL,
+ "Expected EINVAL for NULL cfg, got %d", rte_errno);
+ TEST_ASSERT(rte_eal_cfg_get_numa_limit(NULL, 0) == 0,
+ "Expected 0 from get with NULL cfg");
+
+ /* node >= RTE_MAX_NUMA_NODES → ERANGE */
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_numa_limit(cfg, RTE_MAX_NUMA_NODES, 1024) == -1,
+ "Expected -1 for out-of-range node");
+ TEST_ASSERT(rte_errno == ERANGE,
+ "Expected ERANGE for node >= RTE_MAX_NUMA_NODES, got %d", rte_errno);
+
+ /* non-existent node → ENODEV */
+ absent = find_absent_numa_node();
+ if (absent >= 0) {
+ rte_errno = 0;
+ TEST_ASSERT(rte_eal_cfg_set_numa_limit(cfg, (unsigned int)absent, 1024) == -1,
+ "Expected -1 for absent NUMA node %d", absent);
+ TEST_ASSERT(rte_errno == ENODEV,
+ "Expected ENODEV for absent node %d, got %d", absent, rte_errno);
+ } else {
+ printf(" Skipping ENODEV test: all %u NUMA node IDs occupied\n",
+ RTE_MAX_NUMA_NODES);
+ }
+
+ /* valid node → roundtrip */
+ valid_node = (unsigned int)rte_socket_id_by_idx(0);
+ TEST_ASSERT(rte_eal_cfg_set_numa_limit(cfg, valid_node, 4096) == 0,
+ "Expected 0 for valid NUMA node %u", valid_node);
+ TEST_ASSERT(rte_eal_cfg_get_numa_limit(cfg, valid_node) == 4096,
+ "get_numa_limit returned wrong value for node %u", valid_node);
+
+ /* get with out-of-range node → 0 */
+ TEST_ASSERT(rte_eal_cfg_get_numa_limit(cfg, RTE_MAX_NUMA_NODES) == 0,
+ "Expected 0 from get with out-of-range node");
+
+ rte_eal_cfg_free(cfg);
+ return TEST_SUCCESS;
+}
+
static struct unit_test_suite eal_cfg_testsuite = {
.suite_name = "EAL cfg API tests",
.setup = NULL,
@@ -139,6 +505,12 @@ static struct unit_test_suite eal_cfg_testsuite = {
.unit_test_cases = {
TEST_CASE(test_eal_cfg_create_free),
TEST_CASE(test_eal_cfg_init),
+ TEST_CASE(test_eal_cfg_bool),
+ TEST_CASE(test_eal_cfg_max_simd_bitwidth),
+ TEST_CASE(test_eal_cfg_iova_mode),
+ TEST_CASE(test_eal_cfg_process_type),
+ TEST_CASE(test_eal_cfg_numa_mem),
+ TEST_CASE(test_eal_cfg_numa_limit),
TEST_CASES_END()
}
};
diff --git a/lib/eal_cfg/eal_cfg.c b/lib/eal_cfg/eal_cfg.c
index 18a508e7ad..ce3be8201b 100644
--- a/lib/eal_cfg/eal_cfg.c
+++ b/lib/eal_cfg/eal_cfg.c
@@ -3,17 +3,70 @@
*/
#include <errno.h>
+#include <stdint.h>
#include <stdlib.h>
#include <eal_export.h>
+#include <rte_bitops.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_thread.h>
+#include <rte_vect.h>
#include "eal_internal_cfg.h"
#include "rte_eal_cfg.h"
+RTE_LOG_REGISTER_DEFAULT(eal_cfg_logtype, INFO);
+
+#define RTE_LOGTYPE_EAL_CFG eal_cfg_logtype
+#define EAL_CFG_LOG(level, ...) \
+ RTE_LOG_LINE(level, EAL_CFG, "" __VA_ARGS__)
+/*
+ * Convenience macros for the repetitive bool/integer/enum setter+getter bodies.
+ * EAL_CFG_BOOL expands to a complete setter/getter pair; EAL_CFG_GETTER
+ * expands to the getter only.
+ */
+
+/*
+ * CFG_REQUIRE_NOT_NULL(cfg) — standard NULL guard for all setter functions.
+ * Logs a function-name-tagged error, sets rte_errno = EINVAL, and returns -1.
+ */
+#define CFG_REQUIRE_NOT_NULL(cfg) do { \
+ if ((cfg) == NULL) { \
+ EAL_CFG_LOG(ERR, "%s: cfg is NULL", __func__); \
+ rte_errno = EINVAL; \
+ return -1; \
+ } \
+} while (0)
+/*
+ * Simple getter: cfg NULL -> return null_val; else return cfg->user_cfg.name.
+ * Only usable when the function name suffix equals the struct field name.
+ * The matching RTE_EXPORT_EXPERIMENTAL_SYMBOL line must precede this macro.
+ */
+#define EAL_CFG_GETTER(type, name, null_val) \
+type \
+rte_eal_cfg_get_##name(const struct rte_eal_cfg *cfg) \
+{ \
+ if (cfg == NULL) \
+ return (null_val); \
+ return cfg->user_cfg.name; \
+}
+/* bool field: sym_suffix == struct field name */
+#define EAL_CFG_BOOL(name) \
+int \
+rte_eal_cfg_set_##name(struct rte_eal_cfg *cfg, bool val) \
+{ \
+ CFG_REQUIRE_NOT_NULL(cfg); \
+ cfg->user_cfg.name = val; \
+ return 0; \
+} \
+bool \
+rte_eal_cfg_get_##name(const struct rte_eal_cfg *cfg) \
+{ \
+ if (cfg == NULL) \
+ return false; \
+ return cfg->user_cfg.name; \
+}
+
struct rte_eal_cfg {
struct eal_user_cfg user_cfg;
};
@@ -49,6 +102,303 @@ rte_eal_cfg_free(struct rte_eal_cfg *cfg)
free(cfg);
}
+/* --- Boolean fields --- */
+/* Export declarations must be standalone lines for gen-version-map.py */
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_no_hugetlbfs, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_no_hugetlbfs, 26.07)
+EAL_CFG_BOOL(no_hugetlbfs)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_no_pci, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_no_pci, 26.07)
+EAL_CFG_BOOL(no_pci)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_no_hpet, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_no_hpet, 26.07)
+EAL_CFG_BOOL(no_hpet)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_vmware_tsc_map, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_vmware_tsc_map, 26.07)
+EAL_CFG_BOOL(vmware_tsc_map)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_no_shconf, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_no_shconf, 26.07)
+EAL_CFG_BOOL(no_shconf)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_in_memory, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_in_memory, 26.07)
+EAL_CFG_BOOL(in_memory)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_create_uio_dev, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_create_uio_dev, 26.07)
+EAL_CFG_BOOL(create_uio_dev)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_no_telemetry, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_no_telemetry, 26.07)
+EAL_CFG_BOOL(no_telemetry)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_legacy_mem, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_legacy_mem, 26.07)
+EAL_CFG_BOOL(legacy_mem)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_match_allocations, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_match_allocations, 26.07)
+EAL_CFG_BOOL(match_allocations)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_single_file_segments, 26.07)
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_single_file_segments, 26.07)
+EAL_CFG_BOOL(single_file_segments)
+
+/* --- Integer fields --- */
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_force_nchannel, 26.07)
+int
+rte_eal_cfg_set_force_nchannel(struct rte_eal_cfg *cfg, uint8_t val)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ cfg->user_cfg.force_nchannel = val;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_force_nchannel, 26.07)
+EAL_CFG_GETTER(uint8_t, force_nchannel, 0)
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_force_nrank, 26.07)
+int
+rte_eal_cfg_set_force_nrank(struct rte_eal_cfg *cfg, uint8_t val)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ cfg->user_cfg.force_nrank = val;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_force_nrank, 26.07)
+EAL_CFG_GETTER(uint8_t, force_nrank, 0)
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_huge_worker_stack_size, 26.07)
+int
+rte_eal_cfg_set_huge_worker_stack_size(struct rte_eal_cfg *cfg, size_t val)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ cfg->user_cfg.huge_worker_stack_size = val;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_huge_worker_stack_size, 26.07)
+EAL_CFG_GETTER(size_t, huge_worker_stack_size, 0)
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_base_virtaddr, 26.07)
+int
+rte_eal_cfg_set_base_virtaddr(struct rte_eal_cfg *cfg, uintptr_t val)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ cfg->user_cfg.base_virtaddr = val;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_base_virtaddr, 26.07)
+EAL_CFG_GETTER(uintptr_t, base_virtaddr, 0)
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_main_lcore, 26.07)
+int
+rte_eal_cfg_set_main_lcore(struct rte_eal_cfg *cfg, int val)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ if (val != -1 && (val < 0 || (unsigned int)val >= RTE_MAX_LCORE)) {
+ EAL_CFG_LOG(ERR, "%s: main_lcore %d out of range [0, %u)",
+ __func__, val, RTE_MAX_LCORE);
+ rte_errno = ERANGE;
+ return -1;
+ }
+ cfg->user_cfg.main_lcore = val;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_main_lcore, 26.07)
+EAL_CFG_GETTER(int, main_lcore, -1)
+
+/* --- Enum fields --- */
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_process_type, 26.07)
+int
+rte_eal_cfg_set_process_type(struct rte_eal_cfg *cfg, enum rte_proc_type_t val)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ if (val >= RTE_PROC_INVALID) {
+ EAL_CFG_LOG(ERR, "%s: invalid process type %d", __func__, val);
+ rte_errno = EINVAL;
+ return -1;
+ }
+ cfg->user_cfg.process_type = val;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_process_type, 26.07)
+EAL_CFG_GETTER(enum rte_proc_type_t, process_type, RTE_PROC_PRIMARY)
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_vfio_intr_mode, 26.07)
+int
+rte_eal_cfg_set_vfio_intr_mode(struct rte_eal_cfg *cfg, enum rte_intr_mode val)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ if (val < RTE_INTR_MODE_NONE || val > RTE_INTR_MODE_MSIX) {
+ EAL_CFG_LOG(ERR, "%s: invalid vfio interrupt mode %d", __func__, val);
+ rte_errno = EINVAL;
+ return -1;
+ }
+ cfg->user_cfg.vfio_intr_mode = val;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_vfio_intr_mode, 26.07)
+EAL_CFG_GETTER(enum rte_intr_mode, vfio_intr_mode, RTE_INTR_MODE_NONE)
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_iova_mode, 26.07)
+int
+rte_eal_cfg_set_iova_mode(struct rte_eal_cfg *cfg, enum rte_iova_mode val)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ if (val != RTE_IOVA_DC && val != RTE_IOVA_PA && val != RTE_IOVA_VA) {
+ EAL_CFG_LOG(ERR, "%s: invalid IOVA mode %d", __func__, val);
+ rte_errno = EINVAL;
+ return -1;
+ }
+ cfg->user_cfg.iova_mode = val;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_iova_mode, 26.07)
+EAL_CFG_GETTER(enum rte_iova_mode, iova_mode, RTE_IOVA_DC)
+
+/*
+ * max_simd_bitwidth: the user-visible value is the bitwidth field; setting it
+ * also marks the value as forced, matching the CLI --force-max-simd-bitwidth
+ * semantics.
+ */
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_max_simd_bitwidth, 26.07)
+int
+rte_eal_cfg_set_max_simd_bitwidth(struct rte_eal_cfg *cfg, uint16_t bitwidth)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ if (bitwidth < RTE_VECT_SIMD_DISABLED || !rte_is_power_of_2(bitwidth)) {
+ EAL_CFG_LOG(ERR, "%s: invalid SIMD bitwidth %u (must be a power of two in [%u, %u])",
+ __func__, bitwidth, RTE_VECT_SIMD_DISABLED, RTE_VECT_SIMD_MAX);
+ rte_errno = EINVAL;
+ return -1;
+ }
+ cfg->user_cfg.max_simd_bitwidth.bitwidth = bitwidth;
+ cfg->user_cfg.max_simd_bitwidth.forced = true;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_max_simd_bitwidth, 26.07)
+uint16_t
+rte_eal_cfg_get_max_simd_bitwidth(const struct rte_eal_cfg *cfg)
+{
+ if (cfg == NULL)
+ return 0;
+ return cfg->user_cfg.max_simd_bitwidth.bitwidth;
+}
+
+/*
+ * Check that @node is a NUMA node ID that actually exists on this system.
+ *
+ * The node argument in rte_eal_cfg_set_numa_mem / set_numa_limit is a NUMA
+ * node ID, not a sequential index. numa_mem[] is indexed by that same ID.
+ * rte_socket_id_by_idx() can convert an index to an ID when the caller only
+ * knows the ordinal position.
+ *
+ * Returns true when the node exists (or when platform info is unavailable and
+ * we cannot verify), false when the node is definitely absent.
+ */
+static bool
+numa_node_exists(unsigned int node)
+{
+ const struct eal_platform_info *pi = rte_eal_get_platform_info();
+
+ if (pi == NULL)
+ return true; /* discovery failed; allow and let EAL sort it out */
+
+ for (uint32_t i = 0; i < pi->numa_node_count; i++) {
+ if (pi->numa_nodes[i] == node)
+ return true;
+ }
+ return false;
+}
+
+/* --- Per-NUMA memory --- */
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_numa_mem, 26.07)
+int
+rte_eal_cfg_set_numa_mem(struct rte_eal_cfg *cfg, unsigned int node, uint64_t mb)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ if (node >= RTE_MAX_NUMA_NODES) {
+ EAL_CFG_LOG(ERR, "%s: node %u out of range (max %u)",
+ __func__, node, RTE_MAX_NUMA_NODES - 1);
+ rte_errno = ERANGE;
+ return -1;
+ }
+ if (!numa_node_exists(node)) {
+ EAL_CFG_LOG(ERR, "%s: NUMA node %u does not exist on this system",
+ __func__, node);
+ rte_errno = ENODEV;
+ return -1;
+ }
+ cfg->user_cfg.numa_mem[node] = mb;
+ cfg->user_cfg.force_numa = true;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_numa_mem, 26.07)
+uint64_t
+rte_eal_cfg_get_numa_mem(const struct rte_eal_cfg *cfg, unsigned int node)
+{
+ if (cfg == NULL || node >= RTE_MAX_NUMA_NODES)
+ return 0;
+ return cfg->user_cfg.numa_mem[node];
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_numa_limit, 26.07)
+int
+rte_eal_cfg_set_numa_limit(struct rte_eal_cfg *cfg, unsigned int node, uint64_t mb)
+{
+ CFG_REQUIRE_NOT_NULL(cfg);
+ if (node >= RTE_MAX_NUMA_NODES) {
+ EAL_CFG_LOG(ERR, "%s: node %u out of range (max %u)",
+ __func__, node, RTE_MAX_NUMA_NODES - 1);
+ rte_errno = ERANGE;
+ return -1;
+ }
+ if (!numa_node_exists(node)) {
+ EAL_CFG_LOG(ERR, "%s: NUMA node %u does not exist on this system",
+ __func__, node);
+ rte_errno = ENODEV;
+ return -1;
+ }
+ cfg->user_cfg.numa_limit[node] = mb;
+ cfg->user_cfg.force_numa_limits = true;
+ return 0;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_numa_limit, 26.07)
+uint64_t
+rte_eal_cfg_get_numa_limit(const struct rte_eal_cfg *cfg, unsigned int node)
+{
+ if (cfg == NULL || node >= RTE_MAX_NUMA_NODES)
+ return 0;
+ return cfg->user_cfg.numa_limit[node];
+}
+
+/* --- memory: set is unsupported; get sums all per-NUMA allocations --- */
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_memory, 26.07)
+int
+rte_eal_cfg_set_memory(struct rte_eal_cfg *cfg __rte_unused,
+ size_t mb __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -1;
+}
+
+RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_get_memory, 26.07)
+size_t
+rte_eal_cfg_get_memory(const struct rte_eal_cfg *cfg)
+{
+ uint64_t total = 0;
+
+ if (cfg == NULL)
+ return 0;
+ for (unsigned int i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ total += cfg->user_cfg.numa_mem[i];
+ return (size_t)total;
+}
+
RTE_EXPORT_EXPERIMENTAL_SYMBOL(rte_eal_cfg_set_lcores_from_affinity, 26.07)
int
rte_eal_cfg_set_lcores_from_affinity(struct rte_eal_cfg *cfg, bool remap)
@@ -57,12 +407,10 @@ rte_eal_cfg_set_lcores_from_affinity(struct rte_eal_cfg *cfg, bool remap)
unsigned int lcore_id = 0;
int count = 0;
- if (cfg == NULL) {
- rte_errno = EINVAL;
- return -1;
- }
+ CFG_REQUIRE_NOT_NULL(cfg);
if (rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset) != 0) {
+ EAL_CFG_LOG(ERR, "%s: failed to get thread CPU affinity", __func__);
rte_errno = ENOTSUP;
return -1;
}
@@ -96,6 +444,7 @@ rte_eal_cfg_set_lcores_from_affinity(struct rte_eal_cfg *cfg, bool remap)
}
if (count == 0) {
+ EAL_CFG_LOG(ERR, "%s: no CPUs in thread affinity mask", __func__);
rte_errno = ENOTSUP;
return -1;
}
diff --git a/lib/eal_cfg/rte_eal_cfg.h b/lib/eal_cfg/rte_eal_cfg.h
index ecabe136d8..1ffdcffa49 100644
--- a/lib/eal_cfg/rte_eal_cfg.h
+++ b/lib/eal_cfg/rte_eal_cfg.h
@@ -20,8 +20,12 @@ extern "C" {
#endif
#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
#include <rte_compat.h>
+#include <rte_eal.h>
+#include <rte_pci_dev_feature_defs.h>
/**
* Opaque EAL configuration handle.
@@ -58,6 +62,311 @@ __rte_experimental
void
rte_eal_cfg_free(struct rte_eal_cfg *cfg);
+/**
+ * @name Boolean configuration fields
+ *
+ * Each pair of functions gets or sets one boolean flag in the configuration
+ * handle. The setter returns 0 on success or -1 with rte_errno set to EINVAL
+ * if @p cfg is NULL. The getter returns false if @p cfg is NULL.
+ *
+ * @{
+ */
+/** Disable use of hugepages (equivalent to --no-huge). */
+__rte_experimental
+int
+rte_eal_cfg_set_no_hugetlbfs(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_no_hugetlbfs(const struct rte_eal_cfg *cfg);
+
+/** Disable PCI bus scanning (equivalent to --no-pci). */
+__rte_experimental
+int
+rte_eal_cfg_set_no_pci(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_no_pci(const struct rte_eal_cfg *cfg);
+
+/** Disable HPET timer (equivalent to --no-hpet). */
+__rte_experimental
+int
+rte_eal_cfg_set_no_hpet(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_no_hpet(const struct rte_eal_cfg *cfg);
+
+/** Use VMware TSC mapping (equivalent to --vmware-tsc-map). */
+__rte_experimental
+int
+rte_eal_cfg_set_vmware_tsc_map(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_vmware_tsc_map(const struct rte_eal_cfg *cfg);
+
+/** Disable creation of a shared config file (equivalent to --no-shconf). */
+__rte_experimental
+int
+rte_eal_cfg_set_no_shconf(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_no_shconf(const struct rte_eal_cfg *cfg);
+
+/** Run without any shared runtime files (equivalent to --in-memory). */
+__rte_experimental
+int
+rte_eal_cfg_set_in_memory(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_in_memory(const struct rte_eal_cfg *cfg);
+
+/** Create /dev/uioX devices (equivalent to --create-uio-dev). */
+__rte_experimental
+int
+rte_eal_cfg_set_create_uio_dev(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_create_uio_dev(const struct rte_eal_cfg *cfg);
+
+/** Disable telemetry (equivalent to --no-telemetry). */
+__rte_experimental
+int
+rte_eal_cfg_set_no_telemetry(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_no_telemetry(const struct rte_eal_cfg *cfg);
+
+/** Use legacy memory layout (equivalent to --legacy-mem). */
+__rte_experimental
+int
+rte_eal_cfg_set_legacy_mem(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_legacy_mem(const struct rte_eal_cfg *cfg);
+
+/** Free hugepages exactly as allocated (equivalent to --match-allocations). */
+__rte_experimental
+int
+rte_eal_cfg_set_match_allocations(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_match_allocations(const struct rte_eal_cfg *cfg);
+
+/** Store all hugepages in single files per page size and NUMA node (equivalent to --single-file-segments). */
+__rte_experimental
+int
+rte_eal_cfg_set_single_file_segments(struct rte_eal_cfg *cfg, bool val);
+__rte_experimental
+bool
+rte_eal_cfg_get_single_file_segments(const struct rte_eal_cfg *cfg);
+/** @} */
+
+/**
+ * @name Integer configuration fields
+ *
+ * Setters return 0 on success, or -1 with rte_errno set to EINVAL if @p cfg
+ * is NULL, or ERANGE / EINVAL if the value is out of the accepted range.
+ * Getters return 0, or another suitable default value, if @p cfg is NULL.
+ *
+ * @{
+ */
+/** Force number of memory channels (equivalent to -n). */
+__rte_experimental
+int
+rte_eal_cfg_set_force_nchannel(struct rte_eal_cfg *cfg, uint8_t val);
+__rte_experimental
+uint8_t
+rte_eal_cfg_get_force_nchannel(const struct rte_eal_cfg *cfg);
+
+/** Force number of memory ranks (equivalent to -r). */
+__rte_experimental
+int
+rte_eal_cfg_set_force_nrank(struct rte_eal_cfg *cfg, uint8_t val);
+__rte_experimental
+uint8_t
+rte_eal_cfg_get_force_nrank(const struct rte_eal_cfg *cfg);
+
+/** Worker lcore stack size in bytes, allocated from hugepage memory (equivalent to --huge-worker-stack). */
+__rte_experimental
+int
+rte_eal_cfg_set_huge_worker_stack_size(struct rte_eal_cfg *cfg, size_t val);
+__rte_experimental
+size_t
+rte_eal_cfg_get_huge_worker_stack_size(const struct rte_eal_cfg *cfg);
+
+/** Base virtual address for memory mapping (equivalent to --base-virtaddr). */
+__rte_experimental
+int
+rte_eal_cfg_set_base_virtaddr(struct rte_eal_cfg *cfg, uintptr_t val);
+__rte_experimental
+uintptr_t
+rte_eal_cfg_get_base_virtaddr(const struct rte_eal_cfg *cfg);
+
+/**
+ * Set the ID of the main lcore (equivalent to --main-lcore).
+ *
+ * @param cfg Configuration handle. Must not be NULL.
+ * @param val Lcore ID in the range [0, RTE_MAX_LCORE), or -1 for auto-select.
+ * @return 0 on success, -1 with rte_errno set to EINVAL or ERANGE on error.
+ */
+__rte_experimental
+int
+rte_eal_cfg_set_main_lcore(struct rte_eal_cfg *cfg, int val);
+/** Returns -1 (auto-select) if @p cfg is NULL. */
+__rte_experimental
+int
+rte_eal_cfg_get_main_lcore(const struct rte_eal_cfg *cfg);
+/** @} */
+
+/**
+ * @name Enum configuration fields
+ *
+ * Setters return 0 on success, or -1 with rte_errno set to EINVAL if @p cfg
+ * is NULL or the value is not a recognised member of the enum.
+ * Getters return a suitable default if @p cfg is NULL.
+ *
+ * @{
+ */
+/**
+ * Set the process type (equivalent to --proc-type).
+ *
+ * @p val must be RTE_PROC_AUTO, RTE_PROC_PRIMARY, or RTE_PROC_SECONDARY.
+ * RTE_PROC_INVALID is rejected.
+ */
+__rte_experimental
+int
+rte_eal_cfg_set_process_type(struct rte_eal_cfg *cfg, enum rte_proc_type_t val);
+__rte_experimental
+enum rte_proc_type_t
+rte_eal_cfg_get_process_type(const struct rte_eal_cfg *cfg);
+
+/**
+ * Set the default VFIO interrupt mode (equivalent to --vfio-intr).
+ *
+ * @p val must be one of RTE_INTR_MODE_NONE, RTE_INTR_MODE_LEGACY,
+ * RTE_INTR_MODE_MSI, or RTE_INTR_MODE_MSIX.
+ */
+__rte_experimental
+int
+rte_eal_cfg_set_vfio_intr_mode(struct rte_eal_cfg *cfg, enum rte_intr_mode val);
+__rte_experimental
+enum rte_intr_mode
+rte_eal_cfg_get_vfio_intr_mode(const struct rte_eal_cfg *cfg);
+
+/**
+ * Set the requested IOVA mode (equivalent to --iova-mode).
+ *
+ * @p val must be RTE_IOVA_DC (auto-detect), RTE_IOVA_PA, or RTE_IOVA_VA.
+ */
+__rte_experimental
+int
+rte_eal_cfg_set_iova_mode(struct rte_eal_cfg *cfg, enum rte_iova_mode val);
+__rte_experimental
+enum rte_iova_mode
+rte_eal_cfg_get_iova_mode(const struct rte_eal_cfg *cfg);
+
+/**
+ * Set the maximum SIMD bitwidth for vector code paths.
+ *
+ * Marks the value as forced, equivalent to --force-max-simd-bitwidth.
+ *
+ * @param cfg Configuration handle. Must not be NULL.
+ * @param bitwidth Maximum SIMD bitwidth. Must be a power of two no smaller
+ * than RTE_VECT_SIMD_DISABLED and no greater than RTE_VECT_SIMD_MAX
+ * (e.g. RTE_VECT_SIMD_DISABLED, 128, 256, 512).
+ * @return 0 on success, -1 with rte_errno set to EINVAL if cfg is NULL or
+ * bitwidth is invalid.
+ */
+__rte_experimental
+int
+rte_eal_cfg_set_max_simd_bitwidth(struct rte_eal_cfg *cfg, uint16_t bitwidth);
+__rte_experimental
+uint16_t
+rte_eal_cfg_get_max_simd_bitwidth(const struct rte_eal_cfg *cfg);
+/** @} */
+
+/**
+ * @name Per-NUMA memory configuration
+ * @{
+ */
+/**
+ * Set the requested memory amount for a NUMA node, in megabytes.
+ *
+ * Equivalent to the per-socket value in --socket-mem.
+ *
+ * @param cfg Configuration handle. Must not be NULL.
+ * @param node NUMA node ID (must be < RTE_MAX_NUMA_NODES and present on this
+ * system; use rte_socket_id_by_idx() to convert a sequential index).
+ * @param mb Memory in megabytes to request on this node.
+ * @return 0 on success, -1 with rte_errno set to EINVAL, ERANGE, or ENODEV on error.
+ */
+__rte_experimental
+int
+rte_eal_cfg_set_numa_mem(struct rte_eal_cfg *cfg, unsigned int node, uint64_t mb);
+/**
+ * Get the requested memory amount for a NUMA node, in megabytes.
+ *
+ * @param cfg Configuration handle.
+ * @param node NUMA node ID (must be < RTE_MAX_NUMA_NODES).
+ * @return Configured memory in megabytes, or 0 if cfg is NULL or node is out of range.
+ */
+__rte_experimental
+uint64_t
+rte_eal_cfg_get_numa_mem(const struct rte_eal_cfg *cfg, unsigned int node);
+
+/**
+ * Set the memory limit for a NUMA node, in megabytes.
+ *
+ * Equivalent to the per-socket value in --socket-limit.
+ *
+ * @param cfg Configuration handle. Must not be NULL.
+ * @param node NUMA node ID (must be < RTE_MAX_NUMA_NODES and present on this
+ * system; use rte_socket_id_by_idx() to convert a sequential index).
+ * @param mb Memory limit in megabytes for this node.
+ * @return 0 on success, -1 with rte_errno set to EINVAL, ERANGE, or ENODEV on error.
+ */
+__rte_experimental
+int
+rte_eal_cfg_set_numa_limit(struct rte_eal_cfg *cfg, unsigned int node, uint64_t mb);
+/**
+ * Get the memory limit for a NUMA node, in megabytes.
+ *
+ * @param cfg Configuration handle.
+ * @param node NUMA node ID (must be < RTE_MAX_NUMA_NODES).
+ * @return Configured memory limit in megabytes, or 0 if cfg is NULL or node is out of range.
+ */
+__rte_experimental
+uint64_t
+rte_eal_cfg_get_numa_limit(const struct rte_eal_cfg *cfg, unsigned int node);
+/** @} */
+
+/**
+ * @name Total memory
+ * @{
+ */
+/**
+ * Setting total memory directly is not supported.
+ *
+ * Use rte_eal_cfg_set_numa_mem() to configure per-NUMA memory instead.
+ *
+ * @return Always -1 with rte_errno set to ENOTSUP.
+ */
+__rte_experimental
+int
+rte_eal_cfg_set_memory(struct rte_eal_cfg *cfg, size_t mb);
+
+/**
+ * Get the total configured memory across all NUMA nodes, in megabytes.
+ *
+ * Returns the sum of all per-NUMA memory values set via
+ * rte_eal_cfg_set_numa_mem().
+ *
+ * @param cfg Configuration handle, or NULL (returns 0).
+ * @return Total memory in megabytes.
+ */
+__rte_experimental
+size_t
+rte_eal_cfg_get_memory(const struct rte_eal_cfg *cfg);
+/** @} */
+
/**
* Populate lcore configuration from the calling thread's CPU affinity.
*
--
2.51.0