[PATCH v3 3/3] pmu: handle sysconf(_SC_PAGESIZE) negative return value
Morten Brørup
mb at smartsharesystems.com
Tue Jun 24 10:03:33 CEST 2025
Coverity reports some defects whose root cause appears to be an unhandled negative
return value from sysconf(_SC_PAGESIZE).
The PMU library cannot use the EAL function rte_mem_page_size(), because
of the dependency chain. (EAL depends on PMU, because Trace is part of
EAL, and Trace depends on PMU; so PMU cannot call EAL functions.)
So mem_page_size(), inspired by rte_mem_page_size(), was added to the PMU library
and is used instead.
Signed-off-by: Morten Brørup <mb at smartsharesystems.com>
Acked-by: Anatoly Burakov <anatoly.burakov at intel.com>
---
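As a side note, here is a minimal standalone sketch of the errno-based sysconf()
check described above, in case it helps review; query_page_size() and the main()
driver are illustrative only and not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative helper: query the page size while distinguishing an
 * indeterminate value (sysconf() returns -1 with errno left at 0)
 * from a real error (sysconf() returns -1 and sets errno).
 */
static long
query_page_size(void)
{
	long page_size;

	errno = 0;
	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0 && errno == 0)
		errno = ENOENT; /* value indeterminate, no errno from sysconf() */

	return page_size;
}

int
main(void)
{
	long page_size = query_page_size();

	if (page_size < 0) {
		perror("sysconf(_SC_PAGESIZE)");
		return 1;
	}
	printf("page size: %ld bytes\n", page_size);
	return 0;
}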
lib/pmu/pmu.c | 32 ++++++++++++++++++++++++++++++--
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/lib/pmu/pmu.c b/lib/pmu/pmu.c
index 46b0b450ac..4c7271522a 100644
--- a/lib/pmu/pmu.c
+++ b/lib/pmu/pmu.c
@@ -212,14 +212,40 @@ open_events(struct rte_pmu_event_group *group)
 	return ret;
 }
 
+/* Inspired by rte_mem_page_size() from /lib/eal/unix/eal_unix_memory.c */
+static size_t
+mem_page_size(void)
+{
+	static size_t page_size;
+
+	if (unlikely(page_size == 0)) {
+		/*
+		 * When the sysconf value cannot be determined, sysconf()
+		 * returns -1 without setting errno.
+		 * To distinguish an indeterminate value from an error,
+		 * clear errno before calling sysconf(), and check whether
+		 * errno has been set if sysconf() returns -1.
+		 */
+		errno = 0;
+		page_size = sysconf(_SC_PAGESIZE);
+		if ((ssize_t)page_size < 0 && errno == 0)
+			errno = ENOENT;
+	}
+
+	return page_size;
+}
+
 static int
 mmap_events(struct rte_pmu_event_group *group)
 {
-	long page_size = sysconf(_SC_PAGE_SIZE);
+	size_t page_size = mem_page_size();
 	unsigned int i;
 	void *addr;
 	int ret;
 
+	if ((ssize_t)page_size < 0)
+		return -errno;
+
 	for (i = 0; i < rte_pmu.num_group_events; i++) {
 		addr = mmap(0, page_size, PROT_READ, MAP_SHARED, group->fds[i], 0);
 		if (addr == MAP_FAILED) {
@@ -243,6 +269,7 @@ mmap_events(struct rte_pmu_event_group *group)
 static void
 cleanup_events(struct rte_pmu_event_group *group)
 {
+	size_t page_size = mem_page_size();
 	unsigned int i;
 
 	if (group->fds[0] != -1)
@@ -250,7 +277,8 @@ cleanup_events(struct rte_pmu_event_group *group)
 
 	for (i = 0; i < rte_pmu.num_group_events; i++) {
 		if (group->mmap_pages[i]) {
-			munmap(group->mmap_pages[i], sysconf(_SC_PAGE_SIZE));
+			__rte_assume((ssize_t)page_size >= 0);
+			munmap(group->mmap_pages[i], page_size);
 			group->mmap_pages[i] = NULL;
 		}
 
--
2.43.0