[PATCH v3 03/16] mem: annotate shared memory config locks
David Marchand
david.marchand at redhat.com
Tue Apr 4 14:48:27 CEST 2023
Expose the shared memory configuration locks via internal accessors,
then annotate the rte_mcfg_xxx_(read|write)_(un)lock helpers so that
clang's thread safety analysis can track them.
Signed-off-by: David Marchand <david.marchand at redhat.com>
---
lib/eal/common/eal_common_mcfg.c | 66 +++++++++++++++++------------
lib/eal/include/rte_eal_memconfig.h | 63 +++++++++++++++++++++------
lib/eal/version.map | 4 ++
3 files changed, 91 insertions(+), 42 deletions(-)
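Note (not part of the patch): a minimal sketch of how these annotations are
meant to be consumed, assuming DPDK-internal code (the new accessors are
__rte_internal) built with clang and the lock annotations enabled
(RTE_ANNOTATE_LOCKS / -Wthread-safety). The dump_heaps* functions below are
hypothetical and only illustrate the pattern.

	#include <rte_eal_memconfig.h>
	#include <rte_lock_annotations.h>

	/* Must be called with the mem hotplug lock held (shared). */
	static int
	dump_heaps_locked(void)
		__rte_shared_locks_required(rte_mcfg_mem_get_lock())
	{
		/* ... walk shared memory structures ... */
		return 0;
	}

	static int
	dump_heaps(void)
	{
		int ret;

		rte_mcfg_mem_read_lock();   /* annotated: acquires the capability */
		ret = dump_heaps_locked();  /* ok: lock is held here */
		rte_mcfg_mem_read_unlock(); /* annotated: releases it */

		/* Calling dump_heaps_locked() at this point, without the
		 * lock held, would raise a -Wthread-safety warning. */
		return ret;
	}

The capability expression passed to the attribute is the same accessor call
used in the header annotations below, so callers and the lock/unlock helpers
all refer to one lock object.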
diff --git a/lib/eal/common/eal_common_mcfg.c b/lib/eal/common/eal_common_mcfg.c
index cf4a279905..b60d41f7b6 100644
--- a/lib/eal/common/eal_common_mcfg.c
+++ b/lib/eal/common/eal_common_mcfg.c
@@ -69,102 +69,112 @@ eal_mcfg_update_from_internal(void)
mcfg->version = RTE_VERSION;
}
+rte_rwlock_t *
+rte_mcfg_mem_get_lock(void)
+{
+ return &rte_eal_get_configuration()->mem_config->memory_hotplug_lock;
+}
+
void
rte_mcfg_mem_read_lock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ rte_rwlock_read_lock(rte_mcfg_mem_get_lock());
}
void
rte_mcfg_mem_read_unlock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ rte_rwlock_read_unlock(rte_mcfg_mem_get_lock());
}
void
rte_mcfg_mem_write_lock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+ rte_rwlock_write_lock(rte_mcfg_mem_get_lock());
}
void
rte_mcfg_mem_write_unlock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ rte_rwlock_write_unlock(rte_mcfg_mem_get_lock());
+}
+
+rte_rwlock_t *
+rte_mcfg_tailq_get_lock(void)
+{
+ return &rte_eal_get_configuration()->mem_config->qlock;
}
void
rte_mcfg_tailq_read_lock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_read_lock(&mcfg->qlock);
+ rte_rwlock_read_lock(rte_mcfg_tailq_get_lock());
}
void
rte_mcfg_tailq_read_unlock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_read_unlock(&mcfg->qlock);
+ rte_rwlock_read_unlock(rte_mcfg_tailq_get_lock());
}
void
rte_mcfg_tailq_write_lock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_write_lock(&mcfg->qlock);
+ rte_rwlock_write_lock(rte_mcfg_tailq_get_lock());
}
void
rte_mcfg_tailq_write_unlock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_write_unlock(&mcfg->qlock);
+ rte_rwlock_write_unlock(rte_mcfg_tailq_get_lock());
+}
+
+rte_rwlock_t *
+rte_mcfg_mempool_get_lock(void)
+{
+ return &rte_eal_get_configuration()->mem_config->mplock;
}
void
rte_mcfg_mempool_read_lock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_read_lock(&mcfg->mplock);
+ rte_rwlock_read_lock(rte_mcfg_mempool_get_lock());
}
void
rte_mcfg_mempool_read_unlock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_read_unlock(&mcfg->mplock);
+ rte_rwlock_read_unlock(rte_mcfg_mempool_get_lock());
}
void
rte_mcfg_mempool_write_lock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_write_lock(&mcfg->mplock);
+ rte_rwlock_write_lock(rte_mcfg_mempool_get_lock());
}
void
rte_mcfg_mempool_write_unlock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_rwlock_write_unlock(&mcfg->mplock);
+ rte_rwlock_write_unlock(rte_mcfg_mempool_get_lock());
+}
+
+rte_spinlock_t *
+rte_mcfg_timer_get_lock(void)
+{
+ return &rte_eal_get_configuration()->mem_config->tlock;
}
void
rte_mcfg_timer_lock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_spinlock_lock(&mcfg->tlock);
+ rte_spinlock_lock(rte_mcfg_timer_get_lock());
}
void
rte_mcfg_timer_unlock(void)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- rte_spinlock_unlock(&mcfg->tlock);
+ rte_spinlock_unlock(rte_mcfg_timer_get_lock());
}
bool
diff --git a/lib/eal/include/rte_eal_memconfig.h b/lib/eal/include/rte_eal_memconfig.h
index e175564647..c527f9aa29 100644
--- a/lib/eal/include/rte_eal_memconfig.h
+++ b/lib/eal/include/rte_eal_memconfig.h
@@ -7,6 +7,8 @@
#include <stdbool.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
/**
* @file
@@ -18,89 +20,122 @@
extern "C" {
#endif
+/**
+ * Internal helpers used for lock annotations.
+ */
+__rte_internal
+rte_rwlock_t *
+rte_mcfg_mem_get_lock(void);
+
+__rte_internal
+rte_rwlock_t *
+rte_mcfg_tailq_get_lock(void);
+
+__rte_internal
+rte_rwlock_t *
+rte_mcfg_mempool_get_lock(void);
+
+__rte_internal
+rte_spinlock_t *
+rte_mcfg_timer_get_lock(void);
+
/**
* Lock the internal EAL shared memory configuration for shared access.
*/
void
-rte_mcfg_mem_read_lock(void);
+rte_mcfg_mem_read_lock(void)
+ __rte_shared_lock_function(rte_mcfg_mem_get_lock());
/**
* Unlock the internal EAL shared memory configuration for shared access.
*/
void
-rte_mcfg_mem_read_unlock(void);
+rte_mcfg_mem_read_unlock(void)
+ __rte_unlock_function(rte_mcfg_mem_get_lock());
/**
* Lock the internal EAL shared memory configuration for exclusive access.
*/
void
-rte_mcfg_mem_write_lock(void);
+rte_mcfg_mem_write_lock(void)
+ __rte_exclusive_lock_function(rte_mcfg_mem_get_lock());
/**
* Unlock the internal EAL shared memory configuration for exclusive access.
*/
void
-rte_mcfg_mem_write_unlock(void);
+rte_mcfg_mem_write_unlock(void)
+ __rte_unlock_function(rte_mcfg_mem_get_lock());
/**
* Lock the internal EAL TAILQ list for shared access.
*/
void
-rte_mcfg_tailq_read_lock(void);
+rte_mcfg_tailq_read_lock(void)
+ __rte_shared_lock_function(rte_mcfg_tailq_get_lock());
/**
* Unlock the internal EAL TAILQ list for shared access.
*/
void
-rte_mcfg_tailq_read_unlock(void);
+rte_mcfg_tailq_read_unlock(void)
+ __rte_unlock_function(rte_mcfg_tailq_get_lock());
/**
* Lock the internal EAL TAILQ list for exclusive access.
*/
void
-rte_mcfg_tailq_write_lock(void);
+rte_mcfg_tailq_write_lock(void)
+ __rte_exclusive_lock_function(rte_mcfg_tailq_get_lock());
/**
* Unlock the internal EAL TAILQ list for exclusive access.
*/
void
-rte_mcfg_tailq_write_unlock(void);
+rte_mcfg_tailq_write_unlock(void)
+ __rte_unlock_function(rte_mcfg_tailq_get_lock());
/**
* Lock the internal EAL Mempool list for shared access.
*/
void
-rte_mcfg_mempool_read_lock(void);
+rte_mcfg_mempool_read_lock(void)
+ __rte_shared_lock_function(rte_mcfg_mempool_get_lock());
/**
* Unlock the internal EAL Mempool list for shared access.
*/
void
-rte_mcfg_mempool_read_unlock(void);
+rte_mcfg_mempool_read_unlock(void)
+ __rte_unlock_function(rte_mcfg_mempool_get_lock());
/**
* Lock the internal EAL Mempool list for exclusive access.
*/
void
-rte_mcfg_mempool_write_lock(void);
+rte_mcfg_mempool_write_lock(void)
+ __rte_exclusive_lock_function(rte_mcfg_mempool_get_lock());
/**
* Unlock the internal EAL Mempool list for exclusive access.
*/
void
-rte_mcfg_mempool_write_unlock(void);
+rte_mcfg_mempool_write_unlock(void)
+ __rte_unlock_function(rte_mcfg_mempool_get_lock());
/**
* Lock the internal EAL Timer Library lock for exclusive access.
*/
void
-rte_mcfg_timer_lock(void);
+rte_mcfg_timer_lock(void)
+ __rte_exclusive_lock_function(rte_mcfg_timer_get_lock());
/**
* Unlock the internal EAL Timer Library lock for exclusive access.
*/
void
-rte_mcfg_timer_unlock(void);
+rte_mcfg_timer_unlock(void)
+ __rte_unlock_function(rte_mcfg_timer_get_lock());
/**
* If true, pages are put in single files (per memseg list),
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 6d6978f108..51a820d829 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -469,6 +469,10 @@ INTERNAL {
rte_intr_vec_list_free;
rte_intr_vec_list_index_get;
rte_intr_vec_list_index_set;
+ rte_mcfg_mem_get_lock;
+ rte_mcfg_mempool_get_lock;
+ rte_mcfg_tailq_get_lock;
+ rte_mcfg_timer_get_lock;
rte_mem_lock;
rte_mem_map;
rte_mem_page_size;
--
2.39.2