[dpdk-dev] [PATCH v7 1/5] eal: add new definitions for wait scheme
Feifei Wang
feifei.wang2 at arm.com
Thu Oct 28 08:56:36 CEST 2021
Introduce macros as a generic interface for address monitoring.
Encapsulate the size-specific '__LOAD_EXC_16', '__LOAD_EXC_32' and
'__LOAD_EXC_64' into a new macro '__LOAD_EXC' that dispatches on the
operand size.
Furthermore, to prevent the following compilation warning on Arm:
----------------------------------------------
'warning: implicit declaration of function ...'
----------------------------------------------
delete the 'undef' statements for '__LOAD_EXC_xx', '__SEVL' and
'__WFE', and prefix these macros with '__RTE_ARM' to keep the
namespace clean. The original macros were undefined at the end of
the file, so if the new macro 'rte_wait_event' expanded them from
other files, they would be reported as not defined. A usage sketch
of the new macro follows the diffstat below.
Signed-off-by: Feifei Wang <feifei.wang2 at arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang at arm.com>
---
lib/eal/arm/include/rte_pause_64.h | 166 ++++++++++++++++------------
lib/eal/include/generic/rte_pause.h | 28 +++++
2 files changed, 122 insertions(+), 72 deletions(-)
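
For clarity, here is a minimal sketch of how the new 'rte_wait_event'
macro is meant to be called. The function and flag below are
hypothetical and only illustrate the calling convention; they are not
part of this patch:

#include <stdint.h>
#include <rte_pause.h>

/* Spin until the lowest bit of *flag is cleared by another thread. */
static inline void
wait_until_unlocked(volatile uint32_t *flag)
{
	/*
	 * Loop while ((*flag & 0x1) != 0); the acquire order makes
	 * later loads observe writes done before the bit was cleared.
	 */
	rte_wait_event(flag, 0x1, !=, 0, __ATOMIC_ACQUIRE);
}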
diff --git a/lib/eal/arm/include/rte_pause_64.h b/lib/eal/arm/include/rte_pause_64.h
index e87d10b8cc..d547226a8d 100644
--- a/lib/eal/arm/include/rte_pause_64.h
+++ b/lib/eal/arm/include/rte_pause_64.h
@@ -26,26 +26,18 @@ static inline void rte_pause(void)
#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
/* Send an event to quit WFE. */
-#define __SEVL() { asm volatile("sevl" : : : "memory"); }
+#define __RTE_ARM_SEVL() { asm volatile("sevl" : : : "memory"); }
/* Put processor into low power WFE(Wait For Event) state. */
-#define __WFE() { asm volatile("wfe" : : : "memory"); }
+#define __RTE_ARM_WFE() { asm volatile("wfe" : : : "memory"); }
-static __rte_always_inline void
-rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
- int memorder)
-{
- uint16_t value;
-
- assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
-
- /*
- * Atomic exclusive load from addr, it returns the 16-bit content of
- * *addr while making it 'monitored',when it is written by someone
- * else, the 'monitored' state is cleared and a event is generated
- * implicitly to exit WFE.
- */
-#define __LOAD_EXC_16(src, dst, memorder) { \
+/*
+ * Atomic exclusive load from addr, it returns the 16-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_16(src, dst, memorder) { \
if (memorder == __ATOMIC_RELAXED) { \
asm volatile("ldxrh %w[tmp], [%x[addr]]" \
: [tmp] "=&r" (dst) \
@@ -58,15 +50,70 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
: "memory"); \
} }
- __LOAD_EXC_16(addr, value, memorder)
+/*
+ * Atomic exclusive load from addr, it returns the 32-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_32(src, dst, memorder) { \
+ if (memorder == __ATOMIC_RELAXED) { \
+ asm volatile("ldxr %w[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } else { \
+ asm volatile("ldaxr %w[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } }
+
+/*
+ * Atomic exclusive load from addr, it returns the 64-bit content of
+ * *addr while making it 'monitored', when it is written by someone
+ * else, the 'monitored' state is cleared and an event is generated
+ * implicitly to exit WFE.
+ */
+#define __RTE_ARM_LOAD_EXC_64(src, dst, memorder) { \
+ if (memorder == __ATOMIC_RELAXED) { \
+ asm volatile("ldxr %x[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } else { \
+ asm volatile("ldaxr %x[tmp], [%x[addr]]" \
+ : [tmp] "=&r" (dst) \
+ : [addr] "r"(src) \
+ : "memory"); \
+ } }
+
+#define __RTE_ARM_LOAD_EXC(src, dst, memorder, size) { \
+ assert(size == 16 || size == 32 || size == 64); \
+ if (size == 16) \
+ __RTE_ARM_LOAD_EXC_16(src, dst, memorder) \
+ else if (size == 32) \
+ __RTE_ARM_LOAD_EXC_32(src, dst, memorder) \
+ else if (size == 64) \
+ __RTE_ARM_LOAD_EXC_64(src, dst, memorder) \
+}
+
+static __rte_always_inline void
+rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
+ int memorder)
+{
+ uint16_t value;
+
+ assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
+
+ __RTE_ARM_LOAD_EXC_16(addr, value, memorder)
if (value != expected) {
- __SEVL()
+ __RTE_ARM_SEVL()
do {
- __WFE()
- __LOAD_EXC_16(addr, value, memorder)
+ __RTE_ARM_WFE()
+ __RTE_ARM_LOAD_EXC_16(addr, value, memorder)
} while (value != expected);
}
-#undef __LOAD_EXC_16
}
static __rte_always_inline void
@@ -77,34 +124,14 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
- /*
- * Atomic exclusive load from addr, it returns the 32-bit content of
- * *addr while making it 'monitored',when it is written by someone
- * else, the 'monitored' state is cleared and a event is generated
- * implicitly to exit WFE.
- */
-#define __LOAD_EXC_32(src, dst, memorder) { \
- if (memorder == __ATOMIC_RELAXED) { \
- asm volatile("ldxr %w[tmp], [%x[addr]]" \
- : [tmp] "=&r" (dst) \
- : [addr] "r"(src) \
- : "memory"); \
- } else { \
- asm volatile("ldaxr %w[tmp], [%x[addr]]" \
- : [tmp] "=&r" (dst) \
- : [addr] "r"(src) \
- : "memory"); \
- } }
-
- __LOAD_EXC_32(addr, value, memorder)
+ __RTE_ARM_LOAD_EXC_32(addr, value, memorder)
if (value != expected) {
- __SEVL()
+ __RTE_ARM_SEVL()
do {
- __WFE()
- __LOAD_EXC_32(addr, value, memorder)
+ __RTE_ARM_WFE()
+ __RTE_ARM_LOAD_EXC_32(addr, value, memorder)
} while (value != expected);
}
-#undef __LOAD_EXC_32
}
static __rte_always_inline void
@@ -115,38 +142,33 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
- /*
- * Atomic exclusive load from addr, it returns the 64-bit content of
- * *addr while making it 'monitored',when it is written by someone
- * else, the 'monitored' state is cleared and a event is generated
- * implicitly to exit WFE.
- */
-#define __LOAD_EXC_64(src, dst, memorder) { \
- if (memorder == __ATOMIC_RELAXED) { \
- asm volatile("ldxr %x[tmp], [%x[addr]]" \
- : [tmp] "=&r" (dst) \
- : [addr] "r"(src) \
- : "memory"); \
- } else { \
- asm volatile("ldaxr %x[tmp], [%x[addr]]" \
- : [tmp] "=&r" (dst) \
- : [addr] "r"(src) \
- : "memory"); \
- } }
-
- __LOAD_EXC_64(addr, value, memorder)
+ __RTE_ARM_LOAD_EXC_64(addr, value, memorder)
if (value != expected) {
- __SEVL()
+ __RTE_ARM_SEVL()
do {
- __WFE()
- __LOAD_EXC_64(addr, value, memorder)
+ __RTE_ARM_WFE()
+ __RTE_ARM_LOAD_EXC_64(addr, value, memorder)
} while (value != expected);
}
}
-#undef __LOAD_EXC_64
-#undef __SEVL
-#undef __WFE
+#define rte_wait_event(addr, mask, cond, expected, memorder) \
+do { \
+ RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder)); \
+ RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && \
+ memorder != __ATOMIC_RELAXED); \
+ uint32_t size = sizeof(*(addr)) << 3; \
+ typeof(*(addr)) expected_value = (expected); \
+ typeof(*(addr)) value = 0; \
+ __RTE_ARM_LOAD_EXC((addr), value, memorder, size) \
+ if ((value & (mask)) cond expected_value) { \
+ __RTE_ARM_SEVL() \
+ do { \
+ __RTE_ARM_WFE() \
+ __RTE_ARM_LOAD_EXC((addr), value, memorder, size) \
+ } while ((value & (mask)) cond expected_value); \
+ } \
+} while (0)
#endif
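
For reference, the Arm path of 'rte_wait_until_equal_32' above expands
to roughly the following standalone function. This is only an
illustrative sketch of the monitored-load/WFE pattern, assuming an
aarch64 compiler; the real code keeps the asm inside the macros:

static inline void
wait_word_equal(volatile uint32_t *addr, uint32_t expected)
{
	uint32_t value;

	/*
	 * Exclusive acquire load: returns *addr and marks the
	 * location 'monitored'; a write by another agent clears the
	 * monitor and generates an event that wakes up wfe.
	 */
	asm volatile("ldaxr %w[tmp], [%x[a]]"
		     : [tmp] "=&r" (value)
		     : [a] "r" (addr)
		     : "memory");
	if (value != expected) {
		/* Set the event register so the first wfe returns. */
		asm volatile("sevl" : : : "memory");
		do {
			/* Low-power wait until an event arrives. */
			asm volatile("wfe" : : : "memory");
			asm volatile("ldaxr %w[tmp], [%x[a]]"
				     : [tmp] "=&r" (value)
				     : [a] "r" (addr)
				     : "memory");
		} while (value != expected);
	}
}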
diff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h
index 668ee4a184..d0c5b5a415 100644
--- a/lib/eal/include/generic/rte_pause.h
+++ b/lib/eal/include/generic/rte_pause.h
@@ -111,6 +111,34 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
while (__atomic_load_n(addr, memorder) != expected)
rte_pause();
}
+
+/*
+ * Wait until *addr breaks the condition, with a relaxed memory
+ * ordering model meaning the loads around this API can be reordered.
+ *
+ * @param addr
+ * A pointer to the memory location.
+ * @param mask
+ *  A mask of the value bits of interest.
+ * @param cond
+ *  A symbol representing the condition operator, e.g. == or !=.
+ * @param expected
+ * An expected value to be in the memory location.
+ * @param memorder
+ * Two different memory orders that can be specified:
+ * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
+ * C++11 memory orders with the same names, see the C++11 standard or
+ * the GCC wiki on atomic synchronization for detailed definition.
+ */
+#define rte_wait_event(addr, mask, cond, expected, memorder) \
+do { \
+ RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder)); \
+ RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && \
+ memorder != __ATOMIC_RELAXED); \
+ typeof(*(addr)) expected_value = (expected); \
+ while ((__atomic_load_n((addr), (memorder)) & (mask)) cond expected_value) \
+ rte_pause(); \
+} while (0)
#endif
#endif /* _RTE_PAUSE_H_ */
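
On targets without the Arm-specific path, a call such as
rte_wait_event(&flag, 0x1, !=, 0, __ATOMIC_ACQUIRE) expands to roughly
the following (assuming 'flag' is a uint32_t; the variable name is
hypothetical):

do {
	uint32_t expected_value = 0;
	while ((__atomic_load_n(&flag, __ATOMIC_ACQUIRE) & 0x1)
			!= expected_value)
		rte_pause();
} while (0);

The two RTE_BUILD_BUG_ON checks reject a non-constant or unsupported
memory order at compile time rather than leaving it to a run-time
assert.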
--
2.25.1