[PATCH 3/9] eal: use barrier intrinsics when compiling with msvc
Tyler Retzlaff
roretzla at linux.microsoft.com
Mon Apr 3 23:52:25 CEST 2023
Inline assembly is not supported by MSVC for x64; use the
_{Read,Write,ReadWrite}Barrier() intrinsics instead.
Signed-off-by: Tyler Retzlaff <roretzla at linux.microsoft.com>
---
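For reviewers less familiar with the MSVC side: _ReadBarrier(),
_WriteBarrier() and _ReadWriteBarrier() constrain only the compiler and
emit no fence instruction, which matches the semantics of the GCC asm
memory clobber they replace. A minimal standalone sketch of the pattern
being applied (not the patched header itself; RTE_TOOLCHAIN_MSVC is the
guard defined when building DPDK with MSVC):

#ifdef RTE_TOOLCHAIN_MSVC
#include <intrin.h>
/* compile-time ordering only; no fence instruction is emitted */
#define compiler_barrier() _ReadWriteBarrier()
#else
#define compiler_barrier() asm volatile ("" : : : "memory")
#endif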
lib/eal/include/generic/rte_atomic.h | 4 ++++
lib/eal/x86/include/rte_atomic.h | 10 +++++++++-
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
index 234b268..e973184 100644
--- a/lib/eal/include/generic/rte_atomic.h
+++ b/lib/eal/include/generic/rte_atomic.h
@@ -116,9 +116,13 @@
* Guarantees that operation reordering does not occur at compile time
* for operations directly before and after the barrier.
*/
+#ifndef RTE_TOOLCHAIN_MSVC
#define rte_compiler_barrier() do { \
asm volatile ("" : : : "memory"); \
} while(0)
+#else
+#define rte_compiler_barrier() _ReadWriteBarrier()
+#endif
/**
* Synchronization fence between threads based on the specified memory order.
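As a usage illustration (hypothetical snippet, not part of the patch): a
compiler barrier is sufficient when ordering only has to hold against
code on the same core, e.g. a signal handler, since a CPU always
observes its own stores in program order:

static int payload;
static int ready;

void publish(int v)
{
	payload = v;
	/* keep the compiler from sinking the payload store below the
	 * flag store; the CPU itself sees its own program order */
	rte_compiler_barrier();
	ready = 1;
}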
diff --git a/lib/eal/x86/include/rte_atomic.h b/lib/eal/x86/include/rte_atomic.h
index f2ee1a9..5cce9ba 100644
--- a/lib/eal/x86/include/rte_atomic.h
+++ b/lib/eal/x86/include/rte_atomic.h
@@ -27,9 +27,13 @@
#define rte_rmb() _mm_lfence()
+#ifndef RTE_TOOLCHAIN_MSVC
#define rte_smp_wmb() rte_compiler_barrier()
-
#define rte_smp_rmb() rte_compiler_barrier()
+#else
+#define rte_smp_wmb() _WriteBarrier()
+#define rte_smp_rmb() _ReadBarrier()
+#endif
/*
* From Intel Software Development Manual; Vol 3;
@@ -66,11 +70,15 @@
static __rte_always_inline void
rte_smp_mb(void)
{
+#ifndef RTE_TOOLCHAIN_MSVC
#ifdef RTE_ARCH_I686
asm volatile("lock addl $0, -128(%%esp); " ::: "memory");
#else
asm volatile("lock addl $0, -128(%%rsp); " ::: "memory");
#endif
+#else
+ rte_compiler_barrier();
+#endif
}
#define rte_io_mb() rte_mb()
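For usage context (hypothetical snippet, not part of the patch): on
x86's TSO memory model stores are not reordered with earlier stores and
loads are not reordered with earlier loads, which is why rte_smp_wmb()
and rte_smp_rmb() only need to restrain the compiler. Only rte_smp_mb()
has to order stores against later loads, which the GCC path above does
with a locked add:

static int data;
static volatile int flag;

void producer(int v)
{
	data = v;
	rte_smp_wmb();	/* data store ordered before the flag store */
	flag = 1;
}

int consumer(void)
{
	while (flag == 0)
		;	/* spin until the producer publishes */
	rte_smp_rmb();	/* flag load ordered before the data load */
	return data;
}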
--
1.8.3.1