[PATCH v4 03/14] eal: use barrier intrinsics
Konstantin Ananyev
konstantin.ananyev at huawei.com
Wed Apr 12 14:37:59 CEST 2023
> Inline assembly is not supported for MSVC x64. Instead, expand
> rte_compiler_barrier as _ReadWriteBarrier, and implement rte_smp_mb
> with the _mm_mfence intrinsic.
>
> Signed-off-by: Tyler Retzlaff <roretzla at linux.microsoft.com>
> ---
> lib/eal/include/generic/rte_atomic.h | 4 ++++
> lib/eal/x86/include/rte_atomic.h | 5 ++++-
> 2 files changed, 8 insertions(+), 1 deletion(-)
>
> diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
> index 234b268..e973184 100644
> --- a/lib/eal/include/generic/rte_atomic.h
> +++ b/lib/eal/include/generic/rte_atomic.h
> @@ -116,9 +116,13 @@
> * Guarantees that operation reordering does not occur at compile time
> * for operations directly before and after the barrier.
> */
> +#ifndef RTE_TOOLCHAIN_MSVC
> #define rte_compiler_barrier() do { \
> asm volatile ("" : : : "memory"); \
> } while(0)
> +#else
> +#define rte_compiler_barrier() _ReadWriteBarrier()
> +#endif
>
> /**
> * Synchronization fence between threads based on the specified memory order.
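
As a sanity check, here is a minimal standalone sketch of the same
selection pattern (my own illustration, not part of the patch;
compiler_barrier(), producer(), data and flag are hypothetical names).
Both expansions are compile-time only: neither emits a fence
instruction, they just stop the compiler from reordering accesses
across the barrier.

#ifdef _MSC_VER
#include <intrin.h>	/* declares _ReadWriteBarrier() */
#define compiler_barrier() _ReadWriteBarrier()
#else
#define compiler_barrier() __asm__ volatile("" : : : "memory")
#endif

static int data;
static int flag;

void
producer(void)
{
	data = 42;
	/* the compiler may not sink the store to data below this
	 * point, nor hoist the store to flag above it */
	compiler_barrier();
	flag = 1;
}
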
> diff --git a/lib/eal/x86/include/rte_atomic.h b/lib/eal/x86/include/rte_atomic.h
> index f2ee1a9..ca733c5 100644
> --- a/lib/eal/x86/include/rte_atomic.h
> +++ b/lib/eal/x86/include/rte_atomic.h
> @@ -28,7 +28,6 @@
> #define rte_rmb() _mm_lfence()
>
> #define rte_smp_wmb() rte_compiler_barrier()
> -
> #define rte_smp_rmb() rte_compiler_barrier()
>
> /*
> @@ -66,11 +65,15 @@
> static __rte_always_inline void
> rte_smp_mb(void)
> {
> +#ifndef RTE_TOOLCHAIN_MSVC
> #ifdef RTE_ARCH_I686
> asm volatile("lock addl $0, -128(%%esp); " ::: "memory");
> #else
> asm volatile("lock addl $0, -128(%%rsp); " ::: "memory");
> #endif
> +#else
> + _mm_mfence();
> +#endif
> }
>
> #define rte_io_mb() rte_mb()
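
And the matching sketch for the full barrier (again my own
illustration; smp_mb(), thread0() and thread1() are hypothetical,
x86-64 shown). On x86 only store-load reordering is observable, so
the store-buffering pattern below is where the fence actually
matters: without smp_mb() both threads may read 0. The locked add is
the cheaper full barrier on the GCC/Clang side, while MSVC gets a
real mfence via _mm_mfence().

#ifdef _MSC_VER
#include <intrin.h>	/* declares _mm_mfence() */
#define smp_mb() _mm_mfence()
#else
/* a locked read-modify-write is a full barrier on x86; adding 0
 * leaves the stack slot unchanged */
#define smp_mb() __asm__ volatile("lock addl $0, -128(%%rsp)" ::: "memory")
#endif

static volatile int x, y;

int
thread0(void)
{
	x = 1;
	smp_mb();	/* order the store to x before the load of y */
	return y;
}

int
thread1(void)
{
	y = 1;
	smp_mb();	/* order the store to y before the load of x */
	return x;
}
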
> --
> 1.8.3.1

Acked-by: Konstantin Ananyev <konstantin.ananyev at huawei.com>