[dpdk-dev] [PATCH v9 2/5] eal: add the APIs to wait until equal

Gavin Hu (Arm Technology China) Gavin.Hu at arm.com
Thu Oct 24 19:00:21 CEST 2019


Hi Konstantin,

> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev at intel.com>
> Sent: Thursday, October 24, 2019 9:52 PM
> To: Gavin Hu (Arm Technology China) <Gavin.Hu at arm.com>;
> dev at dpdk.org
> Cc: nd <nd at arm.com>; david.marchand at redhat.com;
> thomas at monjalon.net; stephen at networkplumber.org;
> hemant.agrawal at nxp.com; jerinj at marvell.com;
> pbhagavatula at marvell.com; Honnappa Nagarahalli
> <Honnappa.Nagarahalli at arm.com>; Ruifeng Wang (Arm Technology China)
> <Ruifeng.Wang at arm.com>; Phil Yang (Arm Technology China)
> <Phil.Yang at arm.com>; Steve Capper <Steve.Capper at arm.com>
> Subject: RE: [PATCH v9 2/5] eal: add the APIs to wait until equal
> 
> Hi Gavin,
> 
> > The rte_wait_until_equal_xx APIs abstract the functionality of
> > 'polling for a memory location to become equal to a given value'.
> >
> > Add the RTE_ARM_USE_WFE configuration entry for aarch64, disabled
> > by default. When it is enabled, the above APIs will use the WFE
> > instruction to save CPU cycles and power.
> >
> > When calling this API on aarch64 from a VM, it may trap in and out to
> > release the vCPU, which causes high exit latency. Since kernel 4.18.20,
> > an adaptive trapping mechanism has been introduced to balance the
> > latency and the workload.
> >
> > Signed-off-by: Gavin Hu <gavin.hu at arm.com>
> > Reviewed-by: Ruifeng Wang <ruifeng.wang at arm.com>
> > Reviewed-by: Steve Capper <steve.capper at arm.com>
> > Reviewed-by: Ola Liljedahl <ola.liljedahl at arm.com>
> > Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli at arm.com>
> > Reviewed-by: Phil Yang <phil.yang at arm.com>
> > Acked-by: Pavan Nikhilesh <pbhagavatula at marvell.com>
> > Acked-by: Jerin Jacob <jerinj at marvell.com>
> > ---
> >  config/arm/meson.build                             |   1 +
> >  config/common_base                                 |   5 +
> >  .../common/include/arch/arm/rte_pause_64.h         |  70 +++++++
> >  lib/librte_eal/common/include/generic/rte_pause.h  | 217 +++++++++++++++++++++
> >  4 files changed, 293 insertions(+)
> >
> > diff --git a/config/arm/meson.build b/config/arm/meson.build
> > index 979018e..b4b4cac 100644
> > --- a/config/arm/meson.build
> > +++ b/config/arm/meson.build
> > @@ -26,6 +26,7 @@ flags_common_default = [
> >  	['RTE_LIBRTE_AVP_PMD', false],
> >
> >  	['RTE_SCHED_VECTOR', false],
> > +	['RTE_ARM_USE_WFE', false],
> >  ]
> >
> >  flags_generic = [
> > diff --git a/config/common_base b/config/common_base
> > index e843a21..c812156 100644
> > --- a/config/common_base
> > +++ b/config/common_base
> > @@ -111,6 +111,11 @@ CONFIG_RTE_MAX_VFIO_CONTAINERS=64
> >  CONFIG_RTE_MALLOC_DEBUG=n
> >  CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
> >  CONFIG_RTE_USE_LIBBSD=n
> > +# Use WFE instructions to implement the rte_wait_until_equal_xxx APIs;
> > +# calling these APIs puts the core in a low power state while waiting
> > +# for the memory address to become equal to the expected value.
> > +# This is supported only on aarch64.
> > +CONFIG_RTE_ARM_USE_WFE=n
> >
> >  #
> >  # Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
> > diff --git a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > index 93895d3..7bc8efb 100644
> > --- a/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > +++ b/lib/librte_eal/common/include/arch/arm/rte_pause_64.h
> > @@ -1,5 +1,6 @@
> >  /* SPDX-License-Identifier: BSD-3-Clause
> >   * Copyright(c) 2017 Cavium, Inc
> > + * Copyright(c) 2019 Arm Limited
> >   */
> >
> >  #ifndef _RTE_PAUSE_ARM64_H_
> > @@ -17,6 +18,75 @@ static inline void rte_pause(void)
> >  	asm volatile("yield" ::: "memory");
> >  }
> >
> > +#ifdef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> > +static inline void rte_sevl(void)
> > +{
> > +	asm volatile("sevl" : : : "memory");
> > +}
> > +
> > +static inline void rte_wfe(void)
> > +{
> > +	asm volatile("wfe" : : : "memory");
> > +}
> > +
> > +static __rte_always_inline uint16_t
> > +__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
> > +{
> > +	uint16_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	if (memorder == __ATOMIC_ACQUIRE)
> > +		asm volatile("ldaxrh %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	else if (memorder == __ATOMIC_RELAXED)
> > +		asm volatile("ldxrh %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint32_t
> > +__atomic_load_ex_32(volatile uint32_t *addr, int memorder)
> > +{
> > +	uint32_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	if (memorder == __ATOMIC_ACQUIRE)
> > +		asm volatile("ldaxr %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	else if (memorder == __ATOMIC_RELAXED)
> > +		asm volatile("ldxr %w[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint64_t
> > +__atomic_load_ex_64(volatile uint64_t *addr, int memorder)
> > +{
> > +	uint64_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	if (memorder == __ATOMIC_ACQUIRE)
> > +		asm volatile("ldaxr %x[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	else if (memorder == __ATOMIC_RELAXED)
> > +		asm volatile("ldxr %x[tmp], [%x[addr]]"
> > +			: [tmp] "=&r" (tmp)
> > +			: [addr] "r"(addr)
> > +			: "memory");
> > +	return tmp;
> > +}
> > +#endif
> > +
> 
> The functions themselves seem good to me...
> But I think there was some misunderstanding about code layout/placement.
> I think arm specific functions and defines need to be defined in arm specific
> headers only.
> But we can still have one instance of rte_wait_until_equal_* for arm.
I will move that part to arm specific headers. 
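To double check my understanding, the arm specific header would then
start with something like the below (just a sketch, the final layout
will come with v10):

	/* lib/librte_eal/common/include/arch/arm/rte_pause_64.h */
	#ifdef RTE_ARM_USE_WFE
	#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
	#endif
	#include "generic/rte_pause.h"

	/* rte_sevl/rte_wfe, the __atomic_load_ex_xx helpers and the WFE
	 * based rte_wait_until_equal_xx implementations all move here,
	 * compiled in only when RTE_ARM_USE_WFE is enabled; the generic
	 * header keeps only the rte_pause() based fallback.
	 */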
/Gavin
> 
> To be more specific, I am talking about something like that here:
> 
> lib/librte_eal/common/include/generic/rte_pause.h:
> ...
> #ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> static __rte_always_inline void
> rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, int memorder)
> {
> 	while (__atomic_load_n(addr, memorder) != expected)
> 		rte_pause();
> }
> ....
> #endif
> ...
> 
> lib/librte_eal/common/include/arch/arm/rte_pause_64.h:
> 
> ...
> #ifdef RTE_ARM_USE_WFE
> #define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> #endif
> #include "generic/rte_pause.h"
> 
> ...
> #ifdef RTE_ARM_USE_WFE
> static inline void rte_sevl(void)
> {
> 	asm volatile("sevl" : : : "memory");
> }
> static inline void rte_wfe(void)
> {
> 	asm volatile("wfe" : : : "memory");
> }
> #else
> static inline void rte_sevl(void)
> {
> }
> static inline void rte_wfe(void)
> {
> 	rte_pause();
> }
Should these arm specific APIs, including the __atomic_load_ex_xxx APIs, get doxygen comments?
These APIs are arm specific and not intended to be exposed, but they live in public files (are arm specific headers considered public?).
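If we do document them, maybe a minimal @internal block is enough,
something like the below (just a sketch)?

	/**
	 * @internal
	 * Exclusive load of a 32-bit value with the memory ordering
	 * given by memorder (__ATOMIC_ACQUIRE or __ATOMIC_RELAXED).
	 * Not part of the public API; only used to implement the
	 * rte_wait_until_equal_xx APIs.
	 */
	static __rte_always_inline uint32_t
	__atomic_load_ex_32(volatile uint32_t *addr, int memorder);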
/Gavin
> ...
> 
> static __rte_always_inline void
> rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected, int memorder)
> {
> 	if (__atomic_load_ex_32(addr, memorder) != expected) {
> 		rte_sevl();
> 		do {
> 			rte_wfe();
> 		} while (__atomic_load_ex_32(addr, memorder) != expected);
> 	}
> }
> 
> #endif
> 
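This layout works for me, I will rework the patch this way in v10. One
note on the flow above: the sevl arms the local event register so the
first wfe falls through immediately, and the exclusive load re-arms the
monitor before the core really sleeps; a store to the monitored address
then generates the event that wakes the wfe.

For reference, a typical call site of the new API would look like the
below (a hypothetical example, not part of this patch; sync is assumed
to point to a 32-bit flag shared with another lcore):

	/* Spin (or sleep, with WFE enabled) until the other lcore
	 * sets *sync to 1. Relaxed ordering is enough here because
	 * no data is published through this flag in this example.
	 */
	rte_wait_until_equal_32(sync, 1, __ATOMIC_RELAXED);

/Gavin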
> 
> >  #ifdef __cplusplus
> >  }
> >  #endif
> > diff --git a/lib/librte_eal/common/include/generic/rte_pause.h b/lib/librte_eal/common/include/generic/rte_pause.h
> > index 52bd4db..4db44f9 100644
> > --- a/lib/librte_eal/common/include/generic/rte_pause.h
> > +++ b/lib/librte_eal/common/include/generic/rte_pause.h
> > @@ -1,5 +1,6 @@
> >  /* SPDX-License-Identifier: BSD-3-Clause
> >   * Copyright(c) 2017 Cavium, Inc
> > + * Copyright(c) 2019 Arm Limited
> >   */
> >
> >  #ifndef _RTE_PAUSE_H_
> > @@ -12,6 +13,12 @@
> >   *
> >   */
> >
> > +#include <stdint.h>
> > +#include <rte_common.h>
> > +#include <rte_atomic.h>
> > +#include <rte_compat.h>
> > +#include <assert.h>
> > +
> >  /**
> >   * Pause CPU execution for a short while
> >   *
> > @@ -20,4 +27,214 @@
> >   */
> >  static inline void rte_pause(void);
> >
> > +static inline void rte_sevl(void);
> > +static inline void rte_wfe(void);
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Atomic load from addr; it returns the 16-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the C++11
> > + *  standard or the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint16_t
> > +__atomic_load_ex_16(volatile uint16_t *addr, int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Atomic load from addr; it returns the 32-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the C++11
> > + *  standard or the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint32_t
> > +__atomic_load_ex_32(volatile uint32_t *addr, int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Atomic load from addr; it returns the 64-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the C++11
> > + *  standard or the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint64_t
> > +__atomic_load_ex_64(volatile uint64_t *addr, int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Wait for *addr to be updated with a 16-bit expected value, with the
> > + * memory ordering model specified by memorder (with __ATOMIC_RELAXED,
> > + * the loads around this API can be reordered).
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param expected
> > + *  A 16-bit expected value to be in the memory location.
> > + * @param memorder
> > + *  Two different memory orders that can be specified:
> > + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > + *  C++11 memory orders with the same names, see the C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +__rte_experimental
> > +static __rte_always_inline void
> > +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > +int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Wait for *addr to be updated with a 32-bit expected value, with the
> > + * memory ordering model specified by memorder (with __ATOMIC_RELAXED,
> > + * the loads around this API can be reordered).
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param expected
> > + *  A 32-bit expected value to be in the memory location.
> > + * @param memorder
> > + *  Two different memory orders that can be specified:
> > + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > + *  C++11 memory orders with the same names, see the C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +__rte_experimental
> > +static __rte_always_inline void
> > +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> > +int memorder);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Wait for *addr to be updated with a 64-bit expected value, with the
> > + * memory ordering model specified by memorder (with __ATOMIC_RELAXED,
> > + * the loads around this API can be reordered).
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param expected
> > + *  A 64-bit expected value to be in the memory location.
> > + * @param memorder
> > + *  Two different memory orders that can be specified:
> > + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > + *  C++11 memory orders with the same names, see the C++11 standard or
> > + *  the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +__rte_experimental
> > +static __rte_always_inline void
> > +rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > +int memorder);
> > +
> > +#ifdef RTE_ARM_USE_WFE
> > +#define RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> > +#endif
> > +
> > +#ifndef RTE_WAIT_UNTIL_EQUAL_ARCH_DEFINED
> > +static inline void rte_sevl(void)
> > +{
> > +}
> > +
> > +static inline void rte_wfe(void)
> > +{
> > +	rte_pause();
> > +}
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
> > + *
> > + * Atomic load from addr; it returns the 16-bit content of *addr.
> > + *
> > + * @param addr
> > + *  A pointer to the memory location.
> > + * @param memorder
> > + *  The valid memory order variants are __ATOMIC_ACQUIRE and __ATOMIC_RELAXED.
> > + *  These map to C++11 memory orders with the same names, see the C++11
> > + *  standard or the GCC wiki on atomic synchronization for detailed definitions.
> > + */
> > +static __rte_always_inline uint16_t
> > +__atomic_load_ex_16(volatile uint16_t *addr, int memorder)
> > +{
> > +	uint16_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	tmp = __atomic_load_n(addr, memorder);
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint32_t
> > +__atomic_load_ex_32(volatile uint32_t *addr, int memorder)
> > +{
> > +	uint32_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	tmp = __atomic_load_n(addr, memorder);
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline uint64_t
> > +__atomic_load_ex_64(volatile uint64_t *addr, int memorder)
> > +{
> > +	uint64_t tmp;
> > +	assert((memorder == __ATOMIC_ACQUIRE)
> > +			|| (memorder == __ATOMIC_RELAXED));
> > +	tmp = __atomic_load_n(addr, memorder);
> > +	return tmp;
> > +}
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > +int memorder)
> > +{
> > +	if (__atomic_load_ex_16(addr, memorder) != expected) {
> > +		rte_sevl();
> > +		do {
> > +			rte_wfe();
> > +		} while (__atomic_load_ex_16(addr, memorder) != expected);
> > +	}
> > +}
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> > +int memorder)
> > +{
> > +	if (__atomic_load_ex_32(addr, memorder) != expected) {
> > +		rte_sevl();
> > +		do {
> > +			rte_wfe();
> > +		} while (__atomic_load_ex_32(addr, memorder) != expected);
> > +	}
> > +}
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > +int memorder)
> > +{
> > +	if (__atomic_load_ex_64(addr, memorder) != expected) {
> > +		rte_sevl();
> > +		do {
> > +			rte_wfe();
> > +		} while (__atomic_load_ex_64(addr, memorder) != expected);
> > +	}
> > +}
> > +#endif
> > +
> >  #endif /* _RTE_PAUSE_H_ */
> > --
> > 2.7.4


