[dpdk-dev] Re: [PATCH v4 1/5] eal: add new definitions for wait scheme

Feifei Wang Feifei.Wang2 at arm.com
Tue Oct 26 03:08:51 CEST 2021



> -----Original Message-----
> From: Ananyev, Konstantin <konstantin.ananyev at intel.com>
> Sent: Monday, October 25, 2021 10:29 PM
> To: Feifei Wang <Feifei.Wang2 at arm.com>; Ruifeng Wang
> <Ruifeng.Wang at arm.com>
> Cc: dev at dpdk.org; nd <nd at arm.com>; nd <nd at arm.com>
> Subject: RE: [PATCH v4 1/5] eal: add new definitions for wait scheme
> 
> 
> > > > Introduce macros as generic interface for address monitoring.
> > > >
> > > > Signed-off-by: Feifei Wang <feifei.wang2 at arm.com>
> > > > Reviewed-by: Ruifeng Wang <ruifeng.wang at arm.com>
> > > > ---
> > > >  lib/eal/arm/include/rte_pause_64.h  | 126 ++++++++++++++++------------
> > > >  lib/eal/include/generic/rte_pause.h |  32 +++++++
> > > >  2 files changed, 104 insertions(+), 54 deletions(-)
> > > >
> > > > diff --git a/lib/eal/arm/include/rte_pause_64.h
> > > > b/lib/eal/arm/include/rte_pause_64.h
> > > > index e87d10b8cc..23954c2de2 100644
> > > > --- a/lib/eal/arm/include/rte_pause_64.h
> > > > +++ b/lib/eal/arm/include/rte_pause_64.h
> > > > @@ -31,20 +31,12 @@ static inline void rte_pause(void)
> > > >  /* Put processor into low power WFE(Wait For Event) state. */
> > > > #define __WFE() { asm volatile("wfe" : : : "memory"); }
> > > >
> > > > -static __rte_always_inline void
> > > > -rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > > > -		int memorder)
> > > > -{
> > > > -	uint16_t value;
> > > > -
> > > > -	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
> > > > -
> > > > -	/*
> > > > -	 * Atomic exclusive load from addr, it returns the 16-bit content of
> > > > -	 * *addr while making it 'monitored',when it is written by someone
> > > > -	 * else, the 'monitored' state is cleared and a event is generated
> > > > -	 * implicitly to exit WFE.
> > > > -	 */
> > > > +/*
> > > > + * Atomic exclusive load from addr, it returns the 16-bit content of
> > > > + * *addr while making it 'monitored'; when it is written by someone
> > > > + * else, the 'monitored' state is cleared and an event is generated
> > > > + * implicitly to exit WFE.
> > > > + */
> > > >  #define __LOAD_EXC_16(src, dst, memorder) {               \
> > > >  	if (memorder == __ATOMIC_RELAXED) {               \
> > > >  		asm volatile("ldxrh %w[tmp], [%x[addr]]"  \
> > > > @@ -58,6 +50,52 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > > >  			: "memory");                      \
> > > >  	} }
> > > >
> > > > +/*
> > > > + * Atomic exclusive load from addr, it returns the 32-bit content of
> > > > + * *addr while making it 'monitored'; when it is written by someone
> > > > + * else, the 'monitored' state is cleared and an event is generated
> > > > + * implicitly to exit WFE.
> > > > + */
> > > > +#define __LOAD_EXC_32(src, dst, memorder) {              \
> > > > +	if (memorder == __ATOMIC_RELAXED) {              \
> > > > +		asm volatile("ldxr %w[tmp], [%x[addr]]"  \
> > > > +			: [tmp] "=&r" (dst)              \
> > > > +			: [addr] "r"(src)                \
> > > > +			: "memory");                     \
> > > > +	} else {                                         \
> > > > +		asm volatile("ldaxr %w[tmp], [%x[addr]]" \
> > > > +			: [tmp] "=&r" (dst)              \
> > > > +			: [addr] "r"(src)                \
> > > > +			: "memory");                     \
> > > > +	} }
> > > > +
> > > > +/*
> > > > + * Atomic exclusive load from addr, it returns the 64-bit content of
> > > > + * *addr while making it 'monitored'; when it is written by someone
> > > > + * else, the 'monitored' state is cleared and an event is generated
> > > > + * implicitly to exit WFE.
> > > > + */
> > > > +#define __LOAD_EXC_64(src, dst, memorder) {              \
> > > > +	if (memorder == __ATOMIC_RELAXED) {              \
> > > > +		asm volatile("ldxr %x[tmp], [%x[addr]]"  \
> > > > +			: [tmp] "=&r" (dst)              \
> > > > +			: [addr] "r"(src)                \
> > > > +			: "memory");                     \
> > > > +	} else {                                         \
> > > > +		asm volatile("ldaxr %x[tmp], [%x[addr]]" \
> > > > +			: [tmp] "=&r" (dst)              \
> > > > +			: [addr] "r"(src)                \
> > > > +			: "memory");                     \
> > > > +	} }
> > > > +
> > > > +static __rte_always_inline void
> > > > +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > > > +		int memorder)
> > > > +{
> > > > +	uint16_t value;
> > > > +
> > > > +	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
> > > > +
> > > >  	__LOAD_EXC_16(addr, value, memorder)
> > > >  	if (value != expected) {
> > > >  		__SEVL()
> > > > @@ -66,7 +104,6 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > > >  			__LOAD_EXC_16(addr, value, memorder)
> > > >  		} while (value != expected);
> > > >  	}
> > > > -#undef __LOAD_EXC_16
> > > >  }
> > > >
> > > >  static __rte_always_inline void
> > > > @@ -77,25 +114,6 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> > > >
> > > >  	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
> > > >
> > > > -	/*
> > > > -	 * Atomic exclusive load from addr, it returns the 32-bit content of
> > > > -	 * *addr while making it 'monitored',when it is written by someone
> > > > -	 * else, the 'monitored' state is cleared and a event is generated
> > > > -	 * implicitly to exit WFE.
> > > > -	 */
> > > > -#define __LOAD_EXC_32(src, dst, memorder) {              \
> > > > -	if (memorder == __ATOMIC_RELAXED) {              \
> > > > -		asm volatile("ldxr %w[tmp], [%x[addr]]"  \
> > > > -			: [tmp] "=&r" (dst)              \
> > > > -			: [addr] "r"(src)                \
> > > > -			: "memory");                     \
> > > > -	} else {                                         \
> > > > -		asm volatile("ldaxr %w[tmp], [%x[addr]]" \
> > > > -			: [tmp] "=&r" (dst)              \
> > > > -			: [addr] "r"(src)                \
> > > > -			: "memory");                     \
> > > > -	} }
> > > > -
> > > >  	__LOAD_EXC_32(addr, value, memorder)
> > > >  	if (value != expected) {
> > > >  		__SEVL()
> > > > @@ -104,7 +122,6 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> > > >  			__LOAD_EXC_32(addr, value, memorder)
> > > >  		} while (value != expected);
> > > >  	}
> > > > -#undef __LOAD_EXC_32
> > > >  }
> > > >
> > > >  static __rte_always_inline void
> > > > @@ -115,25 +132,6 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > > >
> > > >  	assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
> > > >
> > > > -	/*
> > > > -	 * Atomic exclusive load from addr, it returns the 64-bit content of
> > > > -	 * *addr while making it 'monitored',when it is written by someone
> > > > -	 * else, the 'monitored' state is cleared and a event is generated
> > > > -	 * implicitly to exit WFE.
> > > > -	 */
> > > > -#define __LOAD_EXC_64(src, dst, memorder) {              \
> > > > -	if (memorder == __ATOMIC_RELAXED) {              \
> > > > -		asm volatile("ldxr %x[tmp], [%x[addr]]"  \
> > > > -			: [tmp] "=&r" (dst)              \
> > > > -			: [addr] "r"(src)                \
> > > > -			: "memory");                     \
> > > > -	} else {                                         \
> > > > -		asm volatile("ldaxr %x[tmp], [%x[addr]]" \
> > > > -			: [tmp] "=&r" (dst)              \
> > > > -			: [addr] "r"(src)                \
> > > > -			: "memory");                     \
> > > > -	} }
> > > > -
> > > >  	__LOAD_EXC_64(addr, value, memorder)
> > > >  	if (value != expected) {
> > > >  		__SEVL()
> > > > @@ -143,6 +141,26 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > > >  		} while (value != expected);
> > > >  	}
> > > >  }
> > > > +
> > > > +#define rte_wait_event(addr, mask, expected, cond, memorder, size) \
> > > > +do {                                                               \
> > > > +	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));         \
> > > > +	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&           \
> > > > +	memorder != __ATOMIC_RELAXED);                             \
> > > > +	RTE_BUILD_BUG_ON(size != 16 && size != 32 && size != 64);  \
> > > > +	uint##size##_t value;                                      \
> > > > +	__LOAD_EXC_##size(addr, value, memorder)                   \
> > > > +	if ((value & mask) cond expected) {		           \
> > > > +		__SEVL()                                           \
> > > > +		do {                                               \
> > > > +			__WFE()                                    \
> > > > +			__LOAD_EXC_##size(addr, value, memorder)   \
> > > > +		} while ((value & mask) cond expected);            \
> > > > +	}                                                          \
> > > > +} while (0)
> > > > +
> > > > +#undef __LOAD_EXC_16
> > > > +#undef __LOAD_EXC_32
> > > >  #undef __LOAD_EXC_64
> > > >
> > > >  #undef __SEVL
> > > > diff --git a/lib/eal/include/generic/rte_pause.h
> > > > b/lib/eal/include/generic/rte_pause.h
> > > > index 668ee4a184..20a5d2a9fd 100644
> > > > --- a/lib/eal/include/generic/rte_pause.h
> > > > +++ b/lib/eal/include/generic/rte_pause.h
> > > > @@ -111,6 +111,38 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > > >  	while (__atomic_load_n(addr, memorder) != expected)
> > > >  		rte_pause();
> > > >  }
> > > > +
> > > > +/*
> > > > + * Wait until *addr breaks the condition, with a relaxed memory
> > > > + * ordering model meaning the loads around this API can be reordered.
> > > > + *
> > > > + * @param addr
> > > > + *  A pointer to the memory location.
> > > > + * @param mask
> > > > + *  A mask of the value bits of interest.
> > > > + * @param expected
> > > > + *  An expected value to be in the memory location.
> > > > + * @param cond
> > > > + *  A symbol representing the condition (==, !=).
> > > > + * @param memorder
> > > > + *  Two different memory orders that can be specified:
> > > > + *  __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > > > + *  C++11 memory orders with the same names, see the C++11 standard or
> > > > + *  the GCC wiki on atomic synchronization for detailed definition.
> > > > + * @param size
> > > > + *  The bit size of *addr; it is used on the Arm architecture to
> > > > + *  choose the load instruction, and the allowed values are 16, 32
> > > > + *  and 64.
> > > > + */
> > > > +#define rte_wait_event(addr, mask, expected, cond, memorder, size)    \
> > > > +do {                                                                   \
> > > > +	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));             \
> > > > +	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&               \
> > > > +				memorder != __ATOMIC_RELAXED);         \
> > > > +	RTE_BUILD_BUG_ON(size != 16 && size != 32 && size != 64);      \
> > >
> > > I don't really understand why you need 'size' passed as a parameter.
> > > Can't it be:
> > > size_t size = sizeof(*(addr));
> > > And then:
> > > RTE_BUILD_BUG_ON(size != sizeof(uint16_t) && size !=
> > > sizeof(uint32_t) && size != sizeof(uint64_t)); ?
> > >
> > > > +	while ((__atomic_load_n(addr, memorder) & mask) cond expected) \
> > > > +		rte_pause();                                           \
> > >
> > > Just to repeat my own comment from the previous version review:
> > > put () around macro parameters in the macro body.
> > > It will save you from a lot of unexpected trouble.
> >
> > Sorry, I didn't catch the point.
> > In this version, I first wanted to use '__LOAD_EXC_##size' to choose the
> > load macro, so I used size as a parameter. In the next version, I will
> > update this as:
> >
> > #define __LOAD_EXC(src, dst, memorder, size) {    \
> > 	if (size == 16)                               \
> > 		__LOAD_EXC_16(src, dst, memorder)     \
> > 	else if (size == 32)                          \
> > 		__LOAD_EXC_32(src, dst, memorder)     \
> > 	else if (size == 64)                          \
> > 		__LOAD_EXC_64(src, dst, memorder)     \
> > }
> >
> > #define rte_wait_event(addr, mask, cond, expected, memorder)    \
> > do {                                                            \
> > 	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));      \
> > 	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&        \
> > 				memorder != __ATOMIC_RELAXED);  \
> > 	uint32_t size = sizeof(*addr) << 3;                     \
> > 	typeof(*addr) value = 0;                                \
> > 	__LOAD_EXC(addr, value, memorder, size)                 \
> > 	if ((value & mask) cond expected) {                     \
> > 		__SEVL()                                        \
> > 		do {                                            \
> > 			__WFE()                                 \
> > 			__LOAD_EXC(addr, value, memorder, size) \
> > 		} while ((value & mask) cond expected);         \
> > 	}                                                       \
> > } while (0)
> 
> Sorry, I probably wasn't clear enough.
> I meant use '(' ')' around macro arguments (to avoid unexpected side effects
> with operator precedence and associativity):
> uint32_t size = sizeof(*(addr)) ...;
> ...
> if ((value & (mask)) cond (expected))
> ...
That's OK. So in the next version, I will make the following changes:
1. size will no longer be passed as a parameter.
2. I will add '()' around macro arguments (see the sketches below).
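
To make point 2 concrete, here is a minimal illustration (a hypothetical
macro pair, not part of the patch) of why the '()' matter: '&' binds
tighter than '|', so a compound mask argument is mis-parsed without them.

	/* Hypothetical illustration only, not part of the patch. */
	#define MASKED_BAD(value, mask)  ((value) & mask)
	#define MASKED_GOOD(value, mask) ((value) & (mask))

	MASKED_BAD(v, 0x1 | 0x2)   /* -> ((v) & 0x1 | 0x2), which parses as
	                            * (((v) & 0x1) | 0x2): the wrong mask. */
	MASKED_GOOD(v, 0x1 | 0x2)  /* -> ((v) & (0x1 | 0x2)): correct. */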

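For reference, a minimal usage sketch of the v5 signature proposed above
(the flag variable and the helper function are hypothetical, only for
illustration):

	#include <stdint.h>
	#include <rte_pause.h>

	/* Hypothetical 32-bit flag set by this lcore, cleared by another. */
	static volatile uint32_t flag = 1;

	static void
	wait_for_flag_clear(void)
	{
		/* Spin (WFE on aarch64, rte_pause() on other architectures)
		 * while (flag & 0x1) != 0, i.e. return once bit 0 is cleared.
		 */
		rte_wait_event(&flag, 0x1, !=, 0, __ATOMIC_RELAXED);
	}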