[dpdk-dev] Re: [PATCH v4 1/5] eal: add new definitions for wait scheme
Feifei Wang
Feifei.Wang2 at arm.com
Mon Oct 25 11:30:56 CEST 2021
> -----Original Message-----
> From: Jerin Jacob <jerinjacobk at gmail.com>
> Sent: Friday, October 22, 2021 8:10 AM
> To: Feifei Wang <Feifei.Wang2 at arm.com>
> Cc: Ruifeng Wang <Ruifeng.Wang at arm.com>; Ananyev, Konstantin
> <konstantin.ananyev at intel.com>; dpdk-dev <dev at dpdk.org>; nd
> <nd at arm.com>
> Subject: Re: [dpdk-dev] [PATCH v4 1/5] eal: add new definitions for wait scheme
>
> On Wed, Oct 20, 2021 at 2:16 PM Feifei Wang <feifei.wang2 at arm.com>
> wrote:
> >
> > Introduce macros as generic interface for address monitoring.
> >
> > Signed-off-by: Feifei Wang <feifei.wang2 at arm.com>
> > Reviewed-by: Ruifeng Wang <ruifeng.wang at arm.com>
> > ---
> > lib/eal/arm/include/rte_pause_64.h  | 126 ++++++++++++++++------------
> > lib/eal/include/generic/rte_pause.h |  32 +++++++
> > 2 files changed, 104 insertions(+), 54 deletions(-)
> >
> > diff --git a/lib/eal/arm/include/rte_pause_64.h b/lib/eal/arm/include/rte_pause_64.h
> > index e87d10b8cc..23954c2de2 100644
> > --- a/lib/eal/arm/include/rte_pause_64.h
> > +++ b/lib/eal/arm/include/rte_pause_64.h
> > @@ -31,20 +31,12 @@ static inline void rte_pause(void)
> > /* Put processor into low power WFE(Wait For Event) state. */
> > #define __WFE() { asm volatile("wfe" : : : "memory"); }
> >
> > -static __rte_always_inline void
> > -rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > - int memorder)
> > -{
> > - uint16_t value;
> > -
> > - assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
> > -
> > - /*
> > - * Atomic exclusive load from addr, it returns the 16-bit content of
> > - * *addr while making it 'monitored',when it is written by someone
> > - * else, the 'monitored' state is cleared and a event is generated
>
> a event -> an event in all occurrences.
>
> > - * implicitly to exit WFE.
> > - */
> > +/*
> > + * Atomic exclusive load from addr, it returns the 16-bit content of
> > + * *addr while making it 'monitored', when it is written by someone
> > + * else, the 'monitored' state is cleared and a event is generated
> > + * implicitly to exit WFE.
> > + */
> > #define __LOAD_EXC_16(src, dst, memorder) { \
> > if (memorder == __ATOMIC_RELAXED) { \
> > asm volatile("ldxrh %w[tmp], [%x[addr]]" \ @@ -58,6
> > +50,52 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t
> expected,
> > : "memory"); \
> > } }
> >
> > +/*
> > + * Atomic exclusive load from addr, it returns the 32-bit content of
> > + * *addr while making it 'monitored', when it is written by someone
> > + * else, the 'monitored' state is cleared and a event is generated
> > + * implicitly to exit WFE.
> > + */
> > +#define __LOAD_EXC_32(src, dst, memorder) { \
> > + if (memorder == __ATOMIC_RELAXED) { \
> > + asm volatile("ldxr %w[tmp], [%x[addr]]" \
> > + : [tmp] "=&r" (dst) \
> > + : [addr] "r"(src) \
> > + : "memory"); \
> > + } else { \
> > + asm volatile("ldaxr %w[tmp], [%x[addr]]" \
> > + : [tmp] "=&r" (dst) \
> > + : [addr] "r"(src) \
> > + : "memory"); \
> > + } }
> > +
> > +/*
> > + * Atomic exclusive load from addr, it returns the 64-bit content of
> > + * *addr while making it 'monitored', when it is written by someone
> > + * else, the 'monitored' state is cleared and a event is generated
> > + * implicitly to exit WFE.
> > + */
> > +#define __LOAD_EXC_64(src, dst, memorder) { \
> > + if (memorder == __ATOMIC_RELAXED) { \
> > + asm volatile("ldxr %x[tmp], [%x[addr]]" \
> > + : [tmp] "=&r" (dst) \
> > + : [addr] "r"(src) \
> > + : "memory"); \
> > + } else { \
> > + asm volatile("ldaxr %x[tmp], [%x[addr]]" \
> > + : [tmp] "=&r" (dst) \
> > + : [addr] "r"(src) \
> > + : "memory"); \
> > + } }
> > +
> > +static __rte_always_inline void
> > +rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > + int memorder)
> > +{
> > + uint16_t value;
> > +
> > + assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
> > +
> > __LOAD_EXC_16(addr, value, memorder)
> > if (value != expected) {
> > __SEVL()
> > @@ -66,7 +104,6 @@ rte_wait_until_equal_16(volatile uint16_t *addr, uint16_t expected,
> > __LOAD_EXC_16(addr, value, memorder)
> > } while (value != expected);
> > }
> > -#undef __LOAD_EXC_16
> > }
> >
> > static __rte_always_inline void
> > @@ -77,25 +114,6 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> >
> > assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
> >
> > - /*
> > - * Atomic exclusive load from addr, it returns the 32-bit content of
> > - * *addr while making it 'monitored',when it is written by someone
> > - * else, the 'monitored' state is cleared and a event is generated
> > - * implicitly to exit WFE.
> > - */
> > -#define __LOAD_EXC_32(src, dst, memorder) { \
> > - if (memorder == __ATOMIC_RELAXED) { \
> > - asm volatile("ldxr %w[tmp], [%x[addr]]" \
> > - : [tmp] "=&r" (dst) \
> > - : [addr] "r"(src) \
> > - : "memory"); \
> > - } else { \
> > - asm volatile("ldaxr %w[tmp], [%x[addr]]" \
> > - : [tmp] "=&r" (dst) \
> > - : [addr] "r"(src) \
> > - : "memory"); \
> > - } }
> > -
> > __LOAD_EXC_32(addr, value, memorder)
> > if (value != expected) {
> > __SEVL()
> > @@ -104,7 +122,6 @@ rte_wait_until_equal_32(volatile uint32_t *addr, uint32_t expected,
> > __LOAD_EXC_32(addr, value, memorder)
> > } while (value != expected);
> > }
> > -#undef __LOAD_EXC_32
> > }
> >
> > static __rte_always_inline void
> > @@ -115,25 +132,6 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> >
> > assert(memorder == __ATOMIC_ACQUIRE || memorder == __ATOMIC_RELAXED);
> >
> > - /*
> > - * Atomic exclusive load from addr, it returns the 64-bit content of
> > - * *addr while making it 'monitored',when it is written by someone
> > - * else, the 'monitored' state is cleared and a event is generated
> > - * implicitly to exit WFE.
> > - */
> > -#define __LOAD_EXC_64(src, dst, memorder) { \
> > - if (memorder == __ATOMIC_RELAXED) { \
> > - asm volatile("ldxr %x[tmp], [%x[addr]]" \
> > - : [tmp] "=&r" (dst) \
> > - : [addr] "r"(src) \
> > - : "memory"); \
> > - } else { \
> > - asm volatile("ldaxr %x[tmp], [%x[addr]]" \
> > - : [tmp] "=&r" (dst) \
> > - : [addr] "r"(src) \
> > - : "memory"); \
> > - } }
> > -
> > __LOAD_EXC_64(addr, value, memorder)
> > if (value != expected) {
> > __SEVL()
> > @@ -143,6 +141,26 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > } while (value != expected);
> > }
> > }
> > +
> > +#define rte_wait_event(addr, mask, expected, cond, memorder, size) \
>
> I think it is better to swap the "cond" and "expected" positions for better
> readability.
Thanks for the comments. That reads better than before; I will update it in the next version.
>
> rte_wait_event(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK, 0, !=,
> __ATOMIC_RELAXED, 64);
>
> Vs
>
> rte_wait_event(&buf->bufptr64, RTE_DISTRIB_FLAGS_MASK, !=, 0,
> __ATOMIC_RELAXED, 64);
>
> > +do { \
>
> Any reason not to make this an inline function instead of a macro?
Because otherwise there would be many new APIs for the different cases. We also referred to
the Linux 'wait_event' code as an example. Please see the first version and its discussion:
http://patches.dpdk.org/project/dpdk/cover/20210902053253.3017858-1-feifei.wang2@arm.com/
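To illustrate the point (a rough sketch with hypothetical names, not code from this patch):
a C function cannot take the comparison operator or the bit width as a parameter, so an
inline-function design would need one function per size and per condition, e.g.:

static __rte_always_inline void
rte_wait_event_32_ne(volatile uint32_t *addr, uint32_t mask,
		uint32_t expected, int memorder)
{
	/* Wait while the masked value still differs from 'expected'. */
	while ((__atomic_load_n(addr, memorder) & mask) != expected)
		rte_pause();
}
/* ...plus '==' variants and 16-/64-bit variants, and so on. */

The macro, like the kernel's wait_event(), folds all of these into one interface by taking
the condition as a token.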
>
> > + RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder)); \
>
> Shouldn't we add a __builtin_constant_p(size) check?
Please see the discussion with Konstantin.
'size' will not be a parameter in the next version, so it is unnecessary to check it with RTE_BUILD_BUG_ON.
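For reference, one possible shape of the generic path without 'size' (only a sketch of that
direction, using the argument order suggested above, not the final patch):

/*
 * __atomic_load_n() infers the access width from the pointed-to type
 * of 'addr', so no explicit size parameter or size check is needed.
 */
#define rte_wait_event(addr, mask, cond, expected, memorder)       \
do {                                                                \
	RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder));          \
	RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE &&            \
				memorder != __ATOMIC_RELAXED);      \
	while ((__atomic_load_n(addr, memorder) & (mask))           \
			cond (expected))                            \
		rte_pause();                                        \
} while (0)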
>
> > + RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && \
> > + memorder != __ATOMIC_RELAXED); \
> > + RTE_BUILD_BUG_ON(size != 16 && size != 32 && size != 64); \
> > + uint##size_t value; \
> > + __LOAD_EXC_##size(addr, value, memorder) \
> > + if ((value & mask) cond expected) { \
> > + __SEVL() \
> > + do { \
> > + __WFE() \
> > + __LOAD_EXC_##size(addr, value, memorder) \
> > + } while ((value & mask) cond expected); \
> > + } \
> > +} while (0)
> > +
> > +#undef __LOAD_EXC_16
> > +#undef __LOAD_EXC_32
> > #undef __LOAD_EXC_64
> >
> > #undef __SEVL
> > diff --git a/lib/eal/include/generic/rte_pause.h b/lib/eal/include/generic/rte_pause.h
> > index 668ee4a184..20a5d2a9fd 100644
> > --- a/lib/eal/include/generic/rte_pause.h
> > +++ b/lib/eal/include/generic/rte_pause.h
> > @@ -111,6 +111,38 @@ rte_wait_until_equal_64(volatile uint64_t *addr, uint64_t expected,
> > while (__atomic_load_n(addr, memorder) != expected)
> > rte_pause();
> > }
> > +
> > +/*
> > + * Wait until *addr breaks the condition, with a relaxed memory
> > + * ordering model meaning the loads around this API can be reordered.
> > + *
> > + * @param addr
> > + * A pointer to the memory location.
> > + * @param mask
> > + * A mask of value bits in interest.
> > + * @param expected
> > + * A 16-bit expected value to be in the memory location.
> > + * @param cond
> > + * A symbol representing the condition (==, !=).
> > + * @param memorder
> > + * Two different memory orders that can be specified:
> > + * __ATOMIC_ACQUIRE and __ATOMIC_RELAXED. These map to
> > + * C++11 memory orders with the same names, see the C++11 standard or
> > + * the GCC wiki on atomic synchronization for detailed definition.
> > + * @param size
> > + * The bit size of *addr:
> > + * It is used for arm architecture to choose load instructions,
> > + * and the optional value is 16, 32 and 64.
> > + */
> > +#define rte_wait_event(addr, mask, expected, cond, memorder, size) \
> > +do { \
> > + RTE_BUILD_BUG_ON(!__builtin_constant_p(memorder)); \
> > + RTE_BUILD_BUG_ON(memorder != __ATOMIC_ACQUIRE && \
> > + memorder != __ATOMIC_RELAXED); \
> > + RTE_BUILD_BUG_ON(size != 16 && size != 32 && size != 64); \
> > + while ((__atomic_load_n(addr, memorder) & mask) cond expected) \
> > + rte_pause(); \
> > +} while (0)
> > #endif
> >
> > #endif /* _RTE_PAUSE_H_ */
> > --
> > 2.25.1
> >