[PATCH v2 5/5] eal: extend bitops to handle volatile pointers
Mattias Rönnblom
hofors at lysator.liu.se
Mon Aug 12 14:28:28 CEST 2024
On 2024-08-12 13:22, Jack Bond-Preston wrote:
> On 09/08/2024 10:58, Mattias Rönnblom wrote:
>> <snip>
>> +#define __RTE_GEN_BIT_ATOMIC_TEST(v, qualifier, size) \
>> __rte_experimental \
>> static inline bool \
>> - __rte_bit_atomic_test ## size(const uint ## size ## _t *addr, \
>> - unsigned int nr, int memory_order) \
>> + __rte_bit_atomic_ ## v ## test ## size(const qualifier uint ## size ## _t *addr, \
>> + unsigned int nr, int memory_order) \
>> { \
>> RTE_ASSERT(nr < size); \
>> \
>> - const RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> - (const RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> + const qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> + (const qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>> return rte_atomic_load_explicit(a_addr, memory_order) & mask; \
>> }
>> -#define __RTE_GEN_BIT_ATOMIC_SET(size) \
>> +#define __RTE_GEN_BIT_ATOMIC_SET(v, qualifier, size) \
>> __rte_experimental \
>> static inline void \
>> - __rte_bit_atomic_set ## size(uint ## size ## _t *addr, \
>> - unsigned int nr, int memory_order) \
>> + __rte_bit_atomic_ ## v ## set ## size(qualifier uint ## size ## _t *addr, \
>> + unsigned int nr, int memory_order) \
>> { \
>> RTE_ASSERT(nr < size); \
>> \
>> - RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> - (RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> + qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> + (qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>> rte_atomic_fetch_or_explicit(a_addr, mask, memory_order); \
>> }
>> -#define __RTE_GEN_BIT_ATOMIC_CLEAR(size) \
>> +#define __RTE_GEN_BIT_ATOMIC_CLEAR(v, qualifier, size) \
>> __rte_experimental \
>> static inline void \
>> - __rte_bit_atomic_clear ## size(uint ## size ## _t *addr, \
>> - unsigned int nr, int memory_order) \
>> + __rte_bit_atomic_ ## v ## clear ## size(qualifier uint ## size ## _t *addr, \
>> + unsigned int nr, int memory_order) \
>> { \
>> RTE_ASSERT(nr < size); \
>> \
>> - RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> - (RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> + qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> + (qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>> rte_atomic_fetch_and_explicit(a_addr, ~mask, memory_order); \
>> }
>> -#define __RTE_GEN_BIT_ATOMIC_FLIP(size) \
>> +#define __RTE_GEN_BIT_ATOMIC_FLIP(v, qualifier, size) \
>> __rte_experimental \
>> static inline void \
>> - __rte_bit_atomic_flip ## size(uint ## size ## _t *addr, \
>> - unsigned int nr, int memory_order) \
>> + __rte_bit_atomic_ ## v ## flip ## size(qualifier uint ## size ## _t *addr, \
>> + unsigned int nr, int memory_order) \
>> { \
>> RTE_ASSERT(nr < size); \
>> \
>> - RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> - (RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> + qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> + (qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>> rte_atomic_fetch_xor_explicit(a_addr, mask, memory_order); \
>> }
>> -#define __RTE_GEN_BIT_ATOMIC_ASSIGN(size) \
>> +#define __RTE_GEN_BIT_ATOMIC_ASSIGN(v, qualifier, size) \
>> __rte_experimental \
>> static inline void \
>> - __rte_bit_atomic_assign ## size(uint ## size ## _t *addr, \
>> - unsigned int nr, bool value, \
>> - int memory_order) \
>> + __rte_bit_atomic_ ## v ## assign ## size(qualifier uint ## size ## _t *addr, \
>> + unsigned int nr, bool value, \
>> + int memory_order) \
>> { \
>> if (value) \
>> - __rte_bit_atomic_set ## size(addr, nr, memory_order); \
>> + __rte_bit_atomic_ ## v ## set ## size(addr, nr, memory_order); \
>> else \
>> - __rte_bit_atomic_clear ## size(addr, nr, \
>> - memory_order); \
>> + __rte_bit_atomic_ ## v ## clear ## size(addr, nr, \
>> + memory_order); \
>> }
>> -#define __RTE_GEN_BIT_ATOMIC_TEST_AND_SET(size) \
>> +#define __RTE_GEN_BIT_ATOMIC_TEST_AND_SET(v, qualifier, size) \
>> __rte_experimental \
>> static inline bool \
>> - __rte_bit_atomic_test_and_set ## size(uint ## size ## _t *addr, \
>> - unsigned int nr, \
>> - int memory_order) \
>> + __rte_bit_atomic_ ## v ## test_and_set ## size(qualifier uint ## size ## _t *addr, \
>> + unsigned int nr, \
>> + int memory_order) \
>> { \
>> RTE_ASSERT(nr < size); \
>> \
>> - RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> - (RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> + qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> + (qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>> uint ## size ## _t prev; \
>> \
>> @@ -587,17 +632,17 @@ __RTE_GEN_BIT_FLIP(, flip,, 64)
>> return prev & mask; \
>> }
>> -#define __RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(size) \
>> +#define __RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(v, qualifier, size) \
>> __rte_experimental \
>> static inline bool \
>> - __rte_bit_atomic_test_and_clear ## size(uint ## size ## _t *addr, \
>> - unsigned int nr, \
>> - int memory_order) \
>> + __rte_bit_atomic_ ## v ## test_and_clear ## size(qualifier uint ## size ## _t *addr, \
>> + unsigned int nr, \
>> + int memory_order) \
>> { \
>> RTE_ASSERT(nr < size); \
>> \
>> - RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> - (RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> + qualifier RTE_ATOMIC(uint ## size ## _t) *a_addr = \
>> + (qualifier RTE_ATOMIC(uint ## size ## _t) *)addr; \
>> uint ## size ## _t mask = (uint ## size ## _t)1 << nr; \
>> uint ## size ## _t prev; \
>> \
>> @@ -607,34 +652,36 @@ __RTE_GEN_BIT_FLIP(, flip,, 64)
>> return prev & mask; \
>> }
>> -#define __RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(size) \
>> +#define __RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(v, qualifier, size) \
>> __rte_experimental \
>> static inline bool \
>> - __rte_bit_atomic_test_and_assign ## size(uint ## size ## _t *addr, \
>> - unsigned int nr, \
>> - bool value, \
>> - int memory_order) \
>> + __rte_bit_atomic_ ## v ## test_and_assign ## size(qualifier uint ## size ## _t *addr, \
>> + unsigned int nr, \
>> + bool value, \
>> + int memory_order) \
>> { \
>> if (value) \
>> - return __rte_bit_atomic_test_and_set ## size(addr, nr, \
>> - memory_order); \
>> + return __rte_bit_atomic_ ## v ## test_and_set ## size(addr, nr, memory_order); \
>> else \
>> - return __rte_bit_atomic_test_and_clear ## size(addr, nr, \
>> - memory_order); \
>> + return __rte_bit_atomic_ ## v ## test_and_clear ## size(addr, nr, memory_order); \
>> }
>> -#define __RTE_GEN_BIT_ATOMIC_OPS(size) \
>> - __RTE_GEN_BIT_ATOMIC_TEST(size) \
>> - __RTE_GEN_BIT_ATOMIC_SET(size) \
>> - __RTE_GEN_BIT_ATOMIC_CLEAR(size) \
>> - __RTE_GEN_BIT_ATOMIC_ASSIGN(size) \
>> - __RTE_GEN_BIT_ATOMIC_TEST_AND_SET(size) \
>> - __RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(size) \
>> - __RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(size) \
>> - __RTE_GEN_BIT_ATOMIC_FLIP(size)
>> +#define __RTE_GEN_BIT_ATOMIC_OPS(v, qualifier, size) \
>> + __RTE_GEN_BIT_ATOMIC_TEST(v, qualifier, size) \
>> + __RTE_GEN_BIT_ATOMIC_SET(v, qualifier, size) \
>> + __RTE_GEN_BIT_ATOMIC_CLEAR(v, qualifier, size) \
>> + __RTE_GEN_BIT_ATOMIC_ASSIGN(v, qualifier, size) \
>> + __RTE_GEN_BIT_ATOMIC_TEST_AND_SET(v, qualifier, size) \
>> + __RTE_GEN_BIT_ATOMIC_TEST_AND_CLEAR(v, qualifier, size) \
>> + __RTE_GEN_BIT_ATOMIC_TEST_AND_ASSIGN(v, qualifier, size) \
>> + __RTE_GEN_BIT_ATOMIC_FLIP(v, qualifier, size)
>> -__RTE_GEN_BIT_ATOMIC_OPS(32)
>> -__RTE_GEN_BIT_ATOMIC_OPS(64)
>> +#define __RTE_GEN_BIT_ATOMIC_OPS_SIZE(size) \
>> + __RTE_GEN_BIT_ATOMIC_OPS(,, size) \
>> + __RTE_GEN_BIT_ATOMIC_OPS(v_, volatile, size)
>> +
>> +__RTE_GEN_BIT_ATOMIC_OPS_SIZE(32)
>> +__RTE_GEN_BIT_ATOMIC_OPS_SIZE(64)
>
> The first argument for these should probably be called "family", for
> consistency with the non-atomic ops.
>
The "family" is "atomic", or "" (for the non-atomic version), so it's not a
good name.
I'll rename the macro parameters in __RTE_GEN_BIT_TEST() instead.
'qualifier' should be 'c', or maybe 'const_qualifier' or 'const_qual' to be
more descriptive. The names should be consistent with those used in the
overload macros.
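
To make the naming concrete: expanding
__RTE_GEN_BIT_ATOMIC_OPS(v_, volatile, 32) generates, roughly, functions
like

__rte_experimental
static inline bool
__rte_bit_atomic_v_test32(const volatile uint32_t *addr,
			  unsigned int nr, int memory_order)
{
	RTE_ASSERT(nr < 32);

	const volatile RTE_ATOMIC(uint32_t) *a_addr =
		(const volatile RTE_ATOMIC(uint32_t) *)addr;
	uint32_t mask = (uint32_t)1 << nr;
	return rte_atomic_load_explicit(a_addr, memory_order) & mask;
}

while __RTE_GEN_BIT_ATOMIC_OPS(,, 32) generates the non-volatile
__rte_bit_atomic_test32(), taking a plain const uint32_t *.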
>> /*------------------------ 32-bit relaxed operations ------------------------*/
>> <snip>
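
For what it's worth, the point of the volatile variants is that call sites
keeping their flag words volatile-qualified no longer have to cast the
qualifier away. A sketch, with made-up names for illustration:

#define STOP_BIT 0 /* hypothetical bit position */

static volatile uint64_t ctrl_flags;

static bool
worker_should_stop(void)
{
	/* generated volatile variant; no cast needed */
	return __rte_bit_atomic_v_test64(&ctrl_flags, STOP_BIT,
					 rte_memory_order_acquire);
}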