[dpdk-dev] [PATCH v2 1/3] eal/arm64: add 128-bit atomic compare exchange

Jerin Jacob Kollanukkaran jerinj at marvell.com
Mon Jun 24 08:41:08 CEST 2019


> -----Original Message-----
> From: Phil Yang <phil.yang at arm.com>
> Sent: Sunday, June 23, 2019 8:46 AM
> To: dev at dpdk.org
> Cc: thomas at monjalon.net; Jerin Jacob Kollanukkaran <jerinj at marvell.com>;
> hemant.agrawal at nxp.com; Honnappa.Nagarahalli at arm.com;
> gavin.hu at arm.com; nd at arm.com; gage.eads at intel.com
> Subject: [EXT] [PATCH v2 1/3] eal/arm64: add 128-bit atomic compare
> exchange
> 
> Add 128-bit atomic compare exchange on aarch64.
> 
> Signed-off-by: Phil Yang <phil.yang at arm.com>
> Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli at arm.com>
> Tested-by: Honnappa Nagarahalli <honnappa.nagarahalli at arm.com>
> ---
> This patch depends on 'eal/stack: fix 'pointer-sign' warning'
> http://patchwork.dpdk.org/patch/54840/
> 
> +
> +#ifdef __ARM_FEATURE_ATOMICS
> +static inline rte_int128_t
> +__rte_casp(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated,
> +		int mo)
> +{

Better to change to "const int mo".
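
i.e. something like:

	static inline rte_int128_t
	__rte_casp(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated,
			const int mo)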

> +
> +	/* caspX instructions register pair must start from even-numbered
> +	 * register at operand 1.
> +	 * So, specify registers for local variables here.
> +	 */
> +	register uint64_t x0 __asm("x0") = (uint64_t)old.val[0];
> +	register uint64_t x1 __asm("x1") = (uint64_t)old.val[1];
> +	register uint64_t x2 __asm("x2") = (uint64_t)updated.val[0];
> +	register uint64_t x3 __asm("x3") = (uint64_t)updated.val[1];
> +
> +	if (mo ==  __ATOMIC_RELAXED) {
> +		asm volatile(
> +				"casp %[old0], %[old1], %[upd0], %[upd1], [%[dst]]"
> +				: [old0] "+r" (x0),
> +				  [old1] "+r" (x1)
> +				: [upd0] "r" (x2),
> +				  [upd1] "r" (x3),
> +				  [dst] "r" (dst)
> +				: "memory");
> +	} else if (mo == __ATOMIC_ACQUIRE) {
> +		asm volatile(
> +				"caspa %[old0], %[old1], %[upd0], %[upd1], [%[dst]]"
> +				: [old0] "+r" (x0),
> +				  [old1] "+r" (x1)
> +				: [upd0] "r" (x2),
> +				  [upd1] "r" (x3),
> +				  [dst] "r" (dst)
> +				: "memory");
> +	} else if (mo == __ATOMIC_ACQ_REL) {
> +		asm volatile(
> +				"caspal %[old0], %[old1], %[upd0], %[upd1], [%[dst]]"
> +				: [old0] "+r" (x0),
> +				  [old1] "+r" (x1)
> +				: [upd0] "r" (x2),
> +				  [upd1] "r" (x3),
> +				  [dst] "r" (dst)
> +				: "memory");
> +	} else if (mo == __ATOMIC_RELEASE) {
> +		asm volatile(
> +				"caspl %[old0], %[old1], %[upd0], %[upd1], [%[dst]]"
> +				: [old0] "+r" (x0),
> +				  [old1] "+r" (x1)
> +				: [upd0] "r" (x2),
> +				  [upd1] "r" (x3),
> +				  [dst] "r" (dst)
> +				: "memory");

I think this duplicated code can be avoided with a macro that takes casp/caspa/caspl/caspal as an argument.
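
For example, a rough sketch of such a macro (the name __ATOMIC128_CAS_OP is only illustrative, and it reuses the rte_int128_t type introduced by this patch):

	#define __ATOMIC128_CAS_OP(cas_op_name, op_string)                        \
	static inline rte_int128_t                                                \
	cas_op_name(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated)   \
	{                                                                         \
		/* caspX register pairs must start at an even-numbered register */ \
		register uint64_t x0 __asm("x0") = (uint64_t)old.val[0];         \
		register uint64_t x1 __asm("x1") = (uint64_t)old.val[1];         \
		register uint64_t x2 __asm("x2") = (uint64_t)updated.val[0];     \
		register uint64_t x3 __asm("x3") = (uint64_t)updated.val[1];     \
		asm volatile(                                                     \
			op_string " %[old0], %[old1], %[upd0], %[upd1], [%[dst]]" \
			: [old0] "+r" (x0),                                       \
			  [old1] "+r" (x1)                                        \
			: [upd0] "r" (x2),                                        \
			  [upd1] "r" (x3),                                        \
			  [dst] "r" (dst)                                         \
			: "memory");                                              \
		old.val[0] = x0;                                                  \
		old.val[1] = x1;                                                  \
		return old;                                                       \
	}

	__ATOMIC128_CAS_OP(__rte_cas_relaxed, "casp")
	__ATOMIC128_CAS_OP(__rte_cas_acquire, "caspa")
	__ATOMIC128_CAS_OP(__rte_cas_release, "caspl")
	__ATOMIC128_CAS_OP(__rte_cas_acq_rel, "caspal")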

> +	} else {
> +		rte_panic("Invalid memory order\n");


rte_panic should be removed from library functions. In this case, I think an invalid mo can fall back to the strongest barrier.
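
Something along these lines (only a sketch, reusing the illustrative helper names from the macro suggestion above):

	static inline rte_int128_t
	__rte_casp(rte_int128_t *dst, rte_int128_t old, rte_int128_t updated,
			const int mo)
	{
		if (mo == __ATOMIC_RELAXED)
			return __rte_cas_relaxed(dst, old, updated);
		else if (mo == __ATOMIC_ACQUIRE)
			return __rte_cas_acquire(dst, old, updated);
		else if (mo == __ATOMIC_RELEASE)
			return __rte_cas_release(dst, old, updated);
		/* Any other ordering, including an unexpected value, maps to
		 * the strongest variant instead of calling rte_panic().
		 */
		return __rte_cas_acq_rel(dst, old, updated);
	}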

> +	}
> +
> +	old.val[0] = x0;
> +	old.val[1] = x1;
> +
> +	return old;
> +}
> +#else
> +static inline rte_int128_t
> +__rte_ldx128(const rte_int128_t *src, int mo)
> +{
> +	rte_int128_t ret;
> +	if (mo == __ATOMIC_ACQUIRE)
> +		asm volatile(
> +				"ldaxp %0, %1, %2"
> +				: "=&r" (ret.val[0]),
> +				  "=&r" (ret.val[1])
> +				: "Q" (src->val[0])
> +				: "memory");
> +	else if (mo == __ATOMIC_RELAXED)
> +		asm volatile(
> +				"ldxp %0, %1, %2"
> +				: "=&r" (ret.val[0]),
> +				  "=&r" (ret.val[1])
> +				: "Q" (src->val[0])
> +				: "memory");

Same as above comment.

> +	else
> +		rte_panic("Invalid memory order\n");

Same as above comment.

> +
> +	return ret;
> +}
> +
> +static inline uint32_t
> +__rte_stx128(rte_int128_t *dst, const rte_int128_t src, int mo)
> +{
> +	uint32_t ret;
> +	if (mo == __ATOMIC_RELEASE)
> +		asm volatile(
> +				"stlxp %w0, %1, %2, %3"
> +				: "=&r" (ret)
> +				: "r" (src.val[0]),
> +				  "r" (src.val[1]),
> +				  "Q" (dst->val[0])
> +				: "memory");
> +	else if (mo == __ATOMIC_RELAXED)
> +		asm volatile(
> +				"stxp %w0, %1, %2, %3"
> +				: "=&r" (ret)
> +				: "r" (src.val[0]),
> +				  "r" (src.val[1]),
> +				  "Q" (dst->val[0])
> +				: "memory");
> +	else
> +		rte_panic("Invalid memory order\n");

Same as above comment.

> +
> +	/* Return 0 on success, 1 on failure */
> +	return ret;
> +}
> +#endif
> +
> +static inline int __rte_experimental
> +rte_atomic128_cmp_exchange(rte_int128_t *dst,
> +				rte_int128_t *exp,
> +				const rte_int128_t *src,
> +				unsigned int weak,
> +				int success,
> +				int failure)
> +{
> +	// Always do strong CAS

Remove the C++-style comment.
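
If the note is worth keeping, a C-style comment would do, e.g.:

	/* Always do strong CAS */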


