[dpdk-dev] [PATCH 02/12] Add atomic operations for IBM Power architecture

Hemant at freescale.com Hemant at freescale.com
Mon Sep 29 08:16:16 CEST 2014


Hi Chao,

This patch seems to be incomplete. You may also need to patch librte_eal/common/include/rte_atomic.h 
e.g.
#if !defined(RTE_ARCH_X86_64) && !defined(RTE_ARCH_I686)
#include <arch/rte_atomic.h>
#else /* if Intel */

Otherwise you will get compilation errors for "_mm_mfence".

Similar is true for other common header files as well.


Regards,
Hemant

> -----Original Message-----
> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Chao Zhu
> Sent: 26/Sep/2014 3:06 PM
> To: dev at dpdk.org
> Subject: [dpdk-dev] [PATCH 02/12] Add atomic operations for IBM Power
> architecture
> 
> The atomic operations implemented with assembly code in DPDK only support
> x86. This patch add architecture specific atomic operations for IBM Power
> architecture.
> 
> Signed-off-by: Chao Zhu <bjzhuc at cn.ibm.com>
> ---
>  .../common/include/powerpc/arch/rte_atomic.h       |  387
> ++++++++++++++++++++
>  .../common/include/powerpc/arch/rte_atomic_arch.h  |  318
> ++++++++++++++++
>  2 files changed, 705 insertions(+), 0 deletions(-)  create mode 100644
> lib/librte_eal/common/include/powerpc/arch/rte_atomic.h
>  create mode 100644
> lib/librte_eal/common/include/powerpc/arch/rte_atomic_arch.h
> 
> diff --git a/lib/librte_eal/common/include/powerpc/arch/rte_atomic.h
> b/lib/librte_eal/common/include/powerpc/arch/rte_atomic.h
> new file mode 100644
> index 0000000..7f5214e
> --- /dev/null
> +++ b/lib/librte_eal/common/include/powerpc/arch/rte_atomic.h
> @@ -0,0 +1,387 @@
> +/*
> + *   BSD LICENSE
> + *
> + *   Copyright (C) IBM Corporation 2014.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of IBM Corporation nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
> ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> +*/
> +
> +/*
> + * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
> + * Copyright (c) 2008 Marcel Moolenaar
> + * Copyright (c) 2001 Benno Rice
> + * Copyright (c) 2001 David E. O'Brien
> + * Copyright (c) 1998 Doug Rabson
> + * All rights reserved.
> + */
> +
> +#ifndef _RTE_ATOMIC_H_
> +#error "don't include this file directly, please include generic <rte_atomic.h>"
> +#endif
> +
> +#ifndef _RTE_POWERPC_64_ATOMIC_H_
> +#define _RTE_POWERPC_64_ATOMIC_H_
> +
> +/*------------------------- 64 bit atomic operations
> +-------------------------*/
> +
> +/**
> + * An atomic compare and set function used by the mutex functions.
> + * (atomic) equivalent to:
> + *   if (*dst == exp)
> + *     *dst = src (all 64-bit words)
> + *
> + * @param dst
> + *   The destination into which the value will be written.
> + * @param exp
> + *   The expected value.
> + * @param src
> + *   The new value.
> + * @return
> + *   Non-zero on success; 0 on failure.
> + */
> +static inline int
> +rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
> +{
> +	unsigned int ret = 0;
> +
> +	asm volatile (
> +			"\tlwsync\n"
> +			"1: ldarx %[ret], 0, %[dst]\n"
> +			"cmpld %[exp], %[ret]\n"
> +			"bne 2f\n"
> +			"stdcx. %[src], 0, %[dst]\n"
> +			"bne- 1b\n"
> +			"li %[ret], 1\n"
> +			"b 3f\n"
> +			"2:\n"
> +			"stdcx. %[ret], 0, %[dst]\n"
> +			"li %[ret], 0\n"
> +			"3:\n"
> +			"isync\n"
> +			: [ret] "=&r" (ret), "=m" (*dst)
> +			: [dst] "r" (dst), [exp] "r" (exp), [src] "r" (src), "m" (*dst)
> +			: "cc", "memory");
> +	return ret;
> +}
> +
> +/**
> + * The atomic counter structure.
> + */
> +typedef struct {
> +	volatile int64_t cnt;  /**< Internal counter value. */ }
> +rte_atomic64_t;
> +
> +/**
> + * Static initializer for an atomic counter.
> + */
> +#define RTE_ATOMIC64_INIT(val) { (val) }
> +
> +/**
> + * Initialize the atomic counter.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + */
> +static inline void
> +rte_atomic64_init(rte_atomic64_t *v)
> +{
> +	v->cnt = 0;
> +}
> +
> +/**
> + * Atomically read a 64-bit counter.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @return
> + *   The value of the counter.
> + */
> +static inline int64_t
> +rte_atomic64_read(rte_atomic64_t *v)
> +{
> +	long ret;
> +
> +	asm volatile("ld%U1%X1 %[ret],%[cnt]" : [ret] "=r"(ret) : [cnt]
> +"m"(v->cnt));
> +
> +	return ret;
> +}
> +
> +/**
> + * Atomically set a 64-bit counter.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @param new_value
> + *   The new value of the counter.
> + */
> +static inline void
> +rte_atomic64_set(rte_atomic64_t *v, int64_t new_value) {
> +	asm volatile("std%U0%X0 %[new_value],%[cnt]" : [cnt] "=m"(v->cnt) :
> +[new_value] "r"(new_value)); }
> +
> +/**
> + * Atomically add a 64-bit value to a counter.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @param inc
> + *   The value to be added to the counter.
> + */
> +static inline void
> +rte_atomic64_add(rte_atomic64_t *v, int64_t inc) {
> +	long t;
> +
> +	asm volatile(
> +			"1: ldarx %[t],0,%[cnt]\n"
> +			"add %[t],%[inc],%[t]\n"
> +			"stdcx. %[t],0,%[cnt]\n"
> +			"bne- 1b\n"
> +			: [t] "=&r" (t), "=m" (v->cnt)
> +			: [cnt] "r" (&v->cnt), [inc] "r" (inc), "m" (v->cnt)
> +			: "cc", "memory");
> +}
> +
> +/**
> + * Atomically subtract a 64-bit value from a counter.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @param dec
> + *   The value to be subtracted from the counter.
> + */
> +static inline void
> +rte_atomic64_sub(rte_atomic64_t *v, int64_t dec) {
> +	long t;
> +
> +	asm volatile(
> +			"1: ldarx %[t],0,%[cnt]\n"
> +			"subf %[t],%[dec],%[t]\n"
> +			"stdcx. %[t],0,%[cnt]\n"
> +			"bne- 1b\n"
> +			: [t] "=&r" (t), "+m" (v->cnt)
> +			: [cnt] "r" (&v->cnt), [dec] "r" (dec), "m" (v->cnt)
> +			: "cc", "memory");
> +}
> +
> +/**
> + * Atomically increment a 64-bit counter by one and test.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + */
> +static inline void
> +rte_atomic64_inc(rte_atomic64_t *v)
> +{
> +	long t;
> +
> +	asm volatile(
> +			"1: ldarx %[t],0,%[cnt]\n"
> +			"addic %[t],%[t],1\n"
> +			"stdcx. %[t],0,%[cnt] \n"
> +			"bne- 1b\n"
> +			: [t] "=&r" (t), "+m" (v->cnt)
> +			: [cnt] "r" (&v->cnt), "m" (v->cnt)
> +			: "cc", "xer", "memory");
> +}
> +
> +/**
> + * Atomically decrement a 64-bit counter by one and test.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + */
> +static inline void
> +rte_atomic64_dec(rte_atomic64_t *v)
> +{
> +	long t;
> +
> +	asm volatile(
> +			"1: ldarx %[t],0,%[cnt]\n"
> +			"addic %[t],%[t],-1\n"
> +			"stdcx. %[t],0,%[cnt]\n"
> +			"bne- 1b\n"
> +			: [t] "=&r" (t), "+m" (v->cnt)
> +			: [cnt] "r" (&v->cnt), "m" (v->cnt)
> +			: "cc", "xer", "memory");
> +}
> +
> +/**
> + * Add a 64-bit value to an atomic counter and return the result.
> + *
> + * Atomically adds the 64-bit value (inc) to the atomic counter (v) and
> + * returns the value of v after the addition.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @param inc
> + *   The value to be added to the counter.
> + * @return
> + *   The value of v after the addition.
> + */
> +static inline int64_t
> +rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc) {
> +	long ret;
> +
> +	asm volatile(
> +			"\n\tlwsync\n"
> +			"1: ldarx %[ret],0,%[cnt]\n"
> +			"add %[ret],%[inc],%[ret]\n"
> +			"stdcx. %[ret],0,%[cnt]\n"
> +			"bne- 1b\n"
> +			"isync\n"
> +			: [ret] "=&r" (ret)
> +			: [inc] "r" (inc), [cnt] "r" (&v->cnt)
> +			: "cc", "memory");
> +
> +	return ret;
> +}
> +
> +/**
> + * Subtract a 64-bit value from an atomic counter and return the result.
> + *
> + * Atomically subtracts the 64-bit value (dec) from the atomic counter
> +(v)
> + * and returns the value of v after the subtraction.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @param dec
> + *   The value to be subtracted from the counter.
> + * @return
> + *   The value of v after the subtraction.
> + */
> +static inline int64_t
> +rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec) {
> +	long ret;
> +
> +	asm volatile(
> +			"\n\tlwsync\n"
> +			"1: ldarx %[ret],0,%[cnt]\n"
> +			"subf %[ret],%[dec],%[ret]\n"
> +			"stdcx. %[ret],0,%[cnt] \n"
> +			"bne- 1b\n"
> +			"isync\n"
> +			: [ret] "=&r" (ret)
> +			: [dec] "r" (dec), [cnt] "r" (&v->cnt)
> +			: "cc", "memory");
> +
> +	return ret;
> +}
> +
> +static __inline__ long rte_atomic64_inc_return(rte_atomic64_t *v) {
> +	long ret;
> +
> +	asm volatile(
> +			"\n\tlwsync\n"
> +			"1: ldarx %[ret],0,%[cnt]\n"
> +			"addic %[ret],%[ret],1\n"
> +			"stdcx. %[ret],0,%[cnt]\n"
> +			"bne- 1b\n"
> +			"isync\n"
> +			: [ret] "=&r" (ret)
> +			: [cnt] "r" (&v->cnt)
> +			: "cc", "xer", "memory");
> +
> +	return ret;
> +}
> +/**
> + * Atomically increment a 64-bit counter by one and test.
> + *
> + * Atomically increments the atomic counter (v) by one and returns
> + * true if the result is 0, or false in all other cases.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @return
> + *   True if the result after the addition is 0; false otherwise.
> + */
> +#define rte_atomic64_inc_and_test(v) (rte_atomic64_inc_return(v) == 0)
> +
> +static __inline__ long rte_atomic64_dec_return(rte_atomic64_t *v) {
> +	long ret;
> +
> +	asm volatile(
> +			"\n\tlwsync\n"
> +			"1: ldarx %[ret],0,%[cnt]\n"
> +			"addic %[ret],%[ret],-1\n"
> +			"stdcx. %[ret],0,%[cnt]\n"
> +			"bne- 1b\n"
> +			"isync\n"
> +			: [ret] "=&r" (ret)
> +			: [cnt] "r" (&v->cnt)
> +			: "cc", "xer", "memory");
> +
> +	return ret;
> +}
> +/**
> + * Atomically decrement a 64-bit counter by one and test.
> + *
> + * Atomically decrements the atomic counter (v) by one and returns true
> +if
> + * the result is 0, or false in all other cases.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @return
> + *   True if the result after subtraction is 0; false otherwise.
> + */
> +#define rte_atomic64_dec_and_test(v)    (rte_atomic64_dec_return((v)) ==
> 0)
> +
> +/**
> + * Atomically test and set a 64-bit atomic counter.
> + *
> + * If the counter value is already set, return 0 (failed). Otherwise,
> +set
> + * the counter value to 1 and return 1 (success).
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @return
> + *   0 if failed; else 1, success.
> + */
> +static inline int rte_atomic64_test_and_set(rte_atomic64_t *v) {
> +	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1); }
> +
> +/**
> + * Atomically set a 64-bit counter to 0.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + */
> +static inline void rte_atomic64_clear(rte_atomic64_t *v) {
> +	v->cnt = 0;
> +}
> +
> +#endif /* _RTE_POWERPC_64_ATOMIC_H_ */
> +
> diff --git a/lib/librte_eal/common/include/powerpc/arch/rte_atomic_arch.h
> b/lib/librte_eal/common/include/powerpc/arch/rte_atomic_arch.h
> new file mode 100644
> index 0000000..fe5666e
> --- /dev/null
> +++ b/lib/librte_eal/common/include/powerpc/arch/rte_atomic_arch.h
> @@ -0,0 +1,318 @@
> +/*
> + *   BSD LICENSE
> + *
> + *   Copyright (C) IBM Corporation 2014.
> + *
> + *   Redistribution and use in source and binary forms, with or without
> + *   modification, are permitted provided that the following conditions
> + *   are met:
> + *
> + *     * Redistributions of source code must retain the above copyright
> + *       notice, this list of conditions and the following disclaimer.
> + *     * Redistributions in binary form must reproduce the above copyright
> + *       notice, this list of conditions and the following disclaimer in
> + *       the documentation and/or other materials provided with the
> + *       distribution.
> + *     * Neither the name of IBM Corporation nor the names of its
> + *       contributors may be used to endorse or promote products derived
> + *       from this software without specific prior written permission.
> + *
> + *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
> CONTRIBUTORS
> + *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
> NOT
> + *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
> FITNESS FOR
> + *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT
> + *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL,
> + *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
> NOT
> + *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
> OF USE,
> + *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
> ON ANY
> + *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> + *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
> THE USE
> + *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
> DAMAGE.
> +*/
> +
> +/*
> + * Inspired from FreeBSD src/sys/powerpc/include/atomic.h
> + * Copyright (c) 2008 Marcel Moolenaar
> + * Copyright (c) 2001 Benno Rice
> + * Copyright (c) 2001 David E. O'Brien
> + * Copyright (c) 1998 Doug Rabson
> + * All rights reserved.
> + */
> +
> +#ifndef _RTE_ATOMIC_H_
> +#error "don't include this file directly, please include generic <rte_atomic.h>"
> +#endif
> +
> +#ifndef _RTE_ATOMIC_ARCH_H_
> +#define _RTE_ATOMIC_ARCH_H_
> +
> +#include <stdint.h>
> +
> +/**
> + * General memory barrier.
> + *
> + * Guarantees that the LOAD and STORE operations generated before the
> + * barrier occur before the LOAD and STORE operations generated after.
> + */
> +#define	rte_arch_mb()  asm volatile("sync" : : : "memory")
> +
> +/**
> + * Write memory barrier.
> + *
> + * Guarantees that the STORE operations generated before the barrier
> + * occur before the STORE operations generated after.
> + */
> +#define	rte_arch_wmb() asm volatile("sync" : : : "memory")
> +
> +/**
> + * Read memory barrier.
> + *
> + * Guarantees that the LOAD operations generated before the barrier
> + * occur before the LOAD operations generated after.
> + */
> +#define	rte_arch_rmb() asm volatile("sync" : : : "memory")
> +
> +#define	rte_arch_compiler_barrier() do {		\
> +	asm volatile ("" : : : "memory");	\
> +} while(0)
> +
> +/*------------------------- 16 bit atomic operations
> +-------------------------*/
> +
> +/**
> + * The atomic counter structure.
> + */
> +typedef struct {
> +	volatile int16_t cnt; /**< An internal counter value. */ }
> +rte_atomic16_t;
> +
> +/**
> + * Atomic compare and set.
> + *
> + * (atomic) equivalent to:
> + *   if (*dst == exp)
> + *     *dst = src (all 16-bit words)
> + *
> + * @param dst
> + *   The destination location into which the value will be written.
> + * @param exp
> + *   The expected value.
> + * @param src
> + *   The new value.
> + * @return
> + *   Non-zero on success; 0 on failure.
> + */
> +static inline int
> +rte_arch_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t
> +src) {
> +	return __atomic_compare_exchange(dst, &exp, &src, 0,
> __ATOMIC_ACQUIRE,
> +__ATOMIC_ACQUIRE) ? 1 : 0; }
> +
> +/**
> + * Atomically increment a counter by one.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + */
> +static inline void
> +rte_arch_atomic16_inc(rte_atomic16_t *v) {
> +	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE); }
> +
> +/**
> + * Atomically decrement a counter by one.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + */
> +static inline void
> +rte_arch_atomic16_dec(rte_atomic16_t *v) {
> +	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE); }
> +
> +/**
> + * Atomically increment a 16-bit counter by one and test.
> + *
> + * Atomically increments the atomic counter (v) by one and returns true
> +if
> + * the result is 0, or false in all other cases.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @return
> + *   True if the result after the increment operation is 0; false otherwise.
> + */
> +static inline int rte_arch_atomic16_inc_and_test(rte_atomic16_t *v) {
> +	return (__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0); }
> +
> +/**
> + * Atomically decrement a 16-bit counter by one and test.
> + *
> + * Atomically decrements the atomic counter (v) by one and returns true
> +if
> + * the result is 0, or false in all other cases.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @return
> + *   True if the result after the decrement operation is 0; false otherwise.
> + */
> +static inline int rte_arch_atomic16_dec_and_test(rte_atomic16_t *v) {
> +	return (__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0); }
> +
> +/*------------------------- 32 bit atomic operations
> +-------------------------*/
> +
> +/**
> + * The atomic counter structure.
> + */
> +typedef struct {
> +	volatile int32_t cnt; /**< An internal counter value. */ }
> +rte_atomic32_t;
> +
> +/**
> + * Atomic compare and set.
> + *
> + * (atomic) equivalent to:
> + *   if (*dst == exp)
> + *     *dst = src (all 32-bit words)
> + *
> + * @param dst
> + *   The destination location into which the value will be written.
> + * @param exp
> + *   The expected value.
> + * @param src
> + *   The new value.
> + * @return
> + *   Non-zero on success; 0 on failure.
> + */
> +static inline int
> +rte_arch_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t
> +src) {
> +	unsigned int ret = 0;
> +
> +	asm volatile(
> +			"\tlwsync\n"
> +			"1:\tlwarx %[ret], 0, %[dst]\n"
> +			"cmplw %[exp], %[ret]\n"
> +			"bne 2f\n"
> +			"stwcx. %[src], 0, %[dst]\n"
> +			"bne- 1b\n"
> +			"li %[ret], 1\n"
> +			"b 3f\n"
> +			"2:\n"
> +			"stwcx. %[ret], 0, %[dst]\n"
> +			"li %[ret], 0\n"
> +			"3:\n"
> +			"isync\n"
> +			: [ret] "=&r" (ret), "=m" (*dst)
> +			: [dst] "r" (dst), [exp] "r" (exp), [src] "r" (src), "m" (*dst)
> +			: "cc", "memory");
> +
> +	return ret;
> +}
> +
> +/**
> + * Atomically increment a counter by one.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + */
> +static inline void
> +rte_arch_atomic32_inc(rte_atomic32_t *v) {
> +	int t;
> +
> +	asm volatile(
> +			"1: lwarx %[t],0,%[cnt]\n"
> +			"addic %[t],%[t],1\n"
> +			"stwcx. %[t],0,%[cnt]\n"
> +			"bne- 1b\n"
> +			: [t] "=&r" (t), "=m" (v->cnt)
> +			: [cnt] "r" (&v->cnt), "m" (v->cnt)
> +			: "cc", "xer", "memory");
> +}
> +
> +/**
> + * Atomically decrement a counter by one.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + */
> +static inline void
> +rte_arch_atomic32_dec(rte_atomic32_t *v) {
> +	int t;
> +
> +	asm volatile(
> +			"1: lwarx %[t],0,%[cnt]\n"
> +			"addic %[t],%[t],-1\n"
> +			"stwcx. %[t],0,%[cnt]\n"
> +			"bne- 1b\n"
> +			: [t] "=&r" (t), "=m" (v->cnt)
> +			: [cnt] "r" (&v->cnt), "m" (v->cnt)
> +			: "cc", "xer", "memory");
> +}
> +
> +/**
> + * Atomically increment a 32-bit counter by one and test.
> + *
> + * Atomically increments the atomic counter (v) by one and returns true
> +if
> + * the result is 0, or false in all other cases.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @return
> + *   True if the result after the increment operation is 0; false otherwise.
> + */
> +static inline int rte_arch_atomic32_inc_and_test(rte_atomic32_t *v) {
> +	int ret;
> +
> +	asm volatile(
> +			"\n\tlwsync\n"
> +			"1: lwarx %[ret],0,%[cnt]\n"
> +			"addic	%[ret],%[ret],1\n"
> +			"stwcx. %[ret],0,%[cnt]\n"
> +			"bne- 1b\n"
> +			"isync\n"
> +			: [ret] "=&r" (ret)
> +			: [cnt] "r" (&v->cnt)
> +			: "cc", "xer", "memory");
> +
> +	return (ret == 0);
> +}
> +
> +/**
> + * Atomically decrement a 32-bit counter by one and test.
> + *
> + * Atomically decrements the atomic counter (v) by one and returns true
> +if
> + * the result is 0, or false in all other cases.
> + *
> + * @param v
> + *   A pointer to the atomic counter.
> + * @return
> + *   True if the result after the decrement operation is 0; false otherwise.
> + */
> +static inline int rte_arch_atomic32_dec_and_test(rte_atomic32_t *v) {
> +	int ret;
> +
> +	asm volatile(
> +			"\n\tlwsync\n"
> +			"1: lwarx %[ret],0,%[cnt]\n"
> +			"addic %[ret],%[ret],-1\n"
> +			"stwcx. %[ret],0,%[cnt]\n"
> +			"bne- 1b\n"
> +			"isync\n"
> +			: [ret] "=&r" (ret)
> +			: [cnt] "r" (&v->cnt)
> +			: "cc", "xer", "memory");
> +
> +	return (ret == 0);
> +}
> +
> +#endif /* _RTE_ATOMIC_ARCH_H_ */
> +
> --
> 1.7.1



More information about the dev mailing list