Diffstat (limited to 'arch/powerpc/include/asm/atomic.h')
 arch/powerpc/include/asm/atomic.h | 70 +++++++++---------------------
 1 file changed, 21 insertions(+), 49 deletions(-)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 682b3e6a1e21..52eafaf74054 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
+#include <asm/asm-405.h>
#define ATOMIC_INIT(i) { (i) }
@@ -18,18 +19,11 @@
* a "bne-" instruction at the end, so an isync is enough as a acquire barrier
* on the platform without lwsync.
*/
-#define __atomic_op_acquire(op, args...) \
-({ \
- typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory"); \
- __ret; \
-})
-
-#define __atomic_op_release(op, args...) \
-({ \
- __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory"); \
- op##_relaxed(args); \
-})
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")
+
+#define __atomic_release_fence() \
+ __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
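
With this hunk the arch header supplies only the fences; the generic layer in
include/linux/atomic.h composes the _acquire/_release variants from the
_relaxed ops plus these fences. A rough sketch of that generic composition,
shown here for context only (it is not part of this patch):

/*
 * Sketch of the generic composition in include/linux/atomic.h:
 * an acquire/release op is a relaxed op plus the arch fence above.
 */
#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})
#endif

#ifndef __atomic_op_release
#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})
#endif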
static __inline__ int atomic_read(const atomic_t *v)
{
@@ -129,8 +123,6 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
-
static __inline__ void atomic_inc(atomic_t *v)
{
int t;
@@ -145,6 +137,7 @@ static __inline__ void atomic_inc(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic_inc atomic_inc
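
The added "#define atomic_inc atomic_inc" is the opt-in convention used by the
generic atomic headers: an architecture #defines each operation it implements
to its own name, and any op left undefined picks up a generic fallback. A
sketch of that fallback pattern, for illustration only (not part of this
patch):

#ifndef atomic_inc
static inline void atomic_inc(atomic_t *v)
{
	/* Generic fallback, used only when the arch did not opt in. */
	atomic_add(1, v);
}
#define atomic_inc atomic_inc
#endif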
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
@@ -163,16 +156,6 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
return t;
}
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
static __inline__ void atomic_dec(atomic_t *v)
{
int t;
@@ -187,6 +170,7 @@ static __inline__ void atomic_dec(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic_dec atomic_dec
static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
@@ -218,7 +202,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
- * __atomic_add_unless - add unless the number is a given value
+ * atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -226,13 +210,13 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int t;
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%1 # __atomic_add_unless\n\
+"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
cmpw 0,%0,%3 \n\
beq 2f \n\
add %0,%2,%0 \n"
@@ -248,6 +232,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return t;
}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
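
atomic_fetch_add_unless() keeps the old-value return that __atomic_add_unless()
already had on powerpc; the boolean atomic_add_unless() is derived generically
by comparing the old value against @u. Shown for illustration only:

static inline bool atomic_add_unless(atomic_t *v, int a, int u)
{
	/* The add happened iff the old value was not @u. */
	return atomic_fetch_add_unless(v, a, u) != u;
}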
/**
* atomic_inc_not_zero - increment unless the number is zero
@@ -280,9 +265,6 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
-#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
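
The atomic_add_negative(), atomic_sub_and_test() and atomic_dec_and_test()
macros removed above are not lost: the generic headers now derive them from the
arch's *_return operations whenever the arch does not provide its own. A sketch
of that fallback shape, for illustration only:

#ifndef atomic_dec_and_test
static inline bool atomic_dec_and_test(atomic_t *v)
{
	/* True when the decrement reached zero. */
	return atomic_dec_return(v) == 0;
}
#define atomic_dec_and_test atomic_dec_and_test
#endif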
@@ -412,8 +394,6 @@ ATOMIC64_OPS(xor, xor)
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-
static __inline__ void atomic64_inc(atomic64_t *v)
{
long t;
@@ -427,6 +407,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic64_inc atomic64_inc
static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
@@ -444,16 +425,6 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
return t;
}
-/*
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
static __inline__ void atomic64_dec(atomic64_t *v)
{
long t;
@@ -467,6 +438,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
+#define atomic64_dec atomic64_dec
static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
@@ -487,9 +459,6 @@ static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
@@ -513,6 +482,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
return t;
}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
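
atomic64_dec_if_positive() returns the old value minus one even when no store
happens, so a zero counter yields -1. A hypothetical caller (the name and
helper are invented here purely for illustration):

static inline bool put_ref_if_held(atomic64_t *refs)
{
	/* >= 0 means the decrement actually took place. */
	return atomic64_dec_if_positive(refs) >= 0;
}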
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
@@ -524,7 +494,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
/**
- * atomic64_add_unless - add unless the number is a given value
+ * atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -532,13 +502,13 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
long t;
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
+"1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
cmpd 0,%0,%3 \n\
beq 2f \n\
add %0,%2,%0 \n"
@@ -551,8 +521,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
: "r" (&v->counter), "r" (a), "r" (u)
: "cc", "memory");
- return t != u;
+ return t;
}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
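
Note the behavioural change in this hunk: the old atomic64_add_unless()
returned a boolean (t != u), whereas atomic64_fetch_add_unless() returns the
old value itself. The boolean form is recovered generically, roughly as:

/* Illustration of the generic derivation; not part of this patch. */
#define atomic64_add_unless(v, a, u)					\
	(atomic64_fetch_add_unless((v), (a), (u)) != (u))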
/**
* atomic64_inc_not_zero - increment unless the number is zero
@@ -582,6 +553,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
return t1 != 0;
}
+#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */