author		Heiko Carstens <heiko.carstens@de.ibm.com>	2013-09-11 16:28:47 +0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2013-10-24 19:16:46 +0400
commit		86d51bc31fabd3782a99375b6848c5c667e72605 (patch)
tree		781b7b812c8fd4a51dbcdc238e8b90c4b3a2cbb6 /arch/s390
parent		fcd05b50fca44be3f96f8a17c5cce778669c29e7 (diff)
download	linux-86d51bc31fabd3782a99375b6848c5c667e72605.tar.xz
s390/atomic: implement atomic_sub_return() with atomic_add_return()
Get rid of our own atomic_sub_return() implementation. Otherwise we can't
make use of the interlocked-access facility 1 instructions for
atomic_sub_return(), since there is no "load and subtract" instruction
available.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
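The idea is simply that subtracting i is the same as adding -i, so a dedicated compare-and-swap loop for subtraction is redundant once an add-and-return primitive exists. A minimal user-space sketch of the same pattern, using the GCC/Clang __atomic_add_fetch() builtin as a stand-in for the kernel's atomic_add_return() (the demo_* names are illustrative and not part of the patch):

	#include <stdio.h>

	/* Stand-in for the kernel's atomic_add_return(): add i, return the new value. */
	static int demo_add_return(int i, int *v)
	{
		return __atomic_add_fetch(v, i, __ATOMIC_SEQ_CST);
	}

	/* Subtraction expressed as addition of the negated operand, as in the patch. */
	#define demo_sub_return(_i, _v)	demo_add_return(-(int)(_i), _v)

	int main(void)
	{
		int v = 10;

		printf("%d\n", demo_sub_return(3, &v));	/* prints 7 */
		printf("%d\n", v);			/* prints 7 */
		return 0;
	}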
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/atomic.h | 23
1 file changed, 2 insertions(+), 21 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c797832daa5f..fea2c8887da5 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -60,11 +60,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 #define atomic_inc(_v)			atomic_add_return(1, _v)
 #define atomic_inc_return(_v)		atomic_add_return(1, _v)
 #define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	return __CS_LOOP(v, i, "sr");
-}
+#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
 #define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
 #define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
 #define atomic_dec(_v)			atomic_sub_return(1, _v)
@@ -152,11 +148,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	return __CSG_LOOP(v, i, "agr");
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-	return __CSG_LOOP(v, i, "sgr");
-}
-
 static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
 	__CSG_LOOP(v, ~mask, "ngr");
@@ -248,17 +239,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	return new;
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-	long long old, new;
-
-	do {
-		old = atomic64_read(v);
-		new = old - i;
-	} while (atomic64_cmpxchg(v, old, new) != old);
-	return new;
-}
-
 static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
 {
 	long long old, new;
@@ -319,6 +299,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_inc(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
 #define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
 #define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
 #define atomic64_dec(_v)		atomic64_sub_return(1, _v)
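With the patch applied, every subtraction-style helper funnels into the add path; for example (expansion shown for illustration only):

	atomic_dec(v)
	  -> atomic_sub_return(1, v)
	  -> atomic_add_return(-(int)(1), v)

That leaves atomic_add_return() and atomic64_add_return() as the only places that need to know about the interlocked-access facility 1 instructions, which, per the commit message, provide a "load and add" form but no "load and subtract" counterpart.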