author     Davidlohr Bueso <dave@stgolabs.net>  2015-09-30 23:03:12 +0300
committer  Ingo Molnar <mingo@kernel.org>       2015-10-06 18:28:20 +0300
commit     81a43adae3b943193fb3afd20a36a7482332f964 (patch)
tree       09a6a2090473927939fc8d916ec12e5fec93414c /include/asm-generic
parent     63ab7bd0d450b726b88fa4b932f151b98cee2557 (diff)
download   linux-81a43adae3b943193fb3afd20a36a7482332f964.tar.xz
locking/mutex: Use acquire/release semantics
As of 654672d4ba1 (locking/atomics: Add _{acquire|release|relaxed}() variants of some atomic operations) and 6d79ef2d30e (locking, asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'), weakly ordered archs can benefit from a more relaxed use of barriers when locking and unlocking, instead of regular full-barrier semantics. While currently only arm64 supports such optimizations, updating the corresponding locking primitives lets other archs benefit immediately as well, once they implement the necessary machinery.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1443643395-17016-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
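For context, here is a minimal standalone C11 sketch (not part of this patch; the names demo_count, shared_data, fastpath_trylock and fastpath_unlock are illustrative only) of the ordering the changelog relies on: taking the lock needs only ACQUIRE semantics, so accesses inside the critical section cannot be hoisted above the lock operation, and releasing it needs only RELEASE semantics, so they cannot sink below the unlock; a full barrier on both sides is stronger than required.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int demo_count = 1;	/* 1 == unlocked, <= 0 == locked/contended */
static int shared_data;			/* data guarded by demo_count */

static bool fastpath_trylock(void)
{
	int expected = 1;

	/* acquire: later accesses to shared_data cannot move above this operation */
	return atomic_compare_exchange_strong_explicit(&demo_count, &expected, 0,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void fastpath_unlock(void)
{
	/* release: earlier accesses to shared_data cannot move below this store */
	atomic_store_explicit(&demo_count, 1, memory_order_release);
}

On a strongly ordered arch this changes nothing in the generated code, but on arm64 the _acquire/_release atomic variants can be implemented with load-acquire/store-release instructions rather than full dmb barriers, which is the benefit the changelog refers to.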
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/mutex-dec.h    8
-rw-r--r--  include/asm-generic/mutex-xchg.h  10
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index d4f9fb4e53df..fd694cfd678a 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -20,7 +20,7 @@
 static inline void
 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	if (unlikely(atomic_dec_return(count) < 0))
+	if (unlikely(atomic_dec_return_acquire(count) < 0))
 		fail_fn(count);
 }
@@ -35,7 +35,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-	if (unlikely(atomic_dec_return(count) < 0))
+	if (unlikely(atomic_dec_return_acquire(count) < 0))
 		return -1;
 	return 0;
 }
@@ -56,7 +56,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	if (unlikely(atomic_inc_return(count) <= 0))
+	if (unlikely(atomic_inc_return_release(count) <= 0))
 		fail_fn(count);
 }
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+	if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
 		return 1;
 	return 0;
 }
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index f169ec064785..a6b4a7bd6ac9 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -31,7 +31,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 		 * to ensure that any waiting tasks are woken up by the
 		 * unlock slow path.
 		 */
-		if (likely(atomic_xchg(count, -1) != 1))
+		if (likely(atomic_xchg_acquire(count, -1) != 1))
 			fail_fn(count);
 }
@@ -46,7 +46,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_lock_retval(atomic_t *count)
 {
-	if (unlikely(atomic_xchg(count, 0) != 1))
+	if (unlikely(atomic_xchg_acquire(count, 0) != 1))
 		if (likely(atomic_xchg(count, -1) != 1))
 			return -1;
 	return 0;
@@ -67,7 +67,7 @@ __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	if (unlikely(atomic_xchg(count, 1) != 0))
+	if (unlikely(atomic_xchg_release(count, 1) != 0))
 		fail_fn(count);
 }
@@ -91,7 +91,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	int prev = atomic_xchg(count, 0);
+	int prev = atomic_xchg_acquire(count, 0);
 	if (unlikely(prev < 0)) {
 		/*
@@ -105,7 +105,7 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 		 * owner's unlock path needlessly, but that's not a problem
 		 * in practice. ]
 		 */
-		prev = atomic_xchg(count, prev);
+		prev = atomic_xchg_acquire(count, prev);
 		if (prev < 0)
 			prev = 0;
 	}
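Not part of the patch, but for readability: with the two mutex-xchg.h trylock hunks above applied, the fastpath reads roughly as follows (the unchanged middle of the original comment is paraphrased, and the trailing return is assumed from the function's int return type):

static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int prev = atomic_xchg_acquire(count, 0);

	if (unlikely(prev < 0)) {
		/*
		 * The lock was marked contended, so restore that state.
		 * If this second xchg returns 1, we own the lock after all;
		 * it may send a racing owner through the unlock slowpath
		 * needlessly, but that's not a problem in practice.
		 */
		prev = atomic_xchg_acquire(count, prev);
		if (prev < 0)
			prev = 0;
	}

	return prev;
}

Both exchanges use the _acquire variant because either one can be the operation that actually takes the lock.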