author		Will Deacon <will.deacon@arm.com>	2015-04-23 22:08:49 +0300
committer	Will Deacon <will.deacon@arm.com>	2015-07-27 17:28:51 +0300
commit		c342f78217e822d2178265b0b1de232eeb717149 (patch)
tree		2c1de7284bb6671aa9cb4e8a0f85a119d64de3a2 /arch/arm64/include/asm/atomic_ll_sc.h
parent		c8366ba0fb65063b6b4f69c7af1ea74152435590 (diff)
arm64: cmpxchg: patch in lse instructions when supported by the CPU
On CPUs which support the LSE atomic instructions introduced in ARMv8.1, it
makes sense to use them in preference to ll/sc sequences.

This patch introduces runtime patching of our cmpxchg primitives so that
the LSE cas instruction is used instead.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
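For reference, the sketch below shows roughly what a CAS-based 32-bit cmpxchg
looks like once LSE is available. This is an illustration only: the kernel's
real LSE variants live in the companion atomic_lse.h header and are selected
by the runtime patching this series adds; the standalone function name below
is invented for this sketch and it assumes an ARMv8.1-aware assembler.

/*
 * Illustrative sketch (not part of this patch): a 32-bit cmpxchg built on
 * the ARMv8.1 CAS instruction, here with acquire+release ordering (casal).
 */
static inline unsigned int cmpxchg_u32_lse_sketch(volatile unsigned int *ptr,
						  unsigned int old,
						  unsigned int new)
{
	asm volatile(
	/* if *ptr == old then *ptr = new; 'old' receives the prior value */
	"	casal	%w[old], %w[new], %[v]\n"
	: [old] "+r" (old), [v] "+Q" (*ptr)
	: [new] "r" (new)
	: "memory");

	/* CAS writes the value previously in memory back into 'old'. */
	return old;
}

Compared with the ll/sc sequence added below, the whole compare-and-swap is a
single instruction, so there is no explicit retry loop.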
Diffstat (limited to 'arch/arm64/include/asm/atomic_ll_sc.h')
-rw-r--r--	arch/arm64/include/asm/atomic_ll_sc.h	| 38
1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 4b981ba57e78..4864158d486e 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -215,4 +215,42 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
+#define __CMPXCHG_CASE(w, sz, name, mb, cl) \
+__LL_SC_INLINE unsigned long \
+__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
+ unsigned long old, \
+ unsigned long new)) \
+{ \
+ unsigned long tmp, oldval; \
+ \
+ asm volatile( \
+ " " #mb "\n" \
+ "1: ldxr" #sz "\t%" #w "[oldval], %[v]\n" \
+ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
+ " cbnz %" #w "[tmp], 2f\n" \
+ " stxr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
+ " cbnz %w[tmp], 1b\n" \
+ " " #mb "\n" \
+ " mov %" #w "[oldval], %" #w "[old]\n" \
+ "2:" \
+ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
+ [v] "+Q" (*(unsigned long *)ptr) \
+ : [old] "Lr" (old), [new] "r" (new) \
+ : cl); \
+ \
+ return oldval; \
+} \
+__LL_SC_EXPORT(__cmpxchg_case_##name);
+
+__CMPXCHG_CASE(w, b, 1, , )
+__CMPXCHG_CASE(w, h, 2, , )
+__CMPXCHG_CASE(w, , 4, , )
+__CMPXCHG_CASE( , , 8, , )
+__CMPXCHG_CASE(w, b, mb_1, dmb ish, "memory")
+__CMPXCHG_CASE(w, h, mb_2, dmb ish, "memory")
+__CMPXCHG_CASE(w, , mb_4, dmb ish, "memory")
+__CMPXCHG_CASE( , , mb_8, dmb ish, "memory")
+
+#undef __CMPXCHG_CASE
+
#endif /* __ASM_ATOMIC_LL_SC_H */
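
To make the stringizing in the macro easier to follow, here is approximately
what __CMPXCHG_CASE(w, , mb_4, dmb ish, "memory") expands to, with the
__LL_SC_INLINE/__LL_SC_PREFIX/__LL_SC_EXPORT wrappers reduced to a plain
static inline for readability (a sketch, not the literal preprocessor output):

/*
 * Approximate expansion of __CMPXCHG_CASE(w, , mb_4, dmb ish, "memory"),
 * i.e. the fully ordered 4-byte case.
 */
static inline unsigned long __cmpxchg_case_mb_4(volatile void *ptr,
						unsigned long old,
						unsigned long new)
{
	unsigned long tmp, oldval;

	asm volatile(
	"	dmb ish\n"				/* full barrier before the update */
	"1:	ldxr	%w[oldval], %[v]\n"		/* load-exclusive current value */
	"	eor	%w[tmp], %w[oldval], %w[old]\n"	/* compare with expected value */
	"	cbnz	%w[tmp], 2f\n"			/* mismatch: return loaded value */
	"	stxr	%w[tmp], %w[new], %[v]\n"	/* try to store the new value */
	"	cbnz	%w[tmp], 1b\n"			/* store-exclusive failed: retry */
	"	dmb ish\n"				/* full barrier after a successful update */
	"	mov	%w[oldval], %w[old]\n"		/* success: return the expected value */
	"2:"
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),
	  [v] "+Q" (*(unsigned long *)ptr)
	: [old] "Lr" (old), [new] "r" (new)
	: "memory");

	return oldval;
}

The four unordered variants expand the same way with the dmb ish barriers and
the "memory" clobber omitted; the 1-, 2- and 8-byte cases differ only in the
ldxrb/ldxrh/ldxr and stxrb/stxrh/stxr mnemonics and in whether the %w (32-bit)
register prefix is applied to the value operands.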