author      Andrew Murray <andrew.murray@arm.com>            2019-08-28 20:50:06 +0300
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2021-03-07 14:18:54 +0300
commit      5ef8dff99f434480e02cbe383ce2bb47057fc71b (patch)
tree        1fd243d3f53f3ef001abc3f67b42399f0028aa69
parent      f5c832bcb1947ce6339b5e6b7be189fa3c824ee2 (diff)
download    linux-5ef8dff99f434480e02cbe383ce2bb47057fc71b.tar.xz
arm64: Use correct ll/sc atomic constraints
commit 580fa1b874711d633f9b145b7777b0e83ebf3787 upstream.

The A64 ISA accepts distinct (but overlapping) ranges of immediates for:

 * add arithmetic instructions ('I' machine constraint)
 * sub arithmetic instructions ('J' machine constraint)
 * 32-bit logical instructions ('K' machine constraint)
 * 64-bit logical instructions ('L' machine constraint)

... but we currently use the 'I' constraint for many atomic operations
using sub or logical instructions, which is not always valid.

When CONFIG_ARM64_LSE_ATOMICS is not set, this allows invalid immediates
to be passed to instructions, potentially resulting in a build failure.
When CONFIG_ARM64_LSE_ATOMICS is selected the out-of-line ll/sc atomics
always use a register as they have no visibility of the value passed by
the caller.

This patch adds a constraint parameter to the ATOMIC_xx and
__CMPXCHG_CASE macros so that we can pass appropriate constraints for
each case, with uses updated accordingly.

Unfortunately prior to GCC 8.1.0 the 'K' constraint erroneously accepted
'4294967295', so we must instead force the use of a register.

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
[bwh: Backported to 4.19: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  arch/arm64/include/asm/atomic_ll_sc.h | 89
1 file changed, 47 insertions(+), 42 deletions(-)
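
Before the diff itself, a simplified sketch of the mechanism the patch uses
(illustrative only: the __LL_SC_* wrappers and the prfm hint are dropped, and
DEMO_ATOMIC_OP is a made-up name, not the kernel macro). The extra macro
parameter is stringified with '#' and pasted in front of "r", so passing 'J'
yields the combined constraint "Jr" (sub-range immediate or register), while
passing nothing yields plain "r" (register only):

#define DEMO_ATOMIC_OP(op, asm_op, constraint)				\
static inline void demo_atomic_##op(int i, int *counter)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile(							\
	"1:	ldxr	%w0, %2\n"					\
	"	" #asm_op "	%w0, %w0, %w3\n"			\
	"	stxr	%w1, %w0, %2\n"					\
	"	cbnz	%w1, 1b"					\
	: "=&r" (result), "=&r" (tmp), "+Q" (*counter)			\
	: #constraint "r" (i));						\
}

DEMO_ATOMIC_OP(add, add, I)	/* "Ir": add-range immediate or register */
DEMO_ATOMIC_OP(sub, sub, J)	/* "Jr": sub-range immediate or register */
DEMO_ATOMIC_OP(and, and, )	/* "r" only, per the GCC 'K' workaround */
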
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index fb841553b0b0..1cc42441bc67 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -37,7 +37,7 @@
* (the optimize attribute silently ignores these options).
*/
-#define ATOMIC_OP(op, asm_op) \
+#define ATOMIC_OP(op, asm_op, constraint) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
{ \
@@ -51,11 +51,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i)); \
+ : #constraint "r" (i)); \
} \
__LL_SC_EXPORT(atomic_##op);
-#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
@@ -70,14 +70,14 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \
" cbnz %w1, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
} \
__LL_SC_EXPORT(atomic_##op##_return##name);
-#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
{ \
@@ -92,7 +92,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \
" cbnz %w2, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
@@ -110,8 +110,8 @@ __LL_SC_EXPORT(atomic_fetch_##op##name);
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
+ATOMIC_OPS(add, add, I)
+ATOMIC_OPS(sub, sub, J)
#undef ATOMIC_OPS
#define ATOMIC_OPS(...) \
@@ -121,17 +121,17 @@ ATOMIC_OPS(sub, sub)
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OPS(and, and)
-ATOMIC_OPS(andnot, bic)
-ATOMIC_OPS(or, orr)
-ATOMIC_OPS(xor, eor)
+ATOMIC_OPS(and, and, )
+ATOMIC_OPS(andnot, bic, )
+ATOMIC_OPS(or, orr, )
+ATOMIC_OPS(xor, eor, )
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define ATOMIC64_OP(op, asm_op) \
+#define ATOMIC64_OP(op, asm_op, constraint) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
{ \
@@ -145,11 +145,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
" stxr %w1, %0, %2\n" \
" cbnz %w1, 1b" \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i)); \
+ : #constraint "r" (i)); \
} \
__LL_SC_EXPORT(atomic64_##op);
-#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
{ \
@@ -164,14 +164,14 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
" cbnz %w1, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
} \
__LL_SC_EXPORT(atomic64_##op##_return##name);
-#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
+#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
__LL_SC_INLINE long \
__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
{ \
@@ -186,7 +186,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
" cbnz %w2, 1b\n" \
" " #mb \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
+ : #constraint "r" (i) \
: cl); \
\
return result; \
@@ -204,8 +204,8 @@ __LL_SC_EXPORT(atomic64_fetch_##op##name);
ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
+ATOMIC64_OPS(add, add, I)
+ATOMIC64_OPS(sub, sub, J)
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...) \
@@ -215,10 +215,10 @@ ATOMIC64_OPS(sub, sub)
ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
-ATOMIC64_OPS(and, and)
-ATOMIC64_OPS(andnot, bic)
-ATOMIC64_OPS(or, orr)
-ATOMIC64_OPS(xor, eor)
+ATOMIC64_OPS(and, and, L)
+ATOMIC64_OPS(andnot, bic, )
+ATOMIC64_OPS(or, orr, L)
+ATOMIC64_OPS(xor, eor, L)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
@@ -248,7 +248,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
-#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
__LL_SC_INLINE u##sz \
__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
unsigned long old, \
@@ -268,29 +268,34 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
"2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(u##sz *)ptr) \
- : [old] "Kr" (old), [new] "r" (new) \
+ : [old] #constraint "r" (old), [new] "r" (new) \
: cl); \
\
return oldval; \
} \
__LL_SC_EXPORT(__cmpxchg_case_##name##sz);
-__CMPXCHG_CASE(w, b, , 8, , , , )
-__CMPXCHG_CASE(w, h, , 16, , , , )
-__CMPXCHG_CASE(w, , , 32, , , , )
-__CMPXCHG_CASE( , , , 64, , , , )
-__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory")
-__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory")
-__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory")
-__CMPXCHG_CASE( , , acq_, 64, , a, , "memory")
-__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory")
-__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory")
-__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory")
-__CMPXCHG_CASE( , , rel_, 64, , , l, "memory")
-__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory")
-__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory")
-__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory")
-__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory")
+/*
+ * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
+ * handle the 'K' constraint for the value 4294967295 - thus we use no
+ * constraint for 32 bit operations.
+ */
+__CMPXCHG_CASE(w, b, , 8, , , , , )
+__CMPXCHG_CASE(w, h, , 16, , , , , )
+__CMPXCHG_CASE(w, , , 32, , , , , )
+__CMPXCHG_CASE( , , , 64, , , , , L)
+__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", )
+__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", )
+__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", )
+__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
+__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", )
+__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", )
+__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", )
+__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
+__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", )
+__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", )
+__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", )
+__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
#undef __CMPXCHG_CASE
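
For concreteness, a hedged reproducer sketch of the kind of failure the commit
message warns about (illustrative only; the value 5 and the helper names are
made up, not taken from the patch or from a specific bug report). With the
pre-patch "Ir" constraint on a logical operation, a constant such as 5
satisfies the add-immediate constraint 'I', so GCC is free to keep it as an
immediate; but 5 is not encodable as an A64 bitmask immediate, so the expanded
"and w0, w0, 5" can be rejected at assembly time:

/* Mirrors the pre-patch "Ir" constraint on a 32-bit logical op. */
static inline void old_style_atomic_and(int i, int *counter)
{
	unsigned long tmp;
	int result;

	asm volatile(
	"1:	ldxr	%w0, %2\n"
	"	and	%w0, %w0, %w3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
	: "Ir" (i));	/* pre-patch constraint: add-range immediate allowed */
}

void example(int *counter)
{
	/*
	 * 5 matches 'I' but is not a valid bitmask immediate, so the asm
	 * can expand to "and w0, w0, 5" and fail to assemble.  With the
	 * patched constraint (plain "r" for the 32-bit logical ops) the
	 * value is forced into a register first.
	 */
	old_style_atomic_and(5, counter);
}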