path: root/arch/arm/lib/bitops.h
author		Mark Rutland <mark.rutland@arm.com>	2023-06-05 10:00:58 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-07-19 17:21:02 +0300
commit		ea1432a402ab5d401ea66372a4438ad55061143c (patch)
tree		c23fc4d6e0327042a569b8eae30fef3a74a9e1e3 /arch/arm/lib/bitops.h
parent		6b54f5c68474edc241434ab50bae4a08ce45f835 (diff)
locking/atomic: arm: fix sync ops
[ Upstream commit dda5f312bb09e56e7a1c3e3851f2000eb2e9c879 ]

The sync_*() ops on arch/arm are defined in terms of the regular bitops
with no special handling. This is not correct, as UP kernels elide
barriers for the fully-ordered operations, and so the required ordering
is lost when such UP kernels are run under a hypervisor on an SMP
system.

Fix this by defining sync ops with the required barriers.

Note: On 32-bit arm, the sync_*() ops are currently only used by Xen,
which requires ARMv7, but the semantics can be implemented for ARMv6+.

Fixes: e54d2f61528165bb ("xen/arm: sync_bitops")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-2-mark.rutland@arm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
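[ Editor's note: the fix below threads one of two barrier macros through a
shared __testop macro. The distinction they rely on: smp_dmb emits a barrier
only on CONFIG_SMP builds (UP kernels elide it, which is exactly the bug
described above), while __smp_dmb emits one unconditionally. A minimal
sketch of that relationship, assuming ARMv7; the real definitions live in
arch/arm/include/asm/assembler.h, take a mode argument, and also handle
ARMv6 and Thumb-2:

	@ Sketch only, not the kernel's verbatim assembler.h macros.
	.macro	__smp_dmb		@ always emit the barrier
	dmb	ish			@ inner-shareable data memory barrier
	.endm

	.macro	smp_dmb			@ barrier only when built for SMP
#ifdef CONFIG_SMP
	__smp_dmb			@ expands to nothing on UP kernels
#endif
	.endm

With this split, testop (built on smp_dmb) keeps the UP optimisation, while
sync_testop (built on __smp_dmb) stays fully ordered even when a UP kernel
runs as a guest on an SMP host. ]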
Diffstat (limited to 'arch/arm/lib/bitops.h')
-rw-r--r--	arch/arm/lib/bitops.h	14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 95bd35991288..f069d1b2318e 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -28,7 +28,7 @@ UNWIND(	.fnend		)
 ENDPROC(\name		)
 	.endm
 
-	.macro	testop, name, instr, store
+	.macro	__testop, name, instr, store, barrier
 ENTRY(	\name		)
 UNWIND(	.fnstart	)
 	ands	ip, r1, #3
@@ -38,7 +38,7 @@ UNWIND(	.fnstart	)
 	mov	r0, r0, lsr #5
 	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3		@ create mask
-	smp_dmb
+	\barrier
 #if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
 	.arch_extension	mp
 	ALT_SMP(W(pldw)	[r1])
@@ -50,13 +50,21 @@ UNWIND(	.fnstart	)
 	strex	ip, r2, [r1]
 	cmp	ip, #0
 	bne	1b
-	smp_dmb
+	\barrier
 	cmp	r0, #0
 	movne	r0, #1
 2:	bx	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
 	.endm
+
+	.macro	testop, name, instr, store
+	__testop \name, \instr, \store, smp_dmb
+	.endm
+
+	.macro	sync_testop, name, instr, store
+	__testop \name, \instr, \store, __smp_dmb
+	.endm
 #else
 	.macro	bitop, name, instr
 ENTRY(	\name		)