From ec7633de404e7ce704d8f79081b97bca5b616c23 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Wed, 28 Jun 2023 11:49:18 +0200
Subject: sparc: mark __arch_xchg() as __always_inline

An otherwise correct change to the atomic operations uncovered an
existing bug in the sparc __arch_xchg() function, which calls
__xchg_called_with_bad_pointer() when its arguments are unknown at
compile time:

ERROR: modpost: "__xchg_called_with_bad_pointer" [lib/atomic64_test.ko] undefined!

This now happens because gcc determines that it's better not to inline
the function. Avoid this by marking the function as __always_inline to
force the compiler to do the right thing here.

Reported-by: Guenter Roeck
Link: https://lore.kernel.org/all/c525adc9-6623-4660-8718-e0c9311563b8@roeck-us.net/
Fixes: d12157efc8e08 ("locking/atomic: make atomic*_{cmp,}xchg optional")
Signed-off-by: Arnd Bergmann
Acked-by: Palmer Dabbelt
Acked-by: Mark Rutland
Reviewed-by: Sam Ravnborg
Acked-by: Guenter Roeck
Acked-by: Andi Shyti
Link: https://lore.kernel.org/r/20230628094938.2318171-1-arnd@kernel.org
Signed-off-by: Kees Cook
---
 arch/sparc/include/asm/cmpxchg_32.h | 2 +-
 arch/sparc/include/asm/cmpxchg_64.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 7a1339533d1d..d0af82c240b7 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -15,7 +15,7 @@
 unsigned long __xchg_u32(volatile u32 *m, u32 new);
 void __xchg_called_with_bad_pointer(void);
 
-static inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
+static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
 	case 4:
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 66cd61dde9ec..3de25262c411 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -87,7 +87,7 @@ xchg16(__volatile__ unsigned short *m, unsigned short val)
 	return (load32 & mask) >> bit_shift;
 }
 
-static inline unsigned long
+static __always_inline unsigned long
 __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
-- 
cgit v1.2.3
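
For context, the failure mode generalizes beyond sparc:
__xchg_called_with_bad_pointer() is declared but intentionally never
defined, so an unsupported operand size is caught at link time rather
than silently miscompiled. That trick only works if every call to
__arch_xchg() is inlined where "size" is a compile-time constant.
Below is a minimal user-space sketch of the same pattern; the names
example_xchg(), bad_size_error(), and the non-atomic xchg_u32()
stand-in are illustrative, not kernel code, and the sketch assumes
gcc or clang at -O1 or higher so constant propagation can drop the
dead call:

/* sketch.c: illustrative only; build with "gcc -O2 sketch.c" */
#include <stdio.h>

/* Declared but deliberately never defined: if a call survives to link
 * time, the build fails, flagging an unsupported operand size. */
void bad_size_error(void);

/* Non-atomic stand-in for the real __xchg_u32() helper. */
static unsigned int xchg_u32(volatile unsigned int *m, unsigned int val)
{
	unsigned int old = *m;

	*m = val;
	return old;
}

/*
 * Forced inlining is the point of the patch: only after example_xchg()
 * is inlined into a caller does the compiler see "size" as a constant,
 * fold the switch, and delete the bad_size_error() call. With plain
 * "inline", the compiler may emit an out-of-line copy in which "size"
 * is a runtime value, leaving the undefined call in the object file.
 */
static __attribute__((__always_inline__)) inline unsigned long
example_xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	bad_size_error();	/* dead code once size == 4 is known */
	return x;
}

int main(void)
{
	volatile unsigned int v = 1;

	/* sizeof(v) is a compile-time constant, so after inlining the
	 * switch folds to the case 4 branch and bad_size_error() is
	 * optimized away. */
	printf("old=%u new=%u\n",
	       (unsigned int)example_xchg(2, &v, sizeof(v)), v);
	return 0;
}

Building the sketch without optimization, or with plain "inline" and a
compiler that declines to inline, leaves the bad_size_error() reference
in the object file and the link fails, which mirrors how the
out-of-line __arch_xchg() copy tripped modpost in atomic64_test.ko.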