author      Guo Jin <guoj17@chinatelecom.cn>        2022-11-08 09:01:26 +0300
committer   Peter Zijlstra <peterz@infradead.org>   2022-11-16 12:18:09 +0300
commit      23df39fc6a36183af5e6e4f47523f1ad2cdc1d30 (patch)
tree        87fd1b3ade8b44b36a217194135154a07364a21a /arch/x86
parent      094226ad94f471a9f19e8f8e7140a09c2625abaa (diff)
download    linux-23df39fc6a36183af5e6e4f47523f1ad2cdc1d30.tar.xz
locking: Fix qspinlock/x86 inline asm error
When compiling Linux 6.1.0-rc3 configured with CONFIG_64BIT=y and CONFIG_PARAVIRT_SPINLOCKS=y on x86_64 using LLVM 11.0, the following error occurred:

  <inline asm> error: changed section flags for .spinlock.text, expected:: 0x6

The reason is that .spinlock.text in kernel/locking/qspinlock.o is entered many times, but its flags are omitted in the subsequent uses. The LLVM 11.0 assembler does not permit leaving out the flags in subsequent uses of the same section.

So this patch adds the corresponding flags to avoid the above error.

Fixes: 501f7f69bca1 ("locking: Add __lockfunc to slow path functions")
Signed-off-by: Guo Jin <guoj17@chinatelecom.cn>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Nathan Chancellor <nathan@kernel.org>
Link: https://lore.kernel.org/r/20221108060126.2505-1-guoj17@chinatelecom.cn
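For context, a minimal standalone sketch of the failure mode, outside the kernel tree and purely illustrative (the file name and build command are assumptions, not part of this patch): the first .pushsection declares .spinlock.text with the "ax" flags, while a later .pushsection names the same section without flags, which the LLVM 11 integrated assembler rejects with the diagnostic quoted above. GNU as and newer LLVM releases may behave differently, so treat this as a sketch rather than a guaranteed reproducer.

/* repro.c -- hypothetical sketch, not from the kernel sources.
 * Build (assumption): clang-11 -c repro.c
 */

/* First use: section created with "ax", i.e. SHF_ALLOC | SHF_EXECINSTR. */
asm (".pushsection .spinlock.text, \"ax\";"
     "nop;"
     ".popsection;");

/* Subsequent use: flags omitted; LLVM 11's integrated assembler reports
 * "changed section flags for .spinlock.text, expected:: 0x6" here.
 * Repeating the "ax" flags, as the patch below does, avoids the error. */
asm (".pushsection .spinlock.text;"
     "nop;"
     ".popsection;");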
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/qspinlock_paravirt.h | 2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 60ece592b220..dbb38a6b4dfb 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -37,7 +37,7 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
* rsi = lockval (second argument)
* rdx = internal variable (set to 0)
*/
-asm (".pushsection .spinlock.text;"
+asm (".pushsection .spinlock.text, \"ax\";"
".globl " PV_UNLOCK ";"
".type " PV_UNLOCK ", @function;"
".align 4,0x90;"
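As a side note on the value in the diagnostic: the "ax" flag string added by the patch maps to SHF_ALLOC | SHF_EXECINSTR in the ELF section header, which is the 0x6 the assembler reports as "expected". A minimal sketch using the standard <elf.h> constants (illustrative only, not part of the patch):

#include <elf.h>
#include <stdio.h>

int main(void)
{
	/* "a" -> SHF_ALLOC (0x2), "x" -> SHF_EXECINSTR (0x4); ORed together
	 * they give 0x6, matching "expected:: 0x6" in the LLVM error. */
	printf("0x%x\n", (unsigned int)(SHF_ALLOC | SHF_EXECINSTR));
	return 0;
}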