author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-21 04:23:21 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-21 04:23:21 +0300
commit     26660a4046b171a752e72a1dd32153230234fe3a (patch)
tree       1389db3db2130e1082250fa0ab1e8684f7e31f39 /arch/x86
parent     46e595a17dcf11404f713845ecb5b06b92a94e43 (diff)
parent     1bcb58a099938c33acda78b212ed67b06b3359ef (diff)
download   linux-26660a4046b171a752e72a1dd32153230234fe3a.tar.xz
Merge branch 'core-objtool-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull 'objtool' stack frame validation from Ingo Molnar:

 "This tree adds a new kernel build-time object file validation feature (CONFIG_STACK_VALIDATION=y): kernel stack frame correctness validation. It was written by and is maintained by Josh Poimboeuf.

 The motivation: there's a category of hard-to-find kernel bugs, most of them in assembly code (but also occasionally in C code), that degrade the quality of kernel stack dumps/backtraces. These bugs are hard to detect at the source code level. Such bugs result in incorrect/incomplete backtraces most of the time - but in some rare cases they can also result in crashes or other undefined behavior.

 The build-time correctness checking is done via the new 'objtool' user-space utility, which was written for this purpose and is hosted in the kernel repository in tools/objtool/. The tool's (very simple) UI and source code design are shaped after Git and perf, and it shares quite a bit of infrastructure with tools/perf (that tooling-infrastructure sharing effort was merged via perf and is already upstream). Objtool follows the well-known kernel coding style.

 Objtool does not try to check .c or .S files; instead it analyzes the resulting .o machine code from first principles: it decodes the instruction stream and interprets it. (Right now objtool supports the x86-64 architecture.)

 From tools/objtool/Documentation/stack-validation.txt:

   "The kernel CONFIG_STACK_VALIDATION option enables a host tool named objtool which runs at compile time. It has a "check" subcommand which analyzes every .o file and ensures the validity of its stack metadata. It enforces a set of rules on asm code and C inline assembly code so that stack traces can be reliable.

    Currently it only checks frame pointer usage, but there are plans to add CFI validation for C files and CFI generation for asm files.

    For each function, it recursively follows all possible code paths and validates the correct frame pointer state at each instruction. It also follows code paths involving special sections, like .altinstructions, __jump_table, and __ex_table, which can add alternative execution paths to a given instruction (or set of instructions). Similarly, it knows how to follow switch statements, for which gcc sometimes uses jump tables."

 When this new kernel option is enabled (it's disabled by default) and the tool finds a suspicious assembly code pattern, it outputs warnings in compiler-warning format:

   warning: objtool: rtlwifi_rate_mapping()+0x2e7: frame pointer state mismatch
   warning: objtool: cik_tiling_mode_table_init()+0x6ce: call without frame pointer save/setup
   warning: objtool: __schedule()+0x3c0: duplicate frame pointer save
   warning: objtool: __schedule()+0x3fd: sibling call from callable instruction with changed frame pointer

 ... so that scripts that pick up compiler warnings will notice them.

 All known warnings triggered by the tool are fixed by this tree; most of the commits in fact prepare the kernel to be warning-free. Most of them are bugfixes or cleanups that stand on their own, but there are also some annotations of 'special' stack frames for justified cases, such as entries to JIT-ed code (BPF) or really special boot-time code.

 There are two other long-term motivations behind this tool as well:

  - To improve the quality and reliability of kernel stack frames, so that they can be used for optimized live patching.

  - To create independent infrastructure to check the correctness of CFI stack frames at build time. CFI debuginfo is notoriously unreliable, and we cannot use it in the kernel as-is without extra checking done both on the kernel side and on the build side.

 The quality of kernel stack frames matters to debuggability as well, so IMO we can merge this without having to consider the live patching or CFI debuginfo angle"

* 'core-objtool-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
  objtool: Only print one warning per function
  objtool: Add several performance improvements
  tools: Copy hashtable.h into tools directory
  objtool: Fix false positive warnings for functions with multiple switch statements
  objtool: Rename some variables and functions
  objtool: Remove superflous INIT_LIST_HEAD
  objtool: Add helper macros for traversing instructions
  objtool: Fix false positive warnings related to sibling calls
  objtool: Compile with debugging symbols
  objtool: Detect infinite recursion
  objtool: Prevent infinite recursion in noreturn detection
  objtool: Detect and warn if libelf is missing and don't break the build
  tools: Support relative directory path for 'O='
  objtool: Support CROSS_COMPILE
  x86/asm/decoder: Use explicitly signed chars
  objtool: Enable stack metadata validation on 64-bit x86
  objtool: Add CONFIG_STACK_VALIDATION option
  objtool: Add tool to perform compile-time stack metadata validation
  x86/kprobes: Mark kretprobe_trampoline() stack frame as non-standard
  sched: Always inline context_switch()
  ...
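Nearly every hunk below brackets an assembly function between FRAME_BEGIN and FRAME_END from the new <asm/frame.h>. As a rough sketch of what those macros do when CONFIG_FRAME_POINTER=y - paraphrased from the header this series adds, not the verbatim upstream definitions:

    /* Sketch of <asm/frame.h> semantics, assuming CONFIG_FRAME_POINTER=y. */
    #ifdef CONFIG_FRAME_POINTER
    /* Open a standard stack frame: save the caller's frame pointer,
     * then point the frame pointer at the current stack top. */
    # define FRAME_BEGIN			\
    	push %_ASM_BP;			\
    	_ASM_MOV %_ASM_SP, %_ASM_BP
    # define FRAME_END	pop %_ASM_BP
    /* The pushed frame pointer shifts stack-based arguments by one
     * word: 4 bytes on 32-bit, 8 bytes on 64-bit. */
    # define FRAME_OFFSET	__ASM_SEL(4, 8)
    #else	/* !CONFIG_FRAME_POINTER: the annotations compile away */
    # define FRAME_BEGIN
    # define FRAME_END
    # define FRAME_OFFSET	0
    #endif

This is also why the 32-bit aesni paths below rewrite their argument loads as (FRAME_OFFSET+n)(%esp): once %ebp has been pushed, every stack argument sits one word further from %esp.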
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                                    1
-rw-r--r--  arch/x86/boot/Makefile                              3
-rw-r--r--  arch/x86/boot/compressed/Makefile                   3
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S                  75
-rw-r--r--  arch/x86/crypto/camellia-aesni-avx-asm_64.S        15
-rw-r--r--  arch/x86/crypto/camellia-aesni-avx2-asm_64.S       15
-rw-r--r--  arch/x86/crypto/cast5-avx-x86_64-asm_64.S           9
-rw-r--r--  arch/x86/crypto/cast6-avx-x86_64-asm_64.S          13
-rw-r--r--  arch/x86/crypto/crc32c-pcl-intel-asm_64.S           8
-rw-r--r--  arch/x86/crypto/ghash-clmulni-intel_asm.S           5
-rw-r--r--  arch/x86/crypto/serpent-avx-x86_64-asm_64.S        13
-rw-r--r--  arch/x86/crypto/serpent-avx2-asm_64.S              13
-rw-r--r--  arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S    35
-rw-r--r--  arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S   36
-rw-r--r--  arch/x86/crypto/twofish-avx-x86_64-asm_64.S        13
-rw-r--r--  arch/x86/entry/Makefile                             4
-rw-r--r--  arch/x86/entry/thunk_64.S                           4
-rw-r--r--  arch/x86/entry/vdso/Makefile                        6
-rw-r--r--  arch/x86/include/asm/paravirt.h                     9
-rw-r--r--  arch/x86/include/asm/paravirt_types.h              18
-rw-r--r--  arch/x86/include/asm/preempt.h                     13
-rw-r--r--  arch/x86/include/asm/qspinlock_paravirt.h           4
-rw-r--r--  arch/x86/include/asm/uaccess.h                      5
-rw-r--r--  arch/x86/include/asm/xen/hypercall.h                5
-rw-r--r--  arch/x86/kernel/Makefile                           11
-rw-r--r--  arch/x86/kernel/acpi/wakeup_64.S                    3
-rw-r--r--  arch/x86/kernel/cpu/amd.c                           5
-rw-r--r--  arch/x86/kernel/kprobes/core.c                     59
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S                       5
-rw-r--r--  arch/x86/kvm/emulate.c                             31
-rw-r--r--  arch/x86/kvm/vmx.c                                  4
-rw-r--r--  arch/x86/lib/insn.c                                 6
-rw-r--r--  arch/x86/lib/rwsem.S                               11
-rw-r--r--  arch/x86/net/bpf_jit.S                             48
-rw-r--r--  arch/x86/platform/efi/Makefile                      2
-rw-r--r--  arch/x86/platform/efi/efi_stub_64.S                 3
-rw-r--r--  arch/x86/power/hibernate_asm_64.S                   7
-rw-r--r--  arch/x86/purgatory/Makefile                         2
-rw-r--r--  arch/x86/realmode/Makefile                          4
-rw-r--r--  arch/x86/realmode/rm/Makefile                       3
-rw-r--r--  arch/x86/xen/enlighten.c                            3
-rw-r--r--  arch/x86/xen/xen-asm.S                             10
-rw-r--r--  arch/x86/xen/xen-asm_64.S                           1
43 files changed, 358 insertions, 185 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3c74b549ea9a..d07cca6ad37b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -155,6 +155,7 @@ config X86
select VIRT_TO_BUS
select X86_DEV_DMA_OPS if X86_64
select X86_FEATURE_NAMES if PROC_FS
+ select HAVE_STACK_VALIDATION if X86_64
config INSTRUCTION_DECODER
def_bool y
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index bbe1a62efc02..0bf6749522d9 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -9,7 +9,8 @@
# Changed by many, many contributors over the years.
#
-KASAN_SANITIZE := n
+KASAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
# If you want to preset the SVGA mode, uncomment the next line and
# set SVGA_MODE to whatever number you want.
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index f9ce75d80101..5e1d26e09407 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -16,7 +16,8 @@
# (see scripts/Makefile.lib size_append)
# compressed vmlinux.bin.all + u32 size of vmlinux.bin.all
-KASAN_SANITIZE := n
+KASAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 6bd2c6c95373..383a6f84a060 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -31,6 +31,7 @@
#include <linux/linkage.h>
#include <asm/inst.h>
+#include <asm/frame.h>
/*
* The following macros are used to move an (un)aligned 16 byte value to/from
@@ -1800,11 +1801,12 @@ ENDPROC(_key_expansion_256b)
* unsigned int key_len)
*/
ENTRY(aesni_set_key)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
- movl 8(%esp), KEYP # ctx
- movl 12(%esp), UKEYP # in_key
- movl 16(%esp), %edx # key_len
+ movl (FRAME_OFFSET+8)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+12)(%esp), UKEYP # in_key
+ movl (FRAME_OFFSET+16)(%esp), %edx # key_len
#endif
movups (UKEYP), %xmm0 # user key (first 16 bytes)
movaps %xmm0, (KEYP)
@@ -1905,6 +1907,7 @@ ENTRY(aesni_set_key)
#ifndef __x86_64__
popl KEYP
#endif
+ FRAME_END
ret
ENDPROC(aesni_set_key)
@@ -1912,12 +1915,13 @@ ENDPROC(aesni_set_key)
* void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
*/
ENTRY(aesni_enc)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
pushl KLEN
- movl 12(%esp), KEYP
- movl 16(%esp), OUTP
- movl 20(%esp), INP
+ movl (FRAME_OFFSET+12)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+16)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+20)(%esp), INP # src
#endif
movl 480(KEYP), KLEN # key length
movups (INP), STATE # input
@@ -1927,6 +1931,7 @@ ENTRY(aesni_enc)
popl KLEN
popl KEYP
#endif
+ FRAME_END
ret
ENDPROC(aesni_enc)
@@ -2101,12 +2106,13 @@ ENDPROC(_aesni_enc4)
* void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
*/
ENTRY(aesni_dec)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
pushl KLEN
- movl 12(%esp), KEYP
- movl 16(%esp), OUTP
- movl 20(%esp), INP
+ movl (FRAME_OFFSET+12)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+16)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+20)(%esp), INP # src
#endif
mov 480(KEYP), KLEN # key length
add $240, KEYP
@@ -2117,6 +2123,7 @@ ENTRY(aesni_dec)
popl KLEN
popl KEYP
#endif
+ FRAME_END
ret
ENDPROC(aesni_dec)
@@ -2292,14 +2299,15 @@ ENDPROC(_aesni_dec4)
* size_t len)
*/
ENTRY(aesni_ecb_enc)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl LEN
pushl KEYP
pushl KLEN
- movl 16(%esp), KEYP
- movl 20(%esp), OUTP
- movl 24(%esp), INP
- movl 28(%esp), LEN
+ movl (FRAME_OFFSET+16)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+20)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+24)(%esp), INP # src
+ movl (FRAME_OFFSET+28)(%esp), LEN # len
#endif
test LEN, LEN # check length
jz .Lecb_enc_ret
@@ -2342,6 +2350,7 @@ ENTRY(aesni_ecb_enc)
popl KEYP
popl LEN
#endif
+ FRAME_END
ret
ENDPROC(aesni_ecb_enc)
@@ -2350,14 +2359,15 @@ ENDPROC(aesni_ecb_enc)
* size_t len);
*/
ENTRY(aesni_ecb_dec)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl LEN
pushl KEYP
pushl KLEN
- movl 16(%esp), KEYP
- movl 20(%esp), OUTP
- movl 24(%esp), INP
- movl 28(%esp), LEN
+ movl (FRAME_OFFSET+16)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+20)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+24)(%esp), INP # src
+ movl (FRAME_OFFSET+28)(%esp), LEN # len
#endif
test LEN, LEN
jz .Lecb_dec_ret
@@ -2401,6 +2411,7 @@ ENTRY(aesni_ecb_dec)
popl KEYP
popl LEN
#endif
+ FRAME_END
ret
ENDPROC(aesni_ecb_dec)
@@ -2409,16 +2420,17 @@ ENDPROC(aesni_ecb_dec)
* size_t len, u8 *iv)
*/
ENTRY(aesni_cbc_enc)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl IVP
pushl LEN
pushl KEYP
pushl KLEN
- movl 20(%esp), KEYP
- movl 24(%esp), OUTP
- movl 28(%esp), INP
- movl 32(%esp), LEN
- movl 36(%esp), IVP
+ movl (FRAME_OFFSET+20)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+24)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+28)(%esp), INP # src
+ movl (FRAME_OFFSET+32)(%esp), LEN # len
+ movl (FRAME_OFFSET+36)(%esp), IVP # iv
#endif
cmp $16, LEN
jb .Lcbc_enc_ret
@@ -2443,6 +2455,7 @@ ENTRY(aesni_cbc_enc)
popl LEN
popl IVP
#endif
+ FRAME_END
ret
ENDPROC(aesni_cbc_enc)
@@ -2451,16 +2464,17 @@ ENDPROC(aesni_cbc_enc)
* size_t len, u8 *iv)
*/
ENTRY(aesni_cbc_dec)
+ FRAME_BEGIN
#ifndef __x86_64__
pushl IVP
pushl LEN
pushl KEYP
pushl KLEN
- movl 20(%esp), KEYP
- movl 24(%esp), OUTP
- movl 28(%esp), INP
- movl 32(%esp), LEN
- movl 36(%esp), IVP
+ movl (FRAME_OFFSET+20)(%esp), KEYP # ctx
+ movl (FRAME_OFFSET+24)(%esp), OUTP # dst
+ movl (FRAME_OFFSET+28)(%esp), INP # src
+ movl (FRAME_OFFSET+32)(%esp), LEN # len
+ movl (FRAME_OFFSET+36)(%esp), IVP # iv
#endif
cmp $16, LEN
jb .Lcbc_dec_just_ret
@@ -2534,13 +2548,16 @@ ENTRY(aesni_cbc_dec)
popl LEN
popl IVP
#endif
+ FRAME_END
ret
ENDPROC(aesni_cbc_dec)
#ifdef __x86_64__
+.pushsection .rodata
.align 16
.Lbswap_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+.popsection
/*
* _aesni_inc_init: internal ABI
@@ -2598,6 +2615,7 @@ ENDPROC(_aesni_inc)
* size_t len, u8 *iv)
*/
ENTRY(aesni_ctr_enc)
+ FRAME_BEGIN
cmp $16, LEN
jb .Lctr_enc_just_ret
mov 480(KEYP), KLEN
@@ -2651,6 +2669,7 @@ ENTRY(aesni_ctr_enc)
.Lctr_enc_ret:
movups IV, (IVP)
.Lctr_enc_just_ret:
+ FRAME_END
ret
ENDPROC(aesni_ctr_enc)
@@ -2677,6 +2696,7 @@ ENDPROC(aesni_ctr_enc)
* bool enc, u8 *iv)
*/
ENTRY(aesni_xts_crypt8)
+ FRAME_BEGIN
cmpb $0, %cl
movl $0, %ecx
movl $240, %r10d
@@ -2777,6 +2797,7 @@ ENTRY(aesni_xts_crypt8)
pxor INC, STATE4
movdqu STATE4, 0x70(OUTP)
+ FRAME_END
ret
ENDPROC(aesni_xts_crypt8)
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index ce71f9212409..aa9e8bd163f6 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -16,6 +16,7 @@
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
#define CAMELLIA_TABLE_BYTE_LEN 272
@@ -726,6 +727,7 @@ __camellia_enc_blk16:
* %xmm0..%xmm15: 16 encrypted blocks, order swapped:
* 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
*/
+ FRAME_BEGIN
leaq 8 * 16(%rax), %rcx;
@@ -780,6 +782,7 @@ __camellia_enc_blk16:
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
%xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
+ FRAME_END
ret;
.align 8
@@ -812,6 +815,7 @@ __camellia_dec_blk16:
* %xmm0..%xmm15: 16 plaintext blocks, order swapped:
* 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
*/
+ FRAME_BEGIN
leaq 8 * 16(%rax), %rcx;
@@ -865,6 +869,7 @@ __camellia_dec_blk16:
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
%xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
+ FRAME_END
ret;
.align 8
@@ -890,6 +895,7 @@ ENTRY(camellia_ecb_enc_16way)
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
*/
+ FRAME_BEGIN
inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
@@ -904,6 +910,7 @@ ENTRY(camellia_ecb_enc_16way)
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
+ FRAME_END
ret;
ENDPROC(camellia_ecb_enc_16way)
@@ -913,6 +920,7 @@ ENTRY(camellia_ecb_dec_16way)
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
*/
+ FRAME_BEGIN
cmpl $16, key_length(CTX);
movl $32, %r8d;
@@ -932,6 +940,7 @@ ENTRY(camellia_ecb_dec_16way)
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
+ FRAME_END
ret;
ENDPROC(camellia_ecb_dec_16way)
@@ -941,6 +950,7 @@ ENTRY(camellia_cbc_dec_16way)
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
*/
+ FRAME_BEGIN
cmpl $16, key_length(CTX);
movl $32, %r8d;
@@ -981,6 +991,7 @@ ENTRY(camellia_cbc_dec_16way)
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
+ FRAME_END
ret;
ENDPROC(camellia_cbc_dec_16way)
@@ -997,6 +1008,7 @@ ENTRY(camellia_ctr_16way)
* %rdx: src (16 blocks)
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN
subq $(16 * 16), %rsp;
movq %rsp, %rax;
@@ -1092,6 +1104,7 @@ ENTRY(camellia_ctr_16way)
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
+ FRAME_END
ret;
ENDPROC(camellia_ctr_16way)
@@ -1112,6 +1125,7 @@ camellia_xts_crypt_16way:
* %r8: index for input whitening key
* %r9: pointer to __camellia_enc_blk16 or __camellia_dec_blk16
*/
+ FRAME_BEGIN
subq $(16 * 16), %rsp;
movq %rsp, %rax;
@@ -1234,6 +1248,7 @@ camellia_xts_crypt_16way:
%xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
+ FRAME_END
ret;
ENDPROC(camellia_xts_crypt_16way)
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 0e0b8863a34b..16186c18656d 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
#define CAMELLIA_TABLE_BYTE_LEN 272
@@ -766,6 +767,7 @@ __camellia_enc_blk32:
* %ymm0..%ymm15: 32 encrypted blocks, order swapped:
* 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
*/
+ FRAME_BEGIN
leaq 8 * 32(%rax), %rcx;
@@ -820,6 +822,7 @@ __camellia_enc_blk32:
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
%ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
+ FRAME_END
ret;
.align 8
@@ -852,6 +855,7 @@ __camellia_dec_blk32:
* %ymm0..%ymm15: 16 plaintext blocks, order swapped:
* 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
*/
+ FRAME_BEGIN
leaq 8 * 32(%rax), %rcx;
@@ -905,6 +909,7 @@ __camellia_dec_blk32:
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
%ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
+ FRAME_END
ret;
.align 8
@@ -930,6 +935,7 @@ ENTRY(camellia_ecb_enc_32way)
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
*/
+ FRAME_BEGIN
vzeroupper;
@@ -948,6 +954,7 @@ ENTRY(camellia_ecb_enc_32way)
vzeroupper;
+ FRAME_END
ret;
ENDPROC(camellia_ecb_enc_32way)
@@ -957,6 +964,7 @@ ENTRY(camellia_ecb_dec_32way)
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
*/
+ FRAME_BEGIN
vzeroupper;
@@ -980,6 +988,7 @@ ENTRY(camellia_ecb_dec_32way)
vzeroupper;
+ FRAME_END
ret;
ENDPROC(camellia_ecb_dec_32way)
@@ -989,6 +998,7 @@ ENTRY(camellia_cbc_dec_32way)
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
*/
+ FRAME_BEGIN
vzeroupper;
@@ -1046,6 +1056,7 @@ ENTRY(camellia_cbc_dec_32way)
vzeroupper;
+ FRAME_END
ret;
ENDPROC(camellia_cbc_dec_32way)
@@ -1070,6 +1081,7 @@ ENTRY(camellia_ctr_32way)
* %rdx: src (32 blocks)
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN
vzeroupper;
@@ -1184,6 +1196,7 @@ ENTRY(camellia_ctr_32way)
vzeroupper;
+ FRAME_END
ret;
ENDPROC(camellia_ctr_32way)
@@ -1216,6 +1229,7 @@ camellia_xts_crypt_32way:
* %r8: index for input whitening key
* %r9: pointer to __camellia_enc_blk32 or __camellia_dec_blk32
*/
+ FRAME_BEGIN
vzeroupper;
@@ -1349,6 +1363,7 @@ camellia_xts_crypt_32way:
vzeroupper;
+ FRAME_END
ret;
ENDPROC(camellia_xts_crypt_32way)
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
index c35fd5d6ecd2..14fa1966bf01 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
.file "cast5-avx-x86_64-asm_64.S"
@@ -365,6 +366,7 @@ ENTRY(cast5_ecb_enc_16way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
movq %rsi, %r11;
@@ -388,6 +390,7 @@ ENTRY(cast5_ecb_enc_16way)
vmovdqu RR4, (6*4*4)(%r11);
vmovdqu RL4, (7*4*4)(%r11);
+ FRAME_END
ret;
ENDPROC(cast5_ecb_enc_16way)
@@ -398,6 +401,7 @@ ENTRY(cast5_ecb_dec_16way)
* %rdx: src
*/
+ FRAME_BEGIN
movq %rsi, %r11;
vmovdqu (0*4*4)(%rdx), RL1;
@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
vmovdqu RR4, (6*4*4)(%r11);
vmovdqu RL4, (7*4*4)(%r11);
+ FRAME_END
ret;
ENDPROC(cast5_ecb_dec_16way)
@@ -429,6 +434,7 @@ ENTRY(cast5_cbc_dec_16way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
pushq %r12;
@@ -469,6 +475,7 @@ ENTRY(cast5_cbc_dec_16way)
popq %r12;
+ FRAME_END
ret;
ENDPROC(cast5_cbc_dec_16way)
@@ -479,6 +486,7 @@ ENTRY(cast5_ctr_16way)
* %rdx: src
* %rcx: iv (big endian, 64bit)
*/
+ FRAME_BEGIN
pushq %r12;
@@ -542,5 +550,6 @@ ENTRY(cast5_ctr_16way)
popq %r12;
+ FRAME_END
ret;
ENDPROC(cast5_ctr_16way)
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index e3531f833951..c419389889cd 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
#include "glue_helper-asm-avx.S"
.file "cast6-avx-x86_64-asm_64.S"
@@ -349,6 +350,7 @@ ENTRY(cast6_ecb_enc_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
movq %rsi, %r11;
@@ -358,6 +360,7 @@ ENTRY(cast6_ecb_enc_8way)
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ FRAME_END
ret;
ENDPROC(cast6_ecb_enc_8way)
@@ -367,6 +370,7 @@ ENTRY(cast6_ecb_dec_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
movq %rsi, %r11;
@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ FRAME_END
ret;
ENDPROC(cast6_ecb_dec_8way)
@@ -385,6 +390,7 @@ ENTRY(cast6_cbc_dec_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
pushq %r12;
@@ -399,6 +405,7 @@ ENTRY(cast6_cbc_dec_8way)
popq %r12;
+ FRAME_END
ret;
ENDPROC(cast6_cbc_dec_8way)
@@ -409,6 +416,7 @@ ENTRY(cast6_ctr_8way)
* %rdx: src
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN
pushq %r12;
@@ -424,6 +432,7 @@ ENTRY(cast6_ctr_8way)
popq %r12;
+ FRAME_END
ret;
ENDPROC(cast6_ctr_8way)
@@ -434,6 +443,7 @@ ENTRY(cast6_xts_enc_8way)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN
movq %rsi, %r11;
@@ -446,6 +456,7 @@ ENTRY(cast6_xts_enc_8way)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ FRAME_END
ret;
ENDPROC(cast6_xts_enc_8way)
@@ -456,6 +467,7 @@ ENTRY(cast6_xts_dec_8way)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN
movq %rsi, %r11;
@@ -468,5 +480,6 @@ ENTRY(cast6_xts_dec_8way)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ FRAME_END
ret;
ENDPROC(cast6_xts_dec_8way)
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 4fe27e074194..dc05f010ca9b 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -170,8 +170,8 @@ continue_block:
## branch into array
lea jump_table(%rip), bufp
movzxw (bufp, %rax, 2), len
- offset=crc_array-jump_table
- lea offset(bufp, len, 1), bufp
+ lea crc_array(%rip), bufp
+ lea (bufp, len, 1), bufp
jmp *bufp
################################################################
@@ -310,7 +310,9 @@ do_return:
popq %rdi
popq %rbx
ret
+ENDPROC(crc_pcl)
+.section .rodata, "a", %progbits
################################################################
## jump table Table is 129 entries x 2 bytes each
################################################################
@@ -324,13 +326,11 @@ JMPTBL_ENTRY %i
i=i+1
.endr
-ENDPROC(crc_pcl)
################################################################
## PCLMULQDQ tables
## Table is 128 entries x 2 words (8 bytes) each
################################################################
-.section .rodata, "a", %progbits
.align 8
K_table:
.long 0x493c7d27, 0x00000001
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index 5d1e0075ac24..eed55c8cca4f 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -18,6 +18,7 @@
#include <linux/linkage.h>
#include <asm/inst.h>
+#include <asm/frame.h>
.data
@@ -94,6 +95,7 @@ ENDPROC(__clmul_gf128mul_ble)
/* void clmul_ghash_mul(char *dst, const u128 *shash) */
ENTRY(clmul_ghash_mul)
+ FRAME_BEGIN
movups (%rdi), DATA
movups (%rsi), SHASH
movaps .Lbswap_mask, BSWAP
@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
call __clmul_gf128mul_ble
PSHUFB_XMM BSWAP DATA
movups DATA, (%rdi)
+ FRAME_END
ret
ENDPROC(clmul_ghash_mul)
@@ -109,6 +112,7 @@ ENDPROC(clmul_ghash_mul)
* const u128 *shash);
*/
ENTRY(clmul_ghash_update)
+ FRAME_BEGIN
cmp $16, %rdx
jb .Lupdate_just_ret # check length
movaps .Lbswap_mask, BSWAP
@@ -128,5 +132,6 @@ ENTRY(clmul_ghash_update)
PSHUFB_XMM BSWAP DATA
movups DATA, (%rdi)
.Lupdate_just_ret:
+ FRAME_END
ret
ENDPROC(clmul_ghash_update)
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index 2f202f49872b..8be571808342 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
#include "glue_helper-asm-avx.S"
.file "serpent-avx-x86_64-asm_64.S"
@@ -681,6 +682,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
@@ -688,6 +690,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ FRAME_END
ret;
ENDPROC(serpent_ecb_enc_8way_avx)
@@ -697,6 +700,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+ FRAME_END
ret;
ENDPROC(serpent_ecb_dec_8way_avx)
@@ -713,6 +718,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
@@ -720,6 +726,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+ FRAME_END
ret;
ENDPROC(serpent_cbc_dec_8way_avx)
@@ -730,6 +737,7 @@ ENTRY(serpent_ctr_8way_avx)
* %rdx: src
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN
load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
RD2, RK0, RK1, RK2);
@@ -738,6 +746,7 @@ ENTRY(serpent_ctr_8way_avx)
store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ FRAME_END
ret;
ENDPROC(serpent_ctr_8way_avx)
@@ -748,6 +757,7 @@ ENTRY(serpent_xts_enc_8way_avx)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN
/* regs <= src, dst <= IVs, regs <= regs xor IVs */
load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
@@ -758,6 +768,7 @@ ENTRY(serpent_xts_enc_8way_avx)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ FRAME_END
ret;
ENDPROC(serpent_xts_enc_8way_avx)
@@ -768,6 +779,7 @@ ENTRY(serpent_xts_dec_8way_avx)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN
/* regs <= src, dst <= IVs, regs <= regs xor IVs */
load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
@@ -778,5 +790,6 @@ ENTRY(serpent_xts_dec_8way_avx)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+ FRAME_END
ret;
ENDPROC(serpent_xts_dec_8way_avx)
diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
index b222085cccac..97c48add33ed 100644
--- a/arch/x86/crypto/serpent-avx2-asm_64.S
+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
@@ -15,6 +15,7 @@
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
#include "glue_helper-asm-avx2.S"
.file "serpent-avx2-asm_64.S"
@@ -673,6 +674,7 @@ ENTRY(serpent_ecb_enc_16way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
vzeroupper;
@@ -684,6 +686,7 @@ ENTRY(serpent_ecb_enc_16way)
vzeroupper;
+ FRAME_END
ret;
ENDPROC(serpent_ecb_enc_16way)
@@ -693,6 +696,7 @@ ENTRY(serpent_ecb_dec_16way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
vzeroupper;
@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
vzeroupper;
+ FRAME_END
ret;
ENDPROC(serpent_ecb_dec_16way)
@@ -713,6 +718,7 @@ ENTRY(serpent_cbc_dec_16way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
vzeroupper;
@@ -725,6 +731,7 @@ ENTRY(serpent_cbc_dec_16way)
vzeroupper;
+ FRAME_END
ret;
ENDPROC(serpent_cbc_dec_16way)
@@ -735,6 +742,7 @@ ENTRY(serpent_ctr_16way)
* %rdx: src (16 blocks)
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN
vzeroupper;
@@ -748,6 +756,7 @@ ENTRY(serpent_ctr_16way)
vzeroupper;
+ FRAME_END
ret;
ENDPROC(serpent_ctr_16way)
@@ -758,6 +767,7 @@ ENTRY(serpent_xts_enc_16way)
* %rdx: src (16 blocks)
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN
vzeroupper;
@@ -772,6 +782,7 @@ ENTRY(serpent_xts_enc_16way)
vzeroupper;
+ FRAME_END
ret;
ENDPROC(serpent_xts_enc_16way)
@@ -782,6 +793,7 @@ ENTRY(serpent_xts_dec_16way)
* %rdx: src (16 blocks)
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN
vzeroupper;
@@ -796,5 +808,6 @@ ENTRY(serpent_xts_dec_16way)
vzeroupper;
+ FRAME_END
ret;
ENDPROC(serpent_xts_dec_16way)
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
index 85c4e1cf7172..96df6a39d7e2 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
@@ -52,6 +52,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
#include "sha1_mb_mgr_datastruct.S"
@@ -86,16 +87,6 @@
#define extra_blocks %arg2
#define p %arg2
-
-# STACK_SPACE needs to be an odd multiple of 8
-_XMM_SAVE_SIZE = 10*16
-_GPR_SAVE_SIZE = 8*8
-_ALIGN_SIZE = 8
-
-_XMM_SAVE = 0
-_GPR_SAVE = _XMM_SAVE + _XMM_SAVE_SIZE
-STACK_SPACE = _GPR_SAVE + _GPR_SAVE_SIZE + _ALIGN_SIZE
-
.macro LABEL prefix n
\prefix\n\():
.endm
@@ -113,16 +104,8 @@ offset = \_offset
# JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
# arg 1 : rcx : state
ENTRY(sha1_mb_mgr_flush_avx2)
- mov %rsp, %r10
- sub $STACK_SPACE, %rsp
- and $~31, %rsp
- mov %rbx, _GPR_SAVE(%rsp)
- mov %r10, _GPR_SAVE+8*1(%rsp) #save rsp
- mov %rbp, _GPR_SAVE+8*3(%rsp)
- mov %r12, _GPR_SAVE+8*4(%rsp)
- mov %r13, _GPR_SAVE+8*5(%rsp)
- mov %r14, _GPR_SAVE+8*6(%rsp)
- mov %r15, _GPR_SAVE+8*7(%rsp)
+ FRAME_BEGIN
+ push %rbx
# If bit (32+3) is set, then all lanes are empty
mov _unused_lanes(state), unused_lanes
@@ -230,16 +213,8 @@ len_is_0:
mov tmp2_w, offset(job_rax)
return:
-
- mov _GPR_SAVE(%rsp), %rbx
- mov _GPR_SAVE+8*1(%rsp), %r10 #saved rsp
- mov _GPR_SAVE+8*3(%rsp), %rbp
- mov _GPR_SAVE+8*4(%rsp), %r12
- mov _GPR_SAVE+8*5(%rsp), %r13
- mov _GPR_SAVE+8*6(%rsp), %r14
- mov _GPR_SAVE+8*7(%rsp), %r15
- mov %r10, %rsp
-
+ pop %rbx
+ FRAME_END
ret
return_null:
diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S
index c420d89b175f..63a0d9c8e31f 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S
+++ b/arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S
@@ -53,6 +53,7 @@
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
#include "sha1_mb_mgr_datastruct.S"
@@ -86,33 +87,21 @@ job_rax = %rax
len = %rax
DWORD_len = %eax
-lane = %rbp
-tmp3 = %rbp
+lane = %r12
+tmp3 = %r12
tmp = %r9
DWORD_tmp = %r9d
lane_data = %r10
-# STACK_SPACE needs to be an odd multiple of 8
-STACK_SPACE = 8*8 + 16*10 + 8
-
# JOB* submit_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job)
# arg 1 : rcx : state
# arg 2 : rdx : job
ENTRY(sha1_mb_mgr_submit_avx2)
-
- mov %rsp, %r10
- sub $STACK_SPACE, %rsp
- and $~31, %rsp
-
- mov %rbx, (%rsp)
- mov %r10, 8*2(%rsp) #save old rsp
- mov %rbp, 8*3(%rsp)
- mov %r12, 8*4(%rsp)
- mov %r13, 8*5(%rsp)
- mov %r14, 8*6(%rsp)
- mov %r15, 8*7(%rsp)
+ FRAME_BEGIN
+ push %rbx
+ push %r12
mov _unused_lanes(state), unused_lanes
mov unused_lanes, lane
@@ -203,16 +192,9 @@ len_is_0:
movl DWORD_tmp, _result_digest+1*16(job_rax)
return:
-
- mov (%rsp), %rbx
- mov 8*2(%rsp), %r10 #save old rsp
- mov 8*3(%rsp), %rbp
- mov 8*4(%rsp), %r12
- mov 8*5(%rsp), %r13
- mov 8*6(%rsp), %r14
- mov 8*7(%rsp), %r15
- mov %r10, %rsp
-
+ pop %r12
+ pop %rbx
+ FRAME_END
ret
return_null:
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
index 05058134c443..dc66273e610d 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -24,6 +24,7 @@
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
#include "glue_helper-asm-avx.S"
.file "twofish-avx-x86_64-asm_64.S"
@@ -333,6 +334,7 @@ ENTRY(twofish_ecb_enc_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
movq %rsi, %r11;
@@ -342,6 +344,7 @@ ENTRY(twofish_ecb_enc_8way)
store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+ FRAME_END
ret;
ENDPROC(twofish_ecb_enc_8way)
@@ -351,6 +354,7 @@ ENTRY(twofish_ecb_dec_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
movq %rsi, %r11;
@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ FRAME_END
ret;
ENDPROC(twofish_ecb_dec_8way)
@@ -369,6 +374,7 @@ ENTRY(twofish_cbc_dec_8way)
* %rsi: dst
* %rdx: src
*/
+ FRAME_BEGIN
pushq %r12;
@@ -383,6 +389,7 @@ ENTRY(twofish_cbc_dec_8way)
popq %r12;
+ FRAME_END
ret;
ENDPROC(twofish_cbc_dec_8way)
@@ -393,6 +400,7 @@ ENTRY(twofish_ctr_8way)
* %rdx: src
* %rcx: iv (little endian, 128bit)
*/
+ FRAME_BEGIN
pushq %r12;
@@ -408,6 +416,7 @@ ENTRY(twofish_ctr_8way)
popq %r12;
+ FRAME_END
ret;
ENDPROC(twofish_ctr_8way)
@@ -418,6 +427,7 @@ ENTRY(twofish_xts_enc_8way)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN
movq %rsi, %r11;
@@ -430,6 +440,7 @@ ENTRY(twofish_xts_enc_8way)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+ FRAME_END
ret;
ENDPROC(twofish_xts_enc_8way)
@@ -440,6 +451,7 @@ ENTRY(twofish_xts_dec_8way)
* %rdx: src
* %rcx: iv (t ⊕ αⁿ ∈ GF(2¹²⁸))
*/
+ FRAME_BEGIN
movq %rsi, %r11;
@@ -452,5 +464,6 @@ ENTRY(twofish_xts_dec_8way)
/* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ FRAME_END
ret;
ENDPROC(twofish_xts_dec_8way)
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index bd55dedd7614..fe91c25092da 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -1,6 +1,10 @@
#
# Makefile for the x86 low level entry code
#
+
+OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
+
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index efb2b932b748..98df1fa8825c 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -8,11 +8,14 @@
#include <linux/linkage.h>
#include "calling.h"
#include <asm/asm.h>
+#include <asm/frame.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
.globl \name
+ .type \name, @function
\name:
+ FRAME_BEGIN
/* this one pushes 9 elems, the next one would be %rIP */
pushq %rdi
@@ -62,6 +65,7 @@ restore:
popq %rdx
popq %rsi
popq %rdi
+ FRAME_END
ret
_ASM_NOKPROBE(restore)
#endif
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index c854541d93ff..f9fb859c98b9 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -3,8 +3,9 @@
#
KBUILD_CFLAGS += $(DISABLE_LTO)
-KASAN_SANITIZE := n
-UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
VDSO64-$(CONFIG_X86_64) := y
VDSOX32-$(CONFIG_X86_X32_ABI) := y
@@ -16,6 +17,7 @@ vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
# files to link into kernel
obj-y += vma.o
+OBJECT_FILES_NON_STANDARD_vma.o := n
# vDSO images to build
vdso_img-$(VDSO64-y) += 64
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index f6192502149e..601f1b8f9961 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -13,6 +13,7 @@
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
+#include <asm/frame.h>
static inline int paravirt_enabled(void)
{
@@ -756,15 +757,19 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
* call. The return value in rax/eax will not be saved, even for void
* functions.
*/
+#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func) \
extern typeof(func) __raw_callee_save_##func; \
\
asm(".pushsection .text;" \
- ".globl __raw_callee_save_" #func " ; " \
- "__raw_callee_save_" #func ": " \
+ ".globl " PV_THUNK_NAME(func) ";" \
+ ".type " PV_THUNK_NAME(func) ", @function;" \
+ PV_THUNK_NAME(func) ":" \
+ FRAME_BEGIN \
PV_SAVE_ALL_CALLER_REGS \
"call " #func ";" \
PV_RESTORE_ALL_CALLER_REGS \
+ FRAME_END \
"ret;" \
".popsection")
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 77db5616a473..e8c2326478c8 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -466,8 +466,9 @@ int paravirt_disable_iospace(void);
* makes sure the incoming and outgoing types are always correct.
*/
#ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS \
- unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
+#define PVOP_VCALL_ARGS \
+ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx; \
+ register void *__sp asm("esp")
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
@@ -485,9 +486,10 @@ int paravirt_disable_iospace(void);
#define VEXTRA_CLOBBERS
#else /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
-#define PVOP_VCALL_ARGS \
- unsigned long __edi = __edi, __esi = __esi, \
- __edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_VCALL_ARGS \
+ unsigned long __edi = __edi, __esi = __esi, \
+ __edx = __edx, __ecx = __ecx, __eax = __eax; \
+ register void *__sp asm("rsp")
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
@@ -526,7 +528,7 @@ int paravirt_disable_iospace(void);
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : call_clbr \
+ : call_clbr, "+r" (__sp) \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
@@ -536,7 +538,7 @@ int paravirt_disable_iospace(void);
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : call_clbr \
+ : call_clbr, "+r" (__sp) \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
@@ -563,7 +565,7 @@ int paravirt_disable_iospace(void);
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : call_clbr \
+ : call_clbr, "+r" (__sp) \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 01bcde84d3e4..d397deb58146 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -94,10 +94,19 @@ static __always_inline bool should_resched(int preempt_offset)
#ifdef CONFIG_PREEMPT
extern asmlinkage void ___preempt_schedule(void);
-# define __preempt_schedule() asm ("call ___preempt_schedule")
+# define __preempt_schedule() \
+({ \
+ register void *__sp asm(_ASM_SP); \
+ asm volatile ("call ___preempt_schedule" : "+r"(__sp)); \
+})
+
extern asmlinkage void preempt_schedule(void);
extern asmlinkage void ___preempt_schedule_notrace(void);
-# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace")
+# define __preempt_schedule_notrace() \
+({ \
+ register void *__sp asm(_ASM_SP); \
+ asm volatile ("call ___preempt_schedule_notrace" : "+r"(__sp)); \
+})
extern asmlinkage void preempt_schedule_notrace(void);
#endif
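The preempt.h change above, together with the paravirt changes before it and the qspinlock, uaccess, hypercall and KVM changes below, all use the same idiom: binding a dummy variable to the stack pointer and listing it as an in/out operand ("+r") tells gcc that the inline asm's call touches the stack, so gcc establishes a frame pointer before emitting the call. A minimal standalone sketch - my_callee is a hypothetical function, and _ASM_SP is spelled out as "rsp" for brevity:

    /* The kernel pairs this trick with register-preserving callees,
     * e.g. the thunks in arch/x86/entry/thunk_64.S. */
    extern void my_callee(void);

    static inline void call_with_frame(void)
    {
    	/* Bind a variable to the stack pointer register. */
    	register void *__sp asm("rsp");

    	/*
    	 * Marking __sp as read/written forces gcc to set up a frame
    	 * pointer in the enclosing function before this call, so a
    	 * backtrace taken inside my_callee() stays intact.
    	 */
    	asm volatile("call my_callee" : "+r" (__sp));
    }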
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 9f92c180ed2f..9d55f9b6e167 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -36,8 +36,10 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
*/
asm (".pushsection .text;"
".globl " PV_UNLOCK ";"
+ ".type " PV_UNLOCK ", @function;"
".align 4,0x90;"
PV_UNLOCK ": "
+ FRAME_BEGIN
"push %rdx;"
"mov $0x1,%eax;"
"xor %edx,%edx;"
@@ -45,6 +47,7 @@ asm (".pushsection .text;"
"cmp $0x1,%al;"
"jne .slowpath;"
"pop %rdx;"
+ FRAME_END
"ret;"
".slowpath: "
"push %rsi;"
@@ -52,6 +55,7 @@ asm (".pushsection .text;"
"call " PV_UNLOCK_SLOWPATH ";"
"pop %rsi;"
"pop %rdx;"
+ FRAME_END
"ret;"
".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
".popsection");
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index c0f27d7ea7ff..88bff6dd23ad 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -179,10 +179,11 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
({ \
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
+ register void *__sp asm(_ASM_SP); \
__chk_user_ptr(ptr); \
might_fault(); \
- asm volatile("call __get_user_%P3" \
- : "=a" (__ret_gu), "=r" (__val_gu) \
+ asm volatile("call __get_user_%P4" \
+ : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp) \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 3bcdcc84259d..a12a047184ee 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -110,9 +110,10 @@ extern struct { char _entry[32]; } hypercall_page[];
register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
- register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
+ register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; \
+ register void *__sp asm(_ASM_SP);
-#define __HYPERCALL_0PARAM "=r" (__res)
+#define __HYPERCALL_0PARAM "=r" (__res), "+r" (__sp)
#define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index b1b78ffe01d0..d5fb0871aba3 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -16,9 +16,14 @@ CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif
-KASAN_SANITIZE_head$(BITS).o := n
-KASAN_SANITIZE_dumpstack.o := n
-KASAN_SANITIZE_dumpstack_$(BITS).o := n
+KASAN_SANITIZE_head$(BITS).o := n
+KASAN_SANITIZE_dumpstack.o := n
+KASAN_SANITIZE_dumpstack_$(BITS).o := n
+
+OBJECT_FILES_NON_STANDARD_head_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_mcount_$(BITS).o := y
+OBJECT_FILES_NON_STANDARD_test_nx.o := y
CFLAGS_irq.o := -I$(src)/../include/asm/trace
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8c35df468104..169963f471bb 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -5,6 +5,7 @@
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/asm-offsets.h>
+#include <asm/frame.h>
# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
@@ -39,6 +40,7 @@ bogus_64_magic:
jmp bogus_64_magic
ENTRY(do_suspend_lowlevel)
+ FRAME_BEGIN
subq $8, %rsp
xorl %eax, %eax
call save_processor_state
@@ -109,6 +111,7 @@ ENTRY(do_suspend_lowlevel)
xorl %eax, %eax
addq $8, %rsp
+ FRAME_END
jmp restore_processor_state
ENDPROC(do_suspend_lowlevel)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 97c59fd60702..5026a13356c4 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -75,7 +75,10 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
*/
extern __visible void vide(void);
-__asm__(".globl vide\n\t.align 4\nvide: ret");
+__asm__(".globl vide\n"
+ ".type vide, @function\n"
+ ".align 4\n"
+ "vide: ret\n");
static void init_amd_k5(struct cpuinfo_x86 *c)
{
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 0f05deeff5ce..ae703acb85c1 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -49,6 +49,7 @@
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
+#include <linux/frame.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
@@ -671,39 +672,39 @@ NOKPROBE_SYMBOL(kprobe_int3_handler);
* When a retprobed function returns, this code saves registers and
* calls trampoline_handler(), which calls the kretprobe's handler.
*/
-static void __used kretprobe_trampoline_holder(void)
-{
- asm volatile (
- ".global kretprobe_trampoline\n"
- "kretprobe_trampoline: \n"
+asm(
+ ".global kretprobe_trampoline\n"
+ ".type kretprobe_trampoline, @function\n"
+ "kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
- /* We don't bother saving the ss register */
- " pushq %rsp\n"
- " pushfq\n"
- SAVE_REGS_STRING
- " movq %rsp, %rdi\n"
- " call trampoline_handler\n"
- /* Replace saved sp with true return address. */
- " movq %rax, 152(%rsp)\n"
- RESTORE_REGS_STRING
- " popfq\n"
+ /* We don't bother saving the ss register */
+ " pushq %rsp\n"
+ " pushfq\n"
+ SAVE_REGS_STRING
+ " movq %rsp, %rdi\n"
+ " call trampoline_handler\n"
+ /* Replace saved sp with true return address. */
+ " movq %rax, 152(%rsp)\n"
+ RESTORE_REGS_STRING
+ " popfq\n"
#else
- " pushf\n"
- SAVE_REGS_STRING
- " movl %esp, %eax\n"
- " call trampoline_handler\n"
- /* Move flags to cs */
- " movl 56(%esp), %edx\n"
- " movl %edx, 52(%esp)\n"
- /* Replace saved flags with true return address. */
- " movl %eax, 56(%esp)\n"
- RESTORE_REGS_STRING
- " popf\n"
+ " pushf\n"
+ SAVE_REGS_STRING
+ " movl %esp, %eax\n"
+ " call trampoline_handler\n"
+ /* Move flags to cs */
+ " movl 56(%esp), %edx\n"
+ " movl %edx, 52(%esp)\n"
+ /* Replace saved flags with true return address. */
+ " movl %eax, 56(%esp)\n"
+ RESTORE_REGS_STRING
+ " popf\n"
#endif
- " ret\n");
-}
-NOKPROBE_SYMBOL(kretprobe_trampoline_holder);
+ " ret\n"
+ ".size kretprobe_trampoline, .-kretprobe_trampoline\n"
+);
NOKPROBE_SYMBOL(kretprobe_trampoline);
+STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
/*
* Called from kretprobe_trampoline
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 6bb070e54fda..73de2604d779 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -340,7 +340,10 @@ SECTIONS
/* Sections to be discarded */
DISCARDS
- /DISCARD/ : { *(.eh_frame) }
+ /DISCARD/ : {
+ *(.eh_frame)
+ *(__func_stack_frame_non_standard)
+ }
}
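The discarded __func_stack_frame_non_standard section is where STACK_FRAME_NON_STANDARD() annotations land (see kretprobe_trampoline above and xen_cpuid below): each use emits a pointer to the annotated function, objtool reads those pointers out of the .o file and skips validating the named functions, and the final link then throws the section away. An approximation of the macro from the new <linux/frame.h> - the upstream definition may differ in detail:

    #ifdef CONFIG_STACK_VALIDATION
    /* Record a pointer to 'func' in a section objtool consults,
     * marking its stack frame as intentionally non-standard. */
    #define STACK_FRAME_NON_STANDARD(func)				\
    	static void *__func_stack_frame_non_standard_##func	\
    		__used __attribute__((section(			\
    			"__func_stack_frame_non_standard")))	\
    		= func
    #else
    #define STACK_FRAME_NON_STANDARD(func)
    #endif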
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b9b09fec173b..0f6294376fbd 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -309,23 +309,29 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
-#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
+#define FOP_FUNC(name) \
+ ".align " __stringify(FASTOP_SIZE) " \n\t" \
+ ".type " name ", @function \n\t" \
+ name ":\n\t"
+
#define FOP_RET "ret \n\t"
#define FOP_START(op) \
extern void em_##op(struct fastop *fake); \
asm(".pushsection .text, \"ax\" \n\t" \
".global em_" #op " \n\t" \
- FOP_ALIGN \
- "em_" #op ": \n\t"
+ FOP_FUNC("em_" #op)
#define FOP_END \
".popsection")
-#define FOPNOP() FOP_ALIGN FOP_RET
+#define FOPNOP() \
+ FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
+ FOP_RET
#define FOP1E(op, dst) \
- FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
+ FOP_FUNC(#op "_" #dst) \
+ "10: " #op " %" #dst " \n\t" FOP_RET
#define FOP1EEX(op, dst) \
FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
@@ -357,7 +363,8 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
FOP_END
#define FOP2E(op, dst, src) \
- FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
+ FOP_FUNC(#op "_" #dst "_" #src) \
+ #op " %" #src ", %" #dst " \n\t" FOP_RET
#define FASTOP2(op) \
FOP_START(op) \
@@ -395,7 +402,8 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
FOP_END
#define FOP3E(op, dst, src, src2) \
- FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
+ FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
+ #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
@@ -407,7 +415,12 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
FOP_END
/* Special case for SETcc - 1 instruction per cc */
-#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
+#define FOP_SETCC(op) \
+ ".align 4 \n\t" \
+ ".type " #op ", @function \n\t" \
+ #op ": \n\t" \
+ #op " %al \n\t" \
+ FOP_RET
asm(".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; ret");
@@ -956,7 +969,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
return fastop(ctxt, em_bsr);
}
-static u8 test_cc(unsigned int condition, unsigned long flags)
+static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5e45c2731a5d..75173efccac5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8385,6 +8385,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
{
u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ register void *__sp asm(_ASM_SP);
/*
* If external interrupt exists, IF bit is set in rflags/eflags on the
@@ -8417,8 +8418,9 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
"call *%[entry]\n\t"
:
#ifdef CONFIG_X86_64
- [sp]"=&r"(tmp)
+ [sp]"=&r"(tmp),
#endif
+ "+r"(__sp)
:
[entry]"r"(entry),
[ss]"i"(__KERNEL_DS),
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 8f72b334aea0..1a416935bac9 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -374,7 +374,7 @@ void insn_get_displacement(struct insn *insn)
if (mod == 3)
goto out;
if (mod == 1) {
- insn->displacement.value = get_next(char, insn);
+ insn->displacement.value = get_next(signed char, insn);
insn->displacement.nbytes = 1;
} else if (insn->addr_bytes == 2) {
if ((mod == 0 && rm == 6) || mod == 2) {
@@ -532,7 +532,7 @@ void insn_get_immediate(struct insn *insn)
switch (inat_immediate_size(insn->attr)) {
case INAT_IMM_BYTE:
- insn->immediate.value = get_next(char, insn);
+ insn->immediate.value = get_next(signed char, insn);
insn->immediate.nbytes = 1;
break;
case INAT_IMM_WORD:
@@ -566,7 +566,7 @@ void insn_get_immediate(struct insn *insn)
goto err_out;
}
if (inat_has_second_immediate(insn->attr)) {
- insn->immediate2.value = get_next(char, insn);
+ insn->immediate2.value = get_next(signed char, insn);
insn->immediate2.nbytes = 1;
}
done:
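The three insn.c hunks above matter because plain char has implementation-defined signedness: this instruction decoder is shared with host-side tools such as objtool, and on a host where char is unsigned, a one-byte displacement or immediate like 0xff would widen to 255 instead of -1. A tiny illustration of the failure mode:

    #include <stdio.h>

    int main(void)
    {
    	unsigned char byte = 0xff;		/* e.g. a disp8 encoding -1 */

    	int as_plain  = (char)byte;		/* -1 or 255, ABI-dependent */
    	int as_signed = (signed char)byte;	/* always -1 */

    	printf("%d %d\n", as_plain, as_signed);
    	return 0;
    }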
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
index 40027db99140..be110efa0096 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
@@ -15,6 +15,7 @@
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
+#include <asm/frame.h>
#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
@@ -84,24 +85,29 @@
/* Fix up special calling conventions */
ENTRY(call_rwsem_down_read_failed)
+ FRAME_BEGIN
save_common_regs
__ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi
call rwsem_down_read_failed
__ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs
+ FRAME_END
ret
ENDPROC(call_rwsem_down_read_failed)
ENTRY(call_rwsem_down_write_failed)
+ FRAME_BEGIN
save_common_regs
movq %rax,%rdi
call rwsem_down_write_failed
restore_common_regs
+ FRAME_END
ret
ENDPROC(call_rwsem_down_write_failed)
ENTRY(call_rwsem_wake)
+ FRAME_BEGIN
/* do nothing if still outstanding active readers */
__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
jnz 1f
@@ -109,15 +115,18 @@ ENTRY(call_rwsem_wake)
movq %rax,%rdi
call rwsem_wake
restore_common_regs
-1: ret
+1: FRAME_END
+ ret
ENDPROC(call_rwsem_wake)
ENTRY(call_rwsem_downgrade_wake)
+ FRAME_BEGIN
save_common_regs
__ASM_SIZE(push,) %__ASM_REG(dx)
movq %rax,%rdi
call rwsem_downgrade_wake
__ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs
+ FRAME_END
ret
ENDPROC(call_rwsem_downgrade_wake)
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 4093216b3791..f2a7faf4706e 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -8,6 +8,7 @@
* of the License.
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
/*
* Calling convention :
@@ -22,15 +23,16 @@
32 /* space for rbx,r13,r14,r15 */ + \
8 /* space for skb_copy_bits */)
-sk_load_word:
- .globl sk_load_word
+#define FUNC(name) \
+ .globl name; \
+ .type name, @function; \
+ name:
+FUNC(sk_load_word)
test %esi,%esi
js bpf_slow_path_word_neg
-sk_load_word_positive_offset:
- .globl sk_load_word_positive_offset
-
+FUNC(sk_load_word_positive_offset)
mov %r9d,%eax # hlen
sub %esi,%eax # hlen - offset
cmp $3,%eax
@@ -39,15 +41,11 @@ sk_load_word_positive_offset:
bswap %eax /* ntohl() */
ret
-sk_load_half:
- .globl sk_load_half
-
+FUNC(sk_load_half)
test %esi,%esi
js bpf_slow_path_half_neg
-sk_load_half_positive_offset:
- .globl sk_load_half_positive_offset
-
+FUNC(sk_load_half_positive_offset)
mov %r9d,%eax
sub %esi,%eax # hlen - offset
cmp $1,%eax
@@ -56,15 +54,11 @@ sk_load_half_positive_offset:
rol $8,%ax # ntohs()
ret
-sk_load_byte:
- .globl sk_load_byte
-
+FUNC(sk_load_byte)
test %esi,%esi
js bpf_slow_path_byte_neg
-sk_load_byte_positive_offset:
- .globl sk_load_byte_positive_offset
-
+FUNC(sk_load_byte_positive_offset)
cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
jle bpf_slow_path_byte
movzbl (SKBDATA,%rsi),%eax
@@ -72,16 +66,18 @@ sk_load_byte_positive_offset:
/* rsi contains offset and can be scratched */
#define bpf_slow_path_common(LEN) \
+ lea -MAX_BPF_STACK + 32(%rbp), %rdx;\
+ FRAME_BEGIN; \
mov %rbx, %rdi; /* arg1 == skb */ \
push %r9; \
push SKBDATA; \
/* rsi already has offset */ \
mov $LEN,%ecx; /* len */ \
- lea - MAX_BPF_STACK + 32(%rbp),%rdx; \
call skb_copy_bits; \
test %eax,%eax; \
pop SKBDATA; \
- pop %r9;
+ pop %r9; \
+ FRAME_END
bpf_slow_path_word:
@@ -106,6 +102,7 @@ bpf_slow_path_byte:
ret
#define sk_negative_common(SIZE) \
+ FRAME_BEGIN; \
mov %rbx, %rdi; /* arg1 == skb */ \
push %r9; \
push SKBDATA; \
@@ -115,13 +112,14 @@ bpf_slow_path_byte:
test %rax,%rax; \
pop SKBDATA; \
pop %r9; \
+ FRAME_END; \
jz bpf_error
bpf_slow_path_word_neg:
cmp SKF_MAX_NEG_OFF, %esi /* test range */
jl bpf_error /* offset lower -> error */
-sk_load_word_negative_offset:
- .globl sk_load_word_negative_offset
+
+FUNC(sk_load_word_negative_offset)
sk_negative_common(4)
mov (%rax), %eax
bswap %eax
@@ -130,8 +128,8 @@ sk_load_word_negative_offset:
bpf_slow_path_half_neg:
cmp SKF_MAX_NEG_OFF, %esi
jl bpf_error
-sk_load_half_negative_offset:
- .globl sk_load_half_negative_offset
+
+FUNC(sk_load_half_negative_offset)
sk_negative_common(2)
mov (%rax),%ax
rol $8,%ax
@@ -141,8 +139,8 @@ sk_load_half_negative_offset:
bpf_slow_path_byte_neg:
cmp SKF_MAX_NEG_OFF, %esi
jl bpf_error
-sk_load_byte_negative_offset:
- .globl sk_load_byte_negative_offset
+
+FUNC(sk_load_byte_negative_offset)
sk_negative_common(1)
movzbl (%rax), %eax
ret
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
index 2846aaab5103..066619b0700c 100644
--- a/arch/x86/platform/efi/Makefile
+++ b/arch/x86/platform/efi/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y
+
obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 86d0f9e08dd9..0df2dcc18404 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -11,6 +11,7 @@
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/page_types.h>
+#include <asm/frame.h>
#define SAVE_XMM \
mov %rsp, %rax; \
@@ -74,6 +75,7 @@
.endm
ENTRY(efi_call)
+ FRAME_BEGIN
SAVE_XMM
mov (%rsp), %rax
mov 8(%rax), %rax
@@ -88,6 +90,7 @@ ENTRY(efi_call)
RESTORE_PGT
addq $48, %rsp
RESTORE_XMM
+ FRAME_END
ret
ENDPROC(efi_call)
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index e2386cb4e0c3..4400a43b9e28 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -21,8 +21,10 @@
#include <asm/page_types.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
+#include <asm/frame.h>
ENTRY(swsusp_arch_suspend)
+ FRAME_BEGIN
movq $saved_context, %rax
movq %rsp, pt_regs_sp(%rax)
movq %rbp, pt_regs_bp(%rax)
@@ -50,7 +52,9 @@ ENTRY(swsusp_arch_suspend)
movq %rax, restore_cr3(%rip)
call swsusp_save
+ FRAME_END
ret
+ENDPROC(swsusp_arch_suspend)
ENTRY(restore_image)
/* switch to temporary page tables */
@@ -107,6 +111,7 @@ ENTRY(core_restore_code)
*/
ENTRY(restore_registers)
+ FRAME_BEGIN
/* go back to the original page tables */
movq %rbx, %cr3
@@ -147,4 +152,6 @@ ENTRY(restore_registers)
/* tell the hibernation core that we've just restored the memory */
movq %rax, in_suspend(%rip)
+ FRAME_END
ret
+ENDPROC(restore_registers)
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 2c835e356349..92e3e1d84c1d 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -1,3 +1,5 @@
+OBJECT_FILES_NON_STANDARD := y
+
purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string.o
targets += $(purgatory-y)
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index e02c2c6c56a5..682c895753d9 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -6,7 +6,9 @@
# for more details.
#
#
-KASAN_SANITIZE := n
+KASAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
+
subdir- := rm
obj-y += init.o
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 3e75fcf6b836..053abe7b0ef7 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -6,7 +6,8 @@
# for more details.
#
#
-KASAN_SANITIZE := n
+KASAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
always := realmode.bin realmode.relocs
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2c261082eadf..2379a5a88504 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -32,6 +32,7 @@
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/edd.h>
+#include <linux/frame.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
@@ -351,8 +352,8 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
*cx &= maskecx;
*cx |= setecx;
*dx &= maskedx;
-
}
+STACK_FRAME_NON_STANDARD(xen_cpuid); /* XEN_EMULATE_PREFIX */
static bool __init xen_check_mwait(void)
{
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 3e45aa000718..eff224df813f 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -14,6 +14,7 @@
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
+#include <asm/frame.h>
#include "xen-asm.h"
@@ -23,6 +24,7 @@
* then enter the hypervisor to get them handled.
*/
ENTRY(xen_irq_enable_direct)
+ FRAME_BEGIN
/* Unmask events */
movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
@@ -39,6 +41,7 @@ ENTRY(xen_irq_enable_direct)
2: call check_events
1:
ENDPATCH(xen_irq_enable_direct)
+ FRAME_END
ret
ENDPROC(xen_irq_enable_direct)
RELOC(xen_irq_enable_direct, 2b+1)
@@ -82,6 +85,7 @@ ENDPATCH(xen_save_fl_direct)
* enters the hypervisor to get them delivered if so.
*/
ENTRY(xen_restore_fl_direct)
+ FRAME_BEGIN
#ifdef CONFIG_X86_64
testw $X86_EFLAGS_IF, %di
#else
@@ -100,6 +104,7 @@ ENTRY(xen_restore_fl_direct)
2: call check_events
1:
ENDPATCH(xen_restore_fl_direct)
+ FRAME_END
ret
ENDPROC(xen_restore_fl_direct)
RELOC(xen_restore_fl_direct, 2b+1)
@@ -109,7 +114,8 @@ ENDPATCH(xen_restore_fl_direct)
* Force an event check by making a hypercall, but preserve regs
* before making the call.
*/
-check_events:
+ENTRY(check_events)
+ FRAME_BEGIN
#ifdef CONFIG_X86_32
push %eax
push %ecx
@@ -139,4 +145,6 @@ check_events:
pop %rcx
pop %rax
#endif
+ FRAME_END
ret
+ENDPROC(check_events)
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index cc8acc410ddb..c3df43141e70 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -26,6 +26,7 @@ ENTRY(xen_adjust_exception_frame)
mov 8+0(%rsp), %rcx
mov 8+8(%rsp), %r11
ret $16
+ENDPROC(xen_adjust_exception_frame)
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*