author		Linus Torvalds <torvalds@linux-foundation.org>	2019-11-26 21:42:40 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-26 21:42:40 +0300
commit		1d87200446f1d10dfe9672ca8edb027a82612f8c (patch)
tree		45cc71ff8e4d1bcde9b07ce8203277f2b8982941 /arch/x86/lib/copy_user_64.S
parent		5c4a1c090d8676d8b84e2ac40671602be44afdfc (diff)
parent		f01ec4fca8207e31b59a010c3de679c833f3a877 (diff)
download	linux-1d87200446f1d10dfe9672ca8edb027a82612f8c.tar.xz
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
"The main changes in this cycle were:
- Cross-arch changes to move the linker sections for NOTES and
EXCEPTION_TABLE into the RO_DATA area, where they belong on most
architectures. (Kees Cook)
- Switch the x86 linker fill byte from 0x90 (NOP) to 0xcc (INT3), to
trap jumps into the middle of those padding areas instead of letting
execution slide through them (sketched after this message). (Kees Cook)
- A thorough cleanup of symbol definitions within x86 assembler code.
The rather randomly named macros got streamlined around a
(hopefully) straightforward naming scheme:
SYM_START(name, linkage, align...)
SYM_END(name, sym_type)
SYM_FUNC_START(name)
SYM_FUNC_END(name)
SYM_CODE_START(name)
SYM_CODE_END(name)
SYM_DATA_START(name)
SYM_DATA_END(name)
etc. - plus roughly three times as many variants of these basic
primitives, covering label, local-symbol and attribute variations,
expressed via suffixes (a before/after sketch follows this message).
No change in functionality intended. (Jiri Slaby)
- Misc other changes, cleanups and smaller fixes"
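Illustrating the fill-byte item above: the real change is a one-line edit
to the x86 linker script, but the effect can be sketched in plain GNU
assembler. Everything here (func_a, func_b) is hypothetical, not code
from this merge:

	.text
func_a:				/* hypothetical function */
	ret
	/*
	 * Pad to the next 16-byte boundary with 0xcc (INT3) instead of
	 * the default 0x90 (NOP), so a stray jump into the padding
	 * traps immediately rather than sliding, NOP by NOP, into
	 * func_b.
	 */
	.p2align 4, 0xcc
func_b:				/* hypothetical function */
	ret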
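Illustrating the symbol-macro item: the conversion is mechanical, as the
copy_user_64.S diff below shows. A minimal before/after sketch with a
hypothetical my_func (the SYM_* macros themselves are the real ones from
the kernel's linkage headers):

/* Before: old annotation macros */
ENTRY(my_func)
	xorl %eax, %eax		/* return 0 */
	ret
ENDPROC(my_func)

/* After: the same function under the new naming scheme */
SYM_FUNC_START(my_func)
	xorl %eax, %eax		/* return 0 */
	ret
SYM_FUNC_END(my_func)

Local code that is not a C-callable function, such as
.Lcopy_user_handle_tail in the diff below, gets the
SYM_CODE_START_LOCAL()/SYM_CODE_END() pair instead.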
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
x86/entry/64: Remove pointless jump in paranoid_exit
x86/entry/32: Remove unused resume_userspace label
x86/build/vdso: Remove meaningless CFLAGS_REMOVE_*.o
m68k: Convert missed RODATA to RO_DATA
x86/vmlinux: Use INT3 instead of NOP for linker fill bytes
x86/mm: Report actual image regions in /proc/iomem
x86/mm: Report which part of kernel image is freed
x86/mm: Remove redundant address-of operators on addresses
xtensa: Move EXCEPTION_TABLE to RO_DATA segment
powerpc: Move EXCEPTION_TABLE to RO_DATA segment
parisc: Move EXCEPTION_TABLE to RO_DATA segment
microblaze: Move EXCEPTION_TABLE to RO_DATA segment
ia64: Move EXCEPTION_TABLE to RO_DATA segment
h8300: Move EXCEPTION_TABLE to RO_DATA segment
c6x: Move EXCEPTION_TABLE to RO_DATA segment
arm64: Move EXCEPTION_TABLE to RO_DATA segment
alpha: Move EXCEPTION_TABLE to RO_DATA segment
x86/vmlinux: Move EXCEPTION_TABLE to RO_DATA segment
x86/vmlinux: Actually use _etext for the end of the text segment
vmlinux.lds.h: Allow EXCEPTION_TABLE to live in RO_DATA
...
Diffstat (limited to 'arch/x86/lib/copy_user_64.S')
-rw-r--r--	arch/x86/lib/copy_user_64.S	21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 86976b55ae74..816f128a6d52 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -53,7 +53,7 @@
  * Output:
  * eax uncopied bytes or 0 if successful.
  */
-ENTRY(copy_user_generic_unrolled)
+SYM_FUNC_START(copy_user_generic_unrolled)
 	ASM_STAC
 	cmpl $8,%edx
 	jb 20f		/* less than 8 bytes, go to byte copy loop */
@@ -136,7 +136,7 @@ ENTRY(copy_user_generic_unrolled)
 	_ASM_EXTABLE_UA(19b, 40b)
 	_ASM_EXTABLE_UA(21b, 50b)
 	_ASM_EXTABLE_UA(22b, 50b)
-ENDPROC(copy_user_generic_unrolled)
+SYM_FUNC_END(copy_user_generic_unrolled)
 EXPORT_SYMBOL(copy_user_generic_unrolled)
 
 /* Some CPUs run faster using the string copy instructions.
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(copy_user_generic_unrolled)
  * Output:
  * eax uncopied bytes or 0 if successful.
  */
-ENTRY(copy_user_generic_string)
+SYM_FUNC_START(copy_user_generic_string)
 	ASM_STAC
 	cmpl $8,%edx
 	jb 2f		/* less than 8 bytes, go to byte copy loop */
@@ -182,7 +182,7 @@ ENTRY(copy_user_generic_string)
 
 	_ASM_EXTABLE_UA(1b, 11b)
 	_ASM_EXTABLE_UA(3b, 12b)
-ENDPROC(copy_user_generic_string)
+SYM_FUNC_END(copy_user_generic_string)
 EXPORT_SYMBOL(copy_user_generic_string)
 
 /*
@@ -197,7 +197,7 @@ EXPORT_SYMBOL(copy_user_generic_string)
  * Output:
  * eax uncopied bytes or 0 if successful.
  */
-ENTRY(copy_user_enhanced_fast_string)
+SYM_FUNC_START(copy_user_enhanced_fast_string)
 	ASM_STAC
 	cmpl $64,%edx
 	jb .L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
@@ -214,7 +214,7 @@ ENTRY(copy_user_enhanced_fast_string)
 	.previous
 
 	_ASM_EXTABLE_UA(1b, 12b)
-ENDPROC(copy_user_enhanced_fast_string)
+SYM_FUNC_END(copy_user_enhanced_fast_string)
 EXPORT_SYMBOL(copy_user_enhanced_fast_string)
 
 /*
@@ -230,8 +230,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string)
  * Output:
  * eax uncopied bytes or 0 if successful.
  */
-ALIGN;
-.Lcopy_user_handle_tail:
+SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
 	movl %edx,%ecx
 1:	rep movsb
 2:	mov %ecx,%eax
@@ -239,7 +238,7 @@ ALIGN;
 	ret
 
 	_ASM_EXTABLE_UA(1b, 2b)
-END(.Lcopy_user_handle_tail)
+SYM_CODE_END(.Lcopy_user_handle_tail)
 
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
@@ -250,7 +249,7 @@ END(.Lcopy_user_handle_tail)
  * - Require 8-byte alignment when size is 8 bytes or larger.
  * - Require 4-byte alignment when size is 4 bytes.
  */
-ENTRY(__copy_user_nocache)
+SYM_FUNC_START(__copy_user_nocache)
 	ASM_STAC
 
 	/* If size is less than 8 bytes, go to 4-byte copy */
@@ -389,5 +388,5 @@ ENTRY(__copy_user_nocache)
 	_ASM_EXTABLE_UA(31b, .L_fixup_4b_copy)
 	_ASM_EXTABLE_UA(40b, .L_fixup_1b_copy)
 	_ASM_EXTABLE_UA(41b, .L_fixup_1b_copy)
-ENDPROC(__copy_user_nocache)
+SYM_FUNC_END(__copy_user_nocache)
 EXPORT_SYMBOL(__copy_user_nocache)