author	Dominik Brodowski <linux@dominikbrodowski.net>	2018-03-07 00:18:08 +0300
committer	Ingo Molnar <mingo@kernel.org>	2018-03-07 09:57:31 +0300
commit	af52201d991624d2d5adce2c123805b3d42a8d4d
tree	6fcb0ec95c5ad0c7fff5dd51abb8df12c623dc53 /arch/x86
parent	4ddb45db30851b2269101ee8969f079b028dd257
x86/entry: Do not special-case clone(2) in compat entry
With the CPU renaming registers on its own, and all the overhead of the
syscall entry/exit, it is doubtful whether the compiled output of

	mov	%r8, %rax
	mov	%rcx, %r8
	mov	%rax, %rcx
	jmpq	sys_clone

is measurably slower than the hand-crafted version of

	xchg	%r8, %rcx

So get rid of this special case.

Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: luto@amacapital.net
Cc: viro@zeniv.linux.org.uk
Signed-off-by: Ingo Molnar <mingo@kernel.org>
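Background on why a single xchg sufficed: in the SysV AMD64 calling convention the
4th and 5th integer arguments travel in %rcx and %r8, so swapping those two
registers swaps tls_val and child_tidptr. A minimal C sketch of what the removed
stub did (illustrative only; sys_clone64 and stub32_clone_in_c are hypothetical
names, not kernel symbols):

	/* Native 64-bit argument order: child_tidptr before tls_val. */
	long sys_clone64(unsigned long flags, unsigned long newsp,
			 int *parent_tidptr, int *child_tidptr,
			 unsigned long tls_val);

	/*
	 * What "xchg %r8, %rcx; jmp sys_clone" amounted to in C:
	 * swap arguments 4 (%rcx) and 5 (%r8) before the tail call.
	 */
	static long stub32_clone_in_c(unsigned long flags, unsigned long newsp,
				      int *parent_tidptr,
				      unsigned long tls_val,	/* arg 4: %rcx */
				      int *child_tidptr)	/* arg 5: %r8  */
	{
		return sys_clone64(flags, newsp, parent_tidptr,
				   child_tidptr, tls_val);
	}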
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/entry/entry_64_compat.S	12
-rw-r--r--	arch/x86/entry/syscalls/syscall_32.tbl	2
-rw-r--r--	arch/x86/ia32/sys_ia32.c	11
-rw-r--r--	arch/x86/include/asm/sys_ia32.h	2
4 files changed, 14 insertions, 13 deletions
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e811dd9c5e99..ff61b7bb750b 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -406,15 +406,3 @@ ENTRY(entry_INT80_compat)
TRACE_IRQS_ON
jmp swapgs_restore_regs_and_return_to_usermode
END(entry_INT80_compat)
-
-ENTRY(stub32_clone)
- /*
- * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
- * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
- *
- * The native 64-bit kernel's sys_clone() implements the latter,
- * so we need to swap arguments here before calling it:
- */
- xchg %r8, %rcx
- jmp sys_clone
-ENDPROC(stub32_clone)
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index e7fd0a76bf99..2a5e99cff859 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -126,7 +126,7 @@
117 i386 ipc sys_ipc compat_sys_ipc
118 i386 fsync sys_fsync
119 i386 sigreturn sys_sigreturn sys32_sigreturn
-120 i386 clone sys_clone stub32_clone
+120 i386 clone sys_clone compat_sys_x86_clone
121 i386 setdomainname sys_setdomainname
122 i386 uname sys_newuname
123 i386 modify_ldt sys_modify_ldt
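For reference, the table format (from the header comment of syscall_32.tbl itself) is:

	<number> <abi> <name> <entry point> <compat entry point>

Only the fifth column changes here: it names the entry point a 64-bit kernel uses
for 32-bit callers, while a native 32-bit kernel keeps calling sys_clone from the
fourth column.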
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 3bc03446ec44..6512498bbef6 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -233,3 +233,14 @@ COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
((u64)len_hi << 32) | len_lo);
}
+
+/*
+ * The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS
+ */
+COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
+ unsigned long, newsp, int __user *, parent_tidptr,
+ unsigned long, tls_val, int __user *, child_tidptr)
+{
+ return sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr,
+ tls_val);
+}
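A note on how the five arguments reach this wrapper (the register mapping below is
the standard i386 syscall convention, stated as background rather than taken from
the patch): the INT80 compat entry hands the 32-bit argument registers to the C
syscall in order, so

	%ebx -> clone_flags	(arg 1)
	%ecx -> newsp		(arg 2)
	%edx -> parent_tidptr	(arg 3)
	%esi -> tls_val		(arg 4)
	%edi -> child_tidptr	(arg 5)

and all the wrapper has to do is pass child_tidptr and tls_val to sys_clone() in
the native order, doing in C what the removed xchg did in assembly.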
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 32831905d97a..906794aa034e 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -55,6 +55,8 @@ asmlinkage long compat_sys_x86_fadvise64(int, unsigned int, unsigned int,
size_t, int);
asmlinkage long compat_sys_x86_fallocate(int, int, unsigned int, unsigned int,
unsigned int, unsigned int);
+asmlinkage long compat_sys_x86_clone(unsigned long, unsigned long, int __user *,
+ unsigned long, int __user *);
/* ia32/ia32_signal.c */
asmlinkage long sys32_sigreturn(void);