author    Christoph Hellwig <hch@lst.de>    2020-09-03 17:22:40 +0300
committer Al Viro <viro@zeniv.linux.org.uk>    2020-09-09 05:21:36 +0300
commit    47058bb54b57962b3958a936ddbc59355e4c5504 (patch)
tree      b1399d96916177ef63b8a86bf30daceb35508959 /arch/x86/lib
parent    a1d826d475aafe63775b910e86ccd1bcf1e5a6e1 (diff)
download  linux-47058bb54b57962b3958a936ddbc59355e4c5504.tar.xz
x86: remove address space overrides using set_fs()
Stop providing the possibility to override the address space using
set_fs() now that there is no need for that any more.

To properly handle the TASK_SIZE_MAX checking for 4 vs 5-level page
tables on x86 a new alternative is introduced, which just like the one
in entry_64.S has to use the hardcoded virtual address bits to escape
the fact that TASK_SIZE_MAX isn't actually a constant when 5-level
page tables are enabled.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
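For context, a minimal C sketch of the value the new LOAD_TASK_SIZE_MINUS_N()
alternative loads into a register; the helper name and the la57 parameter
below are illustrative, only the (1 << 47), (1 << 56) and 4096 constants come
from the patch:

	/* Minimal sketch: with 4-level page tables the user address space
	 * ends at (1UL << 47) - 4096; with 5-level page tables (LA57) at
	 * (1UL << 56) - 4096.  The cutoff depends on a boot-time CPU
	 * feature, so TASK_SIZE_MAX is not a compile-time constant when
	 * CONFIG_X86_5LEVEL is set, hence the hardcoded bit widths in the
	 * assembler alternative.
	 */
	#include <stdbool.h>
	#include <stdint.h>

	static inline uint64_t load_task_size_minus_n(unsigned int n, bool la57)
	{
		uint64_t task_size_max = (la57 ? (1ULL << 56) : (1ULL << 47)) - 4096;

		return task_size_max - n;  /* what ends up in %rdx (get) / %rbx (put) */
	}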
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--   arch/x86/lib/getuser.S   47
-rw-r--r--   arch/x86/lib/putuser.S   25
2 files changed, 37 insertions, 35 deletions
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index c8a85b512796..2f052bc96866 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -35,10 +35,19 @@
#include <asm/smap.h>
#include <asm/export.h>
+#ifdef CONFIG_X86_5LEVEL
+#define LOAD_TASK_SIZE_MINUS_N(n) \
+ ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rdx), \
+ __stringify(mov $((1 << 56) - 4096 - (n)),%rdx), X86_FEATURE_LA57
+#else
+#define LOAD_TASK_SIZE_MINUS_N(n) \
+ mov $(TASK_SIZE_MAX - (n)),%_ASM_DX
+#endif
+
.text
SYM_FUNC_START(__get_user_1)
- mov PER_CPU_VAR(current_task), %_ASM_DX
- cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+ LOAD_TASK_SIZE_MINUS_N(0)
+ cmp %_ASM_DX,%_ASM_AX
jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX
@@ -51,15 +60,13 @@ SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
SYM_FUNC_START(__get_user_2)
- add $1,%_ASM_AX
- jc bad_get_user
- mov PER_CPU_VAR(current_task), %_ASM_DX
- cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+ LOAD_TASK_SIZE_MINUS_N(1)
+ cmp %_ASM_DX,%_ASM_AX
jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX
ASM_STAC
-2: movzwl -1(%_ASM_AX),%edx
+2: movzwl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
ret
@@ -67,15 +74,13 @@ SYM_FUNC_END(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
SYM_FUNC_START(__get_user_4)
- add $3,%_ASM_AX
- jc bad_get_user
- mov PER_CPU_VAR(current_task), %_ASM_DX
- cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+ LOAD_TASK_SIZE_MINUS_N(3)
+ cmp %_ASM_DX,%_ASM_AX
jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX
ASM_STAC
-3: movl -3(%_ASM_AX),%edx
+3: movl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
ret
@@ -84,29 +89,25 @@ EXPORT_SYMBOL(__get_user_4)
SYM_FUNC_START(__get_user_8)
#ifdef CONFIG_X86_64
- add $7,%_ASM_AX
- jc bad_get_user
- mov PER_CPU_VAR(current_task), %_ASM_DX
- cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+ LOAD_TASK_SIZE_MINUS_N(7)
+ cmp %_ASM_DX,%_ASM_AX
jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX
ASM_STAC
-4: movq -7(%_ASM_AX),%rdx
+4: movq (%_ASM_AX),%rdx
xor %eax,%eax
ASM_CLAC
ret
#else
- add $7,%_ASM_AX
- jc bad_get_user_8
- mov PER_CPU_VAR(current_task), %_ASM_DX
- cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
+ LOAD_TASK_SIZE_MINUS_N(7)
+ cmp %_ASM_DX,%_ASM_AX
jae bad_get_user_8
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX
ASM_STAC
-4: movl -7(%_ASM_AX),%edx
-5: movl -3(%_ASM_AX),%ecx
+4: movl (%_ASM_AX),%edx
+5: movl 4(%_ASM_AX),%ecx
xor %eax,%eax
ASM_CLAC
ret
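A minimal C sketch of the range check the rewritten __get_user_<size> stubs
perform (function and parameter names here are illustrative, not kernel API):
instead of adding size - 1 to the user pointer and guarding against overflow
with jc, the patch subtracts size - 1 from the limit, so the address register
stays untouched and the loads use displacement 0.

	#include <stdbool.h>
	#include <stdint.h>

	/* Minimal sketch of the new check.  'limit' is TASK_SIZE_MAX as
	 * selected by the LA57 alternative above; 'n' is size - 1
	 * (0, 1, 3 or 7).
	 */
	static inline bool get_user_range_ok(uint64_t addr, uint64_t n, uint64_t limit)
	{
		/*
		 * Old sequence:  addr + n < limit  (needed add $n / jc and
		 *                                   negative load displacements)
		 * New sequence:  addr < limit - n  (limit - n cannot wrap, so
		 *                                   the unmodified address is
		 *                                   compared and loads start
		 *                                   at offset 0)
		 */
		return addr < limit - n;
	}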
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 7c7c92db8497..358239d77dff 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -31,12 +31,19 @@
* as they get called from within inline assembly.
*/
-#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX
+#ifdef CONFIG_X86_5LEVEL
+#define LOAD_TASK_SIZE_MINUS_N(n) \
+ ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rbx), \
+ __stringify(mov $((1 << 56) - 4096 - (n)),%rbx), X86_FEATURE_LA57
+#else
+#define LOAD_TASK_SIZE_MINUS_N(n) \
+ mov $(TASK_SIZE_MAX - (n)),%_ASM_BX
+#endif
.text
SYM_FUNC_START(__put_user_1)
- ENTER
- cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
+ LOAD_TASK_SIZE_MINUS_N(0)
+ cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
ASM_STAC
1: movb %al,(%_ASM_CX)
@@ -47,9 +54,7 @@ SYM_FUNC_END(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
SYM_FUNC_START(__put_user_2)
- ENTER
- mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
- sub $1,%_ASM_BX
+ LOAD_TASK_SIZE_MINUS_N(1)
cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
ASM_STAC
@@ -61,9 +66,7 @@ SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
SYM_FUNC_START(__put_user_4)
- ENTER
- mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
- sub $3,%_ASM_BX
+ LOAD_TASK_SIZE_MINUS_N(3)
cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
ASM_STAC
@@ -75,9 +78,7 @@ SYM_FUNC_END(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
SYM_FUNC_START(__put_user_8)
- ENTER
- mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
- sub $7,%_ASM_BX
+ LOAD_TASK_SIZE_MINUS_N(7)
cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user
ASM_STAC