author | Thomas Gleixner <tglx@linutronix.de> | 2020-04-21 12:20:40 +0300
committer | Borislav Petkov <bp@suse.de> | 2020-04-26 19:47:05 +0300
commit | af5c40c6ee057c5354930abdc4d34be013d0e9e0 (patch)
tree | 2f896585df0ad3e920159e8c630dbc70ab136f26 /arch/x86/include/asm/tlbflush.h
parent | 96f59fe291d2cdc0fcb6f5f2f4b7c9cea9533fc3 (diff)
download | linux-af5c40c6ee057c5354930abdc4d34be013d0e9e0.tar.xz
x86/tlb: Uninline nmi_uaccess_okay()
cpu_tlbstate is exported because various TLB-related functions need
access to it, but cpu_tlbstate is sensitive information which should
only be accessed by well-contained kernel functions and not be directly
exposed to modules.
nmi_uaccess_okay() is the last inline function which requires access to
cpu_tlbstate. Move it into the TLB code.
No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200421092600.052543007@linutronix.de
Diffstat (limited to 'arch/x86/include/asm/tlbflush.h')
-rw-r--r-- | arch/x86/include/asm/tlbflush.h | 33
1 file changed, 1 insertion(+), 32 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 917deea058d5..1c17f5a6cb53 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -247,38 +247,7 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
-/*
- * Blindly accessing user memory from NMI context can be dangerous
- * if we're in the middle of switching the current user task or
- * switching the loaded mm. It can also be dangerous if we
- * interrupted some kernel code that was temporarily using a
- * different mm.
- */
-static inline bool nmi_uaccess_okay(void)
-{
-	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
-	struct mm_struct *current_mm = current->mm;
-
-	VM_WARN_ON_ONCE(!loaded_mm);
-
-	/*
-	 * The condition we want to check is
-	 * current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
-	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
-	 * is supposed to be reasonably fast.
-	 *
-	 * Instead, we check the almost equivalent but somewhat conservative
-	 * condition below, and we rely on the fact that switch_mm_irqs_off()
-	 * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
-	 */
-	if (loaded_mm != current_mm)
-		return false;
-
-	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
-
-	return true;
-}
-
+bool nmi_uaccess_okay(void);
 #define nmi_uaccess_okay nmi_uaccess_okay
 
 void cr4_update_irqsoff(unsigned long set, unsigned long clear);
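
The counterpart of this hunk lands in the TLB code (arch/x86/mm/tlb.c), which is outside the diffstat shown here since it is limited to tlbflush.h. As a rough sketch, assuming the body is carried over verbatim from the inline version removed above, the out-of-line definition would look like this:

/*
 * Sketch of the uninlined nmi_uaccess_okay() as it would appear in
 * arch/x86/mm/tlb.c; the body is assumed to match the removed inline.
 */
bool nmi_uaccess_okay(void)
{
	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
	struct mm_struct *current_mm = current->mm;

	VM_WARN_ON_ONCE(!loaded_mm);

	/*
	 * Checking current_mm->pgd == __va(read_cr3_pa()) directly can be
	 * slow under shadow paging, so compare loaded_mm instead, relying on
	 * switch_mm_irqs_off() setting loaded_mm to LOADED_MM_SWITCHING
	 * before it writes CR3.
	 */
	if (loaded_mm != current_mm)
		return false;

	VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));

	return true;
}

With only the prototype left in tlbflush.h, callers (including modules) still see the declaration, but the function body, and with it the access to cpu_tlbstate, stays contained in the TLB code.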