Diffstat (limited to 'arch/x86/mm')
 arch/x86/mm/extable.c        | 12
 arch/x86/mm/fault.c          | 30
 arch/x86/mm/init_32.c        | 52
 arch/x86/mm/init_64.c        |  3
 arch/x86/mm/ioremap.c        |  3
 arch/x86/mm/kmmio.c          | 10
 arch/x86/mm/mmio-mod.c       |  4
 arch/x86/mm/numa_emulation.c |  2
 arch/x86/mm/pat/set_memory.c |  3
 arch/x86/mm/pti.c            |  8
 10 files changed, 36 insertions(+), 91 deletions(-)
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 30bb0bd3b1b8..b991aa4bdfae 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -80,18 +80,6 @@ __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_uaccess);
-__visible bool ex_handler_ext(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr,
- unsigned long error_code,
- unsigned long fault_addr)
-{
- /* Special hack for uaccess_err */
- current->thread.uaccess_err = 1;
- regs->ip = ex_fixup_addr(fixup);
- return true;
-}
-EXPORT_SYMBOL(ex_handler_ext);
-
__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr,
unsigned long error_code,
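
Note: with the ex_handler_ext() special case gone, the remaining fixup handlers in this file all reduce to variants of the default one, which looked roughly like this at the time of this series (sketch, surrounding code elided):

    __visible bool ex_handler_default(const struct exception_table_entry *fixup,
                                      struct pt_regs *regs, int trapnr,
                                      unsigned long error_code,
                                      unsigned long fault_addr)
    {
            /* Resume execution at the fixup address from the extable entry. */
            regs->ip = ex_fixup_addr(fixup);
            return true;
    }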
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 629fdf13f846..859519f5b342 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1310,7 +1310,7 @@ void do_user_addr_fault(struct pt_regs *regs,
struct task_struct *tsk;
struct mm_struct *mm;
vm_fault_t fault, major = 0;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
tsk = current;
mm = tsk->mm;
@@ -1464,27 +1464,23 @@ good_area:
fault = handle_mm_fault(vma, address, flags);
major |= fault & VM_FAULT_MAJOR;
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, hw_error_code, address, SIGBUS,
+ BUS_ADRERR);
+ return;
+ }
+
/*
* If we need to retry the mmap_sem has already been released,
* and if there is a fatal signal pending there is no guarantee
* that we made any progress. Handle this case first.
*/
- if (unlikely(fault & VM_FAULT_RETRY)) {
- /* Retry at most once */
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- if (!fatal_signal_pending(tsk))
- goto retry;
- }
-
- /* User mode? Just return to handle the fatal exception */
- if (flags & FAULT_FLAG_USER)
- return;
-
- /* Not returning to user mode? Handle exceptions or die: */
- no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR);
- return;
+ if (unlikely((fault & VM_FAULT_RETRY) &&
+ (flags & FAULT_FLAG_ALLOW_RETRY))) {
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
}
up_read(&mm->mmap_sem);
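
Note: fault_signal_pending() is the helper from <linux/sched/signal.h> that the new quick path relies on; at the time of this series it was defined roughly as:

    static inline bool fault_signal_pending(vm_fault_t fault_flags,
                                            struct pt_regs *regs)
    {
            /*
             * Only a retried fault can have been interrupted by a signal;
             * fatal signals always break out, non-fatal ones only when
             * returning to user mode.
             */
            return unlikely((fault_flags & VM_FAULT_RETRY) &&
                            (fatal_signal_pending(current) ||
                             (user_mode(regs) && signal_pending(current))));
    }

This is also why the retry path above no longer re-checks fatal_signal_pending() or clears FAULT_FLAG_ALLOW_RETRY: the signal case returns through the quick path before a retry is attempted, and faults may now be retried more than once.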
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 23df4885bbed..de73992b8432 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -238,7 +238,11 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
}
}
-static inline int is_kernel_text(unsigned long addr)
+/*
+ * The <linux/kallsyms.h> already defines is_kernel_text,
+ * using '__' prefix not to get in conflict.
+ */
+static inline int __is_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
return 1;
@@ -328,8 +332,8 @@ repeat:
addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
PAGE_OFFSET + PAGE_SIZE-1;
- if (is_kernel_text(addr) ||
- is_kernel_text(addr2))
+ if (__is_kernel_text(addr) ||
+ __is_kernel_text(addr2))
prot = PAGE_KERNEL_LARGE_EXEC;
pages_2m++;
@@ -354,7 +358,7 @@ repeat:
*/
pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
- if (is_kernel_text(addr))
+ if (__is_kernel_text(addr))
prot = PAGE_KERNEL_EXEC;
pages_4k++;
@@ -788,44 +792,6 @@ void __init mem_init(void)
x86_init.hyper.init_after_bootmem();
mem_init_print_info(NULL);
- printk(KERN_INFO "virtual kernel memory layout:\n"
- " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#ifdef CONFIG_HIGHMEM
- " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#endif
- " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
- FIXADDR_START, FIXADDR_TOP,
- (FIXADDR_TOP - FIXADDR_START) >> 10,
-
- CPU_ENTRY_AREA_BASE,
- CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
- CPU_ENTRY_AREA_MAP_SIZE >> 10,
-
-#ifdef CONFIG_HIGHMEM
- PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
- (LAST_PKMAP*PAGE_SIZE) >> 10,
-#endif
-
- VMALLOC_START, VMALLOC_END,
- (VMALLOC_END - VMALLOC_START) >> 20,
-
- (unsigned long)__va(0), (unsigned long)high_memory,
- ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
-
- (unsigned long)&__init_begin, (unsigned long)&__init_end,
- ((unsigned long)&__init_end -
- (unsigned long)&__init_begin) >> 10,
-
- (unsigned long)&_etext, (unsigned long)&_edata,
- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
-
- (unsigned long)&_text, (unsigned long)&_etext,
- ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
/*
* Check boundaries twice: Some fundamental inconsistencies can
@@ -881,7 +847,7 @@ static void mark_nxdata_nx(void)
*/
unsigned long start = PFN_ALIGN(_etext);
/*
- * This comes from is_kernel_text upper limit. Also HPAGE where used:
+ * This comes from __is_kernel_text upper limit. Also HPAGE where used:
*/
unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
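
Note: the conflicting is_kernel_text() in <linux/kallsyms.h> checks a narrower range, roughly (sketch at the time of this series):

    static inline int is_kernel_text(unsigned long addr)
    {
            if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
                arch_is_kernel_text(addr))
                    return 1;
            return in_gate_area_no_mm(addr);
    }

The local __is_kernel_text() deliberately spans the wider _text..__init_end range, as the hunk above shows, hence the rename instead of simply reusing the kallsyms helper.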
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index abbdecb75fad..0a14711d3a93 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -818,8 +818,7 @@ void __init paging_init(void)
* will not set it back.
*/
node_clear_state(0, N_MEMORY);
- if (N_MEMORY != N_NORMAL_MEMORY)
- node_clear_state(0, N_NORMAL_MEMORY);
+ node_clear_state(0, N_NORMAL_MEMORY);
zone_sizes_init();
}
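
Note: the dropped conditional guarded against clearing the same bit twice, back when N_MEMORY could alias N_NORMAL_MEMORY; in <linux/nodemask.h> at this point the two are always distinct states (abridged sketch):

    enum node_states {
            N_POSSIBLE,             /* The node could become online at some point */
            N_ONLINE,               /* The node is online */
            N_NORMAL_MEMORY,        /* The node has regular memory */
    #ifdef CONFIG_HIGHMEM
            N_HIGH_MEMORY,          /* The node has regular or high memory */
    #else
            N_HIGH_MEMORY = N_NORMAL_MEMORY,
    #endif
            N_MEMORY,               /* The node has memory (regular, high, movable) */
            N_CPU,                  /* The node has one or more cpus */
            NR_NODE_STATES
    };

And node_clear_state() on an already-clear bit is harmless anyway.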
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 935a91e1fd77..18c637c0dc6f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -115,6 +115,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
if (!sev_active())
return;
+ if (!IS_ENABLED(CONFIG_EFI))
+ return;
+
if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
desc->flags |= IORES_MAP_ENCRYPTED;
}
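
Note: with CONFIG_EFI=n there is no efi_mem_type() definition to link against; because IS_ENABLED() folds to a compile-time constant, the early return turns the call into dead code that the compiler drops. Assembled from the context lines above, the function now reads:

    static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
    {
            /* Encrypted mappings are only a concern for SEV guests. */
            if (!sev_active())
                    return;

            /* Without EFI support there is no efi_mem_type() to call. */
            if (!IS_ENABLED(CONFIG_EFI))
                    return;

            if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
                    desc->flags |= IORES_MAP_ENCRYPTED;
    }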
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 49d7814b59a9..9994353fb75d 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -260,7 +260,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
goto no_kmmio;
}
- ctx = &get_cpu_var(kmmio_ctx);
+ ctx = this_cpu_ptr(&kmmio_ctx);
if (ctx->active) {
if (page_base == ctx->addr) {
/*
@@ -285,7 +285,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
pr_emerg("previous hit was at 0x%08lx.\n", ctx->addr);
disarm_kmmio_fault_page(faultpage);
}
- goto no_kmmio_ctx;
+ goto no_kmmio;
}
ctx->active++;
@@ -314,11 +314,8 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
* the user should drop to single cpu before tracing.
*/
- put_cpu_var(kmmio_ctx);
return 1; /* fault handled */
-no_kmmio_ctx:
- put_cpu_var(kmmio_ctx);
no_kmmio:
rcu_read_unlock();
preempt_enable_no_resched();
@@ -333,7 +330,7 @@ no_kmmio:
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
int ret = 0;
- struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
+ struct kmmio_context *ctx = this_cpu_ptr(&kmmio_ctx);
if (!ctx->active) {
/*
@@ -371,7 +368,6 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
if (!(regs->flags & X86_EFLAGS_TF))
ret = 1;
out:
- put_cpu_var(kmmio_ctx);
return ret;
}
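
Note: get_cpu_var() disables preemption and must be paired with put_cpu_var(); both handlers here already run with preemption disabled (see the preempt_enable_no_resched() in the shared exit path above), so the extra disable/enable was redundant:

    /* Before: implies preempt_disable(), needs a matching put_cpu_var() */
    ctx = &get_cpu_var(kmmio_ctx);
    ...
    put_cpu_var(kmmio_ctx);

    /* After: plain address of this CPU's instance; the caller already
     * guarantees we cannot migrate while it is in use. */
    ctx = this_cpu_ptr(&kmmio_ctx);

With no put_cpu_var() left to unwind, the separate no_kmmio_ctx label also collapses into no_kmmio.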
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 673de6063345..109325d77b3e 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -386,7 +386,7 @@ static void enter_uniprocessor(void)
put_online_cpus();
for_each_cpu(cpu, downed_cpus) {
- err = cpu_down(cpu);
+ err = remove_cpu(cpu);
if (!err)
pr_info("CPU%d is down.\n", cpu);
else
@@ -406,7 +406,7 @@ static void leave_uniprocessor(void)
return;
pr_notice("Re-enabling CPUs...\n");
for_each_cpu(cpu, downed_cpus) {
- err = cpu_up(cpu);
+ err = add_cpu(cpu);
if (!err)
pr_info("enabled CPU%d.\n", cpu);
else
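
Note: remove_cpu() and add_cpu() are thin wrappers that take the device-hotplug lock around device_offline()/device_online(), which direct cpu_down()/cpu_up() callers previously had to arrange themselves; a sketch of the kernel/cpu.c wrappers from the same series:

    int remove_cpu(unsigned int cpu)
    {
            int ret;

            lock_device_hotplug();
            ret = device_offline(get_cpu_device(cpu));
            unlock_device_hotplug();

            return ret;
    }

    int add_cpu(unsigned int cpu)
    {
            int ret;

            lock_device_hotplug();
            ret = device_online(get_cpu_device(cpu));
            unlock_device_hotplug();

            return ret;
    }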
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 7f1d2034df1e..c5174b4e318b 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -324,7 +324,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
0, NULL, NUMA_NO_NODE);
}
-int __init setup_emu2phys_nid(int *dfl_phys_nid)
+static int __init setup_emu2phys_nid(int *dfl_phys_nid)
{
int i, max_emu_nid = 0;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index c4aedd00c1ba..6d5424069e2b 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -15,6 +15,7 @@
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
+#include <linux/libnvdimm.h>
#include <asm/e820/api.h>
#include <asm/processor.h>
@@ -304,11 +305,13 @@ void clflush_cache_range(void *vaddr, unsigned int size)
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
+#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_invalidate_pmem(void *addr, size_t size)
{
clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
+#endif
static void __cpa_flush_all(void *arg)
{
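
Note: the new #ifdef mirrors the declaration side; <linux/libnvdimm.h>, now included above, provides the prototype under the same option and an empty inline stub otherwise, roughly:

    #ifdef CONFIG_ARCH_HAS_PMEM_API
    void arch_wb_cache_pmem(void *addr, size_t size);
    void arch_invalidate_pmem(void *addr, size_t size);
    #else
    static inline void arch_wb_cache_pmem(void *addr, size_t size)
    {
    }
    static inline void arch_invalidate_pmem(void *addr, size_t size)
    {
    }
    #endif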
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 44a9f068eee0..843aa10a4cb6 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -39,6 +39,7 @@
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
+#include <asm/set_memory.h>
#undef pr_fmt
#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
@@ -555,13 +556,6 @@ static inline bool pti_kernel_image_global_ok(void)
}
/*
- * This is the only user for these and it is not arch-generic
- * like the other set_memory.h functions. Just extern them.
- */
-extern int set_memory_nonglobal(unsigned long addr, int numpages);
-extern int set_memory_global(unsigned long addr, int numpages);
-
-/*
* For some configurations, map all of kernel text into the user page
* tables. This reduces TLB misses, especially on non-PCID systems.
*/
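
Note: the deleted externs are redundant once <asm/set_memory.h> (included above) carries the prototypes:

    /* arch/x86/include/asm/set_memory.h (sketch) */
    int set_memory_nonglobal(unsigned long addr, int numpages);
    int set_memory_global(unsigned long addr, int numpages);

Keeping the prototypes in a shared header lets the compiler catch any signature drift between pti.c and pat/set_memory.c.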