author    Alexandre Ghiti <alex@ghiti.fr>  2021-04-11 19:41:44 +0300
committer Palmer Dabbelt <palmerdabbelt@google.com>  2021-04-26 18:25:04 +0300
commit    2bfc6cd81bd17e4306e24ee47b9554c967bcb499 (patch)
tree      c06ff6ac90cb7735bb0871aa357c5d2b8049a6dc /arch/riscv/mm
parent    8a07ac39f87d6c762006398029762c40e4d9d075 (diff)
download  linux-2bfc6cd81bd17e4306e24ee47b9554c967bcb499.tar.xz
riscv: Move kernel mapping outside of linear mapping
This is a preparatory patch for relocatable kernel and sv48 support.

The kernel used to be linked at PAGE_OFFSET address therefore we could
use the linear mapping for the kernel mapping. But the relocated kernel
base address will be different from PAGE_OFFSET and since in the linear
mapping, two different virtual addresses cannot point to the same
physical address, the kernel mapping needs to lie outside the linear
mapping so that we don't have to copy it at the same physical offset.

The kernel mapping is moved to the last 2GB of the address space, BPF
is now always after the kernel and modules use the 2GB memory range
right before the kernel, so BPF and modules regions do not overlap.
KASLR implementation will simply have to move the kernel in the last
2GB range and just take care of leaving enough space for BPF.

In addition, by moving the kernel to the end of the address space, both
sv39 and sv48 kernels will be exactly the same without needing to be
relocated at runtime.

Suggested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
[Palmer: Squash the STRICT_RWX fix, and a !MMU fix]
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
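For reference, the layout constants and translation helpers that the arch/riscv/mm changes below rely on (KERNEL_LINK_ADDR, kernel_mapping_pa_to_va, ...) come from the header side of this patch (arch/riscv/include/asm/page.h), which is outside this diffstat. A minimal sketch reconstructed from the commit, for orientation only:

/*
 * Sketch of the 64-bit helpers added outside arch/riscv/mm by this commit
 * (reconstructed; see the full patch for the authoritative definitions).
 * The kernel image now occupies the last 2GB of the virtual address space,
 * so a kernel physical address has two virtual aliases.
 */
#define ADDRESS_SPACE_END	(UL(-1))
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)

#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
#define kernel_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_kernel_pa_offset))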
Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/fault.c        13
-rw-r--r--  arch/riscv/mm/init.c        106
-rw-r--r--  arch/riscv/mm/kasan_init.c    9
-rw-r--r--  arch/riscv/mm/physaddr.c      2
4 files changed, 112 insertions(+), 18 deletions(-)
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 8f17519208c7..1b14d523a95c 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -231,6 +231,19 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
return;
}
+#ifdef CONFIG_64BIT
+ /*
+ * Modules in 64bit kernels lie in their own virtual region which is not
+ * in the vmalloc region, but dealing with page faults in this region
+ * or the vmalloc region amounts to doing the same thing: checking that
+ * the mapping exists in init_mm.pgd and updating user page table, so
+ * just use vmalloc_fault.
+ */
+ if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) {
+ vmalloc_fault(regs, code, addr);
+ return;
+ }
+#endif
/* Enable interrupts if they were enabled in the parent context. */
if (likely(regs->status & SR_PIE))
local_irq_enable();
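The MODULES_VADDR/MODULES_END bounds used by the check above are also defined on the header side of this patch (arch/riscv/include/asm/pgtable.h, outside this diffstat); a sketch reconstructed from the commit:

/*
 * Modules occupy the 2GB of virtual address space right below the kernel
 * image, keeping them within relocation range of kernel symbols.
 */
#define MODULES_VADDR	(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END	(PFN_ALIGN((unsigned long)&_start))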
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 7f5036fbee8c..dc9b988e0778 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -25,6 +25,9 @@
#include "../kernel/head.h"
+unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
+EXPORT_SYMBOL(kernel_virt_addr);
+
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
@@ -88,6 +91,10 @@ static void print_vm_layout(void)
(unsigned long)VMALLOC_END);
print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
(unsigned long)high_memory);
+#ifdef CONFIG_64BIT
+ print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
+ (unsigned long)ADDRESS_SPACE_END);
+#endif
}
#else
static void print_vm_layout(void) { }
@@ -116,8 +123,13 @@ void __init setup_bootmem(void)
/* The maximal physical memory size is -PAGE_OFFSET. */
memblock_enforce_memory_limit(-PAGE_OFFSET);
- /* Reserve from the start of the kernel to the end of the kernel */
- memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
+ /*
+ * Reserve from the start of the kernel to the end of the kernel
+ * and make sure we align the reservation on PMD_SIZE since we will
+ * map the kernel in the linear mapping as read-only: we do not want
+ * any allocation to happen between _end and the next pmd aligned page.
+ */
+ memblock_reserve(vmlinux_start, (vmlinux_end - vmlinux_start + PMD_SIZE - 1) & PMD_MASK);
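/*
 * Worked example of the rounding above, with hypothetical numbers:
 * PMD_SIZE = 0x200000 (2MB) on sv39; if vmlinux_end - vmlinux_start is
 * 0x00a12000 (~10MB), then (0x00a12000 + 0x1fffff) & ~0x1fffff yields
 * 0x00c00000, so 12MB are reserved and no allocation can land between
 * _end and the next PMD-aligned address.
 */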
/*
* memblock allocator is not aware of the fact that last 4K bytes of
@@ -152,8 +164,14 @@ void __init setup_bootmem(void)
#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;
+/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
+#ifdef CONFIG_64BIT
+/* Offset between kernel mapping virtual address and kernel load address */
+unsigned long va_kernel_pa_offset;
+EXPORT_SYMBOL(va_kernel_pa_offset);
+#endif
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);
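With both offsets in place, the kernel's load area has two virtual aliases on 64-bit; a worked example with a hypothetical load address:

/*
 * Hypothetical example: kernel loaded at physical address 0x80200000.
 *   va_pa_offset        = PAGE_OFFSET - 0x80200000
 *   va_kernel_pa_offset = kernel_virt_addr - 0x80200000
 * The first page of the image is then mapped both at PAGE_OFFSET
 * (linear mapping) and at kernel_virt_addr (kernel mapping,
 * 0xffffffff80000000 with the layout sketched earlier).
 */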
@@ -257,7 +275,7 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa)
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
- BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);
+ BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT);
return (uintptr_t)early_pmd;
}
@@ -372,17 +390,34 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
+uintptr_t load_pa, load_sz;
+
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+{
+ uintptr_t va, end_va;
+
+ end_va = kernel_virt_addr + load_sz;
+ for (va = kernel_virt_addr; va < end_va; va += map_size)
+ create_pgd_mapping(pgdir, va,
+ load_pa + (va - kernel_virt_addr),
+ map_size, PAGE_KERNEL_EXEC);
+}
+
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
- uintptr_t va, pa, end_va;
- uintptr_t load_pa = (uintptr_t)(&_start);
- uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
+ uintptr_t pa;
uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
+ load_pa = (uintptr_t)(&_start);
+ load_sz = (uintptr_t)(&_end) - load_pa;
va_pa_offset = PAGE_OFFSET - load_pa;
+#ifdef CONFIG_64BIT
+ va_kernel_pa_offset = kernel_virt_addr - load_pa;
+#endif
+
pfn_base = PFN_DOWN(load_pa);
/*
@@ -410,26 +445,22 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_pmd_mapping(fixmap_pmd, FIXADDR_START,
(uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
/* Setup trampoline PGD and PMD */
- create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
+ create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
(uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
- create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
+ create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
/* Setup trampoline PGD */
- create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
+ create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif
/*
- * Setup early PGD covering entire kernel which will allows
+ * Setup early PGD covering entire kernel which will allow
* us to reach paging_init(). We map all memory banks later
* in setup_vm_final() below.
*/
- end_va = PAGE_OFFSET + load_sz;
- for (va = PAGE_OFFSET; va < end_va; va += map_size)
- create_pgd_mapping(early_pg_dir, va,
- load_pa + (va - PAGE_OFFSET),
- map_size, PAGE_KERNEL_EXEC);
+ create_kernel_page_table(early_pg_dir, map_size);
#ifndef __PAGETABLE_PMD_FOLDED
/* Setup early PMD for DTB */
@@ -444,7 +475,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
+#ifdef CONFIG_64BIT
+ /*
+ * __va can't be used since it would return a linear mapping address
+ * whereas dtb_early_va will be used before setup_vm_final installs
+ * the linear mapping.
+ */
+ dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
+#else
dtb_early_va = __va(dtb_pa);
+#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#else
#ifndef CONFIG_BUILTIN_DTB
@@ -456,7 +496,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
+#ifdef CONFIG_64BIT
+ dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
+#else
dtb_early_va = __va(dtb_pa);
+#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#endif
dtb_early_pa = dtb_pa;
@@ -492,6 +536,22 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif
}
+#ifdef CONFIG_64BIT
+void protect_kernel_linear_mapping_text_rodata(void)
+{
+ unsigned long text_start = (unsigned long)lm_alias(_start);
+ unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
+ unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
+ unsigned long data_start = (unsigned long)lm_alias(_data);
+
+ set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
+ set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
+
+ set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
+ set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
+}
+#endif
+
static void __init setup_vm_final(void)
{
uintptr_t va, map_size;
@@ -513,7 +573,7 @@ static void __init setup_vm_final(void)
__pa_symbol(fixmap_pgd_next),
PGDIR_SIZE, PAGE_TABLE);
- /* Map all memory banks */
+ /* Map all memory banks in the linear mapping */
for_each_mem_range(i, &start, &end) {
if (start >= end)
break;
@@ -525,10 +585,22 @@ static void __init setup_vm_final(void)
for (pa = start; pa < end; pa += map_size) {
va = (uintptr_t)__va(pa);
create_pgd_mapping(swapper_pg_dir, va, pa,
- map_size, PAGE_KERNEL_EXEC);
+ map_size,
+#ifdef CONFIG_64BIT
+ PAGE_KERNEL
+#else
+ PAGE_KERNEL_EXEC
+#endif
+ );
+
}
}
+#ifdef CONFIG_64BIT
+ /* Map the kernel */
+ create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+#endif
+
/* Clear fixmap PTE and PMD mappings */
clear_fixmap(FIX_PTE);
clear_fixmap(FIX_PMD);
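protect_kernel_linear_mapping_text_rodata() above tightens the linear-mapping alias of the kernel image rather than the kernel mapping itself; lm_alias() is the generic helper it relies on (from include/linux/mm.h, reproduced for reference):

/*
 * Translate a kernel-image symbol address into its alias in the linear
 * mapping, which is the range being made read-only and non-executable.
 */
#define lm_alias(x)	__va(__pa_symbol(x))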
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 2c39f0386673..28f4d52cf17e 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -171,6 +171,10 @@ void __init kasan_init(void)
phys_addr_t _start, _end;
u64 i;
+ /*
+ * Populate all kernel virtual address space with kasan_early_shadow_page
+ * except for the linear mapping and the modules/kernel/BPF mapping.
+ */
kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
(void *)kasan_mem_to_shadow((void *)
VMEMMAP_END));
@@ -183,6 +187,7 @@ void __init kasan_init(void)
(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+ /* Populate the linear mapping */
for_each_mem_range(i, &_start, &_end) {
void *start = (void *)__va(_start);
void *end = (void *)__va(_end);
@@ -193,6 +198,10 @@ void __init kasan_init(void)
kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
};
+ /* Populate kernel, BPF, modules mapping */
+ kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
+ kasan_mem_to_shadow((const void *)BPF_JIT_REGION_END));
+
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_early_shadow_pte[i],
mk_pte(virt_to_page(kasan_early_shadow_page),
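The single kasan_populate() range above suffices because this patch makes modules, kernel, and BPF contiguous at the end of the address space: modules end at _start, the kernel image follows, and BPF sits right after _end. The shadow translation itself is the standard generic-KASAN one (from include/linux/kasan.h, reproduced for reference):

/*
 * One shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT (= 8) bytes of
 * address space.
 */
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}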
diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
index e8e4dcd39fed..35703d5ef5fd 100644
--- a/arch/riscv/mm/physaddr.c
+++ b/arch/riscv/mm/physaddr.c
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(__virt_to_phys);
phys_addr_t __phys_addr_symbol(unsigned long x)
{
- unsigned long kernel_start = (unsigned long)PAGE_OFFSET;
+ unsigned long kernel_start = (unsigned long)kernel_virt_addr;
unsigned long kernel_end = (unsigned long)_end;
/*