author     Nick Hu <nickhu@andestech.com>          2020-01-06 21:38:32 +0300
committer  Palmer Dabbelt <palmerdabbelt@google.com>  2020-01-23 00:09:58 +0300
commit     8ad8b72721d0f07fa02dbe71f901743f9c71c8e6 (patch)
tree       f7e4dfb5c72ced986c1183e9d0c830de608ea2fc /arch/riscv/mm/kasan_init.c
parent     57ee58e39321ab4ac3f2949b90117786726cb216 (diff)
download   linux-8ad8b72721d0f07fa02dbe71f901743f9c71c8e6.tar.xz
riscv: Add KASAN support
This patch ports the Kernel Address SANitizer (KASAN) to RISC-V.

Note: the start address of the shadow memory is the beginning of the kernel address space, which is 2^64 - (2^39 / 2) in SV39. The kernel address space is 2^38 bytes, so the shadow memory needs 2^38 / 8 bytes; the shadow region therefore does not overlap the fixmap area.

There are currently two limitations in this port:

1. RV64 only: KASAN needs a large address space for the extra shadow memory region.

2. KASAN cannot debug modules, because modules are allocated in the VMALLOC area. The shadow memory corresponding to the VMALLOC area is mapped to kasan_early_shadow_page, since there is not enough physical memory to back all of the shadow memory for the VMALLOC area.

Signed-off-by: Nick Hu <nickhu@andestech.com>
Reported-by: Greentime Hu <green.hu@gmail.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
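For reference, generic KASAN maps every 8-byte granule of kernel address space to one shadow byte via kasan_mem_to_shadow(). A minimal sketch of the SV39 layout described above, with the constants spelled out here as illustrative assumptions (not copied from the patch's asm/kasan.h):

    /* Illustrative sketch only; values follow the commit message, not the patch headers. */
    #define KASAN_SHADOW_SCALE_SHIFT  3                       /* one shadow byte covers 2^3 = 8 bytes       */
    #define KASAN_SHADOW_START        0xffffffc000000000UL    /* 2^64 - (2^39 / 2): start of kernel space   */
    #define KASAN_SHADOW_SIZE         (1UL << (38 - KASAN_SHADOW_SCALE_SHIFT))   /* 2^38 / 8 */
    #define KASAN_SHADOW_END          (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
    #define KASAN_SHADOW_OFFSET       (KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)))

    /* Mirrors the generic kasan_mem_to_shadow() translation: kernel address -> shadow byte address. */
    static inline void *mem_to_shadow(unsigned long addr)
    {
            return (void *)((addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET);
    }

With these values, mem_to_shadow(KASAN_SHADOW_START) lands back at KASAN_SHADOW_START, i.e. the shadow region sits at the very beginning of the kernel half of the address space, as the message explains.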
Diffstat (limited to 'arch/riscv/mm/kasan_init.c')
-rw-r--r--   arch/riscv/mm/kasan_init.c   104
1 file changed, 104 insertions, 0 deletions
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
new file mode 100644
index 000000000000..f0cc86040587
--- /dev/null
+++ b/arch/riscv/mm/kasan_init.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Andes Technology Corporation
+
+#include <linux/pfn.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/tlbflush.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+
+extern pgd_t early_pg_dir[PTRS_PER_PGD];
+asmlinkage void __init kasan_early_init(void)
+{
+ uintptr_t i;
+ pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
+
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_early_shadow_pte + i,
+ mk_pte(virt_to_page(kasan_early_shadow_page),
+ PAGE_KERNEL));
+
+ for (i = 0; i < PTRS_PER_PMD; ++i)
+ set_pmd(kasan_early_shadow_pmd + i,
+ pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
+ __pgprot(_PAGE_TABLE)));
+
+ for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
+ i += PGDIR_SIZE, ++pgd)
+ set_pgd(pgd,
+ pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
+ __pgprot(_PAGE_TABLE)));
+
+ /* init for swapper_pg_dir */
+ pgd = pgd_offset_k(KASAN_SHADOW_START);
+
+ for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
+ i += PGDIR_SIZE, ++pgd)
+ set_pgd(pgd,
+ pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
+ __pgprot(_PAGE_TABLE)));
+
+ flush_tlb_all();
+}
+
+static void __init populate(void *start, void *end)
+{
+ unsigned long i;
+ unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+ unsigned long vend = PAGE_ALIGN((unsigned long)end);
+ unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
+ unsigned long n_pmds =
+ (n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
+ n_pages / PTRS_PER_PTE;
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
+ pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+ for (i = 0; i < n_pages; i++) {
+ phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+
+ set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+ }
+
+ for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
+ set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
+ __pgprot(_PAGE_TABLE)));
+
+ for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
+ set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
+ __pgprot(_PAGE_TABLE)));
+
+ flush_tlb_all();
+ memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+ struct memblock_region *reg;
+ unsigned long i;
+
+ kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+ (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+
+ for_each_memblock(memory, reg) {
+ void *start = (void *)__va(reg->base);
+ void *end = (void *)__va(reg->base + reg->size);
+
+ if (start >= end)
+ break;
+
+ populate(kasan_mem_to_shadow(start),
+ kasan_mem_to_shadow(end));
+ };
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte(&kasan_early_shadow_pte[i],
+ mk_pte(virt_to_page(kasan_early_shadow_page),
+ __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));
+
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+ init_task.kasan_depth = 0;
+}