author    Andrey Ryabinin <ryabinin.a.a@gmail.com>    2015-08-13 08:37:24 +0300
committer Ingo Molnar <mingo@kernel.org>              2015-08-22 15:54:55 +0300
commit    69786cdb379bbc6eab14cf2393c1abd879316e85 (patch)
tree      ddfaf2f85b64057553a745c9080c2e6714880613 /arch/x86/mm/kasan_init_64.c
parent    920e277e17f12870188f4564887a95ae9ac03e31 (diff)
download  linux-69786cdb379bbc6eab14cf2393c1abd879316e85.tar.xz
x86/kasan, mm: Introduce generic kasan_populate_zero_shadow()
Introduce generic kasan_populate_zero_shadow(shadow_start, shadow_end). This function maps kasan_zero_page to the [shadow_start, shadow_end] addresses.

This replaces the x86_64-specific populate_zero_shadow() and will be used for ARM64 in follow-on patches.

The main changes from the original version are:

 * Use p?d_populate*() instead of set_p?d()
 * Use the memblock allocator directly instead of vmemmap_alloc_block()
 * Use __pa() instead of __pa_nodebug(). __pa() causes trouble if we use it
   before kasan_early_init(). kasan_populate_zero_shadow() will be used
   later, so we are OK with __pa() here.

Signed-off-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexey Klimov <klimov.linux@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: David Keitel <dkeitel@codeaurora.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Linus Walleij <linus.walleij@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yury <yury.norov@gmail.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/1439444244-26057-3-git-send-email-ryabinin.a.a@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
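For context, the heart of both the removed x86 routine and its generic replacement is a PTE-level loop: every page-table entry in the shadow range is pointed at the same read-only kasan_zero_page, so an arbitrarily large shadow region is backed by a single physical page. Below is a simplified sketch of that step, modeled on the removed zero_pte_populate() in the diff and the changes listed above (__pa() rather than __pa_nodebug()); it is not the verbatim generic mm/ code, whose details may differ.

/*
 * Sketch: point each PTE in [addr, end) at the shared kasan_zero_page.
 * Large zero-shadow ranges thus cost one physical page in total.
 */
static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
				     unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	/* One shared, write-protected entry mapping kasan_zero_page. */
	zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte(pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

The upper levels (pgd/pud/pmd) follow the same pattern, installing shared zero page tables where whole PGDIR/PUD/PMD-sized spans fit and recursing for the unaligned remainder, with the generic version using p?d_populate*() and the memblock allocator as described above.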
Diffstat (limited to 'arch/x86/mm/kasan_init_64.c')
-rw-r--r--  arch/x86/mm/kasan_init_64.c | 123
1 file changed, 5 insertions(+), 118 deletions(-)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index e1840f3db5b5..9ce5da27b136 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -12,20 +12,6 @@
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];
-static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
-static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
-static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
-
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
-
static int __init map_range(struct range *range)
{
unsigned long start;
@@ -62,106 +48,6 @@ static void __init kasan_map_early_shadow(pgd_t *pgd)
}
}
-static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
- unsigned long end)
-{
- pte_t *pte = pte_offset_kernel(pmd, addr);
-
- while (addr + PAGE_SIZE <= end) {
- WARN_ON(!pte_none(*pte));
- set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
- | __PAGE_KERNEL_RO));
- addr += PAGE_SIZE;
- pte = pte_offset_kernel(pmd, addr);
- }
- return 0;
-}
-
-static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
- unsigned long end)
-{
- int ret = 0;
- pmd_t *pmd = pmd_offset(pud, addr);
-
- while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
- WARN_ON(!pmd_none(*pmd));
- set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
- | _KERNPG_TABLE));
- addr += PMD_SIZE;
- pmd = pmd_offset(pud, addr);
- }
- if (addr < end) {
- if (pmd_none(*pmd)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
- if (!p)
- return -ENOMEM;
- set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
- }
- ret = zero_pte_populate(pmd, addr, end);
- }
- return ret;
-}
-
-
-static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
- unsigned long end)
-{
- int ret = 0;
- pud_t *pud = pud_offset(pgd, addr);
-
- while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
- WARN_ON(!pud_none(*pud));
- set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
- | _KERNPG_TABLE));
- addr += PUD_SIZE;
- pud = pud_offset(pgd, addr);
- }
-
- if (addr < end) {
- if (pud_none(*pud)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
- if (!p)
- return -ENOMEM;
- set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
- }
- ret = zero_pmd_populate(pud, addr, end);
- }
- return ret;
-}
-
-static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
-{
- int ret = 0;
- pgd_t *pgd = pgd_offset_k(addr);
-
- while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
- WARN_ON(!pgd_none(*pgd));
- set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
- | _KERNPG_TABLE));
- addr += PGDIR_SIZE;
- pgd = pgd_offset_k(addr);
- }
-
- if (addr < end) {
- if (pgd_none(*pgd)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
- if (!p)
- return -ENOMEM;
- set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
- }
- ret = zero_pud_populate(pgd, addr, end);
- }
- return ret;
-}
-
-
-static void __init populate_zero_shadow(const void *start, const void *end)
-{
- if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
- panic("kasan: unable to map zero shadow!");
-}
-
-
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
unsigned long val,
@@ -213,7 +99,7 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
- populate_zero_shadow((void *)KASAN_SHADOW_START,
+ kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
kasan_mem_to_shadow((void *)PAGE_OFFSET));
for (i = 0; i < E820_X_MAX; i++) {
@@ -223,14 +109,15 @@ void __init kasan_init(void)
if (map_range(&pfn_mapped[i]))
panic("kasan: unable to allocate shadow!");
}
- populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
- kasan_mem_to_shadow((void *)__START_KERNEL_map));
+ kasan_populate_zero_shadow(
+ kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+ kasan_mem_to_shadow((void *)__START_KERNEL_map));
vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
(unsigned long)kasan_mem_to_shadow(_end),
NUMA_NO_NODE);
- populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+ kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
(void *)KASAN_SHADOW_END);
memset(kasan_zero_page, 0, PAGE_SIZE);