author     Ard Biesheuvel <ard.biesheuvel@linaro.org>       2015-11-16 13:18:14 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2016-02-20 01:28:34 +0300
commit     0f530a36877e262244eacaaba4f6ec39b2501db0
tree       dd304c315c4eaa9c653104f3fd76dd12f3946ca0
parent     84ba3e7dd2dfb617f890f0bd4c27c4f478d13582
arm64: mm: use correct mapping granularity under DEBUG_RODATA
commit 4fee9f364b9b99f76732f2a6fd6df679a237fa74 upstream.
When booting a 64k pages kernel that is built with CONFIG_DEBUG_RODATA
and resides at an offset that is not a multiple of 512 MB, the rounding
that occurs in __map_memblock() and fixup_executable() results in
incorrect regions being mapped.
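As a rough illustration of the arithmetic (a standalone sketch, not kernel code; it reuses the example layout described below, with SZ_* constants named here for convenience):

/*
 * Sketch of the rounding in question: 64k pages, kernel loaded 2 MB
 * above a DRAM base of 0x40000000.
 */
#include <stdio.h>

#define SZ_64K			0x00010000UL	/* SWAPPER_BLOCK_SIZE with 64k pages */
#define SZ_512M			0x20000000UL	/* SECTION_SIZE with 64k pages */
#define round_down(x, a)	((x) & ~((a) - 1UL))

int main(void)
{
	unsigned long stext = 0x40000000UL + 0x200000UL;	/* 0x40200000 */

	/* old rounding: lands 2 MB below the kernel, in memory the image does not cover */
	printf("SECTION_SIZE round_down:       0x%lx\n", round_down(stext, SZ_512M));
	/* new rounding: stays at the kernel's own start address */
	printf("SWAPPER_BLOCK_SIZE round_down: 0x%lx\n", round_down(stext, SZ_64K));
	return 0;
}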
The following snippet from /sys/kernel/debug/kernel_page_tables shows
how, when the kernel is loaded 2 MB above the base of DRAM at 0x40000000,
the first 2 MB of memory (which may be inaccessible from non-secure EL1
or just reserved by the firmware) is inadvertently mapped into the end of
the module region.
---[ Modules start ]---
0xfffffdffffe00000-0xfffffe0000000000 2M RW NX ... UXN MEM/NORMAL
---[ Modules end ]---
---[ Kernel Mapping ]---
0xfffffe0000000000-0xfffffe0000090000 576K RW NX ... UXN MEM/NORMAL
0xfffffe0000090000-0xfffffe0000200000 1472K ro x ... UXN MEM/NORMAL
0xfffffe0000200000-0xfffffe0000800000 6M ro x ... UXN MEM/NORMAL
0xfffffe0000800000-0xfffffe0000810000 64K ro x ... UXN MEM/NORMAL
0xfffffe0000810000-0xfffffe0000a00000 1984K RW NX ... UXN MEM/NORMAL
0xfffffe0000a00000-0xfffffe00ffe00000 4084M RW NX ... UXN MEM/NORMAL
The same issue is likely to occur on 16k pages kernels whose load
address is not a multiple of 32 MB (i.e., SECTION_SIZE). So round to
SWAPPER_BLOCK_SIZE instead of SECTION_SIZE.
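For reference, the SWAPPER_BLOCK_SIZE definition added by this backport (shown again here with the per-granule values, which are taken from the usual arm64 page-table geometry rather than spelled out in the patch) evaluates as follows, so the rounding granularity matches the granularity actually used to map the kernel image:

/*
 * SWAPPER_BLOCK_SIZE as #defined in this backport:
 *   4k pages  (PAGE_SHIFT == 12): SECTION_SIZE = 2 MB
 *   16k pages (PAGE_SHIFT == 14): PAGE_SIZE    = 16 KB
 *   64k pages (PAGE_SHIFT == 16): PAGE_SIZE    = 64 KB
 */
#define SWAPPER_BLOCK_SIZE	(PAGE_SHIFT == 12 ? SECTION_SIZE : PAGE_SIZE)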
Fixes: da141706aea5 ("arm64: add better page protections to arm64")
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[ard.biesheuvel: add #define of SWAPPER_BLOCK_SIZE for -stable version]
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 arch/arm64/mm/mmu.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9317974c9b8e..326564386cfa 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -301,6 +301,7 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
 }
 
 #ifdef CONFIG_DEBUG_RODATA
+#define SWAPPER_BLOCK_SIZE	(PAGE_SHIFT == 12 ? SECTION_SIZE : PAGE_SIZE)
 static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 {
 	/*
@@ -308,8 +309,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 	 * for now. This will get more fine grained later once all memory
 	 * is mapped
 	 */
-	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
 	if (end < kernel_x_start) {
 		create_mapping(start, __phys_to_virt(start),
@@ -397,18 +398,18 @@ void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
-	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_start = round_down(__pa(_stext),
-							 SECTION_SIZE);
+							 SWAPPER_BLOCK_SIZE);
 
 		create_mapping(aligned_start, __phys_to_virt(aligned_start),
 				__pa(_stext) - aligned_start,
 				PAGE_KERNEL);
 	}
 
-	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_end = round_up(__pa(__init_end),
-							  SECTION_SIZE);
+							  SWAPPER_BLOCK_SIZE);
 		create_mapping(__pa(__init_end), (unsigned long)__init_end,
 				aligned_end - __pa(__init_end),
 				PAGE_KERNEL);