Diffstat (limited to 'arch/riscv/mm/init.c')
-rw-r--r--  arch/riscv/mm/init.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index dfb5e4f7a670..4faf8bd157ea 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -135,11 +135,16 @@ void __init setup_bootmem(void)
/*
* Reserve from the start of the kernel to the end of the kernel
- * and make sure we align the reservation on PMD_SIZE since we will
+ */
+#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
+ /*
+ * Make sure we align the reservation on PMD_SIZE since we will
* map the kernel in the linear mapping as read-only: we do not want
* any allocation to happen between _end and the next pmd aligned page.
*/
- memblock_reserve(vmlinux_start, (vmlinux_end - vmlinux_start + PMD_SIZE - 1) & PMD_MASK);
+ vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
+#endif
+ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
/*
* memblock allocator is not aware of the fact that last 4K bytes of
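
Note on the hunk above: the new vmlinux_end line rounds the reservation up to the next PMD boundary before calling memblock_reserve(). Below is a standalone userspace sketch (not kernel code) of that round-up; the 2 MiB PMD_SIZE and PMD_MASK = ~(PMD_SIZE - 1) mirror the RV64/Sv39 definitions, and the sample address is hypothetical.

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE (2UL * 1024 * 1024)   /* 2 MiB, one PMD mapping (Sv39) */
#define PMD_MASK (~(PMD_SIZE - 1))     /* clears the low 21 bits */

int main(void)
{
	uintptr_t vmlinux_end = 0x80a12345UL;  /* hypothetical _end address */

	/* Round up to the next PMD boundary, as the patched code does. */
	uintptr_t reserved_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;

	/* Prints: end 0x80a12345 -> reserved up to 0x80c00000 */
	printf("end %#lx -> reserved up to %#lx\n",
	       (unsigned long)vmlinux_end, (unsigned long)reserved_end);
	return 0;
}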
@@ -640,7 +645,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif
}
-#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
+#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
void protect_kernel_linear_mapping_text_rodata(void)
{
unsigned long text_start = (unsigned long)lm_alias(_start);
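
The second hunk ties protect_kernel_linear_mapping_text_rodata() to CONFIG_STRICT_KERNEL_RWX instead of merely excluding XIP kernels. A declaration-side sketch of the usual kernel pattern that keeps call sites free of #ifdefs is shown below; the header placement and the empty static inline stub are assumptions for illustration, not part of this diff.

/*
 * Assumed companion declaration: a real prototype when the option is
 * enabled, an empty stub otherwise, so callers can invoke the helper
 * unconditionally.
 */
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
void protect_kernel_linear_mapping_text_rodata(void);
#else
static inline void protect_kernel_linear_mapping_text_rodata(void) {}
#endif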