-rw-r--r--  arch/loongarch/Kconfig               25
-rw-r--r--  arch/loongarch/include/asm/setup.h    2
-rw-r--r--  arch/loongarch/kernel/head.S         13
-rw-r--r--  arch/loongarch/kernel/relocate.c    145
4 files changed, 179 insertions, 6 deletions
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 32ab90dd76e5..2fc18a3c565e 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -501,6 +501,31 @@ config RELOCATABLE
the kernel binary at runtime to a different virtual address from
its link address.
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel (KASLR)"
+ depends on RELOCATABLE
+ help
+ Randomizes the physical and virtual address at which the
+ kernel image is loaded, as a security feature that
+ deters exploit attempts relying on knowledge of the location
+ of kernel internals.
+
+ The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
+
+ If unsure, say N.
+
+config RANDOMIZE_BASE_MAX_OFFSET
+ hex "Maximum KASLR offset" if EXPERT
+ depends on RANDOMIZE_BASE
+ range 0x0 0x10000000
+ default "0x01000000"
+ help
+ When KASLR is active, this provides the maximum offset that will
+ be applied to the kernel image. It should be set according to the
+ amount of physical RAM available in the target system.
+
+ This is limited by the size of the low-address memory region, 256MB.
+
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
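The two new options above gate and bound the randomization. Because determine_relocation_address() in relocate.c masks the random value with (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1), the bound only acts as an exact limit when it is a power of two (the default 0x01000000 is). A minimal user-space sketch of that arithmetic, with an invented seed standing in for get_random_boot():

#include <stdio.h>

#define RANDOMIZE_BASE_MAX_OFFSET 0x01000000UL  /* the default: 16MB */

int main(void)
{
        unsigned long seed = 0x9abcdef0UL;  /* stand-in for get_random_boot() */
        unsigned long off;

        /* Shift left by 16 so the offset is 64KB-aligned, then mask it
         * below the configured maximum (exact only for powers of two). */
        off = (seed << 16) & (RANDOMIZE_BASE_MAX_OFFSET - 1);
        printf("random offset = 0x%lx\n", off);  /* prints 0xf00000 */
        return 0;
}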
diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
index 27d968655b4b..be05c0e706a2 100644
--- a/arch/loongarch/include/asm/setup.h
+++ b/arch/loongarch/include/asm/setup.h
@@ -33,7 +33,7 @@ extern long __la_abs_end;
extern long __rela_dyn_begin;
extern long __rela_dyn_end;
-extern void __init relocate_kernel(void);
+extern void * __init relocate_kernel(void);
#endif
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index c5c3ec2b819a..1d35becc01ee 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -87,7 +87,20 @@ SYM_CODE_START(kernel_entry) # kernel entry point
set_saved_sp sp, t0, t1
#ifdef CONFIG_RELOCATABLE
+
bl relocate_kernel
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ /* Repoint the sp into the new kernel */
+ PTR_LI sp, (_THREAD_SIZE - PT_SIZE)
+ PTR_ADD sp, sp, tp
+ set_saved_sp sp, t0, t1
+#endif
+
+ /* relocate_kernel() returns the new kernel entry point */
+ jr a0
+ ASM_BUG()
+
#endif
bl start_kernel
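On LoongArch, __current_thread_info is a register variable living in $tp, so the assignment in relocate.c (further down) already repoints tp into the copied image; the three instructions above then rebuild the boot stack pointer from it, exactly as the earlier part of kernel_entry does. In C terms the arithmetic is roughly (a sketch, not kernel code):

/* sp = tp + _THREAD_SIZE - PT_SIZE: the boot stack sits at the top of
 * the init thread union, leaving room for one empty struct pt_regs. */
static unsigned long boot_sp(unsigned long tp,          /* thread_info base */
                             unsigned long thread_size, /* _THREAD_SIZE */
                             unsigned long pt_size)     /* PT_SIZE */
{
        return tp + (thread_size - pt_size);
}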
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index 879c40372fba..01f94d1e3edf 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -9,11 +9,15 @@
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/panic_notifier.h>
+#include <linux/start_kernel.h>
+#include <asm/bootinfo.h>
+#include <asm/early_ioremap.h>
#include <asm/inst.h>
#include <asm/sections.h>
#include <asm/setup.h>
#define RELOCATED(x) ((void *)((long)x + reloc_offset))
+#define RELOCATED_KASLR(x) ((void *)((long)x + random_offset))
static unsigned long reloc_offset;
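Two displacements are in play from here on: reloc_offset is the distance from the link-time address (VMLINUX_LOAD_ADDRESS) to wherever the kernel currently runs, while random_offset is the additional KASLR displacement from the load address to the copied image. A worked example, all addresses invented:

/* Illustration only; not kernel code. */
static void offsets_example(void)
{
        unsigned long link_addr  = 0x9000000000200000UL; /* VMLINUX_LOAD_ADDRESS */
        unsigned long load_addr  = 0x9000000000300000UL; /* where firmware put _text */
        unsigned long kaslr_addr = 0x9000000000e00000UL; /* location_new */

        unsigned long reloc_offset  = load_addr - link_addr;  /* 0x100000 */
        unsigned long random_offset = kaslr_addr - load_addr; /* 0xb00000 */

        /* After the image is copied, reloc_offset += random_offset, so
         * RELOCATED() maps link-time addresses to the final image while
         * RELOCATED_KASLR() maps running-image addresses to the copy. */
        (void)(reloc_offset + random_offset);                 /* 0xc00000 */
}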
@@ -37,13 +41,13 @@ static inline void __init relocate_relative(void)
}
}
-static inline void __init relocate_absolute(void)
+static inline void __init relocate_absolute(long random_offset)
{
void *begin, *end;
struct rela_la_abs *p;
- begin = &__la_abs_begin;
- end = &__la_abs_end;
+ begin = RELOCATED_KASLR(&__la_abs_begin);
+ end = RELOCATED_KASLR(&__la_abs_end);
for (p = begin; (void *)p < end; p++) {
long v = p->symvalue;
@@ -62,14 +66,145 @@ static inline void __init relocate_absolute(void)
}
}
-void __init relocate_kernel(void)
+#ifdef CONFIG_RANDOMIZE_BASE
+static inline __init unsigned long rotate_xor(unsigned long hash,
+ const void *area, size_t size)
{
+ size_t i, diff;
+ const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
+
+ diff = (void *)ptr - area;
+ if (size < diff + sizeof(hash))
+ return hash;
+
+ size = ALIGN_DOWN(size - diff, sizeof(hash));
+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
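rotate_xor() is a simple mixing step, not a cryptographic hash: each aligned word of the input is XORed into an accumulator that is first rotated right by 7 bits (written as a left rotate by wordsize minus 7). A standalone version of the same loop over an aligned buffer, assuming a 64-bit unsigned long:

#include <stdio.h>
#include <string.h>

static unsigned long mix(unsigned long hash, const unsigned long *w, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                hash = (hash << 57) | (hash >> 7); /* 64 - 7 = 57 */
                hash ^= w[i];
        }
        return hash;
}

int main(void)
{
        unsigned long buf[4];

        /* get_random_boot() feeds linux_banner through this; any bytes do. */
        memcpy(buf, "Linux version 6.x (example)     ", sizeof(buf));
        printf("hash = 0x%lx\n", mix(0, buf, 4));
        return 0;
}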
+static inline __init unsigned long get_random_boot(void)
+{
+ unsigned long hash = 0;
+ unsigned long entropy = random_get_entropy();
+
+ /* Attempt to create a simple but unpredictable starting entropy. */
+ hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+ /* Add in any runtime entropy we can get */
+ hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+ return hash;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+ char *str;
+ const char *builtin_cmdline = CONFIG_CMDLINE;
+
+ str = strstr(builtin_cmdline, "nokaslr");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return true;
+
+ str = strstr(boot_command_line, "nokaslr");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return true;
+
+ return false;
+}
+
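Note the word-boundary test: "nokaslr" only counts when it sits at the start of the command line or right after a space, so an argument like "foo=nokaslr" does not disable KASLR. A quick user-space reimplementation of the same check, with made-up command lines:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Same start-or-space test as kaslr_disabled() above. */
static bool has_nokaslr(const char *cmdline)
{
        const char *str = strstr(cmdline, "nokaslr");

        return str && (str == cmdline || *(str - 1) == ' ');
}

int main(void)
{
        printf("%d\n", has_nokaslr("nokaslr console=ttyS0")); /* 1 */
        printf("%d\n", has_nokaslr("console=ttyS0 nokaslr")); /* 1 */
        printf("%d\n", has_nokaslr("foo=nokaslr"));           /* 0 */
        return 0;
}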
+/* Choose a new address for the kernel */
+static inline void __init *determine_relocation_address(void)
+{
+ unsigned long kernel_length;
+ unsigned long random_offset;
+ void *destination = _text;
+
+ if (kaslr_disabled())
+ return destination;
+
+ kernel_length = (long)_end - (long)_text;
+
+ random_offset = get_random_boot() << 16;
+ random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+ if (random_offset < kernel_length)
+ random_offset += ALIGN(kernel_length, 0xffff);
+
+ return RELOCATED_KASLR(destination);
+}
+
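Two details above are easy to miss: shifting the seed left by 16 makes every candidate offset a multiple of 64KB, and an offset smaller than the image length would make the copy overlap the running kernel, so it is bumped past the rounded-up kernel length instead. A worked sketch with invented sizes; the rounding here is written as a plain power-of-two align for clarity, whereas the patch uses ALIGN(kernel_length, 0xffff):

/* Illustration only: 8MB kernel, 16MB maximum offset. */
static unsigned long pick_offset(unsigned long seed)
{
        unsigned long kernel_length = 8UL << 20;   /* _end - _text */
        unsigned long max_offset = 0x01000000UL;   /* 16MB */
        unsigned long off = (seed << 16) & (max_offset - 1);

        /* A small offset would land the copy inside the running image,
         * so skip past the kernel length rounded up to 64KB. */
        if (off < kernel_length)
                off += (kernel_length + 0xffff) & ~0xffffUL;
        return off;
}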
+static inline int __init relocation_addr_valid(void *location_new)
+{
+ if ((unsigned long)location_new & 0x00000ffff)
+ return 0; /* Inappropriately aligned new location */
+
+ if ((unsigned long)location_new < (unsigned long)_end)
+ return 0; /* New location overlaps original kernel */
+
+ return 1;
+}
+#endif
+
+static inline void __init update_reloc_offset(unsigned long *addr, long random_offset)
+{
+ unsigned long *new_addr = (unsigned long *)RELOCATED_KASLR(addr);
+
+ *new_addr = (unsigned long)reloc_offset;
+}
+
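update_reloc_offset() exists because of ordering: memcpy() runs before random_offset is folded into reloc_offset, so the copied image still contains the pre-KASLR value of that variable; writing through RELOCATED_KASLR(&reloc_offset) patches the copy's instance so both images agree. A toy model of that sequence (plain user-space C, invented values):

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned long old_img[1] = { 0x100000 }; /* reloc_offset, pre-KASLR */
        unsigned long new_img[1];
        unsigned long random_offset = 0xb00000;

        memcpy(new_img, old_img, sizeof(old_img)); /* copy holds stale value */
        old_img[0] += random_offset;               /* fix the running image */
        new_img[0] = old_img[0];                   /* update_reloc_offset() */

        printf("old=0x%lx new=0x%lx\n", old_img[0], new_img[0]);
        return 0;
}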
+void * __init relocate_kernel(void)
+{
+ unsigned long kernel_length;
+ unsigned long random_offset = 0;
+ void *location_new = _text; /* Default to original kernel start */
+ void *kernel_entry = start_kernel; /* Default to original kernel entry point */
+ char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
+
+ strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ location_new = determine_relocation_address();
+
+ /* Sanity check relocation address */
+ if (relocation_addr_valid(location_new))
+ random_offset = (unsigned long)location_new - (unsigned long)(_text);
+#endif
reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
+ if (random_offset) {
+ kernel_length = (long)(_end) - (long)(_text);
+
+ /* Copy the kernel to its new location */
+ memcpy(location_new, _text, kernel_length);
+
+ /* Sync the caches, ready for execution of the new kernel */
+ __asm__ __volatile__ (
+ "ibar 0 \t\n"
+ "dbar 0 \t\n"
+ ::: "memory");
+
+ reloc_offset += random_offset;
+
+ /* Return the new kernel's entry point */
+ kernel_entry = RELOCATED_KASLR(start_kernel);
+
+ /* The current thread is now within the relocated kernel */
+ __current_thread_info = RELOCATED_KASLR(__current_thread_info);
+
+ update_reloc_offset(&reloc_offset, random_offset);
+ }
+
if (reloc_offset)
relocate_relative();
- relocate_absolute();
+ relocate_absolute(random_offset);
+
+ return kernel_entry;
}
/*
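Taken together, the CONFIG_RANDOMIZE_BASE boot path is: head.S calls relocate_kernel(), which picks and validates a randomized destination, copies the image there, fixes up relocations, and hands the new entry point back for head.S to jump to via jr a0. A condensed pseudo-C view of that flow (names from the patch; this is an outline, not compilable kernel code):

void *relocate_kernel_outline(void)
{
        void *entry = start_kernel;    /* default: original entry point */
        void *dest = determine_relocation_address();
        long random_offset = 0;

        if (relocation_addr_valid(dest))
                random_offset = dest - (void *)_text;

        reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
        if (random_offset) {
                memcpy(dest, _text, _end - _text); /* move the image */
                /* ibar/dbar: sync caches before executing the copy */
                reloc_offset += random_offset;
                entry = RELOCATED_KASLR(start_kernel);
        }
        if (reloc_offset)
                relocate_relative();               /* RELA fixups */
        relocate_absolute(random_offset);          /* la_abs fixups */
        return entry;                              /* head.S: jr a0 */
}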