path: root/arch/arm64/kernel/head.S
author    Ard Biesheuvel <ardb@kernel.org>  2022-06-24 18:06:42 +0300
committer Will Deacon <will@kernel.org>  2022-06-24 19:18:10 +0300
commit    c3cee924bd855184d15bc4aa6088dcf8e2c1394c (patch)
tree      afe531afe391eadbcdf03355a5c2e5b97df39a64 /arch/arm64/kernel/head.S
parent    b013c1e1c659b0742f81cc4a95fe61faf6929ae5 (diff)
download  linux-c3cee924bd855184d15bc4aa6088dcf8e2c1394c.tar.xz
arm64: head: cover entire kernel image in initial ID map
As a first step towards avoiding the need to create, tear down and recreate the kernel virtual mapping with MMU and caches disabled, start by expanding the ID map so it covers the page tables as well as all executable code. This will allow us to populate the page tables with the MMU and caches on, and call KASLR init code before setting up the virtual mapping.

Since this ID map is only needed at boot, create it as a temporary set of page tables, and populate the permanent ID map after enabling the MMU and caches. While at it, switch to read-only attributes where possible, as writable permissions are only needed for the initial kernel page tables. Note that on 4k granule configurations, the permanent ID map will now be reduced to a single page rather than a 2M block mapping.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20220624150651.1358849-13-ardb@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
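For illustration only, here is a rough C model of the two mappings the reworked create_idmap() establishes; map_range(), the example addresses and the 2 MiB block size are assumptions for this sketch, not kernel code (the real work is done by the map_memory macro and remap_region in head.S):

/* Sketch: models the mapping ranges only, not the page-table walk. */
#include <stdio.h>

#define SWAPPER_BLOCK_SIZE 0x200000UL  /* 2 MiB block, 4k granule example */

static void map_range(const char *what, unsigned long start,
                      unsigned long end, const char *prot)
{
        /* ID map: VA == PA across the whole range. */
        printf("%-18s [%#lx..%#lx) %s\n", what, start, end, prot);
}

int main(void)
{
        /* Example load addresses; at boot these come from adrp of the
         * real symbols (_text, _end, init_pg_dir, init_pg_end). */
        unsigned long text = 0x40200000, end = 0x42a00000;
        unsigned long init_pg_dir = 0x42800000, init_pg_end = 0x42810000;

        /* Step 1: the temporary ID map now covers the whole image,
         * read-only + executable (SWAPPER_RX_MMUFLAGS). */
        map_range("kernel image", text, end, "RX");

        /* Step 2: the initial page tables are remapped writable, with
         * the start rounded down to a block boundary, mirroring
         * 'bic x4, x2, #SWAPPER_BLOCK_SIZE - 1' before bl remap_region. */
        map_range("init page tables",
                  init_pg_dir & ~(SWAPPER_BLOCK_SIZE - 1), init_pg_end, "RW");
        return 0;
}

The second step overrides the RX attributes for just the page-table region, which is the only part of the image that must remain writable while the MMU and caches are being brought up.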
Diffstat (limited to 'arch/arm64/kernel/head.S')
-rw-r--r--  arch/arm64/kernel/head.S  31
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7397555f8437..93734c91a29a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -87,6 +87,7 @@
* x28 clear_page_tables() callee preserved temp register
* x19/x20 __primary_switch() callee preserved temp registers
* x24 __primary_switch() .. relocate_kernel() current RELR displacement
+ * x28 create_idmap() callee preserved temp register
*/
SYM_CODE_START(primary_entry)
bl preserve_boot_args
@@ -298,9 +299,7 @@ SYM_FUNC_START_LOCAL(remap_region)
SYM_FUNC_END(remap_region)
SYM_FUNC_START_LOCAL(create_idmap)
- adrp x0, idmap_pg_dir
- adrp x3, __idmap_text_start // __pa(__idmap_text_start)
-
+ mov x28, lr
/*
* The ID map carries a 1:1 mapping of the physical address range
* covered by the loaded image, which could be anywhere in DRAM. This
@@ -347,11 +346,22 @@ SYM_FUNC_START_LOCAL(create_idmap)
* translation level, but the top-level table has more entries.
*/
#endif
- adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
- mov x7, SWAPPER_MM_MMUFLAGS
+ adrp x0, init_idmap_pg_dir
+ adrp x3, _text
+ adrp x6, _end
+ mov x7, SWAPPER_RX_MMUFLAGS
map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT
+ /* Remap the kernel page tables r/w in the ID map */
+ adrp x1, _text
+ adrp x2, init_pg_dir
+ adrp x3, init_pg_end
+ bic x4, x2, #SWAPPER_BLOCK_SIZE - 1
+ mov x5, SWAPPER_RW_MMUFLAGS
+ mov x6, #SWAPPER_BLOCK_SHIFT
+ bl remap_region
+
/*
* Since the page tables have been populated with non-cacheable
* accesses (MMU disabled), invalidate those tables again to
@@ -359,9 +369,10 @@ SYM_FUNC_START_LOCAL(create_idmap)
*/
dmb sy
- adrp x0, idmap_pg_dir
- adrp x1, idmap_pg_end
- b dcache_inval_poc // tail call
+ adrp x0, init_idmap_pg_dir
+ adrp x1, init_idmap_pg_end
+ bl dcache_inval_poc
+ ret x28
SYM_FUNC_END(create_idmap)
SYM_FUNC_START_LOCAL(create_kernel_mapping)
@@ -372,7 +383,7 @@ SYM_FUNC_START_LOCAL(create_kernel_mapping)
adrp x3, _text // runtime __pa(_text)
sub x6, x6, x3 // _end - _text
add x6, x6, x5 // runtime __va(_end)
- mov x7, SWAPPER_MM_MMUFLAGS
+ mov x7, SWAPPER_RW_MMUFLAGS
map_memory x0, x1, x5, x6, x7, x3, (VA_BITS - PGDIR_SHIFT), x10, x11, x12, x13, x14
@@ -853,7 +864,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
#endif
adrp x1, init_pg_dir
- adrp x2, idmap_pg_dir
+ adrp x2, init_idmap_pg_dir
bl __enable_mmu
#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_RELR