author		Marc Zyngier <maz@kernel.org>	2021-02-08 12:57:12 +0300
committer	Will Deacon <will@kernel.org>	2021-02-08 15:51:26 +0300
commit		8cc8a32415364e475c25277b507f06f67c47ca9a (patch)
tree		2232aa234444c200b37fe8ea5522f19502a35e46 /arch/arm64
parent		b161f92482426a7323884d57cbae683812909988 (diff)
download	linux-8cc8a32415364e475c25277b507f06f67c47ca9a.tar.xz
arm64: Turn the MMU-on sequence into a macro
Turning the MMU on is a popular sport in the arm64 kernel, and we do it
more than once, or even twice. As we are about to add even more, let's
turn it into a macro.

No expected functional change.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Brazdil <dbrazdil@google.com>
Link: https://lore.kernel.org/r/20210208095732.3267263-4-maz@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
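For quick reference, here is a condensed, illustrative sketch of the change: the new assembler.h helper and one converted call site, distilled from the hunks below. The comments are editorial and not part of the patch.

.macro set_sctlr_el1, reg
	msr	sctlr_el1, \reg		// write SCTLR_EL1 (e.g. with the M bit set to enable the MMU)
	isb				// synchronize the system-register write
	ic	iallu			// invalidate the local I-cache: instructions fetched speculatively
	dsb	nsh			//  from the PoC may have been patched at the PoU
	isb
.endm

// __enable_mmu in head.S after the conversion:
	msr	ttbr1_el1, x1		// load TTBR1
	isb
	set_sctlr_el1	x0		// replaces the open-coded msr/isb/ic iallu/dsb nsh/isb sequence
	ret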
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/assembler.h	17
-rw-r--r--	arch/arm64/kernel/head.S	19
-rw-r--r--	arch/arm64/mm/proc.S	12
3 files changed, 22 insertions, 26 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index bf125c591116..8cded93f99c3 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -676,6 +676,23 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endm
/*
+ * Set SCTLR_EL1 to the passed value, and invalidate the local icache
+ * in the process. This is called when setting the MMU on.
+ */
+.macro set_sctlr_el1, reg
+ msr sctlr_el1, \reg
+ isb
+ /*
+ * Invalidate the local I-cache so that any instructions fetched
+ * speculatively from the PoC are discarded, since they may have
+ * been dynamically patched at the PoU.
+ */
+ ic iallu
+ dsb nsh
+ isb
+.endm
+
+/*
* Check whether to yield to another runnable task from kernel mode NEON code
* (which runs with preemption disabled).
*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a0dc987724ed..28e9735302df 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -703,16 +703,9 @@ SYM_FUNC_START(__enable_mmu)
offset_ttbr1 x1, x3
msr ttbr1_el1, x1 // load TTBR1
isb
- msr sctlr_el1, x0
- isb
- /*
- * Invalidate the local I-cache so that any instructions fetched
- * speculatively from the PoC are discarded, since they may have
- * been dynamically patched at the PoU.
- */
- ic iallu
- dsb nsh
- isb
+
+ set_sctlr_el1 x0
+
ret
SYM_FUNC_END(__enable_mmu)
@@ -883,11 +876,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
tlbi vmalle1 // Remove any stale TLB entries
dsb nsh
- msr sctlr_el1, x19 // re-enable the MMU
- isb
- ic iallu // flush instructions fetched
- dsb nsh // via old mapping
- isb
+ set_sctlr_el1 x19 // re-enable the MMU
bl __relocate_kernel
#endif
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index ece785477bdc..c967bfd30d2b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -291,17 +291,7 @@ skip_pgd:
/* We're done: fire up the MMU again */
mrs x17, sctlr_el1
orr x17, x17, #SCTLR_ELx_M
- msr sctlr_el1, x17
- isb
-
- /*
- * Invalidate the local I-cache so that any instructions fetched
- * speculatively from the PoC are discarded, since they may have
- * been dynamically patched at the PoU.
- */
- ic iallu
- dsb nsh
- isb
+ set_sctlr_el1 x17
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]