Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 2
-rw-r--r--  arch/arm64/Makefile | 10
-rw-r--r--  arch/arm64/boot/dts/bitmain/bm1880.dtsi | 6
-rw-r--r--  arch/arm64/include/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/asm/assembler.h | 17
-rw-r--r--  arch/arm64/include/asm/atomic.h | 10
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 11
-rw-r--r--  arch/arm64/include/asm/el2_setup.h | 60
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 23
-rw-r--r--  arch/arm64/include/asm/pointer_auth.h | 10
-rw-r--r--  arch/arm64/include/asm/setup.h | 11
-rw-r--r--  arch/arm64/include/asm/stackprotector.h | 1
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 16
-rw-r--r--  arch/arm64/include/asm/virt.h | 7
-rw-r--r--  arch/arm64/kernel/Makefile | 2
-rw-r--r--  arch/arm64/kernel/alternative.c | 2
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 5
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 75
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 12
-rw-r--r--  arch/arm64/kernel/entry.S | 16
-rw-r--r--  arch/arm64/kernel/head.S | 75
-rw-r--r--  arch/arm64/kernel/hyp-stub.S | 113
-rw-r--r--  arch/arm64/kernel/idreg-override.c | 216
-rw-r--r--  arch/arm64/kernel/kaslr.c | 43
-rw-r--r--  arch/arm64/kernel/module-plts.c | 2
-rw-r--r--  arch/arm64/kernel/perf_event.c | 43
-rw-r--r--  arch/arm64/kernel/probes/kprobes_trampoline.S | 6
-rw-r--r--  arch/arm64/kernel/process.c | 4
-rw-r--r--  arch/arm64/kernel/setup.c | 15
-rw-r--r--  arch/arm64/kernel/signal.c | 7
-rw-r--r--  arch/arm64/kernel/sleep.S | 1
-rw-r--r--  arch/arm64/kernel/smp.c | 3
-rw-r--r--  arch/arm64/kernel/syscall.c | 10
-rw-r--r--  arch/arm64/kernel/traps.c | 3
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 3
-rw-r--r--  arch/arm64/kernel/vdso/vdso.lds.S | 5
-rw-r--r--  arch/arm64/kvm/Kconfig | 8
-rw-r--r--  arch/arm64/kvm/Makefile | 2
-rw-r--r--  arch/arm64/kvm/arch_timer.c | 7
-rw-r--r--  arch/arm64/kvm/arm.c | 35
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/adjust_pc.h | 9
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-init.S | 2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-main.c | 12
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-smp.c | 6
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/psci-relay.c | 59
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 2
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 6
-rw-r--r--  arch/arm64/kvm/va_layout.c | 7
-rw-r--r--  arch/arm64/kvm/vgic/vgic-init.c | 11
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v2.c | 20
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v3.c | 21
-rw-r--r--  arch/arm64/mm/fault.c | 2
-rw-r--r--  arch/arm64/mm/init.c | 33
-rw-r--r--  arch/arm64/mm/mmu.c | 2
-rw-r--r--  arch/arm64/mm/proc.S | 18
55 files changed, 703 insertions, 405 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 05e17351e4f3..f39568b28ec1 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -174,8 +174,6 @@ config ARM64
select HAVE_NMI
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
- select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS
- select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 6be9b3750250..90309208bb28 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
-LDFLAGS_vmlinux :=--no-undefined -X -z norelro
+LDFLAGS_vmlinux :=--no-undefined -X
ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@@ -115,16 +115,20 @@ KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
-KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
+KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
# Same as above, prefer ELF but fall back to linux target if needed.
-KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
+KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
UTS_MACHINE := aarch64
endif
+ifeq ($(CONFIG_LD_IS_LLD), y)
+KBUILD_LDFLAGS += -z norelro
+endif
+
CHECKFLAGS += -D__aarch64__
ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
diff --git a/arch/arm64/boot/dts/bitmain/bm1880.dtsi b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
index fa6e6905f588..53a9b76057aa 100644
--- a/arch/arm64/boot/dts/bitmain/bm1880.dtsi
+++ b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
@@ -127,7 +127,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -145,7 +145,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <32>;
+ ngpios = <32>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -163,7 +163,7 @@
compatible = "snps,dw-apb-gpio-port";
gpio-controller;
#gpio-cells = <2>;
- snps,nr-gpios = <8>;
+ ngpios = <8>;
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index ff9cbb631212..07ac208edc89 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += early_ioremap.h
-generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += qspinlock.h
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 27b1ea721c2d..ca31594d3d6c 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -676,6 +676,23 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endm
/*
+ * Set SCTLR_EL1 to the passed value, and invalidate the local icache
+ * in the process. This is called when setting the MMU on.
+ */
+.macro set_sctlr_el1, reg
+ msr sctlr_el1, \reg
+ isb
+ /*
+ * Invalidate the local I-cache so that any instructions fetched
+ * speculatively from the PoC are discarded, since they may have
+ * been dynamically patched at the PoU.
+ */
+ ic iallu
+ dsb nsh
+ isb
+.endm
+
+/*
* Check whether to yield to another runnable task from kernel mode NEON code
* (which runs with preemption disabled).
*
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 015ddffaf6ca..b56a4b2bc248 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -17,7 +17,7 @@
#include <asm/lse.h>
#define ATOMIC_OP(op) \
-static inline void arch_##op(int i, atomic_t *v) \
+static __always_inline void arch_##op(int i, atomic_t *v) \
{ \
__lse_ll_sc_body(op, i, v); \
}
@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, op) \
-static inline int arch_##op##name(int i, atomic_t *v) \
+static __always_inline int arch_##op##name(int i, atomic_t *v) \
{ \
return __lse_ll_sc_body(op##name, i, v); \
}
@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
#undef ATOMIC_FETCH_OPS
#define ATOMIC64_OP(op) \
-static inline void arch_##op(long i, atomic64_t *v) \
+static __always_inline void arch_##op(long i, atomic64_t *v) \
{ \
__lse_ll_sc_body(op, i, v); \
}
@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, op) \
-static inline long arch_##op##name(long i, atomic64_t *v) \
+static __always_inline long arch_##op##name(long i, atomic64_t *v) \
{ \
return __lse_ll_sc_body(op##name, i, v); \
}
@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
return __lse_ll_sc_body(atomic64_dec_if_positive, v);
}
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9a555809b89c..61177bac49fa 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -63,6 +63,11 @@ struct arm64_ftr_bits {
s64 safe_val; /* safe value for FTR_EXACT features */
};
+struct arm64_ftr_override {
+ u64 val;
+ u64 mask;
+};
+
/*
* @arm64_ftr_reg - Feature register
* @strict_mask Bits which should match across all CPUs for sanity.
@@ -74,6 +79,7 @@ struct arm64_ftr_reg {
u64 user_mask;
u64 sys_val;
u64 user_val;
+ struct arm64_ftr_override *override;
const struct arm64_ftr_bits *ftr_bits;
};
@@ -600,6 +606,7 @@ void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);
u64 read_sanitised_ftr_reg(u32 id);
+u64 __read_sysreg_by_encoding(u32 sys_id);
static inline bool cpu_supports_mixed_endian_el0(void)
{
@@ -811,6 +818,10 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
return 8;
}
+extern struct arm64_ftr_override id_aa64mmfr1_override;
+extern struct arm64_ftr_override id_aa64pfr1_override;
+extern struct arm64_ftr_override id_aa64isar1_override;
+
u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);
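[Illustrative sketch, not part of the patch: the new val/mask pair is meant to substitute only the ID register fields covered by its mask, mirroring the __read_sysreg_by_encoding() change in cpufeature.c further down. The helper name apply_ftr_override() is made up for this example and is not a kernel symbol.]

	static u64 apply_ftr_override(u64 val, const struct arm64_ftr_override *ovr)
	{
		/* Drop the fields that were overridden on the command line... */
		val &= ~ovr->mask;
		/* ...and substitute the forced values for those fields only. */
		val |= ovr->val & ovr->mask;
		return val;
	}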
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index a7f5a1bbc8ac..d77d358f9395 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -32,46 +32,39 @@
* to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
* EL2.
*/
-.macro __init_el2_timers mode
-.ifeqs "\mode", "nvhe"
+.macro __init_el2_timers
mrs x0, cnthctl_el2
orr x0, x0, #3 // Enable EL1 physical timers
msr cnthctl_el2, x0
-.endif
msr cntvoff_el2, xzr // Clear virtual offset
.endm
-.macro __init_el2_debug mode
+.macro __init_el2_debug
mrs x1, id_aa64dfr0_el1
sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
cmp x0, #1
- b.lt 1f // Skip if no PMU present
+ b.lt .Lskip_pmu_\@ // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
-1:
+.Lskip_pmu_\@:
csel x2, xzr, x0, lt // all PMU counters from EL1
/* Statistical profiling */
ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
- cbz x0, 3f // Skip if SPE not present
+ cbz x0, .Lskip_spe_\@ // Skip if SPE not present
-.ifeqs "\mode", "nvhe"
mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2,
and x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
- cbnz x0, 2f // then permit sampling of physical
+ cbnz x0, .Lskip_spe_el2_\@ // then permit sampling of physical
mov x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
1 << SYS_PMSCR_EL2_PA_SHIFT)
msr_s SYS_PMSCR_EL2, x0 // addresses and physical counter
-2:
+.Lskip_spe_el2_\@:
mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
orr x2, x2, x0 // If we don't have VHE, then
// use EL1&0 translation.
-.else
- orr x2, x2, #MDCR_EL2_TPMS // For VHE, use EL2 translation
- // and disable access from EL1
-.endif
-3:
+.Lskip_spe_\@:
msr mdcr_el2, x2 // Configure debug traps
.endm
@@ -79,9 +72,9 @@
.macro __init_el2_lor
mrs x1, id_aa64mmfr1_el1
ubfx x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
- cbz x0, 1f
+ cbz x0, .Lskip_lor_\@
msr_s SYS_LORC_EL1, xzr
-1:
+.Lskip_lor_\@:
.endm
/* Stage-2 translation */
@@ -93,7 +86,7 @@
.macro __init_el2_gicv3
mrs x0, id_aa64pfr0_el1
ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
- cbz x0, 1f
+ cbz x0, .Lskip_gicv3_\@
mrs_s x0, SYS_ICC_SRE_EL2
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
@@ -103,7 +96,7 @@
mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
tbz x0, #0, 1f // and check that it sticks
msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
-1:
+.Lskip_gicv3_\@:
.endm
.macro __init_el2_hstr
@@ -128,14 +121,14 @@
.macro __init_el2_nvhe_sve
mrs x1, id_aa64pfr0_el1
ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
- cbz x1, 1f
+ cbz x1, .Lskip_sve_\@
bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps
msr cptr_el2, x0 // Disable copro. traps to EL2
isb
mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector
msr_s SYS_ZCR_EL2, x1 // length for EL1.
-1:
+.Lskip_sve_\@:
.endm
.macro __init_el2_nvhe_prepare_eret
@@ -145,37 +138,24 @@
/**
* Initialize EL2 registers to sane values. This should be called early on all
- * cores that were booted in EL2.
+ * cores that were booted in EL2. Note that everything gets initialised as
+ * if VHE was not available. The kernel context will be upgraded to VHE
+ * if possible later on in the boot process.
*
* Regs: x0, x1 and x2 are clobbered.
*/
-.macro init_el2_state mode
-.ifnes "\mode", "vhe"
-.ifnes "\mode", "nvhe"
-.error "Invalid 'mode' argument"
-.endif
-.endif
-
+.macro init_el2_state
__init_el2_sctlr
- __init_el2_timers \mode
- __init_el2_debug \mode
+ __init_el2_timers
+ __init_el2_debug
__init_el2_lor
__init_el2_stage2
__init_el2_gicv3
__init_el2_hstr
-
- /*
- * When VHE is not in use, early init of EL2 needs to be done here.
- * When VHE _is_ in use, EL1 will not be used in the host and
- * requires no configuration, and all non-hyp-specific EL2 setup
- * will be done via the _EL1 system register aliases in __cpu_setup.
- */
-.ifeqs "\mode", "nvhe"
__init_el2_nvhe_idregs
__init_el2_nvhe_cptr
__init_el2_nvhe_sve
__init_el2_nvhe_prepare_eret
-.endif
.endm
#endif /* __ARM_KVM_INIT_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 11beda85ee7e..8fcfab0c2567 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -17,6 +17,7 @@
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
+#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
@@ -240,6 +241,28 @@ struct kvm_host_data {
struct kvm_pmu_events pmu_events;
};
+struct kvm_host_psci_config {
+ /* PSCI version used by host. */
+ u32 version;
+
+ /* Function IDs used by host if version is v0.1. */
+ struct psci_0_1_function_ids function_ids_0_1;
+
+ bool psci_0_1_cpu_suspend_implemented;
+ bool psci_0_1_cpu_on_implemented;
+ bool psci_0_1_cpu_off_implemented;
+ bool psci_0_1_migrate_implemented;
+};
+
+extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
+#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
+
+extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
+
+extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
+#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+
struct vcpu_reset_state {
unsigned long pc;
unsigned long r0;
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index c6b4f0603024..b112a11e9302 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -76,6 +76,15 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
return ptrauth_clear_pac(ptr);
}
+static __always_inline void ptrauth_enable(void)
+{
+ if (!system_supports_address_auth())
+ return;
+ sysreg_clear_set(sctlr_el1, 0, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB));
+ isb();
+}
+
#define ptrauth_thread_init_user(tsk) \
ptrauth_keys_init_user(&(tsk)->thread.keys_user)
#define ptrauth_thread_init_kernel(tsk) \
@@ -84,6 +93,7 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
#else /* CONFIG_ARM64_PTR_AUTH */
+#define ptrauth_enable()
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
#define ptrauth_strip_insn_pac(lr) (lr)
#define ptrauth_thread_init_user(tsk)
diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
new file mode 100644
index 000000000000..d3320618ed14
--- /dev/null
+++ b/arch/arm64/include/asm/setup.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef __ARM64_ASM_SETUP_H
+#define __ARM64_ASM_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+void *get_early_fdt_ptr(void);
+void early_fdt_map(u64 dt_phys);
+
+#endif
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index 7263e0bac680..33f1bb453150 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -41,6 +41,7 @@ static __always_inline void boot_init_stack_canary(void)
#endif
ptrauth_thread_init_kernel(current);
ptrauth_thread_switch_kernel(current);
+ ptrauth_enable();
}
#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 6f986e09a781..f0fe0cc6abe0 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -176,10 +176,21 @@ static inline void __uaccess_enable_hw_pan(void)
* The Tag check override (TCO) bit temporarily disables the tag checking,
* preventing the issue.
*/
-static inline void uaccess_disable_privileged(void)
+static inline void __uaccess_disable_tco(void)
{
asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void __uaccess_enable_tco(void)
+{
+ asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+ ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void uaccess_disable_privileged(void)
+{
+ __uaccess_disable_tco();
if (uaccess_ttbr0_disable())
return;
@@ -189,8 +200,7 @@ static inline void uaccess_disable_privileged(void)
static inline void uaccess_enable_privileged(void)
{
- asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
- ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+ __uaccess_enable_tco();
if (uaccess_ttbr0_enable())
return;
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index ee6a48df89d9..7379f35ae2c6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -35,8 +35,13 @@
*/
#define HVC_RESET_VECTORS 2
+/*
+ * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
+ */
+#define HVC_VHE_RESTART 3
+
/* Max number of HYP stub hypercalls */
-#define HVC_STUB_HCALL_NR 3
+#define HVC_STUB_HCALL_NR 4
/* Error returned when an invalid stub number is passed into x0 */
#define HVC_STUB_ERR 0xbadca11
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 86364ab6f13f..2262f0392857 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -17,7 +17,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o proton-pack.o
+ syscall.o proton-pack.o idreg-override.o
targets += efi-entry.o
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index a57cffb752e8..1184c44ea2c7 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -17,7 +17,7 @@
#include <asm/sections.h>
#include <linux/stop_machine.h>
-#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
+#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index f42fd9e33981..a36e2fc330d4 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -75,7 +75,7 @@ int main(void)
DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
- DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
BLANK();
#ifdef CONFIG_COMPAT
DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
@@ -99,6 +99,9 @@ int main(void)
DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
+ DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val));
+ DEFINE(FTR_OVR_MASK_OFFSET, offsetof(struct arm64_ftr_override, mask));
+ BLANK();
#ifdef CONFIG_KVM
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 7ffb5f1d8b68..3bce87a03717 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -352,9 +352,12 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_END,
};
+static struct arm64_ftr_override __ro_after_init no_override = { };
+
struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
.name = "SYS_CTR_EL0",
- .ftr_bits = ftr_ctr
+ .ftr_bits = ftr_ctr,
+ .override = &no_override,
};
static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
@@ -544,13 +547,20 @@ static const struct arm64_ftr_bits ftr_raz[] = {
ARM64_FTR_END,
};
-#define ARM64_FTR_REG(id, table) { \
- .sys_id = id, \
- .reg = &(struct arm64_ftr_reg){ \
- .name = #id, \
- .ftr_bits = &((table)[0]), \
+#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) { \
+ .sys_id = id, \
+ .reg = &(struct arm64_ftr_reg){ \
+ .name = #id, \
+ .override = (ovr), \
+ .ftr_bits = &((table)[0]), \
}}
+#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override)
+
+struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
+struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
+
static const struct __ftr_reg_entry {
u32 sys_id;
struct arm64_ftr_reg *reg;
@@ -585,7 +595,8 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 4 */
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
- ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
+ ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
+ &id_aa64pfr1_override),
ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
/* Op1 = 0, CRn = 0, CRm = 5 */
@@ -594,11 +605,13 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 6 */
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
- ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
+ ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
+ &id_aa64isar1_override),
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
- ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+ ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
+ &id_aa64mmfr1_override),
ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
/* Op1 = 0, CRn = 1, CRm = 2 */
@@ -770,6 +783,33 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
u64 ftr_mask = arm64_ftr_mask(ftrp);
s64 ftr_new = arm64_ftr_value(ftrp, new);
+ s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val);
+
+ if ((ftr_mask & reg->override->mask) == ftr_mask) {
+ s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new);
+ char *str = NULL;
+
+ if (ftr_ovr != tmp) {
+ /* Unsafe, remove the override */
+ reg->override->mask &= ~ftr_mask;
+ reg->override->val &= ~ftr_mask;
+ tmp = ftr_ovr;
+ str = "ignoring override";
+ } else if (ftr_new != tmp) {
+ /* Override was valid */
+ ftr_new = tmp;
+ str = "forced";
+ } else if (ftr_ovr == tmp) {
+ /* Override was the safe value */
+ str = "already set";
+ }
+
+ if (str)
+ pr_warn("%s[%d:%d]: %s to %llx\n",
+ reg->name,
+ ftrp->shift + ftrp->width - 1,
+ ftrp->shift, str, tmp);
+ }
val = arm64_ftr_set_value(ftrp, val, ftr_new);
@@ -1115,14 +1155,17 @@ u64 read_sanitised_ftr_reg(u32 id)
EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);
#define read_sysreg_case(r) \
- case r: return read_sysreg_s(r)
+ case r: val = read_sysreg_s(r); break;
/*
* __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
* Read the system register on the current CPU
*/
-static u64 __read_sysreg_by_encoding(u32 sys_id)
+u64 __read_sysreg_by_encoding(u32 sys_id)
{
+ struct arm64_ftr_reg *regp;
+ u64 val;
+
switch (sys_id) {
read_sysreg_case(SYS_ID_PFR0_EL1);
read_sysreg_case(SYS_ID_PFR1_EL1);
@@ -1165,6 +1208,14 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
BUG();
return 0;
}
+
+ regp = get_arm64_ftr_reg(sys_id);
+ if (regp) {
+ val &= ~regp->override->mask;
+ val |= (regp->override->val & regp->override->mask);
+ }
+
+ return val;
}
#include <linux/irqchip/arm-gic-v3.h>
@@ -2568,7 +2619,7 @@ static void verify_hyp_capabilities(void)
int parange, ipa_max;
unsigned int safe_vmid_bits, vmid_bits;
- if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
+ if (!IS_ENABLED(CONFIG_KVM))
return;
safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
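[Worked example, not from the patch: outcomes of the override sanity check in the init_cpu_ftr_reg() hunk above, assuming a field whose policy is FTR_LOWER_SAFE, as is typical for these ID register fields.]

	/*
	 *   CPU value | override | safe value | message             | final value
	 *   ----------+----------+------------+---------------------+------------
	 *        1    |    0     |     0      | "forced"            |     0
	 *        0    |    1     |     0      | "ignoring override" |     0
	 *        1    |    1     |     1      | "already set"       |     1
	 */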
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index a338f40e64d3..b3e4f9a088b1 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -35,7 +35,7 @@
*/
.macro ftrace_regs_entry, allregs=0
/* Make room for pt_regs, plus a callee frame */
- sub sp, sp, #(S_FRAME_SIZE + 16)
+ sub sp, sp, #(PT_REGS_SIZE + 16)
/* Save function arguments (and x9 for simplicity) */
stp x0, x1, [sp, #S_X0]
@@ -61,15 +61,15 @@
.endif
/* Save the callsite's SP and LR */
- add x10, sp, #(S_FRAME_SIZE + 16)
+ add x10, sp, #(PT_REGS_SIZE + 16)
stp x9, x10, [sp, #S_LR]
/* Save the PC after the ftrace callsite */
str x30, [sp, #S_PC]
/* Create a frame record for the callsite above pt_regs */
- stp x29, x9, [sp, #S_FRAME_SIZE]
- add x29, sp, #S_FRAME_SIZE
+ stp x29, x9, [sp, #PT_REGS_SIZE]
+ add x29, sp, #PT_REGS_SIZE
/* Create our frame record within pt_regs. */
stp x29, x30, [sp, #S_STACKFRAME]
@@ -120,7 +120,7 @@ ftrace_common_return:
ldr x9, [sp, #S_PC]
/* Restore the callsite's SP */
- add sp, sp, #S_FRAME_SIZE + 16
+ add sp, sp, #PT_REGS_SIZE + 16
ret x9
SYM_CODE_END(ftrace_common)
@@ -130,7 +130,7 @@ SYM_CODE_START(ftrace_graph_caller)
ldr x0, [sp, #S_PC]
sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
add x1, sp, #S_LR // parent_ip (callsite's LR)
- ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
+ ldr x2, [sp, #PT_REGS_SIZE] // parent fp (callsite's FP)
bl prepare_ftrace_return
b ftrace_common_return
SYM_CODE_END(ftrace_graph_caller)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2a93fa5f4e49..c9bae73f2621 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -75,7 +75,7 @@ alternative_else_nop_endif
.endif
#endif
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
/*
* Test whether the SP has overflowed, without corrupting a GPR.
@@ -96,7 +96,7 @@ alternative_else_nop_endif
* userspace, and can clobber EL0 registers to free up GPRs.
*/
- /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
+ /* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
msr tpidr_el0, x0
/* Recover the original x0 value and stash it in tpidrro_el0 */
@@ -182,7 +182,6 @@ alternative_else_nop_endif
mrs_s \tmp2, SYS_GCR_EL1
bfi \tmp2, \tmp, #0, #16
msr_s SYS_GCR_EL1, \tmp2
- isb
#endif
.endm
@@ -194,6 +193,7 @@ alternative_else_nop_endif
ldr_l \tmp, gcr_kernel_excl
mte_set_gcr \tmp, \tmp2
+ isb
1:
#endif
.endm
@@ -253,7 +253,7 @@ alternative_else_nop_endif
scs_load tsk, x20
.else
- add x21, sp, #S_FRAME_SIZE
+ add x21, sp, #PT_REGS_SIZE
get_current_task tsk
.endif /* \el == 0 */
mrs x22, elr_el1
@@ -377,7 +377,7 @@ alternative_else_nop_endif
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
- add sp, sp, #S_FRAME_SIZE // restore sp
+ add sp, sp, #PT_REGS_SIZE // restore sp
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
@@ -580,12 +580,12 @@ __bad_stack:
/*
* Store the original GPRs to the new stack. The original SP (minus
- * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
+ * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
*/
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
kernel_entry 1
mrs x0, tpidr_el0
- add x0, x0, #S_FRAME_SIZE
+ add x0, x0, #PT_REGS_SIZE
str x0, [sp, #S_SP]
/* Stash the regs for handle_bad_stack */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a0dc987724ed..1e30b5550d2a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -404,10 +404,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
adr_l x5, init_task
msr sp_el0, x5 // Save thread_info
-#ifdef CONFIG_ARM64_PTR_AUTH
- __ptrauth_keys_init_cpu x5, x6, x7, x8
-#endif
-
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
isb
@@ -436,10 +432,12 @@ SYM_FUNC_START_LOCAL(__primary_switched)
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
bl kasan_early_init
#endif
+ mov x0, x21 // pass FDT address in x0
+ bl early_fdt_map // Try mapping the FDT early
+ bl init_feature_override // Parse cpu feature overrides
#ifdef CONFIG_RANDOMIZE_BASE
tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
b.ne 0f
- mov x0, x21 // pass FDT address in x0
bl kaslr_early_init // parse FDT for KASLR options
cbz x0, 0f // KASLR disabled? just proceed
orr x23, x23, x0 // record KASLR offset
@@ -447,6 +445,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
ret // to __primary_switch()
0:
#endif
+ bl switch_to_vhe // Prefer VHE if possible
add sp, sp, #16
mov x29, #0
mov x30, #0
@@ -478,13 +477,14 @@ EXPORT_SYMBOL(kimage_vaddr)
* booted in EL1 or EL2 respectively.
*/
SYM_FUNC_START(init_kernel_el)
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ msr sctlr_el1, x0
+
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.eq init_el2
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
- mov_q x0, INIT_SCTLR_EL1_MMU_OFF
- msr sctlr_el1, x0
isb
mov_q x0, INIT_PSTATE_EL1
msr spsr_el1, x0
@@ -493,50 +493,11 @@ SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
eret
SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
-#ifdef CONFIG_ARM64_VHE
- /*
- * Check for VHE being present. x2 being non-zero indicates that we
- * do have VHE, and that the kernel is intended to run at EL2.
- */
- mrs x2, id_aa64mmfr1_el1
- ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
-#else
- mov x2, xzr
-#endif
- cbz x2, init_el2_nvhe
-
- /*
- * When VHE _is_ in use, EL1 will not be used in the host and
- * requires no configuration, and all non-hyp-specific EL2 setup
- * will be done via the _EL1 system register aliases in __cpu_setup.
- */
- mov_q x0, HCR_HOST_VHE_FLAGS
- msr hcr_el2, x0
- isb
-
- init_el2_state vhe
-
- isb
-
- mov_q x0, INIT_PSTATE_EL2
- msr spsr_el2, x0
- msr elr_el2, lr
- mov w0, #BOOT_CPU_MODE_EL2
- eret
-
-SYM_INNER_LABEL(init_el2_nvhe, SYM_L_LOCAL)
- /*
- * When VHE is not in use, early init of EL2 and EL1 needs to be
- * done here.
- */
- mov_q x0, INIT_SCTLR_EL1_MMU_OFF
- msr sctlr_el1, x0
-
mov_q x0, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x0
isb
- init_el2_state nvhe
+ init_el2_state
/* Hypervisor stub */
adr_l x0, __hyp_stub_vectors
@@ -623,6 +584,7 @@ SYM_FUNC_START_LOCAL(secondary_startup)
/*
* Common entry point for secondary CPUs.
*/
+ bl switch_to_vhe
bl __cpu_secondary_check52bitva
bl __cpu_setup // initialise processor
adrp x1, swapper_pg_dir
@@ -703,16 +665,9 @@ SYM_FUNC_START(__enable_mmu)
offset_ttbr1 x1, x3
msr ttbr1_el1, x1 // load TTBR1
isb
- msr sctlr_el1, x0
- isb
- /*
- * Invalidate the local I-cache so that any instructions fetched
- * speculatively from the PoC are discarded, since they may have
- * been dynamically patched at the PoU.
- */
- ic iallu
- dsb nsh
- isb
+
+ set_sctlr_el1 x0
+
ret
SYM_FUNC_END(__enable_mmu)
@@ -883,11 +838,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
tlbi vmalle1 // Remove any stale TLB entries
dsb nsh
- msr sctlr_el1, x19 // re-enable the MMU
- isb
- ic iallu // flush instructions fetched
- dsb nsh // via old mapping
- isb
+ set_sctlr_el1 x19 // re-enable the MMU
bl __relocate_kernel
#endif
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 160f5881a0b7..3e08dcc924b5 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -8,9 +8,9 @@
#include <linux/init.h>
#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
+#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
@@ -47,10 +47,13 @@ SYM_CODE_END(__hyp_stub_vectors)
SYM_CODE_START_LOCAL(el1_sync)
cmp x0, #HVC_SET_VECTORS
- b.ne 2f
+ b.ne 1f
msr vbar_el2, x1
b 9f
+1: cmp x0, #HVC_VHE_RESTART
+ b.eq mutate_to_vhe
+
2: cmp x0, #HVC_SOFT_RESTART
b.ne 3f
mov x0, x2
@@ -70,6 +73,88 @@ SYM_CODE_START_LOCAL(el1_sync)
eret
SYM_CODE_END(el1_sync)
+// nVHE? No way! Give me the real thing!
+SYM_CODE_START_LOCAL(mutate_to_vhe)
+ // Be prepared to fail
+ mov_q x0, HVC_STUB_ERR
+
+ // Sanity check: MMU *must* be off
+ mrs x1, sctlr_el2
+ tbnz x1, #0, 1f
+
+ // Needs to be VHE capable, obviously
+ mrs x1, id_aa64mmfr1_el1
+ ubfx x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
+ cbz x1, 1f
+
+ // Check whether VHE is disabled from the command line
+ adr_l x1, id_aa64mmfr1_override
+ ldr x2, [x1, FTR_OVR_VAL_OFFSET]
+ ldr x1, [x1, FTR_OVR_MASK_OFFSET]
+ ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
+ ubfx x1, x1, #ID_AA64MMFR1_VHE_SHIFT, #4
+ cmp x1, xzr
+ and x2, x2, x1
+ csinv x2, x2, xzr, ne
+ cbz x2, 1f
+
+ // Engage the VHE magic!
+ mov_q x0, HCR_HOST_VHE_FLAGS
+ msr hcr_el2, x0
+ isb
+
+ // Use the EL1 allocated stack, per-cpu offset
+ mrs x0, sp_el1
+ mov sp, x0
+ mrs x0, tpidr_el1
+ msr tpidr_el2, x0
+
+ // FP configuration, vectors
+ mrs_s x0, SYS_CPACR_EL12
+ msr cpacr_el1, x0
+ mrs_s x0, SYS_VBAR_EL12
+ msr vbar_el1, x0
+
+ // Use EL2 translations for SPE and disable access from EL1
+ mrs x0, mdcr_el2
+ bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+ msr mdcr_el2, x0
+
+ // Transfer the MM state from EL1 to EL2
+ mrs_s x0, SYS_TCR_EL12
+ msr tcr_el1, x0
+ mrs_s x0, SYS_TTBR0_EL12
+ msr ttbr0_el1, x0
+ mrs_s x0, SYS_TTBR1_EL12
+ msr ttbr1_el1, x0
+ mrs_s x0, SYS_MAIR_EL12
+ msr mair_el1, x0
+ isb
+
+ // Invalidate TLBs before enabling the MMU
+ tlbi vmalle1
+ dsb nsh
+
+ // Enable the EL2 S1 MMU, as set up from EL1
+ mrs_s x0, SYS_SCTLR_EL12
+ set_sctlr_el1 x0
+
+	// Disable the EL1 S1 MMU for good measure
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ msr_s SYS_SCTLR_EL12, x0
+
+ // Hack the exception return to stay at EL2
+ mrs x0, spsr_el1
+ and x0, x0, #~PSR_MODE_MASK
+ mov x1, #PSR_MODE_EL2h
+ orr x0, x0, x1
+ msr spsr_el1, x0
+
+ mov x0, xzr
+
+1: eret
+SYM_CODE_END(mutate_to_vhe)
+
.macro invalid_vector label
SYM_CODE_START_LOCAL(\label)
b \label
@@ -118,3 +203,27 @@ SYM_FUNC_START(__hyp_reset_vectors)
hvc #0
ret
SYM_FUNC_END(__hyp_reset_vectors)
+
+/*
+ * Entry point to switch to VHE if deemed capable
+ */
+SYM_FUNC_START(switch_to_vhe)
+#ifdef CONFIG_ARM64_VHE
+ // Need to have booted at EL2
+ adr_l x1, __boot_cpu_mode
+ ldr w0, [x1]
+ cmp w0, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+
+ // and still be at EL1
+ mrs x0, CurrentEL
+ cmp x0, #CurrentEL_EL1
+ b.ne 1f
+
+ // Turn the world upside down
+ mov x0, #HVC_VHE_RESTART
+ hvc #0
+1:
+#endif
+ ret
+SYM_FUNC_END(switch_to_vhe)
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
new file mode 100644
index 000000000000..dffb16682330
--- /dev/null
+++ b/arch/arm64/kernel/idreg-override.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Early cpufeature override framework
+ *
+ * Copyright (C) 2020 Google LLC
+ * Author: Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/setup.h>
+
+#define FTR_DESC_NAME_LEN 20
+#define FTR_DESC_FIELD_LEN 10
+#define FTR_ALIAS_NAME_LEN 30
+#define FTR_ALIAS_OPTION_LEN 80
+
+struct ftr_set_desc {
+ char name[FTR_DESC_NAME_LEN];
+ struct arm64_ftr_override *override;
+ struct {
+ char name[FTR_DESC_FIELD_LEN];
+ u8 shift;
+ } fields[];
+};
+
+static const struct ftr_set_desc mmfr1 __initconst = {
+ .name = "id_aa64mmfr1",
+ .override = &id_aa64mmfr1_override,
+ .fields = {
+ { "vh", ID_AA64MMFR1_VHE_SHIFT },
+ {}
+ },
+};
+
+static const struct ftr_set_desc pfr1 __initconst = {
+ .name = "id_aa64pfr1",
+ .override = &id_aa64pfr1_override,
+ .fields = {
+ { "bt", ID_AA64PFR1_BT_SHIFT },
+ {}
+ },
+};
+
+static const struct ftr_set_desc isar1 __initconst = {
+ .name = "id_aa64isar1",
+ .override = &id_aa64isar1_override,
+ .fields = {
+ { "gpi", ID_AA64ISAR1_GPI_SHIFT },
+ { "gpa", ID_AA64ISAR1_GPA_SHIFT },
+ { "api", ID_AA64ISAR1_API_SHIFT },
+ { "apa", ID_AA64ISAR1_APA_SHIFT },
+ {}
+ },
+};
+
+extern struct arm64_ftr_override kaslr_feature_override;
+
+static const struct ftr_set_desc kaslr __initconst = {
+ .name = "kaslr",
+#ifdef CONFIG_RANDOMIZE_BASE
+ .override = &kaslr_feature_override,
+#endif
+ .fields = {
+ { "disabled", 0 },
+ {}
+ },
+};
+
+static const struct ftr_set_desc * const regs[] __initconst = {
+ &mmfr1,
+ &pfr1,
+ &isar1,
+ &kaslr,
+};
+
+static const struct {
+ char alias[FTR_ALIAS_NAME_LEN];
+ char feature[FTR_ALIAS_OPTION_LEN];
+} aliases[] __initconst = {
+ { "kvm-arm.mode=nvhe", "id_aa64mmfr1.vh=0" },
+ { "kvm-arm.mode=protected", "id_aa64mmfr1.vh=0" },
+ { "arm64.nobti", "id_aa64pfr1.bt=0" },
+ { "arm64.nopauth",
+ "id_aa64isar1.gpi=0 id_aa64isar1.gpa=0 "
+ "id_aa64isar1.api=0 id_aa64isar1.apa=0" },
+ { "nokaslr", "kaslr.disabled=1" },
+};
+
+static int __init find_field(const char *cmdline,
+ const struct ftr_set_desc *reg, int f, u64 *v)
+{
+ char opt[FTR_DESC_NAME_LEN + FTR_DESC_FIELD_LEN + 2];
+ int len;
+
+ len = snprintf(opt, ARRAY_SIZE(opt), "%s.%s=",
+ reg->name, reg->fields[f].name);
+
+ if (!parameqn(cmdline, opt, len))
+ return -1;
+
+ return kstrtou64(cmdline + len, 0, v);
+}
+
+static void __init match_options(const char *cmdline)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ int f;
+
+ if (!regs[i]->override)
+ continue;
+
+ for (f = 0; strlen(regs[i]->fields[f].name); f++) {
+ u64 shift = regs[i]->fields[f].shift;
+ u64 mask = 0xfUL << shift;
+ u64 v;
+
+ if (find_field(cmdline, regs[i], f, &v))
+ continue;
+
+ regs[i]->override->val &= ~mask;
+ regs[i]->override->val |= (v << shift) & mask;
+ regs[i]->override->mask |= mask;
+
+ return;
+ }
+ }
+}
+
+static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
+{
+ do {
+ char buf[256];
+ size_t len;
+ int i;
+
+ cmdline = skip_spaces(cmdline);
+
+ for (len = 0; cmdline[len] && !isspace(cmdline[len]); len++);
+ if (!len)
+ return;
+
+ len = min(len, ARRAY_SIZE(buf) - 1);
+ strncpy(buf, cmdline, len);
+ buf[len] = 0;
+
+ if (strcmp(buf, "--") == 0)
+ return;
+
+ cmdline += len;
+
+ match_options(buf);
+
+ for (i = 0; parse_aliases && i < ARRAY_SIZE(aliases); i++)
+ if (parameq(buf, aliases[i].alias))
+ __parse_cmdline(aliases[i].feature, false);
+ } while (1);
+}
+
+static __init void parse_cmdline(void)
+{
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+ const u8 *prop;
+ void *fdt;
+ int node;
+
+ fdt = get_early_fdt_ptr();
+ if (!fdt)
+ goto out;
+
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ goto out;
+
+ prop = fdt_getprop(fdt, node, "bootargs", NULL);
+ if (!prop)
+ goto out;
+
+ __parse_cmdline(prop, true);
+
+ if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
+ return;
+ }
+
+out:
+ __parse_cmdline(CONFIG_CMDLINE, true);
+}
+
+/* Keep checkers quiet */
+void init_feature_override(void);
+
+asmlinkage void __init init_feature_override(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ if (regs[i]->override) {
+ regs[i]->override->val = 0;
+ regs[i]->override->mask = 0;
+ }
+ }
+
+ parse_cmdline();
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ if (regs[i]->override)
+ __flush_dcache_area(regs[i]->override,
+ sizeof(*regs[i]->override));
+ }
+}
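[Usage sketch, for illustration only: the option spellings come from the fields[] and aliases[] tables above, and the arithmetic mirrors match_options(). Booting with "arm64.nobti" is treated exactly like "id_aa64pfr1.bt=0", and each "reg.field=val" option is recorded as a 4-bit override at the field's shift.]

	/* How "id_aa64pfr1.bt=0" lands in id_aa64pfr1_override; the
	 * requested value v is 0 here, taken from the command line.
	 */
	u64 shift = ID_AA64PFR1_BT_SHIFT;
	u64 mask  = 0xfUL << shift;		/* each ID field is 4 bits wide */

	id_aa64pfr1_override.val  &= ~mask;
	id_aa64pfr1_override.val  |= (0ULL << shift) & mask;	/* v == 0 */
	id_aa64pfr1_override.mask |= mask;	/* mark the field as overridden */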
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 1c74c45b9494..27f8939deb1b 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -19,6 +19,7 @@
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/sections.h>
+#include <asm/setup.h>
enum kaslr_status {
KASLR_ENABLED,
@@ -50,39 +51,7 @@ static __init u64 get_kaslr_seed(void *fdt)
return ret;
}
-static __init bool cmdline_contains_nokaslr(const u8 *cmdline)
-{
- const u8 *str;
-
- str = strstr(cmdline, "nokaslr");
- return str == cmdline || (str > cmdline && *(str - 1) == ' ');
-}
-
-static __init bool is_kaslr_disabled_cmdline(void *fdt)
-{
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
- int node;
- const u8 *prop;
-
- node = fdt_path_offset(fdt, "/chosen");
- if (node < 0)
- goto out;
-
- prop = fdt_getprop(fdt, node, "bootargs", NULL);
- if (!prop)
- goto out;
-
- if (cmdline_contains_nokaslr(prop))
- return true;
-
- if (IS_ENABLED(CONFIG_CMDLINE_EXTEND))
- goto out;
-
- return false;
- }
-out:
- return cmdline_contains_nokaslr(CONFIG_CMDLINE);
-}
+struct arm64_ftr_override kaslr_feature_override __initdata;
/*
* This routine will be executed with the kernel mapped at its default virtual
@@ -92,12 +61,11 @@ out:
* containing function pointers) to be reinitialized, and zero-initialized
* .bss variables will be reset to 0.
*/
-u64 __init kaslr_early_init(u64 dt_phys)
+u64 __init kaslr_early_init(void)
{
void *fdt;
u64 seed, offset, mask, module_range;
unsigned long raw;
- int size;
/*
* Set a reasonable default for module_alloc_base in case
@@ -111,8 +79,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* and proceed with KASLR disabled. We will make another
* attempt at mapping the FDT in setup_machine()
*/
- early_fixmap_init();
- fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+ fdt = get_early_fdt_ptr();
if (!fdt) {
kaslr_status = KASLR_DISABLED_FDT_REMAP;
return 0;
@@ -127,7 +94,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
* Check if 'nokaslr' appears on the command line, and
* return 0 if that is the case.
*/
- if (is_kaslr_disabled_cmdline(fdt)) {
+ if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) {
kaslr_status = KASLR_DISABLED_CMDLINE;
return 0;
}
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 2e224435c024..e53493d8b208 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -131,7 +131,7 @@ u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
}
#endif
-#define cmp_3way(a,b) ((a) < (b) ? -1 : (a) > (b))
+#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))
static int cmp_rela(const void *a, const void *b)
{
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 38bb07eff872..d1fec4ab4bcf 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -23,8 +23,6 @@
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>
-#include <linux/nmi.h>
-#include <linux/cpufreq.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -812,7 +810,7 @@ static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
{
int idx;
- for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx ++) {
+ for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
@@ -1250,21 +1248,10 @@ static struct platform_driver armv8_pmu_driver = {
static int __init armv8_pmu_driver_init(void)
{
- int ret;
-
if (acpi_disabled)
- ret = platform_driver_register(&armv8_pmu_driver);
+ return platform_driver_register(&armv8_pmu_driver);
else
- ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
-
- /*
- * Try to re-initialize lockup detector after PMU init in
- * case PMU events are triggered via NMIs.
- */
- if (ret == 0 && arm_pmu_irq_is_nmi())
- lockup_detector_init();
-
- return ret;
+ return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
@@ -1322,27 +1309,3 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time_zero = 1;
userpg->cap_user_time_short = 1;
}
-
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
-/*
- * Safe maximum CPU frequency in case a particular platform doesn't implement
- * cpufreq driver. Although, architecture doesn't put any restrictions on
- * maximum frequency but 5 GHz seems to be safe maximum given the available
- * Arm CPUs in the market which are clocked much less than 5 GHz. On the other
- * hand, we can't make it much higher as it would lead to a large hard-lockup
- * detection timeout on parts which are running slower (eg. 1GHz on
- * Developerbox) and doesn't possess a cpufreq driver.
- */
-#define SAFE_MAX_CPU_FREQ 5000000000UL // 5 GHz
-u64 hw_nmi_get_sample_period(int watchdog_thresh)
-{
- unsigned int cpu = smp_processor_id();
- unsigned long max_cpu_freq;
-
- max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
- if (!max_cpu_freq)
- max_cpu_freq = SAFE_MAX_CPU_FREQ;
-
- return (u64)max_cpu_freq * watchdog_thresh;
-}
-#endif
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S
index 890ca72c5a51..288a84e253cc 100644
--- a/arch/arm64/kernel/probes/kprobes_trampoline.S
+++ b/arch/arm64/kernel/probes/kprobes_trampoline.S
@@ -25,7 +25,7 @@
stp x24, x25, [sp, #S_X24]
stp x26, x27, [sp, #S_X26]
stp x28, x29, [sp, #S_X28]
- add x0, sp, #S_FRAME_SIZE
+ add x0, sp, #PT_REGS_SIZE
stp lr, x0, [sp, #S_LR]
/*
* Construct a useful saved PSTATE
@@ -62,7 +62,7 @@
.endm
SYM_CODE_START(kretprobe_trampoline)
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
save_all_base_regs
@@ -76,7 +76,7 @@ SYM_CODE_START(kretprobe_trampoline)
restore_all_base_regs
- add sp, sp, #S_FRAME_SIZE
+ add sp, sp, #PT_REGS_SIZE
ret
SYM_CODE_END(kretprobe_trampoline)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6616486a58fe..4cc1ccc8d6ab 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -304,7 +304,7 @@ void __show_regs(struct pt_regs *regs)
}
}
-void show_regs(struct pt_regs * regs)
+void show_regs(struct pt_regs *regs)
{
__show_regs(regs);
dump_backtrace(regs, NULL, KERN_DEFAULT);
@@ -587,7 +587,7 @@ unsigned long get_wchan(struct task_struct *p)
ret = frame.pc;
goto out;
}
- } while (count ++ < 16);
+ } while (count++ < 16);
out:
put_task_stack(p);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index c18aacde8bb0..61845c0821d9 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -168,6 +168,21 @@ static void __init smp_build_mpidr_hash(void)
pr_warn("Large number of MPIDR hash buckets detected\n");
}
+static void *early_fdt_ptr __initdata;
+
+void __init *get_early_fdt_ptr(void)
+{
+ return early_fdt_ptr;
+}
+
+asmlinkage void __init early_fdt_map(u64 dt_phys)
+{
+ int fdt_size;
+
+ early_fixmap_init();
+ early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL);
+}
+
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
int size;
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index f71d6ce4673f..6237486ff6bb 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -914,13 +914,6 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_flags)
{
- /*
- * The assembly code enters us with IRQs off, but it hasn't
- * informed the tracing code of that for efficiency reasons.
- * Update the trace code with the current status.
- */
- trace_hardirqs_off();
-
do {
if (thread_flags & _TIF_NEED_RESCHED) {
/* Unmask Debug and SError for the next task */
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 6bdef7362c0e..5bfd9b87f85d 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -100,6 +100,7 @@ SYM_FUNC_END(__cpu_suspend_enter)
.pushsection ".idmap.text", "awx"
SYM_CODE_START(cpu_resume)
bl init_kernel_el
+ bl switch_to_vhe
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 6bc3a3698c3d..ad00f99ee9b0 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -434,7 +434,7 @@ static void __init hyp_mode_check(void)
"CPU: CPUs started in inconsistent modes");
else
pr_info("CPU: All CPU(s) started at EL1\n");
- if (IS_ENABLED(CONFIG_KVM))
+ if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
kvm_compute_layout();
}
@@ -807,7 +807,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index f61e9d8cc55a..c2877c332f2d 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -9,6 +9,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
+#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
@@ -165,15 +166,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
local_daif_mask();
flags = current_thread_info()->flags;
- if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
- /*
- * We're off to userspace, where interrupts are
- * always enabled after we restore the flags from
- * the SPSR.
- */
- trace_hardirqs_on();
+ if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
return;
- }
local_daif_restore(DAIF_PROCCTX);
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 08156be75569..a05d34f0e82a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -42,11 +42,10 @@
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
-#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>
-static const char *handler[]= {
+static const char *handler[] = {
"Synchronous Abort",
"IRQ",
"FIQ",
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index a8f8e409e2bf..cd9c3fa25902 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -24,8 +24,7 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
# preparation in build-time C")).
ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
- -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id=sha1 -n \
- $(btildflags-y) -T
+ -Bsymbolic --build-id=sha1 -n $(btildflags-y) -T
ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index d808ad31e01f..61dbb4c838ef 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -40,9 +40,6 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
- .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
- .eh_frame : { KEEP (*(.eh_frame)) } :text
-
.dynamic : { *(.dynamic) } :text :dynamic
.rodata : { *(.rodata*) } :text
@@ -54,6 +51,7 @@ SECTIONS
*(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
+ *(.eh_frame .eh_frame_hdr)
}
}
@@ -66,7 +64,6 @@ PHDRS
text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
- eh_frame_hdr PT_GNU_EH_FRAME;
}
/*
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 043756db8f6e..3964acf5451e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -49,14 +49,6 @@ if KVM
source "virt/kvm/Kconfig"
-config KVM_ARM_PMU
- bool "Virtual Performance Monitoring Unit (PMU) support"
- depends on HW_PERF_EVENTS
- default y
- help
- Adds support for a virtual Performance Monitoring Unit (PMU) in
- virtual machines.
-
endif # KVM
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 60fd181df624..13b017284bf9 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -24,4 +24,4 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
vgic/vgic-its.o vgic/vgic-debug.o
-kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 32ba6fbc3814..74e0699661e9 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -1129,9 +1129,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
if (!irqchip_in_kernel(vcpu->kvm))
goto no_vgic;
- if (!vgic_initialized(vcpu->kvm))
- return -ENODEV;
-
+ /*
+ * At this stage, we have the guarantee that the vgic is both
+ * available and initialized.
+ */
if (!timer_irqs_are_valid(vcpu)) {
kvm_debug("incorrectly configured timer irqs\n");
return -EINVAL;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 6e637d2b4cfb..597565a65ca2 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -65,10 +65,6 @@ static bool vgic_present;
static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
-
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -584,11 +580,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Map the VGIC hardware resources before running a vcpu the
* first time on this VM.
*/
- if (unlikely(!vgic_ready(kvm))) {
- ret = kvm_vgic_map_resources(kvm);
- if (ret)
- return ret;
- }
+ ret = kvm_vgic_map_resources(kvm);
+ if (ret)
+ return ret;
} else {
/*
* Tell the rest of the code that there are userspace irqchip
@@ -1574,12 +1568,12 @@ static struct notifier_block hyp_init_cpu_pm_nb = {
.notifier_call = hyp_init_cpu_pm_notifier,
};
-static void __init hyp_cpu_pm_init(void)
+static void hyp_cpu_pm_init(void)
{
if (!is_protected_kvm_enabled())
cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
-static void __init hyp_cpu_pm_exit(void)
+static void hyp_cpu_pm_exit(void)
{
if (!is_protected_kvm_enabled())
cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
@@ -1604,9 +1598,12 @@ static void init_cpu_logical_map(void)
* allow any other CPUs from the `possible` set to boot.
*/
for_each_online_cpu(cpu)
- kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+ hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
}
+#define init_psci_0_1_impl_state(config, what) \
+ config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
static bool init_psci_relay(void)
{
/*
@@ -1618,8 +1615,15 @@ static bool init_psci_relay(void)
return false;
}
- kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
- kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+ kvm_host_psci_config.version = psci_ops.get_version();
+
+ if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+ kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+ init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
+ }
return true;
}
@@ -1962,6 +1966,9 @@ static int __init early_kvm_mode_cfg(char *arg)
return 0;
}
+ if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode()))
+ return 0;
+
return -EINVAL;
}
early_param("kvm-arm.mode", early_kvm_mode_cfg);
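[Expansion shown for clarity only, not added by the patch: init_psci_0_1_impl_state() pastes the PSCI operation name into both the config field and the psci_ops member.]

	/* init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on) expands to: */
	kvm_host_psci_config.psci_0_1_cpu_on_implemented = psci_ops.cpu_on;
	/* i.e. the bool records whether the host registered a cpu_on handler. */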
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index b1f60923a8fe..61716359035d 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
}
}
+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+ write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 31b060a44045..222cfc3e7190 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -189,7 +189,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
2: msr SPsel, #1 // We want to use SP_EL{1,2}
/* Initialize EL2 CPU state to sane values. */
- init_el2_state nvhe // Clobbers x0..x2
+ init_el2_state // Clobbers x0..x2
/* Enable MMU, set vectors and stack. */
mov x0, x28
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index bde658d51404..a906f9e2ff34 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
__kvm_hyp_host_forward_smc(host_ctxt);
}
-static void skip_host_instruction(void)
-{
- write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
if (!handled)
default_host_smc_handler(host_ctxt);
- /*
- * Unlike HVC, the return address of an SMC is the instruction's PC.
- * Move the return address past the instruction.
- */
- skip_host_instruction();
+ /* SMC was trapped, move ELR past the current PC. */
+ kvm_skip_host_instr();
}
void handle_trap(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index cbab0c6246e2..2997aa156d8e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -14,14 +14,14 @@
* Other CPUs should not be allowed to boot because their features were
* not checked against the finalized system capabilities.
*/
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
u64 cpu_logical_map(unsigned int cpu)
{
- if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+ if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
hyp_panic();
- return __cpu_logical_map[cpu];
+ return hyp_cpu_logical_map[cpu];
}
unsigned long __hyp_per_cpu_offset(unsigned int cpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 08dc9de69314..e3947846ffcb 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -7,11 +7,8 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
-#include <linux/psci.h>
-#include <kvm/arm_psci.h>
#include <uapi/linux/psci.h>
#include <nvhe/trap_handler.h>
@@ -22,9 +19,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
/* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;
#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
@@ -47,19 +43,16 @@ struct psci_boot_args {
static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;
-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
- DECLARE_REG(u64, func_id, host_ctxt, 0);
-
- return func_id;
-}
+#define is_psci_0_1(what, func_id) \
+ (kvm_host_psci_config.psci_0_1_ ## what ## _implemented && \
+ (func_id) == kvm_host_psci_config.function_ids_0_1.what)
static bool is_psci_0_1_call(u64 func_id)
{
- return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
- (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
- (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
- (func_id == kvm_host_psci_0_1_function_ids.migrate);
+ return (is_psci_0_1(cpu_suspend, func_id) ||
+ is_psci_0_1(cpu_on, func_id) ||
+ is_psci_0_1(cpu_off, func_id) ||
+ is_psci_0_1(migrate, func_id));
}
static bool is_psci_0_2_call(u64 func_id)
@@ -69,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
(PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}
-static bool is_psci_call(u64 func_id)
-{
- switch (kvm_host_psci_version) {
- case PSCI_VERSION(0, 1):
- return is_psci_0_1_call(func_id);
- default:
- return is_psci_0_2_call(func_id);
- }
-}
-
static unsigned long psci_call(unsigned long fn, unsigned long arg0,
unsigned long arg1, unsigned long arg2)
{
@@ -248,15 +231,14 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
- if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
- (func_id == kvm_host_psci_0_1_function_ids.migrate))
+ if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
return psci_forward(host_ctxt);
- else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+ if (is_psci_0_1(cpu_on, func_id))
return psci_cpu_on(func_id, host_ctxt);
- else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+ if (is_psci_0_1(cpu_suspend, func_id))
return psci_cpu_suspend(func_id, host_ctxt);
- else
- return PSCI_RET_NOT_SUPPORTED;
+
+ return PSCI_RET_NOT_SUPPORTED;
}
static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
@@ -298,20 +280,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
- u64 func_id = get_psci_func_id(host_ctxt);
+ DECLARE_REG(u64, func_id, host_ctxt, 0);
unsigned long ret;
- if (!is_psci_call(func_id))
- return false;
-
- switch (kvm_host_psci_version) {
+ switch (kvm_host_psci_config.version) {
case PSCI_VERSION(0, 1):
+ if (!is_psci_0_1_call(func_id))
+ return false;
ret = psci_0_1_handler(func_id, host_ctxt);
break;
case PSCI_VERSION(0, 2):
+ if (!is_psci_0_2_call(func_id))
+ return false;
ret = psci_0_2_handler(func_id, host_ctxt);
break;
default:
+ if (!is_psci_0_2_call(func_id))
+ return false;
ret = psci_1_0_handler(func_id, host_ctxt);
break;
}
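A simplified, self-contained model of the reworked dispatch in kvm_host_psci_handler(): the function ID is now validated against the ranges for the detected PSCI version before any handler runs. The version enum, ID ranges and handlers below are hand-written stand-ins (the 0.2+ ranges follow the PSCI spec's SMC32/SMC64 standard-service numbering), not the kernel's helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum psci_version { VER_0_1, VER_0_2, VER_1_0 };

static enum psci_version host_version = VER_1_0;

static bool is_psci_0_1_call(uint64_t func_id)
{
    (void)func_id;
    return false;    /* pretend no PSCI 0.1 IDs were registered */
}

static bool is_psci_0_2_call(uint64_t func_id)
{
    return (func_id >= 0x84000000ULL && func_id <= 0x8400001fULL) ||
           (func_id >= 0xc4000000ULL && func_id <= 0xc400001fULL);
}

/* Same flow as the reworked handler: filter per version, then dispatch. */
static bool handle_host_psci(uint64_t func_id)
{
    switch (host_version) {
    case VER_0_1:
        if (!is_psci_0_1_call(func_id))
            return false;    /* not ours: let the host forward it */
        break;
    default:
        if (!is_psci_0_2_call(func_id))
            return false;
        break;
    }
    printf("handled PSCI call 0x%llx\n", (unsigned long long)func_id);
    return true;
}

int main(void)
{
    bool a = handle_host_psci(0x84000008ULL);    /* SYSTEM_OFF */
    bool b = handle_host_psci(0x80001000ULL);    /* unrelated SMC, not handled */

    printf("%d %d\n", a, b);
    return 0;
}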
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 398f6df1bbe4..4ad66a532e38 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -850,8 +850,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
- kvm_pmu_vcpu_reset(vcpu);
-
return 0;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3313dedfa505..42ccc27fb684 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -594,6 +594,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
+ /* No PMU available, PMCR_EL0 may UNDEF... */
+ if (!kvm_arm_support_pmu_v3())
+ return;
+
pmcr = read_sysreg(pmcr_el0);
/*
* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
@@ -919,7 +923,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
#define reg_to_encoding(x) \
sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
- (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
+ (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index d8cc51bd60bf..70fcd6a12fe1 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -34,17 +34,16 @@ static u64 __early_kern_hyp_va(u64 addr)
}
/*
- * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ * Store a hyp VA <-> PA offset into an EL2-owned variable.
*/
static void init_hyp_physvirt_offset(void)
{
- extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
u64 kern_va, hyp_va;
/* Compute the offset from the hyp VA and PA of a random symbol. */
- kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+ kern_va = (u64)lm_alias(__hyp_text_start);
hyp_va = __early_kern_hyp_va(kern_va);
- CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+ hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}
/*
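A toy illustration of the offset set up in the va_layout.c hunk above: hyp_physvirt_offset is the physical address of one hyp symbol minus its hyp VA, after which a VA-to-PA conversion is a single addition. The helper names and addresses below are invented for the example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

static int64_t hyp_physvirt_offset;

/* Once the offset is known, a hyp VA becomes a PA with one addition. */
#define toy_hyp_pa(x) ((phys_addr_t)(x) + hyp_physvirt_offset)

static void init_offset(phys_addr_t sym_pa, uint64_t sym_hyp_va)
{
    hyp_physvirt_offset = (int64_t)sym_pa - (int64_t)sym_hyp_va;
}

int main(void)
{
    phys_addr_t pa = 0x40210000ULL;              /* invented PA of a hyp symbol */
    uint64_t hyp_va = 0x0000a00040210000ULL;     /* invented hyp VA of that symbol */

    init_offset(pa, hyp_va);
    assert(toy_hyp_pa(hyp_va + 0x40) == pa + 0x40);
    printf("hyp_physvirt_offset = %lld\n", (long long)hyp_physvirt_offset);
    return 0;
}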
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 32e32d67a127..052917deb149 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -419,7 +419,8 @@ int vgic_lazy_init(struct kvm *kvm)
* Map the MMIO regions depending on the VGIC model exposed to the guest;
 * called on the first VCPU run.
* Also map the virtual CPU interface into the VM.
- * v2/v3 derivatives call vgic_init if not already done.
+ * v2 calls vgic_init() if not already done.
+ * v3 and derivatives return an error if the VGIC is not initialized.
* vgic_ready() returns true if this function has succeeded.
* @kvm: kvm struct pointer
*/
@@ -428,7 +429,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
+ if (likely(vgic_ready(kvm)))
+ return 0;
+
mutex_lock(&kvm->lock);
+ if (vgic_ready(kvm))
+ goto out;
+
if (!irqchip_in_kernel(kvm))
goto out;
@@ -439,6 +446,8 @@ int kvm_vgic_map_resources(struct kvm *kvm)
if (ret)
__kvm_vgic_destroy(kvm);
+ else
+ dist->ready = true;
out:
mutex_unlock(&kvm->lock);
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index ebf53a4e1296..11934c2af2f4 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -306,20 +306,15 @@ int vgic_v2_map_resources(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
- if (vgic_ready(kvm))
- goto out;
-
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
kvm_err("Need to set vgic cpu and dist addresses first\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
kvm_err("VGIC CPU and dist frames overlap\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -329,13 +324,13 @@ int vgic_v2_map_resources(struct kvm *kvm)
ret = vgic_init(kvm);
if (ret) {
kvm_err("Unable to initialize VGIC dynamic data structures\n");
- goto out;
+ return ret;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
if (ret) {
kvm_err("Unable to register VGIC MMIO regions\n");
- goto out;
+ return ret;
}
if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
@@ -344,14 +339,11 @@ int vgic_v2_map_resources(struct kvm *kvm)
KVM_VGIC_V2_CPU_SIZE, true);
if (ret) {
kvm_err("Unable to remap VGIC CPU to VCPU\n");
- goto out;
+ return ret;
}
}
- dist->ready = true;
-
-out:
- return ret;
+ return 0;
}
DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 9cdf39a94a63..52915b342351 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -500,29 +500,23 @@ int vgic_v3_map_resources(struct kvm *kvm)
int ret = 0;
int c;
- if (vgic_ready(kvm))
- goto out;
-
kvm_for_each_vcpu(c, vcpu, kvm) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
kvm_debug("vcpu %d redistributor base not set\n", c);
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
}
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
kvm_err("Need to set vgic distributor addresses first\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (!vgic_v3_check_base(kvm)) {
kvm_err("VGIC redist and dist frames overlap\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -530,22 +524,19 @@ int vgic_v3_map_resources(struct kvm *kvm)
* the VGIC before we need to use it.
*/
if (!vgic_initialized(kvm)) {
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
if (ret) {
kvm_err("Unable to register VGICv3 dist MMIO regions\n");
- goto out;
+ return ret;
}
if (kvm_vgic_global_state.has_gicv4_1)
vgic_v4_configure_vsgis(kvm);
- dist->ready = true;
-out:
- return ret;
+ return 0;
}
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 3c40da479899..86a3877ea86f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -564,7 +564,7 @@ retry:
mmap_read_lock(mm);
} else {
/*
- * The above down_read_trylock() might have succeeded in which
+ * The above mmap_read_trylock() might have succeeded in which
* case, we'll have missed the might_sleep() from down_read().
*/
might_sleep();
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 75addb36354a..709d98fea90c 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -53,13 +53,13 @@ s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
/*
- * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
- * memory as some devices, namely the Raspberry Pi 4, have peripherals with
- * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
- * bit addressable memory area.
+ * If the corresponding config options are enabled, we create both ZONE_DMA
+ * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
+ * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
+ * In that case, ZONE_DMA32 covers the rest of the 32-bit addressable memory;
+ * otherwise it is empty.
*/
phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
/*
@@ -84,7 +84,7 @@ static void __init reserve_crashkernel(void)
if (crash_base == 0) {
/* Current arm64 boot protocol requires 2MB alignment */
- crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
+ crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
crash_size, SZ_2M);
if (crash_base == 0) {
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -196,6 +196,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
unsigned int __maybe_unused acpi_zone_dma_bits;
unsigned int __maybe_unused dt_zone_dma_bits;
+ phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
#ifdef CONFIG_ZONE_DMA
acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
@@ -205,8 +206,12 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
+ if (!arm64_dma_phys_limit)
+ arm64_dma_phys_limit = dma32_phys_limit;
#endif
+ if (!arm64_dma_phys_limit)
+ arm64_dma_phys_limit = PHYS_MASK + 1;
max_zone_pfns[ZONE_NORMAL] = max;
free_area_init(max_zone_pfns);
@@ -394,16 +399,9 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
- if (IS_ENABLED(CONFIG_ZONE_DMA32))
- arm64_dma32_phys_limit = max_zone_phys(32);
- else
- arm64_dma32_phys_limit = PHYS_MASK + 1;
-
reserve_elfcorehdr();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
-
- dma_contiguous_reserve(arm64_dma32_phys_limit);
}
void __init bootmem_init(void)
@@ -439,6 +437,11 @@ void __init bootmem_init(void)
zone_sizes_init(min, max);
/*
+ * Reserve the CMA area after arm64_dma_phys_limit was initialised.
+ */
+ dma_contiguous_reserve(arm64_dma_phys_limit);
+
+ /*
* request_standard_resources() depends on crashkernel's memory being
* reserved, so do it here.
*/
@@ -455,7 +458,7 @@ void __init bootmem_init(void)
void __init mem_init(void)
{
if (swiotlb_force == SWIOTLB_FORCE ||
- max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
+ max_pfn > PFN_DOWN(arm64_dma_phys_limit))
swiotlb_init(1);
else
swiotlb_force = SWIOTLB_NO_FORCE;
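A small model of the fallback chain the zone_sizes_init() hunk introduces for arm64_dma_phys_limit, which both swiotlb_init() and dma_contiguous_reserve() now consume: use the ZONE_DMA ceiling if that zone exists, else the ZONE_DMA32 ceiling, else all of physical memory. The flags, limits and PHYS_MASK value below are made-up inputs, not Kconfig or real-build values.

#include <stdint.h>
#include <stdio.h>

#define TOY_PHYS_MASK ((1ULL << 48) - 1)    /* stand-in for PHYS_MASK */

static uint64_t pick_dma_phys_limit(int have_dma, uint64_t dma_limit,
                                    int have_dma32, uint64_t dma32_limit)
{
    uint64_t limit = 0;

    if (have_dma)
        limit = dma_limit;             /* e.g. 1GiB on a Raspberry Pi 4 */
    if (!limit && have_dma32)
        limit = dma32_limit;           /* normally the 4GiB boundary */
    if (!limit)
        limit = TOY_PHYS_MASK + 1;     /* no DMA zones: no restriction */

    return limit;
}

int main(void)
{
    printf("0x%llx\n", (unsigned long long)pick_dma_phys_limit(1, 1ULL << 30, 1, 1ULL << 32));
    printf("0x%llx\n", (unsigned long long)pick_dma_phys_limit(0, 0, 1, 1ULL << 32));
    printf("0x%llx\n", (unsigned long long)pick_dma_phys_limit(0, 0, 0, 0));
    return 0;
}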
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ae0c3d023824..617e704c980b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -628,7 +628,7 @@ static bool arm64_early_this_cpu_has_bti(void)
if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
return false;
- pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
+ pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
return cpuid_feature_extract_unsigned_field(pfr1,
ID_AA64PFR1_BT_SHIFT);
}
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 37a54b57178a..c967bfd30d2b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,7 +46,7 @@
#endif
#ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1
+#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_HW_FLAGS 0
#endif
@@ -291,17 +291,7 @@ skip_pgd:
/* We're done: fire up the MMU again */
mrs x17, sctlr_el1
orr x17, x17, #SCTLR_ELx_M
- msr sctlr_el1, x17
- isb
-
- /*
- * Invalidate the local I-cache so that any instructions fetched
- * speculatively from the PoC are discarded, since they may have
- * been dynamically patched at the PoU.
- */
- ic iallu
- dsb nsh
- isb
+ set_sctlr_el1 x17
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]
@@ -464,8 +454,8 @@ SYM_FUNC_START(__cpu_setup)
#endif
msr mair_el1, x5
/*
- * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
- * both user and kernel.
+ * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further
+ * adjusted if the kernel is compiled with 52-bit VA support.
*/
mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \