Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/assembler.h          4
-rw-r--r--  arch/arm64/include/asm/cpufeature.h         6
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h            6
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h       30
-rw-r--r--  arch/arm64/include/asm/kvm_host.h          23
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h           50
-rw-r--r--  arch/arm64/include/asm/sysreg.h            35
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h          10
-rw-r--r--  arch/arm64/kernel/cpu_errata.c             23
-rw-r--r--  arch/arm64/kernel/traps.c                   4
-rw-r--r--  arch/arm64/kvm/hyp/entry.S                 36
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S             30
-rw-r--r--  arch/arm64/kvm/hyp/switch.c                14
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c             78
-rw-r--r--  arch/arm64/kvm/hyp/tlb.c                   12
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c    2
-rw-r--r--  arch/arm64/kvm/regmap.c                     4
-rw-r--r--  arch/arm64/kvm/sys_regs.c                  60
-rw-r--r--  arch/arm64/kvm/va_layout.c                  7
19 files changed, 267 insertions(+), 167 deletions(-)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 570d195a184d..e3a15c751b13 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -96,7 +96,11 @@
* RAS Error Synchronization barrier
*/
.macro esb
+#ifdef CONFIG_ARM64_RAS_EXTN
hint #16
+#else
+ nop
+#endif
.endm
/*
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 3d8db50d9ae2..407e2bf23676 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -620,6 +620,12 @@ static inline bool system_has_prio_mask_debugging(void)
system_uses_irq_prio_masking();
}
+#define ARM64_BP_HARDEN_UNKNOWN -1
+#define ARM64_BP_HARDEN_WA_NEEDED 0
+#define ARM64_BP_HARDEN_NOT_REQUIRED 1
+
+int get_spectre_v2_workaround_state(void);
+
#define ARM64_SSBD_UNKNOWN -1
#define ARM64_SSBD_FORCE_DISABLE 0
#define ARM64_SSBD_KERNEL 1
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 2ca437ef59fa..44a243754c1b 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -30,6 +30,12 @@
{ARM_EXCEPTION_TRAP, "TRAP" }, \
{ARM_EXCEPTION_HYP_GONE, "HYP_GONE" }
+/*
+ * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
+ * that jumps over this.
+ */
+#define KVM_VECTOR_PREAMBLE (2 * AARCH64_INSN_SIZE)
+
#ifndef __ASSEMBLY__
#include <linux/mm.h>
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 034dadec7168..d69c1efc63e7 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -126,7 +126,7 @@ static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
if (vcpu->arch.sysregs_loaded_on_cpu)
- return read_sysreg_el1(elr);
+ return read_sysreg_el1(SYS_ELR);
else
return *__vcpu_elr_el1(vcpu);
}
@@ -134,7 +134,7 @@ static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
if (vcpu->arch.sysregs_loaded_on_cpu)
- write_sysreg_el1(v, elr);
+ write_sysreg_el1(v, SYS_ELR);
else
*__vcpu_elr_el1(vcpu) = v;
}
@@ -186,7 +186,7 @@ static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
return vcpu_read_spsr32(vcpu);
if (vcpu->arch.sysregs_loaded_on_cpu)
- return read_sysreg_el1(spsr);
+ return read_sysreg_el1(SYS_SPSR);
else
return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}
@@ -199,7 +199,7 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
}
if (vcpu->arch.sysregs_loaded_on_cpu)
- write_sysreg_el1(v, spsr);
+ write_sysreg_el1(v, SYS_SPSR);
else
vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}
@@ -353,6 +353,20 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
+static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
+}
+
+static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
+ bool flag)
+{
+ if (flag)
+ vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
+ else
+ vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
+}
+
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
@@ -451,13 +465,13 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
*/
static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
- *vcpu_pc(vcpu) = read_sysreg_el2(elr);
- vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
+ *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
+ vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
- write_sysreg_el2(*vcpu_pc(vcpu), elr);
+ write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
+ write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}
#endif /* __ARM64_KVM_EMULATE_H__ */
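
The ELR/SPSR accessors changed above all follow one pattern: if the vcpu's EL1 state is currently loaded on the CPU, read or write the live system register, otherwise use the in-memory copy. A minimal standalone model of that pattern is sketched below; the names are placeholders for illustration, not kernel API.

    #include <stdbool.h>
    #include <stdint.h>

    /* Placeholder model of a vcpu whose EL1 state may be live on the CPU */
    struct demo_vcpu {
        bool     sysregs_loaded_on_cpu; /* true while state is on the hardware */
        uint64_t elr_el1_copy;          /* in-memory copy used otherwise */
    };

    static uint64_t hw_read_elr_el1(void)
    {
        return 0;       /* stand-in for read_sysreg_el1(SYS_ELR) */
    }

    static void hw_write_elr_el1(uint64_t v)
    {
        (void)v;        /* stand-in for write_sysreg_el1(v, SYS_ELR) */
    }

    static uint64_t demo_read_elr_el1(const struct demo_vcpu *vcpu)
    {
        if (vcpu->sysregs_loaded_on_cpu)
            return hw_read_elr_el1();
        return vcpu->elr_el1_copy;
    }

    static void demo_write_elr_el1(struct demo_vcpu *vcpu, uint64_t v)
    {
        if (vcpu->sysregs_loaded_on_cpu)
            hw_write_elr_el1(v);
        else
            vcpu->elr_el1_copy = v;
    }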
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 9f19c354b165..f656169db8c3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -19,12 +19,12 @@
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
-#include <asm/smp_plat.h>
#include <asm/thread_info.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -484,11 +484,10 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
-static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
- int cpu)
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
- cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
+ cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
}
void __kvm_enable_ssbs(void);
@@ -621,9 +620,21 @@ static inline void kvm_arm_vhe_guest_exit(void)
isb();
}
-static inline bool kvm_arm_harden_branch_predictor(void)
+#define KVM_BP_HARDEN_UNKNOWN -1
+#define KVM_BP_HARDEN_WA_NEEDED 0
+#define KVM_BP_HARDEN_NOT_REQUIRED 1
+
+static inline int kvm_arm_harden_branch_predictor(void)
{
- return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+ switch (get_spectre_v2_workaround_state()) {
+ case ARM64_BP_HARDEN_WA_NEEDED:
+ return KVM_BP_HARDEN_WA_NEEDED;
+ case ARM64_BP_HARDEN_NOT_REQUIRED:
+ return KVM_BP_HARDEN_NOT_REQUIRED;
+ case ARM64_BP_HARDEN_UNKNOWN:
+ default:
+ return KVM_BP_HARDEN_UNKNOWN;
+ }
}
#define KVM_SSBD_UNKNOWN -1
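
The point of the tristate is that KVM can now tell a guest the difference between "mitigation applied" and "mitigation not needed" when it answers an SMCCC ARCH_FEATURES query for ARCH_WORKAROUND_1. Below is a rough consumer sketch of that mapping; the SMCCC_RET_* values and the handler shape are assumptions for illustration, not quotes of the kernel's PSCI/SMCCC handler.

    /* Rough sketch: map the KVM tristate onto an SMCCC ARCH_FEATURES reply. */
    #define SMCCC_RET_NOT_SUPPORTED   (-1L)
    #define SMCCC_RET_SUCCESS         (0L)
    #define SMCCC_RET_NOT_REQUIRED    (-2L)  /* assumed value */

    #define KVM_BP_HARDEN_UNKNOWN       -1
    #define KVM_BP_HARDEN_WA_NEEDED      0
    #define KVM_BP_HARDEN_NOT_REQUIRED   1

    static long arch_workaround_1_feature_reply(int state)
    {
        switch (state) {        /* value from kvm_arm_harden_branch_predictor() */
        case KVM_BP_HARDEN_WA_NEEDED:
            return SMCCC_RET_SUCCESS;       /* guest should invoke the workaround */
        case KVM_BP_HARDEN_NOT_REQUIRED:
            return SMCCC_RET_NOT_REQUIRED;  /* guest may skip it entirely */
        case KVM_BP_HARDEN_UNKNOWN:
        default:
            return SMCCC_RET_NOT_SUPPORTED; /* make no claim either way */
        }
    }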
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 286f7e7e1be4..86825aa20852 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -18,7 +18,7 @@
#define read_sysreg_elx(r,nvh,vh) \
({ \
u64 reg; \
- asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
+ asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh), \
__mrs_s("%0", r##vh), \
ARM64_HAS_VIRT_HOST_EXTN) \
: "=r" (reg)); \
@@ -28,7 +28,7 @@
#define write_sysreg_elx(v,r,nvh,vh) \
do { \
u64 __val = (u64)(v); \
- asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
+ asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"), \
__msr_s(r##vh, "%x0"), \
ARM64_HAS_VIRT_HOST_EXTN) \
: : "rZ" (__val)); \
@@ -37,55 +37,15 @@
/*
* Unified accessors for registers that have a different encoding
* between VHE and non-VHE. They must be specified without their "ELx"
- * encoding.
+ * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
*/
-#define read_sysreg_el2(r) \
- ({ \
- u64 reg; \
- asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
- "mrs %0, " __stringify(r##_EL1),\
- ARM64_HAS_VIRT_HOST_EXTN) \
- : "=r" (reg)); \
- reg; \
- })
-
-#define write_sysreg_el2(v,r) \
- do { \
- u64 __val = (u64)(v); \
- asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
- "msr " __stringify(r##_EL1) ", %x0",\
- ARM64_HAS_VIRT_HOST_EXTN) \
- : : "rZ" (__val)); \
- } while (0)
#define read_sysreg_el0(r) read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r) write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r) read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r) write_sysreg_elx(v, r, _EL1, _EL12)
-
-/* The VHE specific system registers and their encoding */
-#define sctlr_EL12 sys_reg(3, 5, 1, 0, 0)
-#define cpacr_EL12 sys_reg(3, 5, 1, 0, 2)
-#define ttbr0_EL12 sys_reg(3, 5, 2, 0, 0)
-#define ttbr1_EL12 sys_reg(3, 5, 2, 0, 1)
-#define tcr_EL12 sys_reg(3, 5, 2, 0, 2)
-#define afsr0_EL12 sys_reg(3, 5, 5, 1, 0)
-#define afsr1_EL12 sys_reg(3, 5, 5, 1, 1)
-#define esr_EL12 sys_reg(3, 5, 5, 2, 0)
-#define far_EL12 sys_reg(3, 5, 6, 0, 0)
-#define mair_EL12 sys_reg(3, 5, 10, 2, 0)
-#define amair_EL12 sys_reg(3, 5, 10, 3, 0)
-#define vbar_EL12 sys_reg(3, 5, 12, 0, 0)
-#define contextidr_EL12 sys_reg(3, 5, 13, 0, 1)
-#define cntkctl_EL12 sys_reg(3, 5, 14, 1, 0)
-#define cntp_tval_EL02 sys_reg(3, 5, 14, 2, 0)
-#define cntp_ctl_EL02 sys_reg(3, 5, 14, 2, 1)
-#define cntp_cval_EL02 sys_reg(3, 5, 14, 2, 2)
-#define cntv_tval_EL02 sys_reg(3, 5, 14, 3, 0)
-#define cntv_ctl_EL02 sys_reg(3, 5, 14, 3, 1)
-#define cntv_cval_EL02 sys_reg(3, 5, 14, 3, 2)
-#define spsr_EL12 sys_reg(3, 5, 4, 0, 0)
-#define elr_EL12 sys_reg(3, 5, 4, 0, 1)
+#define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1)
+#define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1)
/**
* hyp_alternate_select - Generates patchable code sequences that are
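
With the EL2 accessors now built on read_sysreg_elx()/write_sysreg_elx(), every caller passes a SYS_-prefixed name and the ALTERNATIVE selects the _EL2 or _EL1/_EL12 encoding at patch time. The sketch below is a hand-simplified userspace model of that token-pasting trick: the kernel patches an ALTERNATIVE at boot, whereas this demo uses a plain runtime check, and the "encodings" are just enum tags rather than real MRS encodings.

    #include <stdbool.h>
    #include <stdio.h>

    enum demo_enc { SYS_SCTLR_EL1, SYS_SCTLR_EL12 };

    static bool has_vhe(void)
    {
        return false;   /* assumption: pretend this is a non-VHE CPU */
    }

    static unsigned long demo_mrs(enum demo_enc enc)
    {
        printf("mrs via encoding %d\n", (int)enc);  /* stand-in for __mrs_s() */
        return 0;
    }

    /* model of: #define read_sysreg_el1(r)  read_sysreg_elx(r, _EL1, _EL12) */
    #define read_sysreg_el1(r) \
        (has_vhe() ? demo_mrs(r##_EL12) : demo_mrs(r##_EL1))

    int main(void)
    {
        /* callers now pass the SYS_ name without an ELx suffix */
        unsigned long sctlr = read_sysreg_el1(SYS_SCTLR);

        (void)sctlr;
        return 0;
    }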
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d0bd4ffcf2c4..a7522fca1105 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -191,6 +191,9 @@
#define SYS_APGAKEYLO_EL1 sys_reg(3, 0, 2, 3, 0)
#define SYS_APGAKEYHI_EL1 sys_reg(3, 0, 2, 3, 1)
+#define SYS_SPSR_EL1 sys_reg(3, 0, 4, 0, 0)
+#define SYS_ELR_EL1 sys_reg(3, 0, 4, 0, 1)
+
#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0)
@@ -382,6 +385,9 @@
#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
+#define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1)
+#define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2)
+
#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
@@ -392,14 +398,17 @@
#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3))
#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n))
-#define SYS_PMCCFILTR_EL0 sys_reg (3, 3, 14, 15, 7)
+#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7)
#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
-
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
+#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
+#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
+#define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0)
#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
+#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0)
#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
@@ -444,7 +453,29 @@
#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
/* VHE encodings for architectural EL0/1 system registers */
+#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0)
+#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2)
#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
+#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
+#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
+#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2)
+#define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0)
+#define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1)
+#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0)
+#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
+#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
+#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
+#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
+#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
+#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0)
+#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1)
+#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0)
+#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0)
+#define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1)
+#define SYS_CNTP_CVAL_EL02 sys_reg(3, 5, 14, 2, 2)
+#define SYS_CNTV_TVAL_EL02 sys_reg(3, 5, 14, 3, 0)
+#define SYS_CNTV_CTL_EL02 sys_reg(3, 5, 14, 3, 1)
+#define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2)
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_DSSBS (_BITUL(44))
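
All of the new SYS_*_EL12 and SYS_*_EL02 definitions are sys_reg(op0, op1, CRn, CRm, op2) encodings with op1 = 5, the VHE alias space. The small sketch below shows the field packing these macros rely on; the shift values match the AArch64 MRS/MSR encoding fields but are stated here as an assumption rather than a quote of sysreg.h.

    #include <stdio.h>

    /* assumed field positions, matching the AArch64 MRS/MSR encoding fields */
    #define OP0_SHIFT 19
    #define OP1_SHIFT 16
    #define CRN_SHIFT 12
    #define CRM_SHIFT  8
    #define OP2_SHIFT  5

    #define sys_reg(op0, op1, crn, crm, op2) \
        (((op0) << OP0_SHIFT) | ((op1) << OP1_SHIFT) | ((crn) << CRN_SHIFT) | \
         ((crm) << CRM_SHIFT) | ((op2) << OP2_SHIFT))

    #define SYS_SPSR_EL12  sys_reg(3, 5, 4, 0, 0)  /* value from the hunk above */

    int main(void)
    {
        unsigned int enc = SYS_SPSR_EL12;

        printf("op0=%u op1=%u CRn=%u CRm=%u op2=%u\n",
               (enc >> OP0_SHIFT) & 0x3, (enc >> OP1_SHIFT) & 0x7,
               (enc >> CRN_SHIFT) & 0xf, (enc >> CRM_SHIFT) & 0xf,
               (enc >> OP2_SHIFT) & 0x7);
        return 0;
    }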
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index d819a3e8b552..9a507716ae2f 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -229,6 +229,16 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
/* SVE registers */
#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
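
The new firmware pseudo-registers are exposed through the ordinary KVM_GET_ONE_REG/KVM_SET_ONE_REG interface, so userspace (for instance a VMM handling migration) can read and transfer the mitigation state. A hedged sketch of the read side follows; it assumes a kernel whose uapi headers carry these definitions and a vcpu fd that has already been created and initialised.

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    /* Read KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 from a vcpu fd; returns the
     * *_NOT_AVAIL/_AVAIL/_NOT_REQUIRED value on success, -1 on error. */
    static int read_wa1_state(int vcpu_fd)
    {
        uint64_t val = 0;
        struct kvm_one_reg reg = {
            .id   = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
            .addr = (uint64_t)(uintptr_t)&val,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
            perror("KVM_GET_ONE_REG");
            return -1;
        }

        printf("SMCCC_ARCH_WORKAROUND_1 state: %llu\n",
               (unsigned long long)val);
        return (int)val;
    }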
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ca11ff7bf55e..1e43ba5c79b7 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -554,6 +554,17 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;
+int get_spectre_v2_workaround_state(void)
+{
+ if (__spectrev2_safe)
+ return ARM64_BP_HARDEN_NOT_REQUIRED;
+
+ if (!__hardenbp_enab)
+ return ARM64_BP_HARDEN_UNKNOWN;
+
+ return ARM64_BP_HARDEN_WA_NEEDED;
+}
+
/*
* List of CPUs that do not need any Spectre-v2 mitigation at all.
*/
@@ -854,13 +865,15 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
char *buf)
{
- if (__spectrev2_safe)
+ switch (get_spectre_v2_workaround_state()) {
+ case ARM64_BP_HARDEN_NOT_REQUIRED:
return sprintf(buf, "Not affected\n");
-
- if (__hardenbp_enab)
+ case ARM64_BP_HARDEN_WA_NEEDED:
return sprintf(buf, "Mitigation: Branch predictor hardening\n");
-
- return sprintf(buf, "Vulnerable\n");
+ case ARM64_BP_HARDEN_UNKNOWN:
+ default:
+ return sprintf(buf, "Vulnerable\n");
+ }
}
ssize_t cpu_show_spec_store_bypass(struct device *dev,
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 678af745d881..8c03456dade6 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -871,6 +871,10 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
/*
* The CPU can't make progress. The exception may have
* been imprecise.
+ *
+ * Neoverse-N1 #1349291 means a non-KVM SError reported as
+ * Unrecoverable should be treated as Uncontainable. We
+ * call arm64_serror_panic() in both cases.
*/
return true;
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index bd34016354ba..e5cc8d66bf53 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
+#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
@@ -52,6 +53,20 @@ ENTRY(__guest_enter)
// Store the host regs
save_callee_saved_regs x1
+ // Now the host state is stored. If we have a pending RAS SError it must
+ // affect the host. If any asynchronous exception is pending we defer
+ // the guest entry. The DSB isn't necessary before v8.2 as any SError
+ // would be fatal.
+alternative_if ARM64_HAS_RAS_EXTN
+ dsb nshst
+ isb
+alternative_else_nop_endif
+ mrs x1, isr_el1
+ cbz x1, 1f
+ mov x0, #ARM_EXCEPTION_IRQ
+ ret
+
+1:
add x18, x0, #VCPU_CONTEXT
// Macro ptrauth_switch_to_guest format:
@@ -127,8 +142,8 @@ ENTRY(__guest_exit)
alternative_if ARM64_HAS_RAS_EXTN
// If we have the RAS extensions we can consume a pending error
- // without an unmask-SError and isb.
- esb
+ // without an unmask-SError and isb. The ESB-instruction consumed any
+ // pending guest error when we took the exception from the guest.
mrs_s x2, SYS_DISR_EL1
str x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
cbz x2, 1f
@@ -136,8 +151,16 @@ alternative_if ARM64_HAS_RAS_EXTN
orr x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1: ret
alternative_else
- // If we have a pending asynchronous abort, now is the
- // time to find out. From your VAXorcist book, page 666:
+ dsb sy // Synchronize against in-flight ld/st
+ isb // Prevent an early read of side-effect free ISR
+ mrs x2, isr_el1
+ tbnz x2, #8, 2f // ISR_EL1.A
+ ret
+ nop
+2:
+alternative_endif
+ // We know we have a pending asynchronous abort, now is the
+ // time to flush it out. From your VAXorcist book, page 666:
// "Threaten me not, oh Evil one! For I speak with
// the power of DEC, and I command thee to show thyself!"
mrs x2, elr_el2
@@ -145,10 +168,7 @@ alternative_else
mrs x4, spsr_el2
mov x5, x0
- dsb sy // Synchronize against in-flight ld/st
- nop
msr daifclr, #4 // Unmask aborts
-alternative_endif
// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
@@ -161,6 +181,8 @@ abort_guest_exit_start:
.global abort_guest_exit_end
abort_guest_exit_end:
+ msr daifset, #4 // Mask aborts
+
// If the exception took place, restore the EL1 exception
// context so that we can report some information.
// Merge the exception code with the SError pending bit.
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index b8e045615961..ffa68d5713f1 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -216,17 +216,34 @@ ENDPROC(\label)
.align 11
+.macro check_preamble_length start, end
+/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
+.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
+ .error "KVM vector preamble length mismatch"
+.endif
+.endm
+
.macro valid_vect target
.align 7
+661:
+ esb
stp x0, x1, [sp, #-16]!
+662:
b \target
+
+check_preamble_length 661b, 662b
.endm
.macro invalid_vect target
.align 7
+661:
b \target
+ nop
+662:
ldp x0, x1, [sp], #16
b \target
+
+check_preamble_length 661b, 662b
.endm
ENTRY(__kvm_hyp_vector)
@@ -254,13 +271,14 @@ ENDPROC(__kvm_hyp_vector)
#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
.align 7
-1: .rept 27
+1: esb
+ .rept 26
nop
.endr
/*
* The default sequence is to directly branch to the KVM vectors,
* using the computed offset. This applies for VHE as well as
- * !ARM64_HARDEN_EL2_VECTORS.
+ * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
*
* For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
* with:
@@ -271,12 +289,13 @@ ENDPROC(__kvm_hyp_vector)
* movk x0, #((addr >> 32) & 0xffff), lsl #32
* br x0
*
- * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
+ * Where:
+ * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
* See kvm_patch_vector_branch for details.
*/
alternative_cb kvm_patch_vector_branch
- b __kvm_hyp_vector + (1b - 0b)
- nop
+ stp x0, x1, [sp, #-16]!
+ b __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
nop
nop
nop
@@ -301,6 +320,7 @@ ENTRY(__bp_harden_hyp_vecs_end)
.popsection
ENTRY(__smccc_workaround_1_smc_start)
+ esb
sub sp, sp, #(8 * 4)
stp x2, x3, [sp, #(8 * 0)]
stp x0, x1, [sp, #(8 * 2)]
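
Every vector slot now starts with a fixed two-instruction preamble (the ESB, then the stp that stashes x0/x1), and kvm_patch_vector_branch() must branch past exactly that much. The short C sketch below models that offset bookkeeping using the constant from the kvm_asm.h hunk above; the helper itself is hypothetical and only illustrates the arithmetic.

    #include <stdint.h>

    #define AARCH64_INSN_SIZE    4
    #define KVM_VECTOR_PREAMBLE  (2 * AARCH64_INSN_SIZE)  /* esb + stp */

    /*
     * Illustrative only: compute the address the patched movz/movk/br
     * sequence should jump to, so the preamble already executed in the
     * hardened vector page is not run a second time.
     */
    static uint64_t demo_branch_target(uint64_t hyp_vector_va,
                                       uint64_t vector_offset)
    {
        return hyp_vector_va + vector_offset + KVM_VECTOR_PREAMBLE;
    }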
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 58f281b6ca4a..adaf266d8de8 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -284,7 +284,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
return true;
- far = read_sysreg_el2(far);
+ far = read_sysreg_el2(SYS_FAR);
/*
* The HPFAR can be invalid if the stage 2 fault did not
@@ -401,7 +401,7 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
- vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);
+ vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
/*
* We're using the raw exception code in order to only process
@@ -697,8 +697,8 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));
__hyp_do_panic(str_va,
- spsr, elr,
- read_sysreg(esr_el2), read_sysreg_el2(far),
+ spsr, elr,
+ read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
read_sysreg(hpfar_el2), par, vcpu);
}
@@ -713,15 +713,15 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
panic(__hyp_panic_string,
spsr, elr,
- read_sysreg_el2(esr), read_sysreg_el2(far),
+ read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
- u64 spsr = read_sysreg_el2(spsr);
- u64 elr = read_sysreg_el2(elr);
+ u64 spsr = read_sysreg_el2(SYS_SPSR);
+ u64 elr = read_sysreg_el2(SYS_ELR);
u64 par = read_sysreg(par_el1);
if (!has_vhe())
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index c283f7cbc702..7ddbc849b580 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -43,33 +43,33 @@ static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
- ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
+ ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
- ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(cpacr);
- ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(ttbr0);
- ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(ttbr1);
- ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(tcr);
- ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(esr);
- ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(afsr0);
- ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(afsr1);
- ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(far);
- ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(mair);
- ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(vbar);
- ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(contextidr);
- ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
- ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
+ ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
+ ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
+ ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
+ ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(SYS_TCR);
+ ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(SYS_ESR);
+ ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(SYS_AFSR0);
+ ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(SYS_AFSR1);
+ ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(SYS_FAR);
+ ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(SYS_MAIR);
+ ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(SYS_VBAR);
+ ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(SYS_CONTEXTIDR);
+ ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(SYS_AMAIR);
+ ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(SYS_CNTKCTL);
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
- ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
- ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
+ ctxt->gp_regs.elr_el1 = read_sysreg_el1(SYS_ELR);
+ ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
}
static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
- ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
- ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
+ ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
+ ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
@@ -109,35 +109,35 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
- write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
- write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+ write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
+ write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
}
static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
- write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr);
- write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
- write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], cpacr);
- write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], ttbr0);
- write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], ttbr1);
- write_sysreg_el1(ctxt->sys_regs[TCR_EL1], tcr);
- write_sysreg_el1(ctxt->sys_regs[ESR_EL1], esr);
- write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], afsr0);
- write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], afsr1);
- write_sysreg_el1(ctxt->sys_regs[FAR_EL1], far);
- write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], mair);
- write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], vbar);
- write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],contextidr);
- write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
- write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
+ write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+ write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
+ write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
+ write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
+ write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
+ write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+ write_sysreg_el1(ctxt->sys_regs[ESR_EL1], SYS_ESR);
+ write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], SYS_AFSR0);
+ write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], SYS_AFSR1);
+ write_sysreg_el1(ctxt->sys_regs[FAR_EL1], SYS_FAR);
+ write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], SYS_MAIR);
+ write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], SYS_VBAR);
+ write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],SYS_CONTEXTIDR);
+ write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], SYS_AMAIR);
+ write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], SYS_CNTKCTL);
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
- write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
- write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
+ write_sysreg_el1(ctxt->gp_regs.elr_el1, SYS_ELR);
+ write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
}
static void __hyp_text
@@ -160,8 +160,8 @@ __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
pstate = PSR_MODE_EL2h | PSR_IL_BIT;
- write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
- write_sysreg_el2(pstate, spsr);
+ write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR);
+ write_sysreg_el2(pstate, SYS_SPSR);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 32078b767f63..d49a14497715 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -33,12 +33,12 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
* in the TCR_EL1 register. We also need to prevent it to
* allocate IPA->PA walks, so we enable the S1 MMU...
*/
- val = cxt->tcr = read_sysreg_el1(tcr);
+ val = cxt->tcr = read_sysreg_el1(SYS_TCR);
val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
- write_sysreg_el1(val, tcr);
- val = cxt->sctlr = read_sysreg_el1(sctlr);
+ write_sysreg_el1(val, SYS_TCR);
+ val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
val |= SCTLR_ELx_M;
- write_sysreg_el1(val, sctlr);
+ write_sysreg_el1(val, SYS_SCTLR);
}
/*
@@ -85,8 +85,8 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
/* Restore the registers to what they were */
- write_sysreg_el1(cxt->tcr, tcr);
- write_sysreg_el1(cxt->sctlr, sctlr);
+ write_sysreg_el1(cxt->tcr, SYS_TCR);
+ write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
}
local_irq_restore(cxt->flags);
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index ba2aaeb84c6c..29ee1feba4eb 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -16,7 +16,7 @@
static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
- return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT);
+ return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT);
return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
}
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
index d66613e6ad08..0d60e4f0af66 100644
--- a/arch/arm64/kvm/regmap.c
+++ b/arch/arm64/kvm/regmap.c
@@ -152,7 +152,7 @@ unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
switch (spsr_idx) {
case KVM_SPSR_SVC:
- return read_sysreg_el1(spsr);
+ return read_sysreg_el1(SYS_SPSR);
case KVM_SPSR_ABT:
return read_sysreg(spsr_abt);
case KVM_SPSR_UND:
@@ -177,7 +177,7 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
switch (spsr_idx) {
case KVM_SPSR_SVC:
- write_sysreg_el1(v, spsr);
+ write_sysreg_el1(v, SYS_SPSR);
case KVM_SPSR_ABT:
write_sysreg(v, spsr_abt);
case KVM_SPSR_UND:
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index ce933f296049..f26e181d881c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -81,24 +81,24 @@ u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
*/
switch (reg) {
case CSSELR_EL1: return read_sysreg_s(SYS_CSSELR_EL1);
- case SCTLR_EL1: return read_sysreg_s(sctlr_EL12);
+ case SCTLR_EL1: return read_sysreg_s(SYS_SCTLR_EL12);
case ACTLR_EL1: return read_sysreg_s(SYS_ACTLR_EL1);
- case CPACR_EL1: return read_sysreg_s(cpacr_EL12);
- case TTBR0_EL1: return read_sysreg_s(ttbr0_EL12);
- case TTBR1_EL1: return read_sysreg_s(ttbr1_EL12);
- case TCR_EL1: return read_sysreg_s(tcr_EL12);
- case ESR_EL1: return read_sysreg_s(esr_EL12);
- case AFSR0_EL1: return read_sysreg_s(afsr0_EL12);
- case AFSR1_EL1: return read_sysreg_s(afsr1_EL12);
- case FAR_EL1: return read_sysreg_s(far_EL12);
- case MAIR_EL1: return read_sysreg_s(mair_EL12);
- case VBAR_EL1: return read_sysreg_s(vbar_EL12);
- case CONTEXTIDR_EL1: return read_sysreg_s(contextidr_EL12);
+ case CPACR_EL1: return read_sysreg_s(SYS_CPACR_EL12);
+ case TTBR0_EL1: return read_sysreg_s(SYS_TTBR0_EL12);
+ case TTBR1_EL1: return read_sysreg_s(SYS_TTBR1_EL12);
+ case TCR_EL1: return read_sysreg_s(SYS_TCR_EL12);
+ case ESR_EL1: return read_sysreg_s(SYS_ESR_EL12);
+ case AFSR0_EL1: return read_sysreg_s(SYS_AFSR0_EL12);
+ case AFSR1_EL1: return read_sysreg_s(SYS_AFSR1_EL12);
+ case FAR_EL1: return read_sysreg_s(SYS_FAR_EL12);
+ case MAIR_EL1: return read_sysreg_s(SYS_MAIR_EL12);
+ case VBAR_EL1: return read_sysreg_s(SYS_VBAR_EL12);
+ case CONTEXTIDR_EL1: return read_sysreg_s(SYS_CONTEXTIDR_EL12);
case TPIDR_EL0: return read_sysreg_s(SYS_TPIDR_EL0);
case TPIDRRO_EL0: return read_sysreg_s(SYS_TPIDRRO_EL0);
case TPIDR_EL1: return read_sysreg_s(SYS_TPIDR_EL1);
- case AMAIR_EL1: return read_sysreg_s(amair_EL12);
- case CNTKCTL_EL1: return read_sysreg_s(cntkctl_EL12);
+ case AMAIR_EL1: return read_sysreg_s(SYS_AMAIR_EL12);
+ case CNTKCTL_EL1: return read_sysreg_s(SYS_CNTKCTL_EL12);
case PAR_EL1: return read_sysreg_s(SYS_PAR_EL1);
case DACR32_EL2: return read_sysreg_s(SYS_DACR32_EL2);
case IFSR32_EL2: return read_sysreg_s(SYS_IFSR32_EL2);
@@ -124,24 +124,24 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
*/
switch (reg) {
case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); return;
- case SCTLR_EL1: write_sysreg_s(val, sctlr_EL12); return;
+ case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); return;
case ACTLR_EL1: write_sysreg_s(val, SYS_ACTLR_EL1); return;
- case CPACR_EL1: write_sysreg_s(val, cpacr_EL12); return;
- case TTBR0_EL1: write_sysreg_s(val, ttbr0_EL12); return;
- case TTBR1_EL1: write_sysreg_s(val, ttbr1_EL12); return;
- case TCR_EL1: write_sysreg_s(val, tcr_EL12); return;
- case ESR_EL1: write_sysreg_s(val, esr_EL12); return;
- case AFSR0_EL1: write_sysreg_s(val, afsr0_EL12); return;
- case AFSR1_EL1: write_sysreg_s(val, afsr1_EL12); return;
- case FAR_EL1: write_sysreg_s(val, far_EL12); return;
- case MAIR_EL1: write_sysreg_s(val, mair_EL12); return;
- case VBAR_EL1: write_sysreg_s(val, vbar_EL12); return;
- case CONTEXTIDR_EL1: write_sysreg_s(val, contextidr_EL12); return;
+ case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); return;
+ case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); return;
+ case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); return;
+ case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); return;
+ case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); return;
+ case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); return;
+ case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); return;
+ case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); return;
+ case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); return;
+ case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); return;
+ case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12); return;
case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); return;
case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); return;
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); return;
- case AMAIR_EL1: write_sysreg_s(val, amair_EL12); return;
- case CNTKCTL_EL1: write_sysreg_s(val, cntkctl_EL12); return;
+ case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); return;
+ case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); return;
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); return;
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); return;
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); return;
@@ -865,12 +865,12 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (r->Op2 & 0x1) {
/* accessing PMCNTENSET_EL0 */
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
- kvm_pmu_enable_counter(vcpu, val);
+ kvm_pmu_enable_counter_mask(vcpu, val);
kvm_vcpu_pmu_restore_guest(vcpu);
} else {
/* accessing PMCNTENCLR_EL0 */
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
- kvm_pmu_disable_counter(vcpu, val);
+ kvm_pmu_disable_counter_mask(vcpu, val);
}
} else {
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 2947ab1b0fa5..acd8084f1f2c 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -170,11 +170,10 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
addr |= ((u64)origptr & GENMASK_ULL(10, 7));
/*
- * Branch to the second instruction in the vectors in order to
- * avoid the initial store on the stack (which we already
- * perform in the hardening vectors).
+ * Branch over the preamble in order to avoid the initial store on
+ * the stack (which we already perform in the hardening vectors).
*/
- addr += AARCH64_INSN_SIZE;
+ addr += KVM_VECTOR_PREAMBLE;
/* stp x0, x1, [sp, #-16]! */
insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,