author     Paolo Bonzini <pbonzini@redhat.com>   2023-08-31 20:18:53 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>   2023-08-31 20:18:53 +0300
commit     e0fb12c673e53d2a103b9e0abc92204de0fc325d (patch)
tree       3f91fa1ed8efb9e2b80e542075fb93cdbd0c3370 /arch/arm64
parent     2dde18cd1d8fac735875f2e4987f11817cc0bc2c (diff)
parent     1f66f1246bfa08aaf13db897736de49cbeaf72a1 (diff)
Merge tag 'kvmarm-6.6' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 updates for Linux 6.6

- Add support for TLB range invalidation of Stage-2 page tables,
  avoiding unnecessary invalidations. Systems that do not implement
  range invalidation still rely on a full invalidation when dealing
  with large ranges.

- Add infrastructure for forwarding traps taken from a L2 guest to
  the L1 guest, with L0 acting as the dispatcher, another baby step
  towards the full nested support.

- Simplify the way we deal with the (long deprecated) 'CPU target',
  resulting in a much needed cleanup.

- Fix another set of PMU bugs, both on the guest and host sides, as
  we seem to never have any shortage of those...

- Relax the alignment requirements of EL2 VA allocations for
  non-stack allocations, as we were otherwise wasting a lot of that
  precious VA space.

- The usual set of non-functional cleanups, although I note the lack
  of spelling fixes...
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h          51
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h           3
-rw-r--r--  arch/arm64/include/asm/kvm_host.h         24
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h           1
-rw-r--r--  arch/arm64/include/asm/kvm_nested.h        2
-rw-r--r--  arch/arm64/include/asm/kvm_pgtable.h      10
-rw-r--r--  arch/arm64/include/asm/sysreg.h          268
-rw-r--r--  arch/arm64/include/asm/tlbflush.h        124
-rw-r--r--  arch/arm64/kernel/cpufeature.c             7
-rw-r--r--  arch/arm64/kvm/Kconfig                     2
-rw-r--r--  arch/arm64/kvm/arm.c                      65
-rw-r--r--  arch/arm64/kvm/emulate-nested.c         1852
-rw-r--r--  arch/arm64/kvm/guest.c                    15
-rw-r--r--  arch/arm64/kvm/handle_exit.c              29
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h  127
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/mm.h       1
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-main.c        11
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mm.c              83
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/setup.c           27
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c           2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/tlb.c             30
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c              63
-rw-r--r--  arch/arm64/kvm/hyp/vhe/tlb.c              28
-rw-r--r--  arch/arm64/kvm/mmu.c                     102
-rw-r--r--  arch/arm64/kvm/nested.c                   11
-rw-r--r--  arch/arm64/kvm/pmu-emul.c                 37
-rw-r--r--  arch/arm64/kvm/pmu.c                      18
-rw-r--r--  arch/arm64/kvm/reset.c                    25
-rw-r--r--  arch/arm64/kvm/sys_regs.c                 15
-rw-r--r--  arch/arm64/kvm/trace_arm.h                26
-rw-r--r--  arch/arm64/kvm/vgic/vgic.h                 2
-rw-r--r--  arch/arm64/tools/cpucaps                   1
-rw-r--r--  arch/arm64/tools/sysreg                  129
33 files changed, 2924 insertions, 267 deletions
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 58e5eb27da68..5882b2415596 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,10 +18,19 @@
#define HCR_DCT (UL(1) << 57)
#define HCR_ATA_SHIFT 56
#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
+#define HCR_TTLBOS (UL(1) << 55)
+#define HCR_TTLBIS (UL(1) << 54)
+#define HCR_ENSCXT (UL(1) << 53)
+#define HCR_TOCU (UL(1) << 52)
#define HCR_AMVOFFEN (UL(1) << 51)
+#define HCR_TICAB (UL(1) << 50)
#define HCR_TID4 (UL(1) << 49)
#define HCR_FIEN (UL(1) << 47)
#define HCR_FWB (UL(1) << 46)
+#define HCR_NV2 (UL(1) << 45)
+#define HCR_AT (UL(1) << 44)
+#define HCR_NV1 (UL(1) << 43)
+#define HCR_NV (UL(1) << 42)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
#define HCR_TEA (UL(1) << 37)
@@ -89,7 +98,6 @@
HCR_BSU_IS | HCR_FB | HCR_TACR | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
-#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
@@ -324,6 +332,47 @@
BIT(18) | \
GENMASK(16, 15))
+/*
+ * FGT register definitions
+ *
+ * RES0 and polarity masks as of DDI0487J.a, to be updated as needed.
+ * We're not using the generated masks as they are usually ahead of
+ * the published ARM ARM, which we use as a reference.
+ *
+ * Once we get to a point where the two describe the same thing, we'll
+ * merge the definitions. One day.
+ */
+#define __HFGRTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51))
+#define __HFGRTR_EL2_MASK GENMASK(49, 0)
+#define __HFGRTR_EL2_nMASK (GENMASK(55, 54) | BIT(50))
+
+#define __HFGWTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51) | \
+ BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
+ GENMASK(26, 25) | BIT(21) | BIT(18) | \
+ GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
+#define __HFGWTR_EL2_MASK GENMASK(49, 0)
+#define __HFGWTR_EL2_nMASK (GENMASK(55, 54) | BIT(50))
+
+#define __HFGITR_EL2_RES0 GENMASK(63, 57)
+#define __HFGITR_EL2_MASK GENMASK(54, 0)
+#define __HFGITR_EL2_nMASK GENMASK(56, 55)
+
+#define __HDFGRTR_EL2_RES0 (BIT(49) | BIT(42) | GENMASK(39, 38) | \
+ GENMASK(21, 20) | BIT(8))
+#define __HDFGRTR_EL2_MASK ~__HDFGRTR_EL2_nMASK
+#define __HDFGRTR_EL2_nMASK GENMASK(62, 59)
+
+#define __HDFGWTR_EL2_RES0 (BIT(63) | GENMASK(59, 58) | BIT(51) | BIT(47) | \
+ BIT(43) | GENMASK(40, 38) | BIT(34) | BIT(30) | \
+ BIT(22) | BIT(9) | BIT(6))
+#define __HDFGWTR_EL2_MASK ~__HDFGWTR_EL2_nMASK
+#define __HDFGWTR_EL2_nMASK GENMASK(62, 60)
+
+/* Similar definitions for HCRX_EL2 */
+#define __HCRX_EL2_RES0 (GENMASK(63, 16) | GENMASK(13, 12))
+#define __HCRX_EL2_MASK (0)
+#define __HCRX_EL2_nMASK (GENMASK(15, 14) | GENMASK(4, 0))
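
As a quick cross-check of the HFGRTR_EL2 values above: the RES0, MASK and nMASK definitions are disjoint and together cover all 64 bits of the register. The stand-alone program below verifies that; it is only an illustration, not validation code that KVM itself carries, and it re-implements GENMASK()/BIT() locally so it can be built outside the kernel tree.

/* Illustrative cross-check of the HFGRTR_EL2 mask layout shown above. */
#include <assert.h>
#include <stdio.h>

#define BIT(n)		(1ULL << (n))
#define GENMASK(h, l)	(((~0ULL) >> (63 - (h))) & ~(BIT(l) - 1))

#define HFGRTR_EL2_RES0		(GENMASK(63, 56) | GENMASK(53, 51))
#define HFGRTR_EL2_MASK		GENMASK(49, 0)
#define HFGRTR_EL2_nMASK	(GENMASK(55, 54) | BIT(50))

int main(void)
{
	/* The three masks do not overlap... */
	assert(!(HFGRTR_EL2_RES0 & HFGRTR_EL2_MASK));
	assert(!(HFGRTR_EL2_RES0 & HFGRTR_EL2_nMASK));
	assert(!(HFGRTR_EL2_MASK & HFGRTR_EL2_nMASK));
	/* ...and together they describe every bit of the register. */
	assert((HFGRTR_EL2_RES0 | HFGRTR_EL2_MASK | HFGRTR_EL2_nMASK) == ~0ULL);
	printf("HFGRTR_EL2 RES0/MASK/nMASK layout is consistent\n");
	return 0;
}
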
+
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
/*
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 24e28bb2d95b..24b5e6b23417 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -70,6 +70,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
+ __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
@@ -229,6 +230,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
phys_addr_t ipa,
int level);
+extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d3dd05bbfe23..af06ccb7ee34 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -49,6 +49,7 @@
#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
+#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
@@ -380,6 +381,7 @@ enum vcpu_sysreg {
CPTR_EL2, /* Architectural Feature Trap Register (EL2) */
HSTR_EL2, /* Hypervisor System Trap Register */
HACR_EL2, /* Hypervisor Auxiliary Control Register */
+ HCRX_EL2, /* Extended Hypervisor Configuration Register */
TTBR0_EL2, /* Translation Table Base Register 0 (EL2) */
TTBR1_EL2, /* Translation Table Base Register 1 (EL2) */
TCR_EL2, /* Translation Control Register (EL2) */
@@ -400,6 +402,11 @@ enum vcpu_sysreg {
TPIDR_EL2, /* EL2 Software Thread ID Register */
CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
SP_EL2, /* EL2 Stack Pointer */
+ HFGRTR_EL2,
+ HFGWTR_EL2,
+ HFGITR_EL2,
+ HDFGRTR_EL2,
+ HDFGWTR_EL2,
CNTHP_CTL_EL2,
CNTHP_CVAL_EL2,
CNTHV_CTL_EL2,
@@ -567,8 +574,7 @@ struct kvm_vcpu_arch {
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
- /* Target CPU and feature flags */
- int target;
+ /* feature flags */
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
@@ -669,6 +675,8 @@ struct kvm_vcpu_arch {
#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2))
+/* KVM_ARM_VCPU_INIT completed */
+#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(3))
/* Exception pending */
#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
@@ -899,7 +907,6 @@ struct kvm_vcpu_stat {
u64 exits;
};
-void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
@@ -967,8 +974,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
-void force_vm_exit(const cpumask_t *mask);
-
int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
@@ -983,6 +988,7 @@ int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
int __init kvm_sys_reg_table_init(void);
+int __init populate_nv_trap_config(void);
bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);
@@ -1049,8 +1055,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
-
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
@@ -1113,13 +1117,15 @@ int __init kvm_set_ipa_limit(void);
#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+
static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
return false;
}
-void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
-
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 0e1e1ab17b4d..96a80e8f6226 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -168,6 +168,7 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
void **haddr);
+int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
void __init free_hyp_pgds(void);
void stage2_unmap_vm(struct kvm *kvm);
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 8fb67f032fd1..fa23cc9c2adc 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -11,6 +11,8 @@ static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features));
}
+extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
+
struct sys_reg_params;
struct sys_reg_desc;
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 929d355eae0a..d3e354bb8351 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -746,4 +746,14 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);
* kvm_pgtable_prot format.
*/
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
+
+/**
+ * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
+ *
+ * @mmu: Stage-2 KVM MMU struct
+ * @addr: The base Intermediate physical address from which to invalidate
+ * @size: Size of the range from the base to invalidate
+ */
+void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t addr, size_t size);
#endif /* __ARM64_KVM_PGTABLE_H__ */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b481935e9314..818c111009ca 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -124,6 +124,37 @@
#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4)
#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6)
+#define SYS_IC_IALLUIS sys_insn(1, 0, 7, 1, 0)
+#define SYS_IC_IALLU sys_insn(1, 0, 7, 5, 0)
+#define SYS_IC_IVAU sys_insn(1, 3, 7, 5, 1)
+
+#define SYS_DC_IVAC sys_insn(1, 0, 7, 6, 1)
+#define SYS_DC_IGVAC sys_insn(1, 0, 7, 6, 3)
+#define SYS_DC_IGDVAC sys_insn(1, 0, 7, 6, 5)
+
+#define SYS_DC_CVAC sys_insn(1, 3, 7, 10, 1)
+#define SYS_DC_CGVAC sys_insn(1, 3, 7, 10, 3)
+#define SYS_DC_CGDVAC sys_insn(1, 3, 7, 10, 5)
+
+#define SYS_DC_CVAU sys_insn(1, 3, 7, 11, 1)
+
+#define SYS_DC_CVAP sys_insn(1, 3, 7, 12, 1)
+#define SYS_DC_CGVAP sys_insn(1, 3, 7, 12, 3)
+#define SYS_DC_CGDVAP sys_insn(1, 3, 7, 12, 5)
+
+#define SYS_DC_CVADP sys_insn(1, 3, 7, 13, 1)
+#define SYS_DC_CGVADP sys_insn(1, 3, 7, 13, 3)
+#define SYS_DC_CGDVADP sys_insn(1, 3, 7, 13, 5)
+
+#define SYS_DC_CIVAC sys_insn(1, 3, 7, 14, 1)
+#define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3)
+#define SYS_DC_CIGDVAC sys_insn(1, 3, 7, 14, 5)
+
+/* Data cache zero operations */
+#define SYS_DC_ZVA sys_insn(1, 3, 7, 4, 1)
+#define SYS_DC_GVA sys_insn(1, 3, 7, 4, 3)
+#define SYS_DC_GZVA sys_insn(1, 3, 7, 4, 4)
+
/*
* Automatically generated definitions for system registers, the
* manual encodings below are in the process of being converted to
@@ -163,6 +194,82 @@
#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0)
#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
+#define SYS_BRBINF_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0))
+#define SYS_BRBINFINJ_EL1 sys_reg(2, 1, 9, 1, 0)
+#define SYS_BRBSRC_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1))
+#define SYS_BRBSRCINJ_EL1 sys_reg(2, 1, 9, 1, 1)
+#define SYS_BRBTGT_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2))
+#define SYS_BRBTGTINJ_EL1 sys_reg(2, 1, 9, 1, 2)
+#define SYS_BRBTS_EL1 sys_reg(2, 1, 9, 0, 2)
+
+#define SYS_BRBCR_EL1 sys_reg(2, 1, 9, 0, 0)
+#define SYS_BRBFCR_EL1 sys_reg(2, 1, 9, 0, 1)
+#define SYS_BRBIDR0_EL1 sys_reg(2, 1, 9, 2, 0)
+
+#define SYS_TRCITECR_EL1 sys_reg(3, 0, 1, 2, 3)
+#define SYS_TRCACATR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3)))
+#define SYS_TRCACVR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (0 | (m >> 3)))
+#define SYS_TRCAUTHSTATUS sys_reg(2, 1, 7, 14, 6)
+#define SYS_TRCAUXCTLR sys_reg(2, 1, 0, 6, 0)
+#define SYS_TRCBBCTLR sys_reg(2, 1, 0, 15, 0)
+#define SYS_TRCCCCTLR sys_reg(2, 1, 0, 14, 0)
+#define SYS_TRCCIDCCTLR0 sys_reg(2, 1, 3, 0, 2)
+#define SYS_TRCCIDCCTLR1 sys_reg(2, 1, 3, 1, 2)
+#define SYS_TRCCIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 0)
+#define SYS_TRCCLAIMCLR sys_reg(2, 1, 7, 9, 6)
+#define SYS_TRCCLAIMSET sys_reg(2, 1, 7, 8, 6)
+#define SYS_TRCCNTCTLR(m) sys_reg(2, 1, 0, (4 | (m & 3)), 5)
+#define SYS_TRCCNTRLDVR(m) sys_reg(2, 1, 0, (0 | (m & 3)), 5)
+#define SYS_TRCCNTVR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 5)
+#define SYS_TRCCONFIGR sys_reg(2, 1, 0, 4, 0)
+#define SYS_TRCDEVARCH sys_reg(2, 1, 7, 15, 6)
+#define SYS_TRCDEVID sys_reg(2, 1, 7, 2, 7)
+#define SYS_TRCEVENTCTL0R sys_reg(2, 1, 0, 8, 0)
+#define SYS_TRCEVENTCTL1R sys_reg(2, 1, 0, 9, 0)
+#define SYS_TRCEXTINSELR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 4)
+#define SYS_TRCIDR0 sys_reg(2, 1, 0, 8, 7)
+#define SYS_TRCIDR10 sys_reg(2, 1, 0, 2, 6)
+#define SYS_TRCIDR11 sys_reg(2, 1, 0, 3, 6)
+#define SYS_TRCIDR12 sys_reg(2, 1, 0, 4, 6)
+#define SYS_TRCIDR13 sys_reg(2, 1, 0, 5, 6)
+#define SYS_TRCIDR1 sys_reg(2, 1, 0, 9, 7)
+#define SYS_TRCIDR2 sys_reg(2, 1, 0, 10, 7)
+#define SYS_TRCIDR3 sys_reg(2, 1, 0, 11, 7)
+#define SYS_TRCIDR4 sys_reg(2, 1, 0, 12, 7)
+#define SYS_TRCIDR5 sys_reg(2, 1, 0, 13, 7)
+#define SYS_TRCIDR6 sys_reg(2, 1, 0, 14, 7)
+#define SYS_TRCIDR7 sys_reg(2, 1, 0, 15, 7)
+#define SYS_TRCIDR8 sys_reg(2, 1, 0, 0, 6)
+#define SYS_TRCIDR9 sys_reg(2, 1, 0, 1, 6)
+#define SYS_TRCIMSPEC(m) sys_reg(2, 1, 0, (m & 7), 7)
+#define SYS_TRCITEEDCR sys_reg(2, 1, 0, 2, 1)
+#define SYS_TRCOSLSR sys_reg(2, 1, 1, 1, 4)
+#define SYS_TRCPRGCTLR sys_reg(2, 1, 0, 1, 0)
+#define SYS_TRCQCTLR sys_reg(2, 1, 0, 1, 1)
+#define SYS_TRCRSCTLR(m) sys_reg(2, 1, 1, (m & 15), (0 | (m >> 4)))
+#define SYS_TRCRSR sys_reg(2, 1, 0, 10, 0)
+#define SYS_TRCSEQEVR(m) sys_reg(2, 1, 0, (m & 3), 4)
+#define SYS_TRCSEQRSTEVR sys_reg(2, 1, 0, 6, 4)
+#define SYS_TRCSEQSTR sys_reg(2, 1, 0, 7, 4)
+#define SYS_TRCSSCCR(m) sys_reg(2, 1, 1, (m & 7), 2)
+#define SYS_TRCSSCSR(m) sys_reg(2, 1, 1, (8 | (m & 7)), 2)
+#define SYS_TRCSSPCICR(m) sys_reg(2, 1, 1, (m & 7), 3)
+#define SYS_TRCSTALLCTLR sys_reg(2, 1, 0, 11, 0)
+#define SYS_TRCSTATR sys_reg(2, 1, 0, 3, 0)
+#define SYS_TRCSYNCPR sys_reg(2, 1, 0, 13, 0)
+#define SYS_TRCTRACEIDR sys_reg(2, 1, 0, 0, 1)
+#define SYS_TRCTSCTLR sys_reg(2, 1, 0, 12, 0)
+#define SYS_TRCVICTLR sys_reg(2, 1, 0, 0, 2)
+#define SYS_TRCVIIECTLR sys_reg(2, 1, 0, 1, 2)
+#define SYS_TRCVIPCSSCTLR sys_reg(2, 1, 0, 3, 2)
+#define SYS_TRCVISSCTLR sys_reg(2, 1, 0, 2, 2)
+#define SYS_TRCVMIDCCTLR0 sys_reg(2, 1, 3, 2, 2)
+#define SYS_TRCVMIDCCTLR1 sys_reg(2, 1, 3, 3, 2)
+#define SYS_TRCVMIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 1)
+
+/* ETM */
+#define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4)
+
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@@ -203,8 +310,13 @@
#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1)
#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2)
#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
+#define SYS_ERXPFGF_EL1 sys_reg(3, 0, 5, 4, 4)
+#define SYS_ERXPFGCTL_EL1 sys_reg(3, 0, 5, 4, 5)
+#define SYS_ERXPFGCDN_EL1 sys_reg(3, 0, 5, 4, 6)
#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
+#define SYS_ERXMISC2_EL1 sys_reg(3, 0, 5, 5, 2)
+#define SYS_ERXMISC3_EL1 sys_reg(3, 0, 5, 5, 3)
#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0)
#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1)
@@ -275,6 +387,8 @@
#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+#define SYS_ACCDATA_EL1 sys_reg(3, 0, 13, 0, 5)
+
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
@@ -383,8 +497,6 @@
#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
-#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
-#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
@@ -478,6 +590,158 @@
#define SYS_SP_EL2 sys_reg(3, 6, 4, 1, 0)
+/* AT instructions */
+#define AT_Op0 1
+#define AT_CRn 7
+
+#define OP_AT_S1E1R sys_insn(AT_Op0, 0, AT_CRn, 8, 0)
+#define OP_AT_S1E1W sys_insn(AT_Op0, 0, AT_CRn, 8, 1)
+#define OP_AT_S1E0R sys_insn(AT_Op0, 0, AT_CRn, 8, 2)
+#define OP_AT_S1E0W sys_insn(AT_Op0, 0, AT_CRn, 8, 3)
+#define OP_AT_S1E1RP sys_insn(AT_Op0, 0, AT_CRn, 9, 0)
+#define OP_AT_S1E1WP sys_insn(AT_Op0, 0, AT_CRn, 9, 1)
+#define OP_AT_S1E2R sys_insn(AT_Op0, 4, AT_CRn, 8, 0)
+#define OP_AT_S1E2W sys_insn(AT_Op0, 4, AT_CRn, 8, 1)
+#define OP_AT_S12E1R sys_insn(AT_Op0, 4, AT_CRn, 8, 4)
+#define OP_AT_S12E1W sys_insn(AT_Op0, 4, AT_CRn, 8, 5)
+#define OP_AT_S12E0R sys_insn(AT_Op0, 4, AT_CRn, 8, 6)
+#define OP_AT_S12E0W sys_insn(AT_Op0, 4, AT_CRn, 8, 7)
+
+/* TLBI instructions */
+#define OP_TLBI_VMALLE1OS sys_insn(1, 0, 8, 1, 0)
+#define OP_TLBI_VAE1OS sys_insn(1, 0, 8, 1, 1)
+#define OP_TLBI_ASIDE1OS sys_insn(1, 0, 8, 1, 2)
+#define OP_TLBI_VAAE1OS sys_insn(1, 0, 8, 1, 3)
+#define OP_TLBI_VALE1OS sys_insn(1, 0, 8, 1, 5)
+#define OP_TLBI_VAALE1OS sys_insn(1, 0, 8, 1, 7)
+#define OP_TLBI_RVAE1IS sys_insn(1, 0, 8, 2, 1)
+#define OP_TLBI_RVAAE1IS sys_insn(1, 0, 8, 2, 3)
+#define OP_TLBI_RVALE1IS sys_insn(1, 0, 8, 2, 5)
+#define OP_TLBI_RVAALE1IS sys_insn(1, 0, 8, 2, 7)
+#define OP_TLBI_VMALLE1IS sys_insn(1, 0, 8, 3, 0)
+#define OP_TLBI_VAE1IS sys_insn(1, 0, 8, 3, 1)
+#define OP_TLBI_ASIDE1IS sys_insn(1, 0, 8, 3, 2)
+#define OP_TLBI_VAAE1IS sys_insn(1, 0, 8, 3, 3)
+#define OP_TLBI_VALE1IS sys_insn(1, 0, 8, 3, 5)
+#define OP_TLBI_VAALE1IS sys_insn(1, 0, 8, 3, 7)
+#define OP_TLBI_RVAE1OS sys_insn(1, 0, 8, 5, 1)
+#define OP_TLBI_RVAAE1OS sys_insn(1, 0, 8, 5, 3)
+#define OP_TLBI_RVALE1OS sys_insn(1, 0, 8, 5, 5)
+#define OP_TLBI_RVAALE1OS sys_insn(1, 0, 8, 5, 7)
+#define OP_TLBI_RVAE1 sys_insn(1, 0, 8, 6, 1)
+#define OP_TLBI_RVAAE1 sys_insn(1, 0, 8, 6, 3)
+#define OP_TLBI_RVALE1 sys_insn(1, 0, 8, 6, 5)
+#define OP_TLBI_RVAALE1 sys_insn(1, 0, 8, 6, 7)
+#define OP_TLBI_VMALLE1 sys_insn(1, 0, 8, 7, 0)
+#define OP_TLBI_VAE1 sys_insn(1, 0, 8, 7, 1)
+#define OP_TLBI_ASIDE1 sys_insn(1, 0, 8, 7, 2)
+#define OP_TLBI_VAAE1 sys_insn(1, 0, 8, 7, 3)
+#define OP_TLBI_VALE1 sys_insn(1, 0, 8, 7, 5)
+#define OP_TLBI_VAALE1 sys_insn(1, 0, 8, 7, 7)
+#define OP_TLBI_VMALLE1OSNXS sys_insn(1, 0, 9, 1, 0)
+#define OP_TLBI_VAE1OSNXS sys_insn(1, 0, 9, 1, 1)
+#define OP_TLBI_ASIDE1OSNXS sys_insn(1, 0, 9, 1, 2)
+#define OP_TLBI_VAAE1OSNXS sys_insn(1, 0, 9, 1, 3)
+#define OP_TLBI_VALE1OSNXS sys_insn(1, 0, 9, 1, 5)
+#define OP_TLBI_VAALE1OSNXS sys_insn(1, 0, 9, 1, 7)
+#define OP_TLBI_RVAE1ISNXS sys_insn(1, 0, 9, 2, 1)
+#define OP_TLBI_RVAAE1ISNXS sys_insn(1, 0, 9, 2, 3)
+#define OP_TLBI_RVALE1ISNXS sys_insn(1, 0, 9, 2, 5)
+#define OP_TLBI_RVAALE1ISNXS sys_insn(1, 0, 9, 2, 7)
+#define OP_TLBI_VMALLE1ISNXS sys_insn(1, 0, 9, 3, 0)
+#define OP_TLBI_VAE1ISNXS sys_insn(1, 0, 9, 3, 1)
+#define OP_TLBI_ASIDE1ISNXS sys_insn(1, 0, 9, 3, 2)
+#define OP_TLBI_VAAE1ISNXS sys_insn(1, 0, 9, 3, 3)
+#define OP_TLBI_VALE1ISNXS sys_insn(1, 0, 9, 3, 5)
+#define OP_TLBI_VAALE1ISNXS sys_insn(1, 0, 9, 3, 7)
+#define OP_TLBI_RVAE1OSNXS sys_insn(1, 0, 9, 5, 1)
+#define OP_TLBI_RVAAE1OSNXS sys_insn(1, 0, 9, 5, 3)
+#define OP_TLBI_RVALE1OSNXS sys_insn(1, 0, 9, 5, 5)
+#define OP_TLBI_RVAALE1OSNXS sys_insn(1, 0, 9, 5, 7)
+#define OP_TLBI_RVAE1NXS sys_insn(1, 0, 9, 6, 1)
+#define OP_TLBI_RVAAE1NXS sys_insn(1, 0, 9, 6, 3)
+#define OP_TLBI_RVALE1NXS sys_insn(1, 0, 9, 6, 5)
+#define OP_TLBI_RVAALE1NXS sys_insn(1, 0, 9, 6, 7)
+#define OP_TLBI_VMALLE1NXS sys_insn(1, 0, 9, 7, 0)
+#define OP_TLBI_VAE1NXS sys_insn(1, 0, 9, 7, 1)
+#define OP_TLBI_ASIDE1NXS sys_insn(1, 0, 9, 7, 2)
+#define OP_TLBI_VAAE1NXS sys_insn(1, 0, 9, 7, 3)
+#define OP_TLBI_VALE1NXS sys_insn(1, 0, 9, 7, 5)
+#define OP_TLBI_VAALE1NXS sys_insn(1, 0, 9, 7, 7)
+#define OP_TLBI_IPAS2E1IS sys_insn(1, 4, 8, 0, 1)
+#define OP_TLBI_RIPAS2E1IS sys_insn(1, 4, 8, 0, 2)
+#define OP_TLBI_IPAS2LE1IS sys_insn(1, 4, 8, 0, 5)
+#define OP_TLBI_RIPAS2LE1IS sys_insn(1, 4, 8, 0, 6)
+#define OP_TLBI_ALLE2OS sys_insn(1, 4, 8, 1, 0)
+#define OP_TLBI_VAE2OS sys_insn(1, 4, 8, 1, 1)
+#define OP_TLBI_ALLE1OS sys_insn(1, 4, 8, 1, 4)
+#define OP_TLBI_VALE2OS sys_insn(1, 4, 8, 1, 5)
+#define OP_TLBI_VMALLS12E1OS sys_insn(1, 4, 8, 1, 6)
+#define OP_TLBI_RVAE2IS sys_insn(1, 4, 8, 2, 1)
+#define OP_TLBI_RVALE2IS sys_insn(1, 4, 8, 2, 5)
+#define OP_TLBI_ALLE2IS sys_insn(1, 4, 8, 3, 0)
+#define OP_TLBI_VAE2IS sys_insn(1, 4, 8, 3, 1)
+#define OP_TLBI_ALLE1IS sys_insn(1, 4, 8, 3, 4)
+#define OP_TLBI_VALE2IS sys_insn(1, 4, 8, 3, 5)
+#define OP_TLBI_VMALLS12E1IS sys_insn(1, 4, 8, 3, 6)
+#define OP_TLBI_IPAS2E1OS sys_insn(1, 4, 8, 4, 0)
+#define OP_TLBI_IPAS2E1 sys_insn(1, 4, 8, 4, 1)
+#define OP_TLBI_RIPAS2E1 sys_insn(1, 4, 8, 4, 2)
+#define OP_TLBI_RIPAS2E1OS sys_insn(1, 4, 8, 4, 3)
+#define OP_TLBI_IPAS2LE1OS sys_insn(1, 4, 8, 4, 4)
+#define OP_TLBI_IPAS2LE1 sys_insn(1, 4, 8, 4, 5)
+#define OP_TLBI_RIPAS2LE1 sys_insn(1, 4, 8, 4, 6)
+#define OP_TLBI_RIPAS2LE1OS sys_insn(1, 4, 8, 4, 7)
+#define OP_TLBI_RVAE2OS sys_insn(1, 4, 8, 5, 1)
+#define OP_TLBI_RVALE2OS sys_insn(1, 4, 8, 5, 5)
+#define OP_TLBI_RVAE2 sys_insn(1, 4, 8, 6, 1)
+#define OP_TLBI_RVALE2 sys_insn(1, 4, 8, 6, 5)
+#define OP_TLBI_ALLE2 sys_insn(1, 4, 8, 7, 0)
+#define OP_TLBI_VAE2 sys_insn(1, 4, 8, 7, 1)
+#define OP_TLBI_ALLE1 sys_insn(1, 4, 8, 7, 4)
+#define OP_TLBI_VALE2 sys_insn(1, 4, 8, 7, 5)
+#define OP_TLBI_VMALLS12E1 sys_insn(1, 4, 8, 7, 6)
+#define OP_TLBI_IPAS2E1ISNXS sys_insn(1, 4, 9, 0, 1)
+#define OP_TLBI_RIPAS2E1ISNXS sys_insn(1, 4, 9, 0, 2)
+#define OP_TLBI_IPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 5)
+#define OP_TLBI_RIPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 6)
+#define OP_TLBI_ALLE2OSNXS sys_insn(1, 4, 9, 1, 0)
+#define OP_TLBI_VAE2OSNXS sys_insn(1, 4, 9, 1, 1)
+#define OP_TLBI_ALLE1OSNXS sys_insn(1, 4, 9, 1, 4)
+#define OP_TLBI_VALE2OSNXS sys_insn(1, 4, 9, 1, 5)
+#define OP_TLBI_VMALLS12E1OSNXS sys_insn(1, 4, 9, 1, 6)
+#define OP_TLBI_RVAE2ISNXS sys_insn(1, 4, 9, 2, 1)
+#define OP_TLBI_RVALE2ISNXS sys_insn(1, 4, 9, 2, 5)
+#define OP_TLBI_ALLE2ISNXS sys_insn(1, 4, 9, 3, 0)
+#define OP_TLBI_VAE2ISNXS sys_insn(1, 4, 9, 3, 1)
+#define OP_TLBI_ALLE1ISNXS sys_insn(1, 4, 9, 3, 4)
+#define OP_TLBI_VALE2ISNXS sys_insn(1, 4, 9, 3, 5)
+#define OP_TLBI_VMALLS12E1ISNXS sys_insn(1, 4, 9, 3, 6)
+#define OP_TLBI_IPAS2E1OSNXS sys_insn(1, 4, 9, 4, 0)
+#define OP_TLBI_IPAS2E1NXS sys_insn(1, 4, 9, 4, 1)
+#define OP_TLBI_RIPAS2E1NXS sys_insn(1, 4, 9, 4, 2)
+#define OP_TLBI_RIPAS2E1OSNXS sys_insn(1, 4, 9, 4, 3)
+#define OP_TLBI_IPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 4)
+#define OP_TLBI_IPAS2LE1NXS sys_insn(1, 4, 9, 4, 5)
+#define OP_TLBI_RIPAS2LE1NXS sys_insn(1, 4, 9, 4, 6)
+#define OP_TLBI_RIPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 7)
+#define OP_TLBI_RVAE2OSNXS sys_insn(1, 4, 9, 5, 1)
+#define OP_TLBI_RVALE2OSNXS sys_insn(1, 4, 9, 5, 5)
+#define OP_TLBI_RVAE2NXS sys_insn(1, 4, 9, 6, 1)
+#define OP_TLBI_RVALE2NXS sys_insn(1, 4, 9, 6, 5)
+#define OP_TLBI_ALLE2NXS sys_insn(1, 4, 9, 7, 0)
+#define OP_TLBI_VAE2NXS sys_insn(1, 4, 9, 7, 1)
+#define OP_TLBI_ALLE1NXS sys_insn(1, 4, 9, 7, 4)
+#define OP_TLBI_VALE2NXS sys_insn(1, 4, 9, 7, 5)
+#define OP_TLBI_VMALLS12E1NXS sys_insn(1, 4, 9, 7, 6)
+
+/* Misc instructions */
+#define OP_BRB_IALL sys_insn(1, 1, 7, 2, 4)
+#define OP_BRB_INJ sys_insn(1, 1, 7, 2, 5)
+#define OP_CFP_RCTX sys_insn(1, 3, 7, 3, 4)
+#define OP_DVP_RCTX sys_insn(1, 3, 7, 3, 5)
+#define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7)
+
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_ENTP2 (BIT(60))
#define SCTLR_ELx_DSSBS (BIT(44))
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 412a3b9a3c25..93f4b397f9a1 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -278,14 +278,77 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
*/
#define MAX_TLBI_OPS PTRS_PER_PTE
+/*
+ * __flush_tlb_range_op - Perform TLBI operation upon a range
+ *
+ * @op: TLBI instruction that operates on a range (has 'r' prefix)
+ * @start: The start address of the range
+ * @pages: Range as the number of pages from 'start'
+ * @stride: Flush granularity
+ * @asid: The ASID of the task (0 for IPA instructions)
+ * @tlb_level: Translation Table level hint, if known
+ * @tlbi_user: If 'true', call an additional __tlbi_user()
+ *		(typically for user ASIDs). 'false' for IPA instructions
+ *
+ * When the CPU does not support TLB range operations, flush the TLB
+ * entries one by one at the granularity of 'stride'. If the TLB
+ * range ops are supported, then:
+ *
+ * 1. If 'pages' is odd, flush the first page through non-range
+ * operations;
+ *
+ * 2. For remaining pages: the minimum range granularity is decided
+ * by 'scale', so multiple range TLBI operations may be required.
+ * Start from scale = 0, flush the corresponding number of pages
+ * ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
+ * until no pages left.
+ *
+ * Note that certain ranges can be represented by either num = 31 and
+ * scale or num = 0 and scale + 1. The loop below favours the latter
+ * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
+ */
+#define __flush_tlb_range_op(op, start, pages, stride, \
+ asid, tlb_level, tlbi_user) \
+do { \
+ int num = 0; \
+ int scale = 0; \
+ unsigned long addr; \
+ \
+ while (pages > 0) { \
+ if (!system_supports_tlb_range() || \
+ pages % 2 == 1) { \
+ addr = __TLBI_VADDR(start, asid); \
+ __tlbi_level(op, addr, tlb_level); \
+ if (tlbi_user) \
+ __tlbi_user_level(op, addr, tlb_level); \
+ start += stride; \
+ pages -= stride >> PAGE_SHIFT; \
+ continue; \
+ } \
+ \
+ num = __TLBI_RANGE_NUM(pages, scale); \
+ if (num >= 0) { \
+ addr = __TLBI_VADDR_RANGE(start, asid, scale, \
+ num, tlb_level); \
+ __tlbi(r##op, addr); \
+ if (tlbi_user) \
+ __tlbi_user(r##op, addr); \
+ start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+ pages -= __TLBI_RANGE_PAGES(num, scale); \
+ } \
+ scale++; \
+ } \
+} while (0)
+
+#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
+ __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
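
The comment above describes the num/scale arithmetic in prose. The stand-alone program below models that loop so the effect can be checked numerically; it is not kernel code, it assumes a stride of a single page, and range_num()/range_pages() are local stand-ins written from the description of __TLBI_RANGE_NUM()/__TLBI_RANGE_PAGES() rather than the macros themselves.

/* Model of the __flush_tlb_range_op() loop, counting TLBI operations. */
#include <stdbool.h>
#include <stdio.h>

static long range_num(unsigned long pages, int scale)
{
	/* 5-bit field, so the result is capped at 30 (or -1 if pages is too small) */
	return ((pages >> (5 * scale + 1)) & 0x1f) - 1;
}

static unsigned long range_pages(long num, int scale)
{
	/* (num + 1) * 2^(5 * scale + 1) pages per range operation */
	return (unsigned long)(num + 1) << (5 * scale + 1);
}

static int count_tlbi_ops(unsigned long pages, bool have_range)
{
	int scale = 0, ops = 0;

	while (pages > 0) {
		if (!have_range || pages % 2 == 1) {
			pages -= 1;	/* one non-range TLBI per page */
			ops++;
			continue;
		}

		long num = range_num(pages, scale);
		if (num >= 0) {
			pages -= range_pages(num, scale);
			ops++;
		}
		scale++;
	}

	return ops;
}

int main(void)
{
	printf("512 pages: %d ops without range support, %d with it\n",
	       count_tlbi_ops(512, false), count_tlbi_ops(512, true));
	return 0;
}

For 512 pages this prints 512 single-page invalidations without range support against a single ranged operation (num = 7, scale = 1) with it.
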
+
static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long stride, bool last_level,
int tlb_level)
{
- int num = 0;
- int scale = 0;
- unsigned long asid, addr, pages;
+ unsigned long asid, pages;
start = round_down(start, stride);
end = round_up(end, stride);
@@ -307,56 +370,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
dsb(ishst);
asid = ASID(vma->vm_mm);
- /*
- * When the CPU does not support TLB range operations, flush the TLB
- * entries one by one at the granularity of 'stride'. If the TLB
- * range ops are supported, then:
- *
- * 1. If 'pages' is odd, flush the first page through non-range
- * operations;
- *
- * 2. For remaining pages: the minimum range granularity is decided
- * by 'scale', so multiple range TLBI operations may be required.
- * Start from scale = 0, flush the corresponding number of pages
- * ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
- * until no pages left.
- *
- * Note that certain ranges can be represented by either num = 31 and
- * scale or num = 0 and scale + 1. The loop below favours the latter
- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
- */
- while (pages > 0) {
- if (!system_supports_tlb_range() ||
- pages % 2 == 1) {
- addr = __TLBI_VADDR(start, asid);
- if (last_level) {
- __tlbi_level(vale1is, addr, tlb_level);
- __tlbi_user_level(vale1is, addr, tlb_level);
- } else {
- __tlbi_level(vae1is, addr, tlb_level);
- __tlbi_user_level(vae1is, addr, tlb_level);
- }
- start += stride;
- pages -= stride >> PAGE_SHIFT;
- continue;
- }
-
- num = __TLBI_RANGE_NUM(pages, scale);
- if (num >= 0) {
- addr = __TLBI_VADDR_RANGE(start, asid, scale,
- num, tlb_level);
- if (last_level) {
- __tlbi(rvale1is, addr);
- __tlbi_user(rvale1is, addr);
- } else {
- __tlbi(rvae1is, addr);
- __tlbi_user(rvae1is, addr);
- }
- start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
- pages -= __TLBI_RANGE_PAGES(num, scale);
- }
- scale++;
- }
+ if (last_level)
+ __flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
+ else
+ __flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);
+
dsb(ish);
}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f9d456fe132d..668e2872a086 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2627,6 +2627,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LRCPC, IMP)
},
+ {
+ .desc = "Fine Grained Traps",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_HAS_FGT,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, FGT, IMP)
+ },
#ifdef CONFIG_ARM64_SME
{
.desc = "Scalable Matrix Extension",
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f531da6b362e..83c1e09be42e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -25,7 +25,6 @@ menuconfig KVM
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select HAVE_KVM_CPU_RELAX_INTERCEPT
- select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_XFER_TO_GUEST_WORK
@@ -43,6 +42,7 @@ menuconfig KVM
select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS
select INTERVAL_TREE
+ select XARRAY_MULTI
help
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index d1cb298a58a0..4866b3f7b4ea 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -36,6 +36,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>
@@ -365,7 +366,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
#endif
/* Force users to call KVM_ARM_VCPU_INIT */
- vcpu->arch.target = -1;
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
@@ -462,7 +463,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu_ptrauth_disable(vcpu);
kvm_arch_vcpu_load_debug_state_flags(vcpu);
- if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
+ if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
vcpu_set_on_unsupported_cpu(vcpu);
}
@@ -574,7 +575,7 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.target >= 0;
+ return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
}
/*
@@ -803,6 +804,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
kvm_pmu_handle_pmcr(vcpu,
__vcpu_sys_reg(vcpu, PMCR_EL0));
+ if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
+ kvm_vcpu_pmu_restore_guest(vcpu);
+
if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
return kvm_vcpu_suspend(vcpu);
@@ -818,6 +822,9 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
if (likely(!vcpu_mode_is_32bit(vcpu)))
return false;
+ if (vcpu_has_nv(vcpu))
+ return true;
+
return !kvm_supports_32bit_el0();
}
@@ -1058,7 +1065,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* invalid. The VMM can try and fix it by issuing a
* KVM_ARM_VCPU_INIT if it really wants to.
*/
- vcpu->arch.target = -1;
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
ret = ARM_EXCEPTION_IL;
}
@@ -1219,8 +1226,7 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
{
unsigned long features = init->features[0];
- return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES) ||
- vcpu->arch.target != init->target;
+ return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
}
static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
@@ -1236,20 +1242,18 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
!bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES))
goto out_unlock;
- vcpu->arch.target = init->target;
bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
/* Now we know what it is, we can reset it. */
ret = kvm_reset_vcpu(vcpu);
if (ret) {
- vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
goto out_unlock;
}
bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
-
+ vcpu_set_flag(vcpu, VCPU_INITIALIZED);
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
return ret;
@@ -1260,14 +1264,15 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
{
int ret;
- if (init->target != kvm_target_cpu())
+ if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
+ init->target != kvm_target_cpu())
return -EINVAL;
ret = kvm_vcpu_init_check_features(vcpu, init);
if (ret)
return ret;
- if (vcpu->arch.target == -1)
+ if (!kvm_vcpu_initialized(vcpu))
return __kvm_vcpu_set_target(vcpu, init);
if (kvm_vcpu_init_changed(vcpu, init))
@@ -1532,12 +1537,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
}
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot)
-{
- kvm_flush_remote_tlbs(kvm);
-}
-
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
struct kvm_arm_device_addr *dev_addr)
{
@@ -1595,9 +1594,9 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
}
case KVM_ARM_PREFERRED_TARGET: {
- struct kvm_vcpu_init init;
-
- kvm_vcpu_preferred_target(&init);
+ struct kvm_vcpu_init init = {
+ .target = KVM_ARM_TARGET_GENERIC_V8,
+ };
if (copy_to_user(argp, &init, sizeof(init)))
return -EFAULT;
@@ -2276,30 +2275,8 @@ static int __init init_hyp_mode(void)
for_each_possible_cpu(cpu) {
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
- unsigned long hyp_addr;
- /*
- * Allocate a contiguous HYP private VA range for the stack
- * and guard page. The allocation is also aligned based on
- * the order of its size.
- */
- err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
- if (err) {
- kvm_err("Cannot allocate hyp stack guard page\n");
- goto out_err;
- }
-
- /*
- * Since the stack grows downwards, map the stack to the page
- * at the higher address and leave the lower guard page
- * unbacked.
- *
- * Any valid stack address now has the PAGE_SHIFT bit as 1
- * and addresses corresponding to the guard page have the
- * PAGE_SHIFT bit as 0 - this is used for overflow detection.
- */
- err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE,
- __pa(stack_page), PAGE_HYP);
+ err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
if (err) {
kvm_err("Cannot map hyp stack\n");
goto out_err;
@@ -2312,8 +2289,6 @@ static int __init init_hyp_mode(void)
* has been mapped in the flexible private VA space.
*/
params->stack_pa = __pa(stack_page);
-
- params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
}
for_each_possible_cpu(cpu) {
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index b96662029fb1..9ced1bf0c2b7 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -14,6 +14,1858 @@
#include "trace.h"
+enum trap_behaviour {
+ BEHAVE_HANDLE_LOCALLY = 0,
+ BEHAVE_FORWARD_READ = BIT(0),
+ BEHAVE_FORWARD_WRITE = BIT(1),
+ BEHAVE_FORWARD_ANY = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
+};
+
+struct trap_bits {
+ const enum vcpu_sysreg index;
+ const enum trap_behaviour behaviour;
+ const u64 value;
+ const u64 mask;
+};
+
+/* Coarse Grained Trap definitions */
+enum cgt_group_id {
+ /* Indicates no coarse trap control */
+ __RESERVED__,
+
+ /*
+ * The first batch of IDs denote coarse trapping that are used
+ * on their own instead of being part of a combination of
+ * trap controls.
+ */
+ CGT_HCR_TID1,
+ CGT_HCR_TID2,
+ CGT_HCR_TID3,
+ CGT_HCR_IMO,
+ CGT_HCR_FMO,
+ CGT_HCR_TIDCP,
+ CGT_HCR_TACR,
+ CGT_HCR_TSW,
+ CGT_HCR_TPC,
+ CGT_HCR_TPU,
+ CGT_HCR_TTLB,
+ CGT_HCR_TVM,
+ CGT_HCR_TDZ,
+ CGT_HCR_TRVM,
+ CGT_HCR_TLOR,
+ CGT_HCR_TERR,
+ CGT_HCR_APK,
+ CGT_HCR_NV,
+ CGT_HCR_NV_nNV2,
+ CGT_HCR_NV1_nNV2,
+ CGT_HCR_AT,
+ CGT_HCR_nFIEN,
+ CGT_HCR_TID4,
+ CGT_HCR_TICAB,
+ CGT_HCR_TOCU,
+ CGT_HCR_ENSCXT,
+ CGT_HCR_TTLBIS,
+ CGT_HCR_TTLBOS,
+
+ CGT_MDCR_TPMCR,
+ CGT_MDCR_TPM,
+ CGT_MDCR_TDE,
+ CGT_MDCR_TDA,
+ CGT_MDCR_TDOSA,
+ CGT_MDCR_TDRA,
+ CGT_MDCR_E2PB,
+ CGT_MDCR_TPMS,
+ CGT_MDCR_TTRF,
+ CGT_MDCR_E2TB,
+ CGT_MDCR_TDCC,
+
+ /*
+ * Anything after this point is a combination of coarse trap
+ * controls, which must all be evaluated to decide what to do.
+ */
+ __MULTIPLE_CONTROL_BITS__,
+ CGT_HCR_IMO_FMO = __MULTIPLE_CONTROL_BITS__,
+ CGT_HCR_TID2_TID4,
+ CGT_HCR_TTLB_TTLBIS,
+ CGT_HCR_TTLB_TTLBOS,
+ CGT_HCR_TVM_TRVM,
+ CGT_HCR_TPU_TICAB,
+ CGT_HCR_TPU_TOCU,
+ CGT_HCR_NV1_nNV2_ENSCXT,
+ CGT_MDCR_TPM_TPMCR,
+ CGT_MDCR_TDE_TDA,
+ CGT_MDCR_TDE_TDOSA,
+ CGT_MDCR_TDE_TDRA,
+ CGT_MDCR_TDCC_TDE_TDA,
+
+ /*
+ * Anything after this point requires a callback evaluating a
+ * complex trap condition. Ugly stuff.
+ */
+ __COMPLEX_CONDITIONS__,
+ CGT_CNTHCTL_EL1PCTEN = __COMPLEX_CONDITIONS__,
+ CGT_CNTHCTL_EL1PTEN,
+
+ /* Must be last */
+ __NR_CGT_GROUP_IDS__
+};
+
+static const struct trap_bits coarse_trap_bits[] = {
+ [CGT_HCR_TID1] = {
+ .index = HCR_EL2,
+ .value = HCR_TID1,
+ .mask = HCR_TID1,
+ .behaviour = BEHAVE_FORWARD_READ,
+ },
+ [CGT_HCR_TID2] = {
+ .index = HCR_EL2,
+ .value = HCR_TID2,
+ .mask = HCR_TID2,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TID3] = {
+ .index = HCR_EL2,
+ .value = HCR_TID3,
+ .mask = HCR_TID3,
+ .behaviour = BEHAVE_FORWARD_READ,
+ },
+ [CGT_HCR_IMO] = {
+ .index = HCR_EL2,
+ .value = HCR_IMO,
+ .mask = HCR_IMO,
+ .behaviour = BEHAVE_FORWARD_WRITE,
+ },
+ [CGT_HCR_FMO] = {
+ .index = HCR_EL2,
+ .value = HCR_FMO,
+ .mask = HCR_FMO,
+ .behaviour = BEHAVE_FORWARD_WRITE,
+ },
+ [CGT_HCR_TIDCP] = {
+ .index = HCR_EL2,
+ .value = HCR_TIDCP,
+ .mask = HCR_TIDCP,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TACR] = {
+ .index = HCR_EL2,
+ .value = HCR_TACR,
+ .mask = HCR_TACR,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TSW] = {
+ .index = HCR_EL2,
+ .value = HCR_TSW,
+ .mask = HCR_TSW,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TPC] = { /* Also called TCPC when FEAT_DPB is implemented */
+ .index = HCR_EL2,
+ .value = HCR_TPC,
+ .mask = HCR_TPC,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TPU] = {
+ .index = HCR_EL2,
+ .value = HCR_TPU,
+ .mask = HCR_TPU,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TTLB] = {
+ .index = HCR_EL2,
+ .value = HCR_TTLB,
+ .mask = HCR_TTLB,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TVM] = {
+ .index = HCR_EL2,
+ .value = HCR_TVM,
+ .mask = HCR_TVM,
+ .behaviour = BEHAVE_FORWARD_WRITE,
+ },
+ [CGT_HCR_TDZ] = {
+ .index = HCR_EL2,
+ .value = HCR_TDZ,
+ .mask = HCR_TDZ,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TRVM] = {
+ .index = HCR_EL2,
+ .value = HCR_TRVM,
+ .mask = HCR_TRVM,
+ .behaviour = BEHAVE_FORWARD_READ,
+ },
+ [CGT_HCR_TLOR] = {
+ .index = HCR_EL2,
+ .value = HCR_TLOR,
+ .mask = HCR_TLOR,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TERR] = {
+ .index = HCR_EL2,
+ .value = HCR_TERR,
+ .mask = HCR_TERR,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_APK] = {
+ .index = HCR_EL2,
+ .value = 0,
+ .mask = HCR_APK,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_NV] = {
+ .index = HCR_EL2,
+ .value = HCR_NV,
+ .mask = HCR_NV,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_NV_nNV2] = {
+ .index = HCR_EL2,
+ .value = HCR_NV,
+ .mask = HCR_NV | HCR_NV2,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_NV1_nNV2] = {
+ .index = HCR_EL2,
+ .value = HCR_NV | HCR_NV1,
+ .mask = HCR_NV | HCR_NV1 | HCR_NV2,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_AT] = {
+ .index = HCR_EL2,
+ .value = HCR_AT,
+ .mask = HCR_AT,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_nFIEN] = {
+ .index = HCR_EL2,
+ .value = 0,
+ .mask = HCR_FIEN,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TID4] = {
+ .index = HCR_EL2,
+ .value = HCR_TID4,
+ .mask = HCR_TID4,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TICAB] = {
+ .index = HCR_EL2,
+ .value = HCR_TICAB,
+ .mask = HCR_TICAB,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TOCU] = {
+ .index = HCR_EL2,
+ .value = HCR_TOCU,
+ .mask = HCR_TOCU,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_ENSCXT] = {
+ .index = HCR_EL2,
+ .value = 0,
+ .mask = HCR_ENSCXT,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TTLBIS] = {
+ .index = HCR_EL2,
+ .value = HCR_TTLBIS,
+ .mask = HCR_TTLBIS,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TTLBOS] = {
+ .index = HCR_EL2,
+ .value = HCR_TTLBOS,
+ .mask = HCR_TTLBOS,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TPMCR] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TPMCR,
+ .mask = MDCR_EL2_TPMCR,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TPM] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TPM,
+ .mask = MDCR_EL2_TPM,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TDE] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TDE,
+ .mask = MDCR_EL2_TDE,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TDA] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TDA,
+ .mask = MDCR_EL2_TDA,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TDOSA] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TDOSA,
+ .mask = MDCR_EL2_TDOSA,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TDRA] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TDRA,
+ .mask = MDCR_EL2_TDRA,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_E2PB] = {
+ .index = MDCR_EL2,
+ .value = 0,
+ .mask = BIT(MDCR_EL2_E2PB_SHIFT),
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TPMS] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TPMS,
+ .mask = MDCR_EL2_TPMS,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TTRF] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TTRF,
+ .mask = MDCR_EL2_TTRF,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_E2TB] = {
+ .index = MDCR_EL2,
+ .value = 0,
+ .mask = BIT(MDCR_EL2_E2TB_SHIFT),
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TDCC] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TDCC,
+ .mask = MDCR_EL2_TDCC,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+};
+
+#define MCB(id, ...) \
+ [id - __MULTIPLE_CONTROL_BITS__] = \
+ (const enum cgt_group_id[]){ \
+ __VA_ARGS__, __RESERVED__ \
+ }
+
+static const enum cgt_group_id *coarse_control_combo[] = {
+ MCB(CGT_HCR_IMO_FMO, CGT_HCR_IMO, CGT_HCR_FMO),
+ MCB(CGT_HCR_TID2_TID4, CGT_HCR_TID2, CGT_HCR_TID4),
+ MCB(CGT_HCR_TTLB_TTLBIS, CGT_HCR_TTLB, CGT_HCR_TTLBIS),
+ MCB(CGT_HCR_TTLB_TTLBOS, CGT_HCR_TTLB, CGT_HCR_TTLBOS),
+ MCB(CGT_HCR_TVM_TRVM, CGT_HCR_TVM, CGT_HCR_TRVM),
+ MCB(CGT_HCR_TPU_TICAB, CGT_HCR_TPU, CGT_HCR_TICAB),
+ MCB(CGT_HCR_TPU_TOCU, CGT_HCR_TPU, CGT_HCR_TOCU),
+ MCB(CGT_HCR_NV1_nNV2_ENSCXT, CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT),
+ MCB(CGT_MDCR_TPM_TPMCR, CGT_MDCR_TPM, CGT_MDCR_TPMCR),
+ MCB(CGT_MDCR_TDE_TDA, CGT_MDCR_TDE, CGT_MDCR_TDA),
+ MCB(CGT_MDCR_TDE_TDOSA, CGT_MDCR_TDE, CGT_MDCR_TDOSA),
+ MCB(CGT_MDCR_TDE_TDRA, CGT_MDCR_TDE, CGT_MDCR_TDRA),
+ MCB(CGT_MDCR_TDCC_TDE_TDA, CGT_MDCR_TDCC, CGT_MDCR_TDE, CGT_MDCR_TDA),
+};
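
Each MCB() entry is a __RESERVED__-terminated list of simple coarse-grained IDs, and resolving a combined ID amounts to walking that list and OR-ing the behaviour contributed by every member whose (value, mask) pair matches the guest's control register. The reduced model below illustrates only that walk; the two table entries, their bit positions and the register values are entirely made up and do not correspond to real HCR_EL2 bits.

/* Reduced, stand-alone model of resolving a combined coarse-grained trap. */
#include <stdint.h>
#include <stdio.h>

enum behaviour { HANDLE_LOCALLY = 0, FWD_READ = 1, FWD_WRITE = 2, FWD_ANY = 3 };
enum id { RESERVED = 0, CGT_TRAP_A, CGT_TRAP_B };

struct bits { uint64_t value, mask; enum behaviour behaviour; };

static const struct bits table[] = {
	[CGT_TRAP_A] = { .value = 1ULL << 3, .mask = 1ULL << 3, .behaviour = FWD_ANY },
	[CGT_TRAP_B] = { .value = 1ULL << 5, .mask = 1ULL << 5, .behaviour = FWD_READ },
};

static enum behaviour resolve(const enum id *combo, uint64_t reg)
{
	enum behaviour b = HANDLE_LOCALLY;

	for (; *combo != RESERVED; combo++) {
		const struct bits *tb = &table[*combo];

		if ((reg & tb->mask) == tb->value)
			b |= tb->behaviour;
	}

	return b;
}

int main(void)
{
	static const enum id combo[] = { CGT_TRAP_A, CGT_TRAP_B, RESERVED };

	printf("only B's bit set -> behaviour %d\n", resolve(combo, 1ULL << 5));
	printf("neither bit set  -> behaviour %d\n", resolve(combo, 0));
	return 0;
}

Entries such as CGT_HCR_APK above use value = 0 with a non-zero mask, so the same comparison also covers the negative-polarity case where the trap applies when the control bit is clear.
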
+
+typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *);
+
+/*
+ * Warning, maximum confusion ahead.
+ *
+ * When E2H=0, CNTHCTL_EL2[1:0] are defined as EL1PCEN:EL1PCTEN
+ * When E2H=1, CNTHCTL_EL2[11:10] are defined as EL1PTEN:EL1PCTEN
+ *
+ * Note the single letter difference? Yet, the bits have the same
+ * function despite a different layout and a different name.
+ *
+ * We don't try to reconcile this mess. We just use the E2H=0 bits
+ * to generate something that is in the E2H=1 format, and live with
+ * it. You're welcome.
+ */
+static u64 get_sanitized_cnthctl(struct kvm_vcpu *vcpu)
+{
+ u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
+
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;
+
+ return val & ((CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN) << 10);
+}
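
The comment above is easier to believe with the shift written out: with E2H=0 the two enable bits sit at CNTHCTL_EL2[1:0], and shifting that pair left by 10 produces the E2H=1 placement at [11:10]. The tiny stand-alone check below re-declares CNTHCTL_EL1PCTEN/CNTHCTL_EL1PCEN locally (bits 0 and 1, as stated in the comment) purely for illustration.

/* Check that the E2H=0 bit pair lands at [11:10] after the <<10 shift. */
#include <assert.h>
#include <stdio.h>

#define CNTHCTL_EL1PCTEN	(1UL << 0)	/* E2H=0 layout */
#define CNTHCTL_EL1PCEN		(1UL << 1)	/* E2H=0 layout */

int main(void)
{
	unsigned long e2h0 = CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
	unsigned long e2h1 = (e2h0 & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;

	assert(e2h1 == ((1UL << 10) | (1UL << 11)));
	printf("E2H=0 value 0x%lx maps to E2H=1 value 0x%lx\n", e2h0, e2h1);
	return 0;
}
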
+
+static enum trap_behaviour check_cnthctl_el1pcten(struct kvm_vcpu *vcpu)
+{
+ if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCTEN << 10))
+ return BEHAVE_HANDLE_LOCALLY;
+
+ return BEHAVE_FORWARD_ANY;
+}
+
+static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
+{
+ if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCEN << 10))
+ return BEHAVE_HANDLE_LOCALLY;
+
+ return BEHAVE_FORWARD_ANY;
+}
+
+#define CCC(id, fn) \
+ [id - __COMPLEX_CONDITIONS__] = fn
+
+static const complex_condition_check ccc[] = {
+ CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten),
+ CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten),
+};
+
+/*
+ * Bit assignment for the trap controls. We use a 64bit word with the
+ * following layout for each trapped sysreg:
+ *
+ * [9:0] enum cgt_group_id (10 bits)
+ * [13:10] enum fgt_group_id (4 bits)
+ * [19:14] bit number in the FGT register (6 bits)
+ * [20] trap polarity (1 bit)
+ * [25:21] FG filter (5 bits)
+ * [62:26] Unused (37 bits)
+ * [63] RES0 - Must be zero, as lost on insertion in the xarray
+ */
+#define TC_CGT_BITS 10
+#define TC_FGT_BITS 4
+#define TC_FGF_BITS 5
+
+union trap_config {
+ u64 val;
+ struct {
+ unsigned long cgt:TC_CGT_BITS; /* Coarse Grained Trap id */
+ unsigned long fgt:TC_FGT_BITS; /* Fine Grained Trap id */
+ unsigned long bit:6; /* Bit number */
+ unsigned long pol:1; /* Polarity */
+ unsigned long fgf:TC_FGF_BITS; /* Fine Grained Filter */
+ unsigned long unused:37; /* Unused, should be zero */
+ unsigned long mbz:1; /* Must Be Zero */
+ };
+};
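
Because the whole configuration has to survive a round trip through a single 64-bit value stored in an xarray, it is worth convincing oneself that the bitfield layout above packs and unpacks losslessly. The stand-alone test below re-declares the union locally (it assumes an LP64 target, as the kernel declaration does) and uses arbitrary field values purely for illustration.

/* Round-trip test of the trap_config bitfield layout shown above. */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

union trap_config {
	uint64_t val;
	struct {
		unsigned long cgt:10;	/* Coarse Grained Trap id */
		unsigned long fgt:4;	/* Fine Grained Trap id */
		unsigned long bit:6;	/* Bit number */
		unsigned long pol:1;	/* Polarity */
		unsigned long fgf:5;	/* Fine Grained Filter */
		unsigned long unused:37;/* Unused, should be zero */
		unsigned long mbz:1;	/* Must Be Zero */
	};
};

int main(void)
{
	union trap_config tc = { .cgt = 37, .fgt = 3, .bit = 55, .pol = 1, .fgf = 2 };
	union trap_config copy = { .val = tc.val };	/* store and reload as a plain u64 */

	assert(copy.cgt == 37 && copy.fgt == 3 && copy.bit == 55 &&
	       copy.pol == 1 && copy.fgf == 2 && copy.mbz == 0);
	printf("trap_config packs into 0x%016" PRIx64 "\n", tc.val);
	return 0;
}
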
+
+struct encoding_to_trap_config {
+ const u32 encoding;
+ const u32 end;
+ const union trap_config tc;
+ const unsigned int line;
+};
+
+#define SR_RANGE_TRAP(sr_start, sr_end, trap_id) \
+ { \
+ .encoding = sr_start, \
+ .end = sr_end, \
+ .tc = { \
+ .cgt = trap_id, \
+ }, \
+ .line = __LINE__, \
+ }
+
+#define SR_TRAP(sr, trap_id) SR_RANGE_TRAP(sr, sr, trap_id)
+
+/*
+ * Map encoding to trap bits for exception reported with EC=0x18.
+ * These must only be evaluated when running a nested hypervisor and
+ * the current context is not a hypervisor context. When the
+ * trapped access matches one of the trap controls, the exception is
+ * re-injected in the nested hypervisor.
+ */
+static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
+ SR_TRAP(SYS_REVIDR_EL1, CGT_HCR_TID1),
+ SR_TRAP(SYS_AIDR_EL1, CGT_HCR_TID1),
+ SR_TRAP(SYS_SMIDR_EL1, CGT_HCR_TID1),
+ SR_TRAP(SYS_CTR_EL0, CGT_HCR_TID2),
+ SR_TRAP(SYS_CCSIDR_EL1, CGT_HCR_TID2_TID4),
+ SR_TRAP(SYS_CCSIDR2_EL1, CGT_HCR_TID2_TID4),
+ SR_TRAP(SYS_CLIDR_EL1, CGT_HCR_TID2_TID4),
+ SR_TRAP(SYS_CSSELR_EL1, CGT_HCR_TID2_TID4),
+ SR_RANGE_TRAP(SYS_ID_PFR0_EL1,
+ sys_reg(3, 0, 0, 7, 7), CGT_HCR_TID3),
+ SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO),
+ SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO),
+ SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO),
+ SR_RANGE_TRAP(sys_reg(3, 0, 11, 0, 0),
+ sys_reg(3, 0, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 1, 11, 0, 0),
+ sys_reg(3, 1, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 2, 11, 0, 0),
+ sys_reg(3, 2, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 3, 11, 0, 0),
+ sys_reg(3, 3, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 4, 11, 0, 0),
+ sys_reg(3, 4, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 5, 11, 0, 0),
+ sys_reg(3, 5, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 6, 11, 0, 0),
+ sys_reg(3, 6, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 7, 11, 0, 0),
+ sys_reg(3, 7, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 0, 15, 0, 0),
+ sys_reg(3, 0, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 1, 15, 0, 0),
+ sys_reg(3, 1, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 2, 15, 0, 0),
+ sys_reg(3, 2, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 3, 15, 0, 0),
+ sys_reg(3, 3, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 4, 15, 0, 0),
+ sys_reg(3, 4, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 5, 15, 0, 0),
+ sys_reg(3, 5, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 6, 15, 0, 0),
+ sys_reg(3, 6, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 7, 15, 0, 0),
+ sys_reg(3, 7, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_TRAP(SYS_ACTLR_EL1, CGT_HCR_TACR),
+ SR_TRAP(SYS_DC_ISW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CISW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_IGSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_IGDSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CGSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CGDSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CIGSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CIGDSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CIVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CVAP, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CVADP, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_IVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CIGVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CIGDVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_IGVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_IGDVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGDVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGVAP, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGDVAP, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGVADP, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGDVADP, CGT_HCR_TPC),
+ SR_TRAP(SYS_IC_IVAU, CGT_HCR_TPU_TOCU),
+ SR_TRAP(SYS_IC_IALLU, CGT_HCR_TPU_TOCU),
+ SR_TRAP(SYS_IC_IALLUIS, CGT_HCR_TPU_TICAB),
+ SR_TRAP(SYS_DC_CVAU, CGT_HCR_TPU_TOCU),
+ SR_TRAP(OP_TLBI_RVAE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAAE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVALE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAALE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VMALLE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_ASIDE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAAE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VALE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAALE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAAE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVALE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAALE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VMALLE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_ASIDE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAAE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VALE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAALE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVAAE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVALE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVAALE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VMALLE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_ASIDE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAAE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VALE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAALE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVAE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVAAE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVALE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVAALE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VMALLE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_ASIDE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAAE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VALE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAALE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VMALLE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_ASIDE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAAE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VALE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAALE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAAE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVALE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAALE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VMALLE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_ASIDE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAAE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VALE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAALE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAAE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVALE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAALE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(SYS_SCTLR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_TTBR0_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_TTBR1_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_TCR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_ESR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_FAR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_AFSR0_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_AFSR1_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_MAIR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_AMAIR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_CONTEXTIDR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_DC_ZVA, CGT_HCR_TDZ),
+ SR_TRAP(SYS_DC_GVA, CGT_HCR_TDZ),
+ SR_TRAP(SYS_DC_GZVA, CGT_HCR_TDZ),
+ SR_TRAP(SYS_LORSA_EL1, CGT_HCR_TLOR),
+ SR_TRAP(SYS_LOREA_EL1, CGT_HCR_TLOR),
+ SR_TRAP(SYS_LORN_EL1, CGT_HCR_TLOR),
+ SR_TRAP(SYS_LORC_EL1, CGT_HCR_TLOR),
+ SR_TRAP(SYS_LORID_EL1, CGT_HCR_TLOR),
+ SR_TRAP(SYS_ERRIDR_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERRSELR_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXADDR_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXCTLR_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXFR_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXMISC0_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXMISC1_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXMISC2_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXMISC3_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXSTATUS_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_APIAKEYLO_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APIAKEYHI_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APIBKEYLO_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APIBKEYHI_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APDAKEYLO_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APDAKEYHI_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APDBKEYLO_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APDBKEYHI_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APGAKEYLO_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APGAKEYHI_EL1, CGT_HCR_APK),
+ /* All _EL2 registers */
+ SR_RANGE_TRAP(sys_reg(3, 4, 0, 0, 0),
+ sys_reg(3, 4, 3, 15, 7), CGT_HCR_NV),
+ /* Skip the SP_EL1 encoding... */
+ SR_TRAP(SYS_SPSR_EL2, CGT_HCR_NV),
+ SR_TRAP(SYS_ELR_EL2, CGT_HCR_NV),
+ SR_RANGE_TRAP(sys_reg(3, 4, 4, 1, 1),
+ sys_reg(3, 4, 10, 15, 7), CGT_HCR_NV),
+ SR_RANGE_TRAP(sys_reg(3, 4, 12, 0, 0),
+ sys_reg(3, 4, 14, 15, 7), CGT_HCR_NV),
+ /* All _EL02, _EL12 registers */
+ SR_RANGE_TRAP(sys_reg(3, 5, 0, 0, 0),
+ sys_reg(3, 5, 10, 15, 7), CGT_HCR_NV),
+ SR_RANGE_TRAP(sys_reg(3, 5, 12, 0, 0),
+ sys_reg(3, 5, 14, 15, 7), CGT_HCR_NV),
+ SR_TRAP(OP_AT_S1E2R, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S1E2W, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S12E1R, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S12E1W, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S12E0R, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S12E0W, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1ISNXS,CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1OSNXS,CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_CPP_RCTX, CGT_HCR_NV),
+ SR_TRAP(OP_DVP_RCTX, CGT_HCR_NV),
+ SR_TRAP(OP_CFP_RCTX, CGT_HCR_NV),
+ SR_TRAP(SYS_SP_EL1, CGT_HCR_NV_nNV2),
+ SR_TRAP(SYS_VBAR_EL1, CGT_HCR_NV1_nNV2),
+ SR_TRAP(SYS_ELR_EL1, CGT_HCR_NV1_nNV2),
+ SR_TRAP(SYS_SPSR_EL1, CGT_HCR_NV1_nNV2),
+ SR_TRAP(SYS_SCXTNUM_EL1, CGT_HCR_NV1_nNV2_ENSCXT),
+ SR_TRAP(SYS_SCXTNUM_EL0, CGT_HCR_ENSCXT),
+ SR_TRAP(OP_AT_S1E1R, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E1W, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E0R, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E0W, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E1RP, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E1WP, CGT_HCR_AT),
+ SR_TRAP(SYS_ERXPFGF_EL1, CGT_HCR_nFIEN),
+ SR_TRAP(SYS_ERXPFGCTL_EL1, CGT_HCR_nFIEN),
+ SR_TRAP(SYS_ERXPFGCDN_EL1, CGT_HCR_nFIEN),
+ SR_TRAP(SYS_PMCR_EL0, CGT_MDCR_TPM_TPMCR),
+ SR_TRAP(SYS_PMCNTENSET_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMCNTENCLR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMOVSSET_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMOVSCLR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMCEID0_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMCEID1_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMXEVTYPER_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMSWINC_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMSELR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMXEVCNTR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMCCNTR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMUSERENR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMINTENSET_EL1, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMINTENCLR_EL1, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMMIR_EL1, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(0), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(1), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(2), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(3), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(4), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(5), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(6), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(7), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(8), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(9), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(10), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(11), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(12), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(13), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(14), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(15), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(16), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(17), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(18), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(19), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(20), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(21), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(22), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(23), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(24), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(25), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(26), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(27), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(28), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(29), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(30), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(0), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(1), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(2), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(3), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(4), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(5), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(6), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(7), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(8), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(9), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(10), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(11), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(12), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(13), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(14), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(15), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(16), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(17), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(18), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(19), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(20), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(21), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(22), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(23), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(24), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(25), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(26), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(27), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(28), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(29), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(30), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMCCFILTR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_MDCCSR_EL0, CGT_MDCR_TDCC_TDE_TDA),
+ SR_TRAP(SYS_MDCCINT_EL1, CGT_MDCR_TDCC_TDE_TDA),
+ SR_TRAP(SYS_OSDTRRX_EL1, CGT_MDCR_TDCC_TDE_TDA),
+ SR_TRAP(SYS_OSDTRTX_EL1, CGT_MDCR_TDCC_TDE_TDA),
+ SR_TRAP(SYS_DBGDTR_EL0, CGT_MDCR_TDCC_TDE_TDA),
+ /*
+ * Also covers DBGDTRRX_EL0, which has the same encoding as
+ * SYS_DBGDTRTX_EL0...
+ */
+ SR_TRAP(SYS_DBGDTRTX_EL0, CGT_MDCR_TDCC_TDE_TDA),
+ SR_TRAP(SYS_MDSCR_EL1, CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_OSECCR_EL1, CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(0), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(1), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(2), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(3), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(4), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(5), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(6), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(7), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(8), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(9), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(10), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(11), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(12), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(13), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(14), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(15), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(0), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(1), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(2), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(3), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(4), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(5), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(6), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(7), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(8), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(9), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(10), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(11), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(12), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(13), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(14), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(15), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(0), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(1), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(2), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(3), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(4), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(5), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(6), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(7), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(8), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(9), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(10), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(11), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(12), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(13), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(14), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(15), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(0), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(1), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(2), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(3), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(4), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(5), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(6), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(7), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(8), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(9), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(10), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(11), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(12), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(13), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(14), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGCLAIMSET_EL1, CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGCLAIMCLR_EL1, CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGAUTHSTATUS_EL1, CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_OSLAR_EL1, CGT_MDCR_TDE_TDOSA),
+ SR_TRAP(SYS_OSLSR_EL1, CGT_MDCR_TDE_TDOSA),
+ SR_TRAP(SYS_OSDLR_EL1, CGT_MDCR_TDE_TDOSA),
+ SR_TRAP(SYS_DBGPRCR_EL1, CGT_MDCR_TDE_TDOSA),
+ SR_TRAP(SYS_MDRAR_EL1, CGT_MDCR_TDE_TDRA),
+ SR_TRAP(SYS_PMBLIMITR_EL1, CGT_MDCR_E2PB),
+ SR_TRAP(SYS_PMBPTR_EL1, CGT_MDCR_E2PB),
+ SR_TRAP(SYS_PMBSR_EL1, CGT_MDCR_E2PB),
+ SR_TRAP(SYS_PMSCR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSEVFR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSFCR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSICR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSIDR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSIRR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSLATFR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSNEVFR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_TRFCR_EL1, CGT_MDCR_TTRF),
+ SR_TRAP(SYS_TRBBASER_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_TRBLIMITR_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_TRBMAR_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_TRBPTR_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_TRBSR_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_TRBTRG_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_CNTP_TVAL_EL0, CGT_CNTHCTL_EL1PTEN),
+ SR_TRAP(SYS_CNTP_CVAL_EL0, CGT_CNTHCTL_EL1PTEN),
+ SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN),
+ SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN),
+ SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN),
+};
+
+static DEFINE_XARRAY(sr_forward_xa);
+
+enum fgt_group_id {
+ __NO_FGT_GROUP__,
+ HFGxTR_GROUP,
+ HDFGRTR_GROUP,
+ HDFGWTR_GROUP,
+ HFGITR_GROUP,
+
+ /* Must be last */
+ __NR_FGT_GROUP_IDS__
+};
+
+enum fg_filter_id {
+ __NO_FGF__,
+ HCRX_FGTnXS,
+
+ /* Must be last */
+ __NR_FG_FILTER_IDS__
+};
+
+#define SR_FGF(sr, g, b, p, f) \
+ { \
+ .encoding = sr, \
+ .end = sr, \
+ .tc = { \
+ .fgt = g ## _GROUP, \
+ .bit = g ## _EL2_ ## b ## _SHIFT, \
+ .pol = p, \
+ .fgf = f, \
+ }, \
+ .line = __LINE__, \
+ }
+
+#define SR_FGT(sr, g, b, p) SR_FGF(sr, g, b, p, __NO_FGF__)
+
+static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
+ /* HFGRTR_EL2, HFGWTR_EL2 */
+ SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0),
+ SR_FGT(SYS_SMPRI_EL1, HFGxTR, nSMPRI_EL1, 0),
+ SR_FGT(SYS_ACCDATA_EL1, HFGxTR, nACCDATA_EL1, 0),
+ SR_FGT(SYS_ERXADDR_EL1, HFGxTR, ERXADDR_EL1, 1),
+ SR_FGT(SYS_ERXPFGCDN_EL1, HFGxTR, ERXPFGCDN_EL1, 1),
+ SR_FGT(SYS_ERXPFGCTL_EL1, HFGxTR, ERXPFGCTL_EL1, 1),
+ SR_FGT(SYS_ERXPFGF_EL1, HFGxTR, ERXPFGF_EL1, 1),
+ SR_FGT(SYS_ERXMISC0_EL1, HFGxTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC1_EL1, HFGxTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC2_EL1, HFGxTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC3_EL1, HFGxTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXSTATUS_EL1, HFGxTR, ERXSTATUS_EL1, 1),
+ SR_FGT(SYS_ERXCTLR_EL1, HFGxTR, ERXCTLR_EL1, 1),
+ SR_FGT(SYS_ERXFR_EL1, HFGxTR, ERXFR_EL1, 1),
+ SR_FGT(SYS_ERRSELR_EL1, HFGxTR, ERRSELR_EL1, 1),
+ SR_FGT(SYS_ERRIDR_EL1, HFGxTR, ERRIDR_EL1, 1),
+ SR_FGT(SYS_ICC_IGRPEN0_EL1, HFGxTR, ICC_IGRPENn_EL1, 1),
+ SR_FGT(SYS_ICC_IGRPEN1_EL1, HFGxTR, ICC_IGRPENn_EL1, 1),
+ SR_FGT(SYS_VBAR_EL1, HFGxTR, VBAR_EL1, 1),
+ SR_FGT(SYS_TTBR1_EL1, HFGxTR, TTBR1_EL1, 1),
+ SR_FGT(SYS_TTBR0_EL1, HFGxTR, TTBR0_EL1, 1),
+ SR_FGT(SYS_TPIDR_EL0, HFGxTR, TPIDR_EL0, 1),
+ SR_FGT(SYS_TPIDRRO_EL0, HFGxTR, TPIDRRO_EL0, 1),
+ SR_FGT(SYS_TPIDR_EL1, HFGxTR, TPIDR_EL1, 1),
+ SR_FGT(SYS_TCR_EL1, HFGxTR, TCR_EL1, 1),
+ SR_FGT(SYS_SCXTNUM_EL0, HFGxTR, SCXTNUM_EL0, 1),
+ SR_FGT(SYS_SCXTNUM_EL1, HFGxTR, SCXTNUM_EL1, 1),
+ SR_FGT(SYS_SCTLR_EL1, HFGxTR, SCTLR_EL1, 1),
+ SR_FGT(SYS_REVIDR_EL1, HFGxTR, REVIDR_EL1, 1),
+ SR_FGT(SYS_PAR_EL1, HFGxTR, PAR_EL1, 1),
+ SR_FGT(SYS_MPIDR_EL1, HFGxTR, MPIDR_EL1, 1),
+ SR_FGT(SYS_MIDR_EL1, HFGxTR, MIDR_EL1, 1),
+ SR_FGT(SYS_MAIR_EL1, HFGxTR, MAIR_EL1, 1),
+ SR_FGT(SYS_LORSA_EL1, HFGxTR, LORSA_EL1, 1),
+ SR_FGT(SYS_LORN_EL1, HFGxTR, LORN_EL1, 1),
+ SR_FGT(SYS_LORID_EL1, HFGxTR, LORID_EL1, 1),
+ SR_FGT(SYS_LOREA_EL1, HFGxTR, LOREA_EL1, 1),
+ SR_FGT(SYS_LORC_EL1, HFGxTR, LORC_EL1, 1),
+ SR_FGT(SYS_ISR_EL1, HFGxTR, ISR_EL1, 1),
+ SR_FGT(SYS_FAR_EL1, HFGxTR, FAR_EL1, 1),
+ SR_FGT(SYS_ESR_EL1, HFGxTR, ESR_EL1, 1),
+ SR_FGT(SYS_DCZID_EL0, HFGxTR, DCZID_EL0, 1),
+ SR_FGT(SYS_CTR_EL0, HFGxTR, CTR_EL0, 1),
+ SR_FGT(SYS_CSSELR_EL1, HFGxTR, CSSELR_EL1, 1),
+ SR_FGT(SYS_CPACR_EL1, HFGxTR, CPACR_EL1, 1),
+ SR_FGT(SYS_CONTEXTIDR_EL1, HFGxTR, CONTEXTIDR_EL1, 1),
+ SR_FGT(SYS_CLIDR_EL1, HFGxTR, CLIDR_EL1, 1),
+ SR_FGT(SYS_CCSIDR_EL1, HFGxTR, CCSIDR_EL1, 1),
+ SR_FGT(SYS_APIBKEYLO_EL1, HFGxTR, APIBKey, 1),
+ SR_FGT(SYS_APIBKEYHI_EL1, HFGxTR, APIBKey, 1),
+ SR_FGT(SYS_APIAKEYLO_EL1, HFGxTR, APIAKey, 1),
+ SR_FGT(SYS_APIAKEYHI_EL1, HFGxTR, APIAKey, 1),
+ SR_FGT(SYS_APGAKEYLO_EL1, HFGxTR, APGAKey, 1),
+ SR_FGT(SYS_APGAKEYHI_EL1, HFGxTR, APGAKey, 1),
+ SR_FGT(SYS_APDBKEYLO_EL1, HFGxTR, APDBKey, 1),
+ SR_FGT(SYS_APDBKEYHI_EL1, HFGxTR, APDBKey, 1),
+ SR_FGT(SYS_APDAKEYLO_EL1, HFGxTR, APDAKey, 1),
+ SR_FGT(SYS_APDAKEYHI_EL1, HFGxTR, APDAKey, 1),
+ SR_FGT(SYS_AMAIR_EL1, HFGxTR, AMAIR_EL1, 1),
+ SR_FGT(SYS_AIDR_EL1, HFGxTR, AIDR_EL1, 1),
+ SR_FGT(SYS_AFSR1_EL1, HFGxTR, AFSR1_EL1, 1),
+ SR_FGT(SYS_AFSR0_EL1, HFGxTR, AFSR0_EL1, 1),
+ /* HFGITR_EL2 */
+ SR_FGT(OP_BRB_IALL, HFGITR, nBRBIALL, 0),
+ SR_FGT(OP_BRB_INJ, HFGITR, nBRBINJ, 0),
+ SR_FGT(SYS_DC_CVAC, HFGITR, DCCVAC, 1),
+ SR_FGT(SYS_DC_CGVAC, HFGITR, DCCVAC, 1),
+ SR_FGT(SYS_DC_CGDVAC, HFGITR, DCCVAC, 1),
+ SR_FGT(OP_CPP_RCTX, HFGITR, CPPRCTX, 1),
+ SR_FGT(OP_DVP_RCTX, HFGITR, DVPRCTX, 1),
+ SR_FGT(OP_CFP_RCTX, HFGITR, CFPRCTX, 1),
+ SR_FGT(OP_TLBI_VAALE1, HFGITR, TLBIVAALE1, 1),
+ SR_FGT(OP_TLBI_VALE1, HFGITR, TLBIVALE1, 1),
+ SR_FGT(OP_TLBI_VAAE1, HFGITR, TLBIVAAE1, 1),
+ SR_FGT(OP_TLBI_ASIDE1, HFGITR, TLBIASIDE1, 1),
+ SR_FGT(OP_TLBI_VAE1, HFGITR, TLBIVAE1, 1),
+ SR_FGT(OP_TLBI_VMALLE1, HFGITR, TLBIVMALLE1, 1),
+ SR_FGT(OP_TLBI_RVAALE1, HFGITR, TLBIRVAALE1, 1),
+ SR_FGT(OP_TLBI_RVALE1, HFGITR, TLBIRVALE1, 1),
+ SR_FGT(OP_TLBI_RVAAE1, HFGITR, TLBIRVAAE1, 1),
+ SR_FGT(OP_TLBI_RVAE1, HFGITR, TLBIRVAE1, 1),
+ SR_FGT(OP_TLBI_RVAALE1IS, HFGITR, TLBIRVAALE1IS, 1),
+ SR_FGT(OP_TLBI_RVALE1IS, HFGITR, TLBIRVALE1IS, 1),
+ SR_FGT(OP_TLBI_RVAAE1IS, HFGITR, TLBIRVAAE1IS, 1),
+ SR_FGT(OP_TLBI_RVAE1IS, HFGITR, TLBIRVAE1IS, 1),
+ SR_FGT(OP_TLBI_VAALE1IS, HFGITR, TLBIVAALE1IS, 1),
+ SR_FGT(OP_TLBI_VALE1IS, HFGITR, TLBIVALE1IS, 1),
+ SR_FGT(OP_TLBI_VAAE1IS, HFGITR, TLBIVAAE1IS, 1),
+ SR_FGT(OP_TLBI_ASIDE1IS, HFGITR, TLBIASIDE1IS, 1),
+ SR_FGT(OP_TLBI_VAE1IS, HFGITR, TLBIVAE1IS, 1),
+ SR_FGT(OP_TLBI_VMALLE1IS, HFGITR, TLBIVMALLE1IS, 1),
+ SR_FGT(OP_TLBI_RVAALE1OS, HFGITR, TLBIRVAALE1OS, 1),
+ SR_FGT(OP_TLBI_RVALE1OS, HFGITR, TLBIRVALE1OS, 1),
+ SR_FGT(OP_TLBI_RVAAE1OS, HFGITR, TLBIRVAAE1OS, 1),
+ SR_FGT(OP_TLBI_RVAE1OS, HFGITR, TLBIRVAE1OS, 1),
+ SR_FGT(OP_TLBI_VAALE1OS, HFGITR, TLBIVAALE1OS, 1),
+ SR_FGT(OP_TLBI_VALE1OS, HFGITR, TLBIVALE1OS, 1),
+ SR_FGT(OP_TLBI_VAAE1OS, HFGITR, TLBIVAAE1OS, 1),
+ SR_FGT(OP_TLBI_ASIDE1OS, HFGITR, TLBIASIDE1OS, 1),
+ SR_FGT(OP_TLBI_VAE1OS, HFGITR, TLBIVAE1OS, 1),
+ SR_FGT(OP_TLBI_VMALLE1OS, HFGITR, TLBIVMALLE1OS, 1),
+ /* nXS variants must be checked against HCRX_EL2.FGTnXS */
+ SR_FGF(OP_TLBI_VAALE1NXS, HFGITR, TLBIVAALE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VALE1NXS, HFGITR, TLBIVALE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAAE1NXS, HFGITR, TLBIVAAE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_ASIDE1NXS, HFGITR, TLBIASIDE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAE1NXS, HFGITR, TLBIVAE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VMALLE1NXS, HFGITR, TLBIVMALLE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAALE1NXS, HFGITR, TLBIRVAALE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVALE1NXS, HFGITR, TLBIRVALE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAAE1NXS, HFGITR, TLBIRVAAE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAE1NXS, HFGITR, TLBIRVAE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAALE1ISNXS, HFGITR, TLBIRVAALE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVALE1ISNXS, HFGITR, TLBIRVALE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAAE1ISNXS, HFGITR, TLBIRVAAE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAE1ISNXS, HFGITR, TLBIRVAE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAALE1ISNXS, HFGITR, TLBIVAALE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VALE1ISNXS, HFGITR, TLBIVALE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAAE1ISNXS, HFGITR, TLBIVAAE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_ASIDE1ISNXS, HFGITR, TLBIASIDE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAE1ISNXS, HFGITR, TLBIVAE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VMALLE1ISNXS, HFGITR, TLBIVMALLE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAALE1OSNXS, HFGITR, TLBIRVAALE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVALE1OSNXS, HFGITR, TLBIRVALE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAAE1OSNXS, HFGITR, TLBIRVAAE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAE1OSNXS, HFGITR, TLBIRVAE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAALE1OSNXS, HFGITR, TLBIVAALE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VALE1OSNXS, HFGITR, TLBIVALE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAAE1OSNXS, HFGITR, TLBIVAAE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_ASIDE1OSNXS, HFGITR, TLBIASIDE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAE1OSNXS, HFGITR, TLBIVAE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VMALLE1OSNXS, HFGITR, TLBIVMALLE1OS, 1, HCRX_FGTnXS),
+ SR_FGT(OP_AT_S1E1WP, HFGITR, ATS1E1WP, 1),
+ SR_FGT(OP_AT_S1E1RP, HFGITR, ATS1E1RP, 1),
+ SR_FGT(OP_AT_S1E0W, HFGITR, ATS1E0W, 1),
+ SR_FGT(OP_AT_S1E0R, HFGITR, ATS1E0R, 1),
+ SR_FGT(OP_AT_S1E1W, HFGITR, ATS1E1W, 1),
+ SR_FGT(OP_AT_S1E1R, HFGITR, ATS1E1R, 1),
+ SR_FGT(SYS_DC_ZVA, HFGITR, DCZVA, 1),
+ SR_FGT(SYS_DC_GVA, HFGITR, DCZVA, 1),
+ SR_FGT(SYS_DC_GZVA, HFGITR, DCZVA, 1),
+ SR_FGT(SYS_DC_CIVAC, HFGITR, DCCIVAC, 1),
+ SR_FGT(SYS_DC_CIGVAC, HFGITR, DCCIVAC, 1),
+ SR_FGT(SYS_DC_CIGDVAC, HFGITR, DCCIVAC, 1),
+ SR_FGT(SYS_DC_CVADP, HFGITR, DCCVADP, 1),
+ SR_FGT(SYS_DC_CGVADP, HFGITR, DCCVADP, 1),
+ SR_FGT(SYS_DC_CGDVADP, HFGITR, DCCVADP, 1),
+ SR_FGT(SYS_DC_CVAP, HFGITR, DCCVAP, 1),
+ SR_FGT(SYS_DC_CGVAP, HFGITR, DCCVAP, 1),
+ SR_FGT(SYS_DC_CGDVAP, HFGITR, DCCVAP, 1),
+ SR_FGT(SYS_DC_CVAU, HFGITR, DCCVAU, 1),
+ SR_FGT(SYS_DC_CISW, HFGITR, DCCISW, 1),
+ SR_FGT(SYS_DC_CIGSW, HFGITR, DCCISW, 1),
+ SR_FGT(SYS_DC_CIGDSW, HFGITR, DCCISW, 1),
+ SR_FGT(SYS_DC_CSW, HFGITR, DCCSW, 1),
+ SR_FGT(SYS_DC_CGSW, HFGITR, DCCSW, 1),
+ SR_FGT(SYS_DC_CGDSW, HFGITR, DCCSW, 1),
+ SR_FGT(SYS_DC_ISW, HFGITR, DCISW, 1),
+ SR_FGT(SYS_DC_IGSW, HFGITR, DCISW, 1),
+ SR_FGT(SYS_DC_IGDSW, HFGITR, DCISW, 1),
+ SR_FGT(SYS_DC_IVAC, HFGITR, DCIVAC, 1),
+ SR_FGT(SYS_DC_IGVAC, HFGITR, DCIVAC, 1),
+ SR_FGT(SYS_DC_IGDVAC, HFGITR, DCIVAC, 1),
+ SR_FGT(SYS_IC_IVAU, HFGITR, ICIVAU, 1),
+ SR_FGT(SYS_IC_IALLU, HFGITR, ICIALLU, 1),
+ SR_FGT(SYS_IC_IALLUIS, HFGITR, ICIALLUIS, 1),
+ /* HDFGRTR_EL2 */
+ SR_FGT(SYS_PMBIDR_EL1, HDFGRTR, PMBIDR_EL1, 1),
+ SR_FGT(SYS_PMSNEVFR_EL1, HDFGRTR, nPMSNEVFR_EL1, 0),
+ SR_FGT(SYS_BRBINF_EL1(0), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(1), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(2), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(3), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(4), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(5), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(6), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(7), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(8), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(9), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(10), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(11), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(12), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(13), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(14), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(15), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(16), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(17), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(18), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(19), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(20), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(21), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(22), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(23), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(24), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(25), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(26), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(27), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(28), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(29), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(30), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(31), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINFINJ_EL1, HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(0), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(1), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(2), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(3), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(4), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(5), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(6), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(7), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(8), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(9), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(10), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(11), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(12), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(13), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(14), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(15), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(16), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(17), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(18), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(19), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(20), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(21), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(22), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(23), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(24), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(25), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(26), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(27), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(28), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(29), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(30), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(31), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRCINJ_EL1, HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(0), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(1), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(2), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(3), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(4), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(5), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(6), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(7), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(8), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(9), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(10), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(11), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(12), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(13), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(14), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(15), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(16), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(17), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(18), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(19), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(20), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(21), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(22), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(23), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(24), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(25), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(26), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(27), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(28), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(29), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(30), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(31), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGTINJ_EL1, HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTS_EL1, HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBCR_EL1, HDFGRTR, nBRBCTL, 0),
+ SR_FGT(SYS_BRBFCR_EL1, HDFGRTR, nBRBCTL, 0),
+ SR_FGT(SYS_BRBIDR0_EL1, HDFGRTR, nBRBIDR, 0),
+ SR_FGT(SYS_PMCEID0_EL0, HDFGRTR, PMCEIDn_EL0, 1),
+ SR_FGT(SYS_PMCEID1_EL0, HDFGRTR, PMCEIDn_EL0, 1),
+ SR_FGT(SYS_PMUSERENR_EL0, HDFGRTR, PMUSERENR_EL0, 1),
+ SR_FGT(SYS_TRBTRG_EL1, HDFGRTR, TRBTRG_EL1, 1),
+ SR_FGT(SYS_TRBSR_EL1, HDFGRTR, TRBSR_EL1, 1),
+ SR_FGT(SYS_TRBPTR_EL1, HDFGRTR, TRBPTR_EL1, 1),
+ SR_FGT(SYS_TRBMAR_EL1, HDFGRTR, TRBMAR_EL1, 1),
+ SR_FGT(SYS_TRBLIMITR_EL1, HDFGRTR, TRBLIMITR_EL1, 1),
+ SR_FGT(SYS_TRBIDR_EL1, HDFGRTR, TRBIDR_EL1, 1),
+ SR_FGT(SYS_TRBBASER_EL1, HDFGRTR, TRBBASER_EL1, 1),
+ SR_FGT(SYS_TRCVICTLR, HDFGRTR, TRCVICTLR, 1),
+ SR_FGT(SYS_TRCSTATR, HDFGRTR, TRCSTATR, 1),
+ SR_FGT(SYS_TRCSSCSR(0), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(1), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(2), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(3), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(4), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(5), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(6), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(7), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSEQSTR, HDFGRTR, TRCSEQSTR, 1),
+ SR_FGT(SYS_TRCPRGCTLR, HDFGRTR, TRCPRGCTLR, 1),
+ SR_FGT(SYS_TRCOSLSR, HDFGRTR, TRCOSLSR, 1),
+ SR_FGT(SYS_TRCIMSPEC(0), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(1), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(2), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(3), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(4), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(5), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(6), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(7), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCDEVARCH, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCDEVID, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR0, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR1, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR2, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR3, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR4, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR5, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR6, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR7, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR8, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR9, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR10, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR11, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR12, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR13, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCCNTVR(0), HDFGRTR, TRCCNTVRn, 1),
+ SR_FGT(SYS_TRCCNTVR(1), HDFGRTR, TRCCNTVRn, 1),
+ SR_FGT(SYS_TRCCNTVR(2), HDFGRTR, TRCCNTVRn, 1),
+ SR_FGT(SYS_TRCCNTVR(3), HDFGRTR, TRCCNTVRn, 1),
+ SR_FGT(SYS_TRCCLAIMCLR, HDFGRTR, TRCCLAIM, 1),
+ SR_FGT(SYS_TRCCLAIMSET, HDFGRTR, TRCCLAIM, 1),
+ SR_FGT(SYS_TRCAUXCTLR, HDFGRTR, TRCAUXCTLR, 1),
+ SR_FGT(SYS_TRCAUTHSTATUS, HDFGRTR, TRCAUTHSTATUS, 1),
+ SR_FGT(SYS_TRCACATR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(8), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(9), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(10), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(11), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(12), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(13), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(14), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(15), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(8), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(9), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(10), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(11), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(12), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(13), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(14), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(15), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCBBCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCCCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCCTLR0, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCCTLR1, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTCTLR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTCTLR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTCTLR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTCTLR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTRLDVR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTRLDVR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTRLDVR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTRLDVR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCONFIGR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEVENTCTL0R, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEVENTCTL1R, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEXTINSELR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEXTINSELR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEXTINSELR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEXTINSELR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCQCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(8), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(9), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(10), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(11), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(12), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(13), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(14), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(15), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(16), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(17), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(18), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(19), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(20), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(21), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(22), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(23), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(24), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(25), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(26), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(27), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(28), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(29), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(30), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(31), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSEQEVR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSEQEVR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSEQEVR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSEQRSTEVR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSTALLCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSYNCPR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCTRACEIDR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCTSCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVIIECTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVIPCSSCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVISSCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCCTLR0, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCCTLR1, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_PMSLATFR_EL1, HDFGRTR, PMSLATFR_EL1, 1),
+ SR_FGT(SYS_PMSIRR_EL1, HDFGRTR, PMSIRR_EL1, 1),
+ SR_FGT(SYS_PMSIDR_EL1, HDFGRTR, PMSIDR_EL1, 1),
+ SR_FGT(SYS_PMSICR_EL1, HDFGRTR, PMSICR_EL1, 1),
+ SR_FGT(SYS_PMSFCR_EL1, HDFGRTR, PMSFCR_EL1, 1),
+ SR_FGT(SYS_PMSEVFR_EL1, HDFGRTR, PMSEVFR_EL1, 1),
+ SR_FGT(SYS_PMSCR_EL1, HDFGRTR, PMSCR_EL1, 1),
+ SR_FGT(SYS_PMBSR_EL1, HDFGRTR, PMBSR_EL1, 1),
+ SR_FGT(SYS_PMBPTR_EL1, HDFGRTR, PMBPTR_EL1, 1),
+ SR_FGT(SYS_PMBLIMITR_EL1, HDFGRTR, PMBLIMITR_EL1, 1),
+ SR_FGT(SYS_PMMIR_EL1, HDFGRTR, PMMIR_EL1, 1),
+ SR_FGT(SYS_PMSELR_EL0, HDFGRTR, PMSELR_EL0, 1),
+ SR_FGT(SYS_PMOVSCLR_EL0, HDFGRTR, PMOVS, 1),
+ SR_FGT(SYS_PMOVSSET_EL0, HDFGRTR, PMOVS, 1),
+ SR_FGT(SYS_PMINTENCLR_EL1, HDFGRTR, PMINTEN, 1),
+ SR_FGT(SYS_PMINTENSET_EL1, HDFGRTR, PMINTEN, 1),
+ SR_FGT(SYS_PMCNTENCLR_EL0, HDFGRTR, PMCNTEN, 1),
+ SR_FGT(SYS_PMCNTENSET_EL0, HDFGRTR, PMCNTEN, 1),
+ SR_FGT(SYS_PMCCNTR_EL0, HDFGRTR, PMCCNTR_EL0, 1),
+ SR_FGT(SYS_PMCCFILTR_EL0, HDFGRTR, PMCCFILTR_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(0), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(1), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(2), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(3), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(4), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(5), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(6), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(7), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(8), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(9), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(10), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(11), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(12), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(13), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(14), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(15), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(16), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(17), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(18), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(19), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(20), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(21), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(22), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(23), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(24), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(25), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(26), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(27), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(28), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(29), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(30), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(0), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(1), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(2), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(3), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(4), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(5), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(6), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(7), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(8), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(9), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(10), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(11), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(12), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(13), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(14), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(15), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(16), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(17), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(18), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(19), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(20), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(21), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(22), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(23), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(24), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(25), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(26), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(27), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(28), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(29), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(30), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_OSDLR_EL1, HDFGRTR, OSDLR_EL1, 1),
+ SR_FGT(SYS_OSECCR_EL1, HDFGRTR, OSECCR_EL1, 1),
+ SR_FGT(SYS_OSLSR_EL1, HDFGRTR, OSLSR_EL1, 1),
+ SR_FGT(SYS_DBGPRCR_EL1, HDFGRTR, DBGPRCR_EL1, 1),
+ SR_FGT(SYS_DBGAUTHSTATUS_EL1, HDFGRTR, DBGAUTHSTATUS_EL1, 1),
+ SR_FGT(SYS_DBGCLAIMSET_EL1, HDFGRTR, DBGCLAIM, 1),
+ SR_FGT(SYS_DBGCLAIMCLR_EL1, HDFGRTR, DBGCLAIM, 1),
+ SR_FGT(SYS_MDSCR_EL1, HDFGRTR, MDSCR_EL1, 1),
+ /*
+ * The trap bits capture *64* debug registers per bit, but the
+ * ARM ARM only describes the encoding for the first 16, and
+ * we don't really support more than that anyway.
+ */
+ SR_FGT(SYS_DBGWVRn_EL1(0), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(1), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(2), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(3), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(4), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(5), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(6), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(7), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(8), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(9), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(10), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(11), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(12), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(13), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(14), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(15), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(0), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(1), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(2), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(3), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(4), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(5), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(6), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(7), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(8), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(9), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(10), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(11), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(12), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(13), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(14), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(15), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(0), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(1), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(2), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(3), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(4), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(5), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(6), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(7), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(8), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(9), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(10), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(11), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(12), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(13), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(14), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(15), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(0), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(1), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(2), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(3), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(4), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(5), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(6), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(7), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(8), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(9), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(10), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(11), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(12), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(13), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(14), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(15), HDFGRTR, DBGBCRn_EL1, 1),
+ /*
+ * HDFGWTR_EL2
+ *
+ * Although HDFGRTR_EL2 and HDFGWTR_EL2 registers largely
+ * overlap in their bit assignment, there are a number of bits
+ * that are RES0 on one side, and an actual trap bit on the
+ * other. The policy chosen here is to describe all the
+ * read-side mappings, and only the write-side mappings that
+ * differ from the read side, and the trap handler will pick
+ * the correct shadow register based on the access type.
+ */
+ SR_FGT(SYS_TRFCR_EL1, HDFGWTR, TRFCR_EL1, 1),
+ SR_FGT(SYS_TRCOSLAR, HDFGWTR, TRCOSLAR, 1),
+ SR_FGT(SYS_PMCR_EL0, HDFGWTR, PMCR_EL0, 1),
+ SR_FGT(SYS_PMSWINC_EL0, HDFGWTR, PMSWINC_EL0, 1),
+ SR_FGT(SYS_OSLAR_EL1, HDFGWTR, OSLAR_EL1, 1),
+};
+
+static union trap_config get_trap_config(u32 sysreg)
+{
+ return (union trap_config) {
+ .val = xa_to_value(xa_load(&sr_forward_xa, sysreg)),
+ };
+}
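
As an aside, get_trap_config() leans on the xarray storing each packed union trap_config as a tagged value entry rather than as a pointer, which is also why the MBZ check in populate_nv_trap_config() below rejects any table entry with bit 63 set. A minimal userspace sketch of that tagging, assuming a 64-bit machine and mimicking xa_mk_value()/xa_to_value(); none of this code is part of the patch:

/*
 * Userspace sketch (not kernel code) of the xarray value tagging:
 * xa_mk_value() shifts the integer left by one and sets bit 0 as a
 * tag, xa_to_value() reverses it. The shift is what makes bit 63 of
 * the packed trap_config "must be zero".
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void *mk_value(uint64_t v)		/* mimics xa_mk_value() */
{
	assert(!(v >> 63));			/* bit 63 would be lost by the shift */
	return (void *)(uintptr_t)((v << 1) | 1);
}

static uint64_t to_value(void *entry)		/* mimics xa_to_value() */
{
	return (uint64_t)(uintptr_t)entry >> 1;
}

int main(void)
{
	uint64_t cfg = 0x0123456789abcdefULL;	/* made-up packed trap_config */

	printf("round trip ok: %d\n", to_value(mk_value(cfg)) == cfg);
	return 0;
}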
+
+static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc,
+ const char *type, int err)
+{
+ kvm_err("%s line %d encoding range "
+ "(%d, %d, %d, %d, %d) - (%d, %d, %d, %d, %d) (err=%d)\n",
+ type, tc->line,
+ sys_reg_Op0(tc->encoding), sys_reg_Op1(tc->encoding),
+ sys_reg_CRn(tc->encoding), sys_reg_CRm(tc->encoding),
+ sys_reg_Op2(tc->encoding),
+ sys_reg_Op0(tc->end), sys_reg_Op1(tc->end),
+ sys_reg_CRn(tc->end), sys_reg_CRm(tc->end),
+ sys_reg_Op2(tc->end),
+ err);
+}
+
+int __init populate_nv_trap_config(void)
+{
+ int ret = 0;
+
+ BUILD_BUG_ON(sizeof(union trap_config) != sizeof(void *));
+ BUILD_BUG_ON(__NR_CGT_GROUP_IDS__ > BIT(TC_CGT_BITS));
+ BUILD_BUG_ON(__NR_FGT_GROUP_IDS__ > BIT(TC_FGT_BITS));
+ BUILD_BUG_ON(__NR_FG_FILTER_IDS__ > BIT(TC_FGF_BITS));
+
+ for (int i = 0; i < ARRAY_SIZE(encoding_to_cgt); i++) {
+ const struct encoding_to_trap_config *cgt = &encoding_to_cgt[i];
+ void *prev;
+
+ if (cgt->tc.val & BIT(63)) {
+ kvm_err("CGT[%d] has MBZ bit set\n", i);
+ ret = -EINVAL;
+ }
+
+ if (cgt->encoding != cgt->end) {
+ prev = xa_store_range(&sr_forward_xa,
+ cgt->encoding, cgt->end,
+ xa_mk_value(cgt->tc.val),
+ GFP_KERNEL);
+ } else {
+ prev = xa_store(&sr_forward_xa, cgt->encoding,
+ xa_mk_value(cgt->tc.val), GFP_KERNEL);
+ if (prev && !xa_is_err(prev)) {
+ ret = -EINVAL;
+ print_nv_trap_error(cgt, "Duplicate CGT", ret);
+ }
+ }
+
+ if (xa_is_err(prev)) {
+ ret = xa_err(prev);
+ print_nv_trap_error(cgt, "Failed CGT insertion", ret);
+ }
+ }
+
+ kvm_info("nv: %ld coarse grained trap handlers\n",
+ ARRAY_SIZE(encoding_to_cgt));
+
+ if (!cpus_have_final_cap(ARM64_HAS_FGT))
+ goto check_mcb;
+
+ for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) {
+ const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i];
+ union trap_config tc;
+
+ if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) {
+ ret = -EINVAL;
+ print_nv_trap_error(fgt, "Invalid FGT", ret);
+ }
+
+ tc = get_trap_config(fgt->encoding);
+
+ if (tc.fgt) {
+ ret = -EINVAL;
+ print_nv_trap_error(fgt, "Duplicate FGT", ret);
+ }
+
+ tc.val |= fgt->tc.val;
+ xa_store(&sr_forward_xa, fgt->encoding,
+ xa_mk_value(tc.val), GFP_KERNEL);
+ }
+
+ kvm_info("nv: %ld fine grained trap handlers\n",
+ ARRAY_SIZE(encoding_to_fgt));
+
+check_mcb:
+ for (int id = __MULTIPLE_CONTROL_BITS__; id < __COMPLEX_CONDITIONS__; id++) {
+ const enum cgt_group_id *cgids;
+
+ cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__];
+
+ for (int i = 0; cgids[i] != __RESERVED__; i++) {
+ if (cgids[i] >= __MULTIPLE_CONTROL_BITS__) {
+ kvm_err("Recursive MCB %d/%d\n", id, cgids[i]);
+ ret = -EINVAL;
+ }
+ }
+ }
+
+ if (ret)
+ xa_destroy(&sr_forward_xa);
+
+ return ret;
+}
+
+static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu,
+ const struct trap_bits *tb)
+{
+ enum trap_behaviour b = BEHAVE_HANDLE_LOCALLY;
+ u64 val;
+
+ val = __vcpu_sys_reg(vcpu, tb->index);
+ if ((val & tb->mask) == tb->value)
+ b |= tb->behaviour;
+
+ return b;
+}
+
+static enum trap_behaviour __compute_trap_behaviour(struct kvm_vcpu *vcpu,
+ const enum cgt_group_id id,
+ enum trap_behaviour b)
+{
+ switch (id) {
+ const enum cgt_group_id *cgids;
+
+ case __RESERVED__ ... __MULTIPLE_CONTROL_BITS__ - 1:
+ if (likely(id != __RESERVED__))
+ b |= get_behaviour(vcpu, &coarse_trap_bits[id]);
+ break;
+ case __MULTIPLE_CONTROL_BITS__ ... __COMPLEX_CONDITIONS__ - 1:
+ /* Yes, this is recursive. Don't do anything stupid. */
+ cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__];
+ for (int i = 0; cgids[i] != __RESERVED__; i++)
+ b |= __compute_trap_behaviour(vcpu, cgids[i], b);
+ break;
+ default:
+ if (ARRAY_SIZE(ccc))
+ b |= ccc[id - __COMPLEX_CONDITIONS__](vcpu);
+ break;
+ }
+
+ return b;
+}
+
+static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu,
+ const union trap_config tc)
+{
+ enum trap_behaviour b = BEHAVE_HANDLE_LOCALLY;
+
+ return __compute_trap_behaviour(vcpu, tc.cgt, b);
+}
+
+static bool check_fgt_bit(u64 val, const union trap_config tc)
+{
+ return ((val >> tc.bit) & 1) == tc.pol;
+}
+
+#define sanitised_sys_reg(vcpu, reg) \
+ ({ \
+ u64 __val; \
+ __val = __vcpu_sys_reg(vcpu, reg); \
+ __val &= ~__ ## reg ## _RES0; \
+ (__val); \
+ })
+
+bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
+{
+ union trap_config tc;
+ enum trap_behaviour b;
+ bool is_read;
+ u32 sysreg;
+ u64 esr, val;
+
+ if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ return false;
+
+ esr = kvm_vcpu_get_esr(vcpu);
+ sysreg = esr_sys64_to_sysreg(esr);
+ is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
+
+ tc = get_trap_config(sysreg);
+
+ /*
+ * A value of 0 for the whole entry means that we know nothing
+ * for this sysreg, and that it cannot be re-injected into the
+ * nested hypervisor. In this situation, let's cut it short.
+ *
+ * Note that ultimately, we could also make use of the xarray
+ * to store the index of the sysreg in the local descriptor
+ * array, avoiding another search... Hint, hint...
+ */
+ if (!tc.val)
+ return false;
+
+ switch ((enum fgt_group_id)tc.fgt) {
+ case __NO_FGT_GROUP__:
+ break;
+
+ case HFGxTR_GROUP:
+ if (is_read)
+ val = sanitised_sys_reg(vcpu, HFGRTR_EL2);
+ else
+ val = sanitised_sys_reg(vcpu, HFGWTR_EL2);
+ break;
+
+ case HDFGRTR_GROUP:
+ case HDFGWTR_GROUP:
+ if (is_read)
+ val = sanitised_sys_reg(vcpu, HDFGRTR_EL2);
+ else
+ val = sanitised_sys_reg(vcpu, HDFGWTR_EL2);
+ break;
+
+ case HFGITR_GROUP:
+ val = sanitised_sys_reg(vcpu, HFGITR_EL2);
+ switch (tc.fgf) {
+ u64 tmp;
+
+ case __NO_FGF__:
+ break;
+
+ case HCRX_FGTnXS:
+ tmp = sanitised_sys_reg(vcpu, HCRX_EL2);
+ if (tmp & HCRX_EL2_FGTnXS)
+ tc.fgt = __NO_FGT_GROUP__;
+ }
+ break;
+
+ case __NR_FGT_GROUP_IDS__:
+ /* Something is really wrong, bail out */
+ WARN_ONCE(1, "__NR_FGT_GROUP_IDS__");
+ return false;
+ }
+
+ if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(val, tc))
+ goto inject;
+
+ b = compute_trap_behaviour(vcpu, tc);
+
+ if (((b & BEHAVE_FORWARD_READ) && is_read) ||
+ ((b & BEHAVE_FORWARD_WRITE) && !is_read))
+ goto inject;
+
+ return false;
+
+inject:
+ trace_kvm_forward_sysreg_trap(vcpu, sysreg, is_read);
+
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+ return true;
+}
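
As an aside, the forwarding decision above boils down to two tests: check_fgt_bit() compares one bit of the relevant fine-grained trap register against the table's polarity, and the coarse-grained behaviour is then matched against the access direction. A self-contained sketch of that logic; the FWD_* flags and sample values are stand-ins, not taken from the patch:

/*
 * Standalone sketch (not kernel code) of the two checks made by
 * __check_nv_sr_forward(): a fine-grained bit/polarity test, then a
 * coarse-grained read/write behaviour test.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FWD_READ	(1U << 0)	/* stand-in for BEHAVE_FORWARD_READ  */
#define FWD_WRITE	(1U << 1)	/* stand-in for BEHAVE_FORWARD_WRITE */

/* mirrors check_fgt_bit(): trap if the selected bit equals the polarity */
static bool fgt_bit_traps(uint64_t fgt_val, unsigned int bit, unsigned int pol)
{
	return ((fgt_val >> bit) & 1) == pol;
}

static bool must_forward(uint64_t fgt_val, unsigned int bit, unsigned int pol,
			 unsigned int behaviour, bool is_read)
{
	if (fgt_bit_traps(fgt_val, bit, pol))
		return true;
	return (is_read  && (behaviour & FWD_READ)) ||
	       (!is_read && (behaviour & FWD_WRITE));
}

int main(void)
{
	/* FGT bit 12 set, positive polarity: forward regardless of behaviour */
	printf("%d\n", must_forward(1ULL << 12, 12, 1, 0, true));
	/* FGT bit clear, behaviour only forwards writes, access is a read: handle locally */
	printf("%d\n", must_forward(0, 12, 1, FWD_WRITE, true));
	return 0;
}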
+
static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
{
u64 mode = spsr & PSR_MODE_MASK;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 20280a5233f6..95f6945c4432 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -884,21 +884,6 @@ u32 __attribute_const__ kvm_target_cpu(void)
return KVM_ARM_TARGET_GENERIC_V8;
}
-void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
-{
- u32 target = kvm_target_cpu();
-
- memset(init, 0, sizeof(*init));
-
- /*
- * For now, we don't return any features.
- * In future, we might use features to return target
- * specific features available for the preferred
- * target type.
- */
- init->target = (__u32)target;
-}
-
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 6dcd6604b6bc..617ae6dea5d5 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -222,7 +222,33 @@ static int kvm_handle_eret(struct kvm_vcpu *vcpu)
if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERET)
return kvm_handle_ptrauth(vcpu);
- kvm_emulate_nested_eret(vcpu);
+ /*
+ * If we got here, two possibilities:
+ *
+ * - the guest is in EL2, and we need to fully emulate ERET
+ *
+ * - the guest is in EL1, and we need to reinject the
+ * exception into the L1 hypervisor.
+ *
+ * If KVM ever traps ERET for its own use, we'll have to
+ * revisit this.
+ */
+ if (is_hyp_ctxt(vcpu))
+ kvm_emulate_nested_eret(vcpu);
+ else
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+ return 1;
+}
+
+static int handle_svc(struct kvm_vcpu *vcpu)
+{
+ /*
+	 * So far, SVC traps only for NV via HFGITR_EL2. An SVC from a
+	 * 32bit guest would be caught by vcpu_mode_is_bad_32bit(), so
+	 * we should only have to deal with a 64 bit exception.
+ */
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
return 1;
}
@@ -239,6 +265,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_SMC32] = handle_smc,
[ESR_ELx_EC_HVC64] = handle_hvc,
[ESR_ELx_EC_SMC64] = handle_smc,
+ [ESR_ELx_EC_SVC64] = handle_svc,
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
[ESR_ELx_EC_SVE] = handle_sve,
[ESR_ELx_EC_ERET] = kvm_handle_eret,
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 34f222af6165..9cfe6bd1dbe4 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -70,20 +70,26 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
}
}
-static inline bool __hfgxtr_traps_required(void)
-{
- if (cpus_have_final_cap(ARM64_SME))
- return true;
-
- if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
- return true;
+#define compute_clr_set(vcpu, reg, clr, set) \
+ do { \
+ u64 hfg; \
+ hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0; \
+ set |= hfg & __ ## reg ## _MASK; \
+ clr |= ~hfg & __ ## reg ## _nMASK; \
+ } while(0)
- return false;
-}
-static inline void __activate_traps_hfgxtr(void)
+static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
+ u64 r_val, w_val;
+
+ if (!cpus_have_final_cap(ARM64_HAS_FGT))
+ return;
+
+ ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2);
+ ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2);
if (cpus_have_final_cap(ARM64_SME)) {
tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
@@ -98,26 +104,72 @@ static inline void __activate_traps_hfgxtr(void)
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
w_set |= HFGxTR_EL2_TCR_EL1_MASK;
- sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
- sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set);
+ compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
+ }
+
+ /* The default is not to trap anything but ACCDATA_EL1 */
+ r_val = __HFGRTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1;
+ r_val |= r_set;
+ r_val &= ~r_clr;
+
+ w_val = __HFGWTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1;
+ w_val |= w_set;
+ w_val &= ~w_clr;
+
+ write_sysreg_s(r_val, SYS_HFGRTR_EL2);
+ write_sysreg_s(w_val, SYS_HFGWTR_EL2);
+
+ if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ return;
+
+ ctxt_sys_reg(hctxt, HFGITR_EL2) = read_sysreg_s(SYS_HFGITR_EL2);
+
+ r_set = r_clr = 0;
+ compute_clr_set(vcpu, HFGITR_EL2, r_clr, r_set);
+ r_val = __HFGITR_EL2_nMASK;
+ r_val |= r_set;
+ r_val &= ~r_clr;
+
+ write_sysreg_s(r_val, SYS_HFGITR_EL2);
+
+ ctxt_sys_reg(hctxt, HDFGRTR_EL2) = read_sysreg_s(SYS_HDFGRTR_EL2);
+ ctxt_sys_reg(hctxt, HDFGWTR_EL2) = read_sysreg_s(SYS_HDFGWTR_EL2);
+
+ r_clr = r_set = w_clr = w_set = 0;
+
+ compute_clr_set(vcpu, HDFGRTR_EL2, r_clr, r_set);
+ compute_clr_set(vcpu, HDFGWTR_EL2, w_clr, w_set);
+
+ r_val = __HDFGRTR_EL2_nMASK;
+ r_val |= r_set;
+ r_val &= ~r_clr;
+
+ w_val = __HDFGWTR_EL2_nMASK;
+ w_val |= w_set;
+ w_val &= ~w_clr;
+
+ write_sysreg_s(r_val, SYS_HDFGRTR_EL2);
+ write_sysreg_s(w_val, SYS_HDFGWTR_EL2);
}
-static inline void __deactivate_traps_hfgxtr(void)
+static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
- u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
+ struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
- if (cpus_have_final_cap(ARM64_SME)) {
- tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
+ if (!cpus_have_final_cap(ARM64_HAS_FGT))
+ return;
- r_set |= tmp;
- w_set |= tmp;
- }
+ write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
- if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
- w_clr |= HFGxTR_EL2_TCR_EL1_MASK;
+ if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ return;
- sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
- sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
}
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
@@ -145,8 +197,21 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
- if (__hfgxtr_traps_required())
- __activate_traps_hfgxtr();
+ if (cpus_have_final_cap(ARM64_HAS_HCX)) {
+ u64 hcrx = HCRX_GUEST_FLAGS;
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ u64 clr = 0, set = 0;
+
+ compute_clr_set(vcpu, HCRX_EL2, clr, set);
+
+ hcrx |= set;
+ hcrx &= ~clr;
+ }
+
+ write_sysreg_s(hcrx, SYS_HCRX_EL2);
+ }
+
+ __activate_traps_hfgxtr(vcpu);
}
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@@ -162,8 +227,10 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
}
- if (__hfgxtr_traps_required())
- __deactivate_traps_hfgxtr();
+ if (cpus_have_final_cap(ARM64_HAS_HCX))
+ write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
+
+ __deactivate_traps_hfgxtr(vcpu);
}
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
@@ -177,9 +244,6 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu)
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
-
- if (cpus_have_final_cap(ARM64_HAS_HCX))
- write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2);
}
static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
@@ -194,9 +258,6 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~HCR_VSE;
vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
}
-
- if (cpus_have_final_cap(ARM64_HAS_HCX))
- write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
}
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
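
Editor's note: the trap setup above merges three inputs for each FGT register: a "trap nothing" baseline built from the generated nMASK, plus set/clear bits derived from the L1 guest hypervisor's shadow register via compute_clr_set(). Below is a minimal standalone model of that merge; the mask constants are illustrative placeholders, not the real generated __HFGRTR_EL2_* values.

/* Standalone model of the clr/set merge used by __activate_traps_hfgxtr().
 * The mask values below are illustrative placeholders, not the real
 * generated __HFGRTR_EL2_* constants. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_RES0   0xff00000000000000ULL /* RES0 bits (placeholder) */
#define EXAMPLE_MASK   0x00000000000000ffULL /* positive trap bits (placeholder) */
#define EXAMPLE_nMASK  0x0000ffff00000000ULL /* negative ("nXXX") trap bits (placeholder) */

/* Mirror of compute_clr_set(): a 1 in a positive bit means "trap", while a
 * 0 in a negative (nXXX) bit means "trap", hence the inverted handling. */
static void compute_clr_set(uint64_t guest_reg, uint64_t *clr, uint64_t *set)
{
	uint64_t hfg = guest_reg & ~EXAMPLE_RES0;

	*set |= hfg & EXAMPLE_MASK;
	*clr |= ~hfg & EXAMPLE_nMASK;
}

int main(void)
{
	/* Pretend the L1 guest hypervisor programmed this value. */
	uint64_t guest_hfgrtr = 0x0000f0000000000fULL;
	uint64_t r_clr = 0, r_set = 0, r_val;

	compute_clr_set(guest_hfgrtr, &r_clr, &r_set);

	/* Default: all nXXX bits set (no trap), then apply guest set/clr. */
	r_val = EXAMPLE_nMASK;
	r_val |= r_set;
	r_val &= ~r_clr;

	printf("effective HFGRTR_EL2 (model): %#llx\n",
	       (unsigned long long)r_val);
	return 0;
}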
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index d5ec972b5c1e..230e4f2527de 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -26,6 +26,7 @@ int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
enum kvm_pgtable_prot prot,
unsigned long *haddr);
+int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr);
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);
#endif /* __KVM_HYP_MM_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index a169c619db60..857d9bc04fd4 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -135,6 +135,16 @@ static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctx
__kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
}
+static void
+handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+ DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
+ DECLARE_REG(unsigned long, pages, host_ctxt, 3);
+
+ __kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);
+}
+
static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
@@ -327,6 +337,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
+ HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
HANDLE_FUNC(__vgic_v3_read_vmcr),
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 318298eb3d6b..65a7a186d7b2 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -44,6 +44,27 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
return err;
}
+static int __pkvm_alloc_private_va_range(unsigned long start, size_t size)
+{
+ unsigned long cur;
+
+ hyp_assert_lock_held(&pkvm_pgd_lock);
+
+ if (!start || start < __io_map_base)
+ return -EINVAL;
+
+ /* The allocated size is always a multiple of PAGE_SIZE */
+ cur = start + PAGE_ALIGN(size);
+
+ /* Are we overflowing on the vmemmap ? */
+ if (cur > __hyp_vmemmap)
+ return -ENOMEM;
+
+ __io_map_base = cur;
+
+ return 0;
+}
+
/**
* pkvm_alloc_private_va_range - Allocates a private VA range.
* @size: The size of the VA range to reserve.
@@ -56,27 +77,16 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
*/
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
- unsigned long base, addr;
- int ret = 0;
+ unsigned long addr;
+ int ret;
hyp_spin_lock(&pkvm_pgd_lock);
-
- /* Align the allocation based on the order of its size */
- addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));
-
- /* The allocated size is always a multiple of PAGE_SIZE */
- base = addr + PAGE_ALIGN(size);
-
- /* Are we overflowing on the vmemmap ? */
- if (!addr || base > __hyp_vmemmap)
- ret = -ENOMEM;
- else {
- __io_map_base = base;
- *haddr = addr;
- }
-
+ addr = __io_map_base;
+ ret = __pkvm_alloc_private_va_range(addr, size);
hyp_spin_unlock(&pkvm_pgd_lock);
+ *haddr = addr;
+
return ret;
}
@@ -340,6 +350,45 @@ int hyp_create_idmap(u32 hyp_va_bits)
return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}
+int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
+{
+ unsigned long addr, prev_base;
+ size_t size;
+ int ret;
+
+ hyp_spin_lock(&pkvm_pgd_lock);
+
+ prev_base = __io_map_base;
+ /*
+	 * Efficient stack verification using the PAGE_SHIFT bit requires
+	 * the allocation to be aligned to its own size (stack + guard page).
+ */
+ size = PAGE_SIZE * 2;
+ addr = ALIGN(__io_map_base, size);
+
+ ret = __pkvm_alloc_private_va_range(addr, size);
+ if (!ret) {
+ /*
+ * Since the stack grows downwards, map the stack to the page
+ * at the higher address and leave the lower guard page
+ * unbacked.
+ *
+ * Any valid stack address now has the PAGE_SHIFT bit as 1
+ * and addresses corresponding to the guard page have the
+ * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+ */
+ ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
+ PAGE_SIZE, phys, PAGE_HYP);
+ if (ret)
+ __io_map_base = prev_base;
+ }
+ hyp_spin_unlock(&pkvm_pgd_lock);
+
+ *haddr = addr + size;
+
+ return ret;
+}
+
static void *admit_host_page(void *arg)
{
struct kvm_hyp_memcache *host_mc = arg;
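
Editor's note: pkvm_create_stack() relies on the stack/guard pair being aligned to 2 * PAGE_SIZE, so that bit PAGE_SHIFT of any hyp stack address tells the overflow handler whether it landed on the mapped stack page or the unbacked guard page. A small userspace sketch of that check follows, assuming 4K pages and made-up addresses; the helper name is hypothetical.

/* Model of the hyp stack guard-page check: with a 2*PAGE_SIZE-aligned
 * allocation, bit PAGE_SHIFT of an address is 1 on the mapped stack page
 * and 0 on the unbacked guard page below it. Values are illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical helper: does @sp point into the guard page? */
static bool hyp_stack_overflowed(uintptr_t sp)
{
	return !(sp & PAGE_SIZE);
}

int main(void)
{
	/* base is aligned to 2 * PAGE_SIZE, as pkvm_create_stack() enforces. */
	uintptr_t base = 0x40000000UL;
	uintptr_t stack_top = base + 2 * PAGE_SIZE;   /* what stack_hyp_va gets */
	uintptr_t in_stack = stack_top - 64;          /* normal stack use */
	uintptr_t in_guard = base + PAGE_SIZE - 64;   /* overflowed into guard */

	printf("sp=%#lx overflow=%d\n", (unsigned long)in_stack,
	       hyp_stack_overflowed(in_stack));
	printf("sp=%#lx overflow=%d\n", (unsigned long)in_guard,
	       hyp_stack_overflowed(in_guard));
	return 0;
}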
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index bb98630dfeaf..0d5e0a89ddce 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -113,7 +113,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
for (i = 0; i < hyp_nr_cpus; i++) {
struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);
- unsigned long hyp_addr;
start = (void *)kern_hyp_va(per_cpu_base[i]);
end = start + PAGE_ALIGN(hyp_percpu_size);
@@ -121,33 +120,9 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
if (ret)
return ret;
- /*
- * Allocate a contiguous HYP private VA range for the stack
- * and guard page. The allocation is also aligned based on
- * the order of its size.
- */
- ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
+ ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
if (ret)
return ret;
-
- /*
- * Since the stack grows downwards, map the stack to the page
- * at the higher address and leave the lower guard page
- * unbacked.
- *
- * Any valid stack address now has the PAGE_SHIFT bit as 1
- * and addresses corresponding to the guard page have the
- * PAGE_SHIFT bit as 0 - this is used for overflow detection.
- */
- hyp_spin_lock(&pkvm_pgd_lock);
- ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
- PAGE_SIZE, params->stack_pa, PAGE_HYP);
- hyp_spin_unlock(&pkvm_pgd_lock);
- if (ret)
- return ret;
-
- /* Update stack_hyp_va to end of the stack's private VA range */
- params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
}
/*
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index e89a23153e85..c353a06ee7e6 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -236,7 +236,7 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
* KVM_ARM_VCPU_INIT, however, this is likely not possible for
* protected VMs.
*/
- vcpu->arch.target = -1;
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
*exit_code |= ARM_EXCEPTION_IL;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index b9991bbd8e3f..1b265713d6be 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -182,6 +182,36 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
__tlb_switch_to_host(&cxt);
}
+void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t start, unsigned long pages)
+{
+ struct tlb_inv_context cxt;
+ unsigned long stride;
+
+ /*
+	 * Since the range of addresses may not be mapped at
+	 * the same level, assume the worst-case stride of PAGE_SIZE.
+ */
+ stride = PAGE_SIZE;
+ start = round_down(start, stride);
+
+ /* Switch to requested VMID */
+ __tlb_switch_to_guest(mmu, &cxt, false);
+
+ __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+
+ dsb(ish);
+ __tlbi(vmalle1is);
+ dsb(ish);
+ isb();
+
+ /* See the comment in __kvm_tlb_flush_vmid_ipa() */
+ if (icache_is_vpipt())
+ icache_inval_all_pou();
+
+ __tlb_switch_to_host(&cxt);
+}
+
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index f7a93ef29250..f155b8c9e98c 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -670,6 +670,26 @@ static bool stage2_has_fwb(struct kvm_pgtable *pgt)
return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
}
+void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t addr, size_t size)
+{
+ unsigned long pages, inval_pages;
+
+ if (!system_supports_tlb_range()) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+ return;
+ }
+
+ pages = size >> PAGE_SHIFT;
+ while (pages > 0) {
+ inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);
+
+ addr += inval_pages << PAGE_SHIFT;
+ pages -= inval_pages;
+ }
+}
+
#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
@@ -786,7 +806,8 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
* evicted pte value (if any).
*/
if (kvm_pte_table(ctx->old, ctx->level))
- kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+ kvm_tlb_flush_vmid_range(mmu, ctx->addr,
+ kvm_granule_size(ctx->level));
else if (kvm_pte_valid(ctx->old))
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
ctx->addr, ctx->level);
@@ -810,16 +831,36 @@ static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t n
smp_store_release(ctx->ptep, new);
}
-static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
- struct kvm_pgtable_mm_ops *mm_ops)
+static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
+{
+ /*
+ * If FEAT_TLBIRANGE is implemented, defer the individual
+ * TLB invalidations until the entire walk is finished, and
+ * then use the range-based TLBI instructions to do the
+	 * invalidations. Only defer the invalidation when the system
+	 * also supports FWB, as the optimization is entirely pointless
+	 * when the unmap walker needs to perform CMOs.
+ */
+ return system_supports_tlb_range() && stage2_has_fwb(pgt);
+}
+
+static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
+ struct kvm_s2_mmu *mmu,
+ struct kvm_pgtable_mm_ops *mm_ops)
{
+ struct kvm_pgtable *pgt = ctx->arg;
+
/*
- * Clear the existing PTE, and perform break-before-make with
- * TLB maintenance if it was valid.
+	 * Clear the existing PTE, and perform break-before-make if it was
+	 * valid. Depending on system support, the TLB maintenance for the
+	 * entry may be deferred until the entire unmap walk has completed.
*/
if (kvm_pte_valid(ctx->old)) {
kvm_clear_pte(ctx->ptep);
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
+
+ if (!stage2_unmap_defer_tlb_flush(pgt))
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
+ ctx->addr, ctx->level);
}
mm_ops->put_page(ctx->ptep);
@@ -1077,7 +1118,7 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
* block entry and rely on the remaining portions being faulted
* back lazily.
*/
- stage2_put_pte(ctx, mmu, mm_ops);
+ stage2_unmap_put_pte(ctx, mmu, mm_ops);
if (need_flush && mm_ops->dcache_clean_inval_poc)
mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
@@ -1091,13 +1132,19 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
+ int ret;
struct kvm_pgtable_walker walker = {
.cb = stage2_unmap_walker,
.arg = pgt,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
};
- return kvm_pgtable_walk(pgt, addr, size, &walker);
+ ret = kvm_pgtable_walk(pgt, addr, size, &walker);
+ if (stage2_unmap_defer_tlb_flush(pgt))
+ /* Perform the deferred TLB invalidations */
+ kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);
+
+ return ret;
}
struct stage2_attr_data {
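
Editor's note: kvm_tlb_flush_vmid_range() above splits a potentially huge invalidation into per-hypercall chunks of at most MAX_TLBI_RANGE_PAGES. Below is a standalone sketch of that chunking loop; the page-count cap and the flush callback are placeholders for the real tlbflush.h constant and kvm_call_hyp().

/* Model of the chunking in kvm_tlb_flush_vmid_range(): split a large
 * range into per-call chunks capped at MAX_TLBI_RANGE_PAGES.
 * The constants below are assumptions for the sake of the example. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT            12
#define PAGE_SIZE             (1UL << PAGE_SHIFT)
#define MAX_TLBI_RANGE_PAGES  (1UL << 21)  /* placeholder for the real limit */

/* Stand-in for kvm_call_hyp(__kvm_tlb_flush_vmid_range, ...). */
static void flush_range(uint64_t addr, unsigned long pages)
{
	printf("flush IPA %#llx, %lu pages\n",
	       (unsigned long long)addr, pages);
}

static void tlb_flush_vmid_range(uint64_t addr, uint64_t size)
{
	unsigned long pages = size >> PAGE_SHIFT;

	while (pages > 0) {
		unsigned long inval = pages < MAX_TLBI_RANGE_PAGES ?
				      pages : MAX_TLBI_RANGE_PAGES;

		flush_range(addr, inval);
		addr += (uint64_t)inval << PAGE_SHIFT;
		pages -= inval;
	}
}

int main(void)
{
	/* 10 GiB starting at 1 GiB: two hypercalls with the assumed cap. */
	tlb_flush_vmid_range(1ULL << 30, 10ULL << 30);
	return 0;
}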
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index e69da550cdc5..46bd43f61d76 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -143,6 +143,34 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
__tlb_switch_to_host(&cxt);
}
+void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t start, unsigned long pages)
+{
+ struct tlb_inv_context cxt;
+ unsigned long stride;
+
+ /*
+	 * Since the range of addresses may not be mapped at
+	 * the same level, assume the worst-case stride of PAGE_SIZE.
+ */
+ stride = PAGE_SIZE;
+ start = round_down(start, stride);
+
+ dsb(ishst);
+
+ /* Switch to requested VMID */
+ __tlb_switch_to_guest(mmu, &cxt);
+
+ __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+
+ dsb(ish);
+ __tlbi(vmalle1is);
+ dsb(ish);
+ isb();
+
+ __tlb_switch_to_host(&cxt);
+}
+
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index d3b4feed460c..11c1d786c506 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -161,15 +161,23 @@ static bool memslot_is_logging(struct kvm_memory_slot *memslot)
}
/**
- * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
+ * kvm_arch_flush_remote_tlbs() - flush all VM TLB entries for v7/8
* @kvm: pointer to kvm structure.
*
* Interface to HYP function to flush all VM TLB entries
*/
-void kvm_flush_remote_tlbs(struct kvm *kvm)
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
- ++kvm->stat.generic.remote_tlb_flush_requests;
kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
+ return 0;
+}
+
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+ gfn_t gfn, u64 nr_pages)
+{
+ kvm_tlb_flush_vmid_range(&kvm->arch.mmu,
+ gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
+ return 0;
}
static bool kvm_is_device_pfn(unsigned long pfn)
@@ -592,6 +600,25 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
return 0;
}
+static int __hyp_alloc_private_va_range(unsigned long base)
+{
+ lockdep_assert_held(&kvm_hyp_pgd_mutex);
+
+ if (!PAGE_ALIGNED(base))
+ return -EINVAL;
+
+ /*
+ * Verify that BIT(VA_BITS - 1) hasn't been flipped by
+ * allocating the new area, as it would indicate we've
+ * overflowed the idmap/IO address range.
+ */
+ if ((base ^ io_map_base) & BIT(VA_BITS - 1))
+ return -ENOMEM;
+
+ io_map_base = base;
+
+ return 0;
+}
/**
* hyp_alloc_private_va_range - Allocates a private VA range.
@@ -612,26 +639,16 @@ int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
/*
* This assumes that we have enough space below the idmap
- * page to allocate our VAs. If not, the check below will
- * kick. A potential alternative would be to detect that
- * overflow and switch to an allocation above the idmap.
+ * page to allocate our VAs. If not, the check in
+	 * __hyp_alloc_private_va_range() will kick in. A potential
+ * alternative would be to detect that overflow and switch
+ * to an allocation above the idmap.
*
* The allocated size is always a multiple of PAGE_SIZE.
*/
- base = io_map_base - PAGE_ALIGN(size);
-
- /* Align the allocation based on the order of its size */
- base = ALIGN_DOWN(base, PAGE_SIZE << get_order(size));
-
- /*
- * Verify that BIT(VA_BITS - 1) hasn't been flipped by
- * allocating the new area, as it would indicate we've
- * overflowed the idmap/IO address range.
- */
- if ((base ^ io_map_base) & BIT(VA_BITS - 1))
- ret = -ENOMEM;
- else
- *haddr = io_map_base = base;
+ size = PAGE_ALIGN(size);
+ base = io_map_base - size;
+ ret = __hyp_alloc_private_va_range(base);
mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -668,6 +685,48 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
return ret;
}
+int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
+{
+ unsigned long base;
+ size_t size;
+ int ret;
+
+ mutex_lock(&kvm_hyp_pgd_mutex);
+ /*
+	 * Efficient stack verification using the PAGE_SHIFT bit requires
+	 * the allocation to be aligned to its own size (stack + guard page).
+ */
+ size = PAGE_SIZE * 2;
+ base = ALIGN_DOWN(io_map_base - size, size);
+
+ ret = __hyp_alloc_private_va_range(base);
+
+ mutex_unlock(&kvm_hyp_pgd_mutex);
+
+ if (ret) {
+ kvm_err("Cannot allocate hyp stack guard page\n");
+ return ret;
+ }
+
+ /*
+ * Since the stack grows downwards, map the stack to the page
+ * at the higher address and leave the lower guard page
+ * unbacked.
+ *
+ * Any valid stack address now has the PAGE_SHIFT bit as 1
+ * and addresses corresponding to the guard page have the
+ * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+ */
+ ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
+ PAGE_HYP);
+ if (ret)
+ kvm_err("Cannot map hyp stack\n");
+
+ *haddr = base + size;
+
+ return ret;
+}
+
/**
* create_hyp_io_mappings - Map IO into both kernel and HYP
* @phys_addr: The physical start address which gets mapped
@@ -1075,7 +1134,7 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
write_lock(&kvm->mmu_lock);
stage2_wp_range(&kvm->arch.mmu, start, end);
write_unlock(&kvm->mmu_lock);
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_memslot(kvm, memslot);
}
/**
@@ -1541,7 +1600,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
out_unlock:
read_unlock(&kvm->mmu_lock);
- kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
return ret != -EAGAIN ? ret : 0;
}
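
Editor's note: __hyp_alloc_private_va_range() above detects that the downward-growing private VA allocator has run out of room by checking whether bit (VA_BITS - 1) differs between the old io_map_base and the new base. A minimal userspace model of that check, assuming VA_BITS = 48 and example addresses.

/* Model of the overflow check in __hyp_alloc_private_va_range(): if the
 * new base and the current io_map_base disagree on bit (VA_BITS - 1),
 * the downward allocation has crossed out of the private VA range.
 * VA_BITS and the addresses are example values, not real kernel state. */
#include <stdint.h>
#include <stdio.h>

#define VA_BITS 48
#define BIT(n)  (1ULL << (n))

static int hyp_alloc_private_va_range(uint64_t *io_map_base, uint64_t size)
{
	uint64_t base = *io_map_base - size;

	if ((base ^ *io_map_base) & BIT(VA_BITS - 1))
		return -1;	/* would overflow the idmap/IO range */

	*io_map_base = base;
	return 0;
}

int main(void)
{
	uint64_t io_map_base = BIT(VA_BITS - 1) + 0x10000;

	/* Small allocation stays on the same side of BIT(VA_BITS - 1): OK. */
	printf("alloc 64K: %d\n", hyp_alloc_private_va_range(&io_map_base, 0x10000));

	/* The next allocation would flip the bit: rejected. */
	printf("alloc 4K:  %d\n", hyp_alloc_private_va_range(&io_map_base, 0x1000));
	return 0;
}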
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 315354d27978..042695a210ce 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -71,8 +71,9 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
break;
case SYS_ID_AA64MMFR0_EL1:
- /* Hide ECV, FGT, ExS, Secure Memory */
- val &= ~(GENMASK_ULL(63, 43) |
+ /* Hide ECV, ExS, Secure Memory */
+ val &= ~(NV_FTR(MMFR0, ECV) |
+ NV_FTR(MMFR0, EXS) |
NV_FTR(MMFR0, TGRAN4_2) |
NV_FTR(MMFR0, TGRAN16_2) |
NV_FTR(MMFR0, TGRAN64_2) |
@@ -116,7 +117,8 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
break;
case SYS_ID_AA64MMFR1_EL1:
- val &= (NV_FTR(MMFR1, PAN) |
+ val &= (NV_FTR(MMFR1, HCX) |
+ NV_FTR(MMFR1, PAN) |
NV_FTR(MMFR1, LO) |
NV_FTR(MMFR1, HPDS) |
NV_FTR(MMFR1, VH) |
@@ -124,8 +126,7 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
break;
case SYS_ID_AA64MMFR2_EL1:
- val &= ~(NV_FTR(MMFR2, EVT) |
- NV_FTR(MMFR2, BBM) |
+ val &= ~(NV_FTR(MMFR2, BBM) |
NV_FTR(MMFR2, TTL) |
GENMASK_ULL(47, 44) |
NV_FTR(MMFR2, ST) |
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 560650972478..6b066e04dc5d 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -14,6 +14,7 @@
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
+#include <asm/arm_pmuv3.h>
#define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0)
@@ -35,12 +36,8 @@ static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
return &vcpu->arch.pmu.pmc[cnt_idx];
}
-static u32 kvm_pmu_event_mask(struct kvm *kvm)
+static u32 __kvm_pmu_event_mask(unsigned int pmuver)
{
- unsigned int pmuver;
-
- pmuver = kvm->arch.arm_pmu->pmuver;
-
switch (pmuver) {
case ID_AA64DFR0_EL1_PMUVer_IMP:
return GENMASK(9, 0);
@@ -55,6 +52,14 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
}
}
+static u32 kvm_pmu_event_mask(struct kvm *kvm)
+{
+ u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
+ u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
+
+ return __kvm_pmu_event_mask(pmuver);
+}
+
/**
* kvm_pmc_is_64bit - determine if counter is 64bit
* @pmc: counter context
@@ -672,8 +677,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
{
struct arm_pmu_entry *entry;
- if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
- pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+ /*
+ * Check the sanitised PMU version for the system, as KVM does not
+ * support implementations where PMUv3 exists on a subset of CPUs.
+ */
+ if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
return;
mutex_lock(&arm_pmus_lock);
@@ -750,11 +758,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
} else {
val = read_sysreg(pmceid1_el0);
/*
- * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
+ * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
* as RAZ
*/
- if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
- val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
+ val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
+ BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
+ BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
base = 32;
}
@@ -950,11 +959,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return 0;
}
case KVM_ARM_VCPU_PMU_V3_FILTER: {
+ u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
struct kvm_pmu_event_filter __user *uaddr;
struct kvm_pmu_event_filter filter;
int nr_events;
- nr_events = kvm_pmu_event_mask(kvm) + 1;
+ /*
+		 * Allow userspace to specify an event filter covering the
+		 * entire event range supported by the hardware's PMUVer,
+		 * rather than the guest's, for KVM backward compatibility.
+ */
+ nr_events = __kvm_pmu_event_mask(pmuver) + 1;
uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
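
Editor's note: __kvm_pmu_event_mask() above maps a PMUVer value to the width of the event-number space: 10 bits for the original PMUv3, 16 bits from PMUv3 for ARMv8.1 onwards. A hedged standalone sketch of that mapping follows, with the ID register field encodings written as assumed stand-ins rather than the real ID_AA64DFR0_EL1_PMUVer_* constants.

/* Model of __kvm_pmu_event_mask(): the PMU event-number space is 10 bits
 * for the original PMUv3 and 16 bits from PMUv3 for ARMv8.1 onwards.
 * The pmuver encodings below are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)  (((1U << ((h) - (l) + 1)) - 1) << (l))

enum pmuver {			/* assumed stand-ins for the ID field values */
	PMUVER_IMP  = 1,	/* PMUv3 */
	PMUVER_V3P1 = 2,	/* PMUv3 for ARMv8.1 */
	PMUVER_V3P5 = 6,	/* PMUv3 for ARMv8.5 */
};

static uint32_t pmu_event_mask(enum pmuver pmuver)
{
	switch (pmuver) {
	case PMUVER_IMP:
		return GENMASK(9, 0);	/* 1024 event numbers */
	default:
		return GENMASK(15, 0);	/* 65536 event numbers from v3.1 on */
	}
}

int main(void)
{
	printf("PMUv3:   mask %#x -> %u filterable events\n",
	       pmu_event_mask(PMUVER_IMP), pmu_event_mask(PMUVER_IMP) + 1);
	printf("PMUv3.5: mask %#x -> %u filterable events\n",
	       pmu_event_mask(PMUVER_V3P5), pmu_event_mask(PMUVER_V3P5) + 1);
	return 0;
}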
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 121f1a14c829..0eea225fd09a 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -236,3 +236,21 @@ bool kvm_set_pmuserenr(u64 val)
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
return true;
}
+
+/*
+ * If we interrupted the guest to update the host PMU context, make
+ * sure we re-apply the guest EL0 state.
+ */
+void kvm_vcpu_pmu_resync_el0(void)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!has_vhe() || !in_interrupt())
+ return;
+
+ vcpu = kvm_get_running_vcpu();
+ if (!vcpu)
+ return;
+
+ kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
+}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index bc8556b6f459..7a65a35ee4ac 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -248,21 +248,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
}
- switch (vcpu->arch.target) {
- default:
- if (vcpu_el1_is_32bit(vcpu)) {
- pstate = VCPU_RESET_PSTATE_SVC;
- } else if (vcpu_has_nv(vcpu)) {
- pstate = VCPU_RESET_PSTATE_EL2;
- } else {
- pstate = VCPU_RESET_PSTATE_EL1;
- }
-
- if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
- ret = -EINVAL;
- goto out;
- }
- break;
+ if (vcpu_el1_is_32bit(vcpu))
+ pstate = VCPU_RESET_PSTATE_SVC;
+ else if (vcpu_has_nv(vcpu))
+ pstate = VCPU_RESET_PSTATE_EL2;
+ else
+ pstate = VCPU_RESET_PSTATE_EL1;
+
+ if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
+ ret = -EINVAL;
+ goto out;
}
/* Reset core registers */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2ca2973abe66..e92ec810d449 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2151,6 +2151,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
+ { SYS_DESC(SYS_ACCDATA_EL1), undef_access },
+
{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
@@ -2365,8 +2367,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
+ EL2_REG(HFGRTR_EL2, access_rw, reset_val, 0),
+ EL2_REG(HFGWTR_EL2, access_rw, reset_val, 0),
+ EL2_REG(HFGITR_EL2, access_rw, reset_val, 0),
EL2_REG(HACR_EL2, access_rw, reset_val, 0),
+ EL2_REG(HCRX_EL2, access_rw, reset_val, 0),
+
EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
@@ -2374,6 +2381,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(VTCR_EL2, access_rw, reset_val, 0),
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
+ EL2_REG(HDFGRTR_EL2, access_rw, reset_val, 0),
+ EL2_REG(HDFGWTR_EL2, access_rw, reset_val, 0),
EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
EL2_REG(ELR_EL2, access_rw, reset_val, 0),
{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
@@ -3170,6 +3179,9 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
trace_kvm_handle_sys_reg(esr);
+ if (__check_nv_sr_forward(vcpu))
+ return 1;
+
params = esr_sys64_to_params(esr);
params.regval = vcpu_get_reg(vcpu, Rt);
@@ -3587,5 +3599,8 @@ int __init kvm_sys_reg_table_init(void)
if (!first_idreg)
return -EINVAL;
+ if (kvm_get_mode() == KVM_MODE_NV)
+ return populate_nv_trap_config();
+
return 0;
}
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 6ce5c025218d..8ad53104934d 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -364,6 +364,32 @@ TRACE_EVENT(kvm_inject_nested_exception,
__entry->hcr_el2)
);
+TRACE_EVENT(kvm_forward_sysreg_trap,
+ TP_PROTO(struct kvm_vcpu *vcpu, u32 sysreg, bool is_read),
+ TP_ARGS(vcpu, sysreg, is_read),
+
+ TP_STRUCT__entry(
+ __field(u64, pc)
+ __field(u32, sysreg)
+ __field(bool, is_read)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = *vcpu_pc(vcpu);
+ __entry->sysreg = sysreg;
+ __entry->is_read = is_read;
+ ),
+
+ TP_printk("%llx %c (%d,%d,%d,%d,%d)",
+ __entry->pc,
+ __entry->is_read ? 'R' : 'W',
+ sys_reg_Op0(__entry->sysreg),
+ sys_reg_Op1(__entry->sysreg),
+ sys_reg_CRn(__entry->sysreg),
+ sys_reg_CRm(__entry->sysreg),
+ sys_reg_Op2(__entry->sysreg))
+);
+
#endif /* _TRACE_ARM_ARM64_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index f9923beedd27..0ab09b0d4440 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -199,7 +199,6 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
-void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
int offset, u32 *val);
@@ -233,7 +232,6 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
-void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_enable(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index c80ed4f3cbce..c3f06fdef609 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -26,6 +26,7 @@ HAS_ECV
HAS_ECV_CNTPOFF
HAS_EPAN
HAS_EVT
+HAS_FGT
HAS_GENERIC_AUTH
HAS_GENERIC_AUTH_ARCH_QARMA3
HAS_GENERIC_AUTH_ARCH_QARMA5
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 65866bf819c3..2517ef7c21cf 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -2156,6 +2156,135 @@ Field 1 ICIALLU
Field 0 ICIALLUIS
EndSysreg
+Sysreg HDFGRTR_EL2 3 4 3 1 4
+Field 63 PMBIDR_EL1
+Field 62 nPMSNEVFR_EL1
+Field 61 nBRBDATA
+Field 60 nBRBCTL
+Field 59 nBRBIDR
+Field 58 PMCEIDn_EL0
+Field 57 PMUSERENR_EL0
+Field 56 TRBTRG_EL1
+Field 55 TRBSR_EL1
+Field 54 TRBPTR_EL1
+Field 53 TRBMAR_EL1
+Field 52 TRBLIMITR_EL1
+Field 51 TRBIDR_EL1
+Field 50 TRBBASER_EL1
+Res0 49
+Field 48 TRCVICTLR
+Field 47 TRCSTATR
+Field 46 TRCSSCSRn
+Field 45 TRCSEQSTR
+Field 44 TRCPRGCTLR
+Field 43 TRCOSLSR
+Res0 42
+Field 41 TRCIMSPECn
+Field 40 TRCID
+Res0 39:38
+Field 37 TRCCNTVRn
+Field 36 TRCCLAIM
+Field 35 TRCAUXCTLR
+Field 34 TRCAUTHSTATUS
+Field 33 TRC
+Field 32 PMSLATFR_EL1
+Field 31 PMSIRR_EL1
+Field 30 PMSIDR_EL1
+Field 29 PMSICR_EL1
+Field 28 PMSFCR_EL1
+Field 27 PMSEVFR_EL1
+Field 26 PMSCR_EL1
+Field 25 PMBSR_EL1
+Field 24 PMBPTR_EL1
+Field 23 PMBLIMITR_EL1
+Field 22 PMMIR_EL1
+Res0 21:20
+Field 19 PMSELR_EL0
+Field 18 PMOVS
+Field 17 PMINTEN
+Field 16 PMCNTEN
+Field 15 PMCCNTR_EL0
+Field 14 PMCCFILTR_EL0
+Field 13 PMEVTYPERn_EL0
+Field 12 PMEVCNTRn_EL0
+Field 11 OSDLR_EL1
+Field 10 OSECCR_EL1
+Field 9 OSLSR_EL1
+Res0 8
+Field 7 DBGPRCR_EL1
+Field 6 DBGAUTHSTATUS_EL1
+Field 5 DBGCLAIM
+Field 4 MDSCR_EL1
+Field 3 DBGWVRn_EL1
+Field 2 DBGWCRn_EL1
+Field 1 DBGBVRn_EL1
+Field 0 DBGBCRn_EL1
+EndSysreg
+
+Sysreg HDFGWTR_EL2 3 4 3 1 5
+Res0 63
+Field 62 nPMSNEVFR_EL1
+Field 61 nBRBDATA
+Field 60 nBRBCTL
+Res0 59:58
+Field 57 PMUSERENR_EL0
+Field 56 TRBTRG_EL1
+Field 55 TRBSR_EL1
+Field 54 TRBPTR_EL1
+Field 53 TRBMAR_EL1
+Field 52 TRBLIMITR_EL1
+Res0 51
+Field 50 TRBBASER_EL1
+Field 49 TRFCR_EL1
+Field 48 TRCVICTLR
+Res0 47
+Field 46 TRCSSCSRn
+Field 45 TRCSEQSTR
+Field 44 TRCPRGCTLR
+Res0 43
+Field 42 TRCOSLAR
+Field 41 TRCIMSPECn
+Res0 40:38
+Field 37 TRCCNTVRn
+Field 36 TRCCLAIM
+Field 35 TRCAUXCTLR
+Res0 34
+Field 33 TRC
+Field 32 PMSLATFR_EL1
+Field 31 PMSIRR_EL1
+Res0 30
+Field 29 PMSICR_EL1
+Field 28 PMSFCR_EL1
+Field 27 PMSEVFR_EL1
+Field 26 PMSCR_EL1
+Field 25 PMBSR_EL1
+Field 24 PMBPTR_EL1
+Field 23 PMBLIMITR_EL1
+Res0 22
+Field 21 PMCR_EL0
+Field 20 PMSWINC_EL0
+Field 19 PMSELR_EL0
+Field 18 PMOVS
+Field 17 PMINTEN
+Field 16 PMCNTEN
+Field 15 PMCCNTR_EL0
+Field 14 PMCCFILTR_EL0
+Field 13 PMEVTYPERn_EL0
+Field 12 PMEVCNTRn_EL0
+Field 11 OSDLR_EL1
+Field 10 OSECCR_EL1
+Res0 9
+Field 8 OSLAR_EL1
+Field 7 DBGPRCR_EL1
+Res0 6
+Field 5 DBGCLAIM
+Field 4 MDSCR_EL1
+Field 3 DBGWVRn_EL1
+Field 2 DBGWCRn_EL1
+Field 1 DBGBVRn_EL1
+Field 0 DBGBCRn_EL1
+EndSysreg
+
Sysreg ZCR_EL2 3 4 1 2 0
Fields ZCR_ELx
EndSysreg