author    Marc Zyngier <maz@kernel.org>  2023-04-21 11:30:46 +0300
committer Marc Zyngier <maz@kernel.org>  2023-04-21 11:30:46 +0300
commit    ef5f97e9de9fc0d5bb6136de3d01d78c072a452f (patch)
tree      4e5c64a8c0724b7584928a45c34241dcab0d2c0f /arch/arm64/include/asm/kvm_host.h
parent    197b6b60ae7bc51dd0814953c562833143b292aa (diff)
parent    a189884bdc9238aeba941c50f02e25eb584fafed (diff)
Merge branch kvm-arm64/lock-inversion into kvmarm-master/next
* kvm-arm64/lock-inversion:
  : .
  : vm/vcpu lock inversion fixes, courtesy of Oliver Upton, plus a few
  : extra fixes from both Oliver and Reiji Watanabe.
  :
  : From the initial cover letter:
  :
  : As it so happens, lock ordering in KVM/arm64 is completely backwards.
  : There's a significant amount of VM-wide state that needs to be accessed
  : from the context of a vCPU. Until now, this was accomplished by
  : acquiring the kvm->lock, but that cannot be nested within vcpu->mutex.
  :
  : This series fixes the issue with some fine-grained locking for MP state
  : and a new, dedicated mutex that can nest with both kvm->lock and
  : vcpu->mutex.
  : .
  KVM: arm64: Have kvm_psci_vcpu_on() use WRITE_ONCE() to update mp_state
  KVM: arm64: Acquire mp_state_lock in kvm_arch_vcpu_ioctl_vcpu_init()
  KVM: arm64: vgic: Don't acquire its_lock before config_lock
  KVM: arm64: Use config_lock to protect vgic state
  KVM: arm64: Use config_lock to protect data ordered against KVM_RUN
  KVM: arm64: Avoid lock inversion when setting the VM register width
  KVM: arm64: Avoid vcpu->mutex v. kvm->lock inversion in CPU_ON

Signed-off-by: Marc Zyngier <maz@kernel.org>
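To make the nesting concrete, here is a minimal, illustrative C sketch of the ordering the cover letter describes: a helper running in vCPU context (with vcpu->mutex already held by the ioctl path) takes the new VM-scoped kvm->arch.config_lock rather than kvm->lock. The helper name is hypothetical, and which VM-wide field config_lock actually guards is an assumption; only the lock-nesting pattern is the point.

/*
 * Illustrative sketch only, not code from this series. It shows the
 * intended nesting: vcpu->mutex (held by the vCPU ioctl caller) ->
 * kvm->arch.config_lock, avoiding the old vcpu->mutex -> kvm->lock
 * inversion. The helper and the guarded field are assumptions.
 */
#include <linux/kvm_host.h>

static int example_update_vm_config(struct kvm_vcpu *vcpu, u32 new_version)
{
	struct kvm *kvm = vcpu->kvm;

	/* vcpu->mutex is already held here; config_lock may nest inside it. */
	mutex_lock(&kvm->arch.config_lock);
	kvm->arch.psci_version = new_version;	/* some VM-scoped state */
	mutex_unlock(&kvm->arch.config_lock);

	return 0;
}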
Diffstat (limited to 'arch/arm64/include/asm/kvm_host.h')
-rw-r--r--  arch/arm64/include/asm/kvm_host.h  |  4 ++++
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index bcd774d74f34..cd1ef8716719 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -199,6 +199,9 @@ struct kvm_arch {
 	/* Mandated version of PSCI */
 	u32 psci_version;
 
+	/* Protects VM-scoped configuration data */
+	struct mutex config_lock;
+
 	/*
 	 * If we encounter a data abort without valid instruction syndrome
 	 * information, report this to user space. User space can (and
@@ -522,6 +525,7 @@ struct kvm_vcpu_arch {
 
 	/* vcpu power state */
 	struct kvm_mp_state mp_state;
+	spinlock_t mp_state_lock;
 
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
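For completeness, a minimal sketch of how the new per-vCPU spinlock might be used, mirroring the pattern named in the commit titles above (mp_state updated with WRITE_ONCE() under mp_state_lock). The helper itself is hypothetical, not lifted from the series.

/*
 * Illustrative sketch only: update the vCPU power state under the new
 * mp_state_lock, publishing it with WRITE_ONCE() so lockless readers
 * can pair with READ_ONCE(). The helper name is made up.
 */
#include <linux/kvm_host.h>

static void example_set_mp_state(struct kvm_vcpu *vcpu, u32 state)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, state);
	spin_unlock(&vcpu->arch.mp_state_lock);
}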