commit     fc61f554e6947edd21cd84fb814f8418349a3569
Author:    Oliver Upton <oliver.upton@linux.dev>  2022-12-02 21:51:55 +0300
Committer: Oliver Upton <oliver.upton@linux.dev>  2023-01-13 00:09:20 +0300
Tree:      c42d6a473a8ba916d293a007722a601dd7dd6f77
Parent:    7d29a2407df612b0903cee94fc3469d7335b442c
KVM: arm64: Handle access faults behind the read lock
As the underlying software walkers are able to traverse and update stage-2
in parallel, there is no need to serialize access faults. Only take the read
lock when handling an access fault.

Link: https://lore.kernel.org/r/20221202185156.696189-6-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Diffstat (limited to 'arch/arm64/kvm/mmu.c')
 arch/arm64/kvm/mmu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index edfbe85f8f8a..d24ce2ddb38c 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1406,10 +1406,10 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 
 	trace_kvm_access_fault(fault_ipa);
 
-	write_lock(&vcpu->kvm->mmu_lock);
+	read_lock(&vcpu->kvm->mmu_lock);
 	mmu = vcpu->arch.hw_mmu;
 	pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
-	write_unlock(&vcpu->kvm->mmu_lock);
+	read_unlock(&vcpu->kvm->mmu_lock);
 
 	if (kvm_pte_valid(pte))
 		kvm_set_pfn_accessed(kvm_pte_to_pfn(pte));
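
For readers unfamiliar with the parallel stage-2 walker work this change builds on, the sketch below is a rough, hypothetical illustration (not the kernel's actual implementation) of why making the PTE young is safe under the read side of mmu_lock: the walker sets the access flag with an atomic compare-and-exchange on the PTE word, so concurrent faults serialize on the PTE itself rather than on the lock. The helper name pte_mkyoung_shared and the PTE_AF constant are illustrative assumptions, not kernel symbols.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t kvm_pte_t;

/* Access flag bit of an ARM stage-2 descriptor (illustrative definition). */
#define PTE_AF	(1ULL << 10)

/*
 * Hypothetical sketch: mark a PTE young without an exclusive lock.
 * Concurrent updaters race on the PTE word via cmpxchg, which is the
 * property that lets an access-fault handler run under a shared lock.
 */
static bool pte_mkyoung_shared(_Atomic kvm_pte_t *ptep)
{
	kvm_pte_t old = atomic_load_explicit(ptep, memory_order_relaxed);

	for (;;) {
		kvm_pte_t new = old | PTE_AF;

		/* Another walker already set the access flag: nothing to do. */
		if (old & PTE_AF)
			return true;

		/*
		 * Try to install the young PTE; on failure 'old' is reloaded
		 * with the current value and the update is retried.
		 */
		if (atomic_compare_exchange_weak_explicit(ptep, &old, new,
							  memory_order_acq_rel,
							  memory_order_relaxed))
			return true;
	}
}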