author     Fuad Tabba <tabba@google.com>          2024-04-23 18:05:23 +0300
committer  Marc Zyngier <maz@kernel.org>          2024-05-01 18:48:14 +0300
commit     9c30fc615daa3ef177a5fd4a9b2451697c515ce9 (patch)
tree       1308ed285f6095f279e89e5f63fd5802d1e886ae /arch/arm64/kvm
parent     cc81b6dfc3bc82c3a2600eefbd3823bdb2190197 (diff)
download   linux-9c30fc615daa3ef177a5fd4a9b2451697c515ce9.tar.xz
KVM: arm64: Move setting the page as dirty out of the critical section
Move the unlock earlier in user_mem_abort() to shorten the critical section. This also helps future refactoring and reuse of similar code.

This moves marking the page as dirty out of the critical section, since that code does not interact with the stage-2 page tables, which the read lock in the critical section protects.

Signed-off-by: Fuad Tabba <tabba@google.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240423150538.2103045-16-tabba@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
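For illustration only, below is a minimal, self-contained user-space C sketch of the control flow this patch arrives at. It is an analogy, not the kernel code: a pthread read-write lock stands in for kvm->mmu_lock, and the helpers handle_fault(), invalidate_retry() and the page_dirty flag are hypothetical stand-ins for the real KVM code shown in the diff below.

/* Hypothetical user-space analogy of the locking pattern in this patch:
 * keep only the work that touches the lock-protected structure inside
 * the read-side critical section, and do the "mark dirty" step after
 * dropping the lock, since it does not need the lock.
 * Build with: cc -pthread sketch.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER; /* stands in for kvm->mmu_lock */
static unsigned long invalidate_seq;                             /* stands in for mmu_seq tracking */
static bool page_dirty;                                          /* stands in for dirty bitmap state */

/* Hypothetical helper: did an invalidation race with us? */
static bool invalidate_retry(unsigned long seen_seq)
{
	return invalidate_seq != seen_seq;
}

/* Hypothetical fault handler following the patched control flow. */
static int handle_fault(unsigned long seen_seq, bool writable)
{
	int ret = 0;

	pthread_rwlock_rdlock(&table_lock);

	if (invalidate_retry(seen_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* ... update the lock-protected mapping here ... */

out_unlock:
	/* Unlock first: marking the page dirty does not touch the
	 * lock-protected structure, so it can happen outside. */
	pthread_rwlock_unlock(&table_lock);

	if (writable && !ret)
		page_dirty = true;	/* analogous to mark_page_dirty_in_slot() */

	return ret != -EAGAIN ? ret : 0;
}

int main(void)
{
	int ret = handle_fault(0, true);

	printf("fault returned %d, dirty=%d\n", ret, page_dirty);
	return 0;
}

The point of the change is visible in the sketch: the read lock is dropped as soon as the protected work is done, and the dirty-marking step, which needs no lock, happens afterwards.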
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/mmu.c  8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 18680771cdb0..3afc42d8833e 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1522,8 +1522,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	read_lock(&kvm->mmu_lock);
	pgt = vcpu->arch.hw_mmu->pgt;
-	if (mmu_invalidate_retry(kvm, mmu_seq))
+	if (mmu_invalidate_retry(kvm, mmu_seq)) {
+		ret = -EAGAIN;
		goto out_unlock;
+	}
	/*
	 * If we are not forced to use page mapping, check if we are
@@ -1581,6 +1583,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
					     memcache,
					     KVM_PGTABLE_WALK_HANDLE_FAULT |
					     KVM_PGTABLE_WALK_SHARED);
+out_unlock:
+	read_unlock(&kvm->mmu_lock);
	/* Mark the page dirty only if the fault is handled successfully */
	if (writable && !ret) {
@@ -1588,8 +1592,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
		mark_page_dirty_in_slot(kvm, memslot, gfn);
	}
-out_unlock:
-	read_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return ret != -EAGAIN ? ret : 0;
}