summaryrefslogtreecommitdiff
path: root/virt/kvm/dirty_ring.c
diff options
context:
space:
mode:
authorBen Gardon <bgardon@google.com>2021-02-02 21:57:24 +0300
committerPaolo Bonzini <pbonzini@redhat.com>2021-02-04 13:27:43 +0300
commit531810caa9f4bc99ffbb90e09256792c56a6b07a (patch)
tree2f8d7a33fea95476a3181b4f1aca2e1f973a4295 /virt/kvm/dirty_ring.c
parentf3d4b4b1dc1c5fb9ea17cac14133463bfe72f170 (diff)
downloadlinux-531810caa9f4bc99ffbb90e09256792c56a6b07a.tar.xz
KVM: x86/mmu: Use an rwlock for the x86 MMU
Add a read/write lock to be used in place of the MMU spinlock on x86. The rwlock will enable the TDP MMU to handle page faults and other operations in parallel in future commits. Reviewed-by: Peter Feiner <pfeiner@google.com> Signed-off-by: Ben Gardon <bgardon@google.com> Message-Id: <20210202185734.1680553-19-bgardon@google.com> [Introduce virt/kvm/mmu_lock.h - Paolo] Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt/kvm/dirty_ring.c')
-rw-r--r--virt/kvm/dirty_ring.c5
1 file changed, 3 insertions, 2 deletions
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index 790f17325f8d..7aafefc50aa7 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -9,6 +9,7 @@
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
+#include "mmu_lock.h"
int __weak kvm_cpu_dirty_log_size(void)
{
@@ -60,9 +61,9 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
if (!memslot || (offset + __fls(mask)) >= memslot->npages)
return;
- spin_lock(&kvm->mmu_lock);
+ KVM_MMU_LOCK(kvm);
kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
- spin_unlock(&kvm->mmu_lock);
+ KVM_MMU_UNLOCK(kvm);
}
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)