author    Sean Christopherson <sean.j.christopherson@intel.com>  2020-02-19 00:07:26 +0300
committer Paolo Bonzini <pbonzini@redhat.com>  2020-03-16 19:57:22 +0300
commit    5c0b4f3d5ccc2ced94b01c3256db1cf79dc95b81 (patch)
tree      a9ab598b5c2c35163cfa8bd3ed900f52612d6726 /virt
parent    21198846de1c348304280436caf3a5dc936d5c65 (diff)
download  linux-5c0b4f3d5ccc2ced94b01c3256db1cf79dc95b81.tar.xz
KVM: Move memslot deletion to helper function
Move memslot deletion into its own routine so that the success path for other memslot updates does not need to use kvm_free_memslot(), i.e. can explicitly destroy the dirty bitmap when necessary. This paves the way for dropping @dont from kvm_free_memslot(), i.e. all callers now pass NULL for @dont.

Add a comment above the code that makes a copy of the existing memslot prior to deletion; it is not at all obvious that the pointer will become stale during sorting and/or installation of new memslots.

Note, kvm_arch_commit_memory_region() allows an architecture to free resources when moving a memslot or changing its flags, e.g. x86 frees its arch specific memslot metadata during commit_memory_region().

Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Tested-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
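The hazard the commit message calls out is that a pointer into the memslot array is invalidated once update_memslots() re-sorts the array, which is why __kvm_set_memory_region() now copies the old slot by value before any update. The following is a minimal, self-contained userspace sketch of that pattern, not kernel code: the struct, comparator, and array here are hypothetical stand-ins used only to show why the copy is needed.

/*
 * Illustration only: a pointer into a sorted array goes stale after the
 * array is rearranged, while a by-value copy taken beforehand stays valid.
 */
#include <stdio.h>
#include <stdlib.h>

struct slot {
	unsigned long id;
	unsigned long base_gfn;
	unsigned long npages;
};

/* Sort by base_gfn, descending (the order the real slot array keeps). */
static int cmp_by_gfn(const void *a, const void *b)
{
	const struct slot *x = a, *y = b;

	return (y->base_gfn > x->base_gfn) - (y->base_gfn < x->base_gfn);
}

int main(void)
{
	struct slot slots[3] = {
		{ .id = 0, .base_gfn = 0x100, .npages = 16 },
		{ .id = 1, .base_gfn = 0x000, .npages = 16 },
		{ .id = 2, .base_gfn = 0x200, .npages = 16 },
	};
	struct slot *stale = &slots[0];	/* refers to slot id 0 before sorting */
	struct slot copy = *stale;	/* full copy taken before the re-sort */

	qsort(slots, 3, sizeof(slots[0]), cmp_by_gfn);

	/* After sorting, 'stale' may describe a different slot entirely. */
	printf("via stale pointer: id=%lu, via copy: id=%lu\n",
	       stale->id, copy.id);
	return 0;
}

Running this prints different ids for the stale pointer and the copy, which is the same reason the patch copies *slot into a local before kvm_set_memslot() rearranges and reinstalls the memslot array.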
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c  75
1 file changed, 46 insertions(+), 29 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 10e9456da6eb..d15ed920f627 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1043,6 +1043,27 @@ out_slots:
return r;
}
+static int kvm_delete_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot *old, int as_id)
+{
+ struct kvm_memory_slot new;
+ int r;
+
+ if (!old->npages)
+ return -EINVAL;
+
+ memset(&new, 0, sizeof(new));
+ new.id = old->id;
+
+ r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
+ if (r)
+ return r;
+
+ kvm_free_memslot(kvm, old, NULL);
+ return 0;
+}
+
/*
* Allocate some memory and give it an address in the guest physical address
* space.
@@ -1092,7 +1113,17 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (npages > KVM_MEM_MAX_NR_PAGES)
return -EINVAL;
- new = old = *slot;
+ /*
+ * Make a full copy of the old memslot, the pointer will become stale
+ * when the memslots are re-sorted by update_memslots(), and the old
+ * memslot needs to be referenced after calling update_memslots(), e.g.
+ * to free its resources and for arch specific behavior.
+ */
+ old = *slot;
+ if (!mem->memory_size)
+ return kvm_delete_memslot(kvm, mem, &old, as_id);
+
+ new = old;
new.id = id;
new.base_gfn = base_gfn;
@@ -1100,29 +1131,20 @@ int __kvm_set_memory_region(struct kvm *kvm,
new.flags = mem->flags;
new.userspace_addr = mem->userspace_addr;
- if (npages) {
- if (!old.npages)
- change = KVM_MR_CREATE;
- else { /* Modify an existing slot. */
- if ((new.userspace_addr != old.userspace_addr) ||
- (npages != old.npages) ||
- ((new.flags ^ old.flags) & KVM_MEM_READONLY))
- return -EINVAL;
-
- if (base_gfn != old.base_gfn)
- change = KVM_MR_MOVE;
- else if (new.flags != old.flags)
- change = KVM_MR_FLAGS_ONLY;
- else /* Nothing to change. */
- return 0;
- }
- } else {
- if (!old.npages)
+ if (!old.npages) {
+ change = KVM_MR_CREATE;
+ } else { /* Modify an existing slot. */
+ if ((new.userspace_addr != old.userspace_addr) ||
+ (npages != old.npages) ||
+ ((new.flags ^ old.flags) & KVM_MEM_READONLY))
return -EINVAL;
- change = KVM_MR_DELETE;
- new.base_gfn = 0;
- new.flags = 0;
+ if (base_gfn != old.base_gfn)
+ change = KVM_MR_MOVE;
+ else if (new.flags != old.flags)
+ change = KVM_MR_FLAGS_ONLY;
+ else /* Nothing to change. */
+ return 0;
}
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1145,17 +1167,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
return r;
}
- /* actual memory is freed via old in kvm_free_memslot below */
- if (change == KVM_MR_DELETE) {
- new.dirty_bitmap = NULL;
- memset(&new.arch, 0, sizeof(new.arch));
- }
-
r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
if (r)
goto out_bitmap;
- kvm_free_memslot(kvm, &old, &new);
+ if (old.dirty_bitmap && !new.dirty_bitmap)
+ kvm_destroy_dirty_bitmap(&old);
return 0;
out_bitmap: