Diffstat (limited to 'virt')
 virt/kvm/eventfd.c  |  15
 virt/kvm/kvm_main.c | 301
 2 files changed, 161 insertions(+), 155 deletions(-)
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index e996989cd580..2ad013b8bde9 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -281,6 +281,13 @@ int __attribute__((weak)) kvm_arch_update_irqfd_routing(
{
return 0;
}
+
+bool __attribute__((weak)) kvm_arch_irqfd_route_changed(
+ struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new)
+{
+ return true;
+}
#endif
static int
@@ -615,10 +622,16 @@ void kvm_irq_routing_update(struct kvm *kvm)
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
+#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+ /* Under irqfds.lock, so can read irq_entry safely */
+ struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;
+#endif
+
irqfd_update(kvm, irqfd);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
- if (irqfd->producer) {
+ if (irqfd->producer &&
+ kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
int ret = kvm_arch_update_irqfd_routing(
irqfd->kvm, irqfd->producer->irq,
irqfd->gsi, 1);
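
The new __weak kvm_arch_irqfd_route_changed() hook lets an architecture skip the irqfd routing update when a change does not affect anything it cares about; the weak default conservatively reports every update as a change. A minimal sketch of what an arch override might look like, assuming the architecture only needs to react to MSI payload changes (the comparison policy here is illustrative, not any real arch's implementation):

#include <linux/kvm_host.h>

/* Hypothetical arch override: only an MSI address/data change matters. */
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
				  struct kvm_kernel_irq_routing_entry *new)
{
	if (old->type != new->type)
		return true;

	if (new->type == KVM_IRQ_ROUTING_MSI)
		return old->msi.address_lo != new->msi.address_lo ||
		       old->msi.address_hi != new->msi.address_hi ||
		       old->msi.data != new->msi.data;

	/* Be conservative for any other routing type. */
	return true;
}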
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 439d3b4cd1a9..72c4e6b39389 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -155,6 +155,8 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;
+static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
+
__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
unsigned long start, unsigned long end)
{
@@ -235,11 +237,8 @@ static void ack_flush(void *_completed)
{
}
-static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
+static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
- if (unlikely(!cpus))
- cpus = cpu_online_mask;
-
if (cpumask_empty(cpus))
return false;
@@ -247,33 +246,55 @@ static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
return true;
}
+static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ unsigned int req, struct cpumask *tmp,
+ int current_cpu)
+{
+ int cpu;
+
+ kvm_make_request(req, vcpu);
+
+ if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
+ return;
+
+ /*
+ * Note, the vCPU could get migrated to a different pCPU at any point
+ * after kvm_request_needs_ipi(), which could result in sending an IPI
+ * to the previous pCPU. But, that's OK because the purpose of the IPI
+ * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
+ * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
+ * after this point is also OK, as the requirement is only that KVM wait
+ * for vCPUs that were reading SPTEs _before_ any changes were
+ * finalized. See kvm_vcpu_kick() for more details on handling requests.
+ */
+ if (kvm_request_needs_ipi(vcpu, req)) {
+ cpu = READ_ONCE(vcpu->cpu);
+ if (cpu != -1 && cpu != current_cpu)
+ __cpumask_set_cpu(cpu, tmp);
+ }
+}
+
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
- struct kvm_vcpu *except,
- unsigned long *vcpu_bitmap, cpumask_var_t tmp)
+ unsigned long *vcpu_bitmap)
{
- int i, cpu, me;
struct kvm_vcpu *vcpu;
+ struct cpumask *cpus;
+ int i, me;
bool called;
me = get_cpu();
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
- vcpu == except)
- continue;
-
- kvm_make_request(req, vcpu);
- cpu = vcpu->cpu;
+ cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
+ cpumask_clear(cpus);
- if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
+ for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
+ vcpu = kvm_get_vcpu(kvm, i);
+ if (!vcpu)
continue;
-
- if (tmp != NULL && cpu != -1 && cpu != me &&
- kvm_request_needs_ipi(vcpu, req))
- __cpumask_set_cpu(cpu, tmp);
+ kvm_make_vcpu_request(kvm, vcpu, req, cpus, me);
}
- called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
+ called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
put_cpu();
return called;
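
With the reworked signature, kvm_make_vcpus_request_mask() no longer takes an "except" vCPU or a caller-supplied scratch cpumask; the set of pCPUs to IPI is built in the per-CPU cpu_kick_mask under get_cpu()/put_cpu(). A hedged sketch of a hypothetical caller (the request and bitmap contents are illustrative):

#include <linux/kvm_host.h>

/* Hypothetical caller: request a TLB flush only for selected vCPU indices. */
static bool example_kick_selected_vcpus(struct kvm *kvm)
{
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

	bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
	set_bit(0, vcpu_bitmap);	/* vCPU 0 only, for illustration */

	/* No scratch cpumask: the helper uses the per-CPU cpu_kick_mask. */
	return kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH, vcpu_bitmap);
}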
@@ -282,14 +303,25 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
struct kvm_vcpu *except)
{
- cpumask_var_t cpus;
+ struct kvm_vcpu *vcpu;
+ struct cpumask *cpus;
bool called;
+ int i, me;
+
+ me = get_cpu();
- zalloc_cpumask_var(&cpus, GFP_ATOMIC);
+ cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
+ cpumask_clear(cpus);
- called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu == except)
+ continue;
+ kvm_make_vcpu_request(kvm, vcpu, req, cpus, me);
+ }
+
+ called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
+ put_cpu();
- free_cpumask_var(cpus);
return called;
}
@@ -302,13 +334,8 @@ EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
- /*
- * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
- * kvm_make_all_cpus_request.
- */
- long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
-
++kvm->stat.generic.remote_tlb_flush_requests;
+
/*
* We want to publish modifications to the page tables before reading
* mode. Pairs with a memory barrier in arch-specific code.
@@ -323,7 +350,6 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
if (!kvm_arch_flush_remote_tlb(kvm)
|| kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.generic.remote_tlb_flush;
- cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif
@@ -528,7 +554,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
}
}
- if (range->flush_on_ret && (ret || kvm->tlbs_dirty))
+ if (range->flush_on_ret && ret)
kvm_flush_remote_tlbs(kvm);
if (locked)
@@ -1505,11 +1531,10 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
static int kvm_set_memslot(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
struct kvm_memory_slot *new, int as_id,
enum kvm_mr_change change)
{
- struct kvm_memory_slot *slot;
+ struct kvm_memory_slot *slot, old;
struct kvm_memslots *slots;
int r;
@@ -1540,7 +1565,7 @@ static int kvm_set_memslot(struct kvm *kvm,
* Note, the INVALID flag needs to be in the appropriate entry
* in the freshly allocated memslots, not in @old or @new.
*/
- slot = id_to_memslot(slots, old->id);
+ slot = id_to_memslot(slots, new->id);
slot->flags |= KVM_MEMSLOT_INVALID;
/*
@@ -1571,6 +1596,26 @@ static int kvm_set_memslot(struct kvm *kvm,
kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
}
+ /*
+ * Make a full copy of the old memslot, the pointer will become stale
+ * when the memslots are re-sorted by update_memslots(), and the old
+ * memslot needs to be referenced after calling update_memslots(), e.g.
+ * to free its resources and for arch specific behavior. This needs to
+ * happen *after* (re)acquiring slots_arch_lock.
+ */
+ slot = id_to_memslot(slots, new->id);
+ if (slot) {
+ old = *slot;
+ } else {
+ WARN_ON_ONCE(change != KVM_MR_CREATE);
+ memset(&old, 0, sizeof(old));
+ old.id = new->id;
+ old.as_id = as_id;
+ }
+
+ /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
+ memcpy(&new->arch, &old.arch, sizeof(old.arch));
+
r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
if (r)
goto out_slots;
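
The comment above is the key subtlety: update_memslots() re-sorts the memslot array, so a pointer obtained from id_to_memslot() before the sort can end up referring to a different slot afterwards; only a by-value copy of the old slot is safe to use for the arch commit and for freeing. A small userspace illustration of that hazard (the struct and sort are illustrative, not KVM's memslot layout):

#include <stdio.h>
#include <stdlib.h>

struct slot { int id; unsigned long base; };

static int cmp_base(const void *a, const void *b)
{
	const struct slot *x = a, *y = b;

	return (x->base > y->base) - (x->base < y->base);
}

int main(void)
{
	struct slot slots[3] = { { 0, 300 }, { 1, 100 }, { 2, 200 } };
	struct slot *stale = &slots[0];	/* points at id 0 before the sort */
	struct slot copy = *stale;	/* a full copy survives the re-sort */

	qsort(slots, 3, sizeof(slots[0]), cmp_base);

	/* The same address now holds a different slot; the copy does not. */
	printf("via pointer: id=%d, via copy: id=%d\n", stale->id, copy.id);
	return 0;
}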
@@ -1578,14 +1623,18 @@ static int kvm_set_memslot(struct kvm *kvm,
update_memslots(slots, new, change);
slots = install_new_memslots(kvm, as_id, slots);
- kvm_arch_commit_memory_region(kvm, mem, old, new, change);
+ kvm_arch_commit_memory_region(kvm, mem, &old, new, change);
+
+ /* Free the old memslot's metadata. Note, this is the full copy!!! */
+ if (change == KVM_MR_DELETE)
+ kvm_free_memslot(kvm, &old);
kvfree(slots);
return 0;
out_slots:
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
- slot = id_to_memslot(slots, old->id);
+ slot = id_to_memslot(slots, new->id);
slot->flags &= ~KVM_MEMSLOT_INVALID;
slots = install_new_memslots(kvm, as_id, slots);
} else {
@@ -1600,7 +1649,6 @@ static int kvm_delete_memslot(struct kvm *kvm,
struct kvm_memory_slot *old, int as_id)
{
struct kvm_memory_slot new;
- int r;
if (!old->npages)
return -EINVAL;
@@ -1613,12 +1661,7 @@ static int kvm_delete_memslot(struct kvm *kvm,
*/
new.as_id = as_id;
- r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
- if (r)
- return r;
-
- kvm_free_memslot(kvm, old);
- return 0;
+ return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
}
/*
@@ -1646,7 +1689,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
id = (u16)mem->slot;
/* General sanity checks */
- if (mem->memory_size & (PAGE_SIZE - 1))
+ if ((mem->memory_size & (PAGE_SIZE - 1)) ||
+ (mem->memory_size != (unsigned long)mem->memory_size))
return -EINVAL;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
return -EINVAL;
@@ -1692,7 +1736,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (!old.npages) {
change = KVM_MR_CREATE;
new.dirty_bitmap = NULL;
- memset(&new.arch, 0, sizeof(new.arch));
} else { /* Modify an existing slot. */
if ((new.userspace_addr != old.userspace_addr) ||
(new.npages != old.npages) ||
@@ -1706,9 +1749,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
else /* Nothing to change. */
return 0;
- /* Copy dirty_bitmap and arch from the current memslot. */
+ /* Copy dirty_bitmap from the current memslot. */
new.dirty_bitmap = old.dirty_bitmap;
- memcpy(&new.arch, &old.arch, sizeof(new.arch));
}
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1734,7 +1776,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
bitmap_set(new.dirty_bitmap, 0, new.npages);
}
- r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
+ r = kvm_set_memslot(kvm, mem, &new, as_id, change);
if (r)
goto out_bitmap;
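
The extra sanity check rejects a memory_size that does not fit in an unsigned long: the size arrives from userspace as a u64, but KVM does its page accounting in unsigned long, so an oversized value would otherwise be silently truncated on 32-bit hosts. The check in isolation, as a hedged standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/*
 * Mirrors the new checks: reject sizes that are not page aligned or that
 * would be truncated by the cast to unsigned long.
 */
static bool memory_size_is_valid(uint64_t memory_size, unsigned long page_size)
{
	if (memory_size & (page_size - 1))
		return false;		/* not page aligned */
	if (memory_size != (unsigned long)memory_size)
		return false;		/* would overflow unsigned long */
	return true;
}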
@@ -2522,72 +2564,36 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(gfn_to_page);
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
{
if (pfn == 0)
return;
- if (cache)
- cache->pfn = cache->gfn = 0;
-
if (dirty)
kvm_release_pfn_dirty(pfn);
else
kvm_release_pfn_clean(pfn);
}
-static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
- struct gfn_to_pfn_cache *cache, u64 gen)
-{
- kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
- cache->pfn = gfn_to_pfn_memslot(slot, gfn);
- cache->gfn = gfn;
- cache->dirty = false;
- cache->generation = gen;
-}
-
-static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
- struct kvm_host_map *map,
- struct gfn_to_pfn_cache *cache,
- bool atomic)
+int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
{
kvm_pfn_t pfn;
void *hva = NULL;
struct page *page = KVM_UNMAPPED_PAGE;
- struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
- u64 gen = slots->generation;
if (!map)
return -EINVAL;
- if (cache) {
- if (!cache->pfn || cache->gfn != gfn ||
- cache->generation != gen) {
- if (atomic)
- return -EAGAIN;
- kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
- }
- pfn = cache->pfn;
- } else {
- if (atomic)
- return -EAGAIN;
- pfn = gfn_to_pfn_memslot(slot, gfn);
- }
+ pfn = gfn_to_pfn(vcpu->kvm, gfn);
if (is_error_noslot_pfn(pfn))
return -EINVAL;
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
- if (atomic)
- hva = kmap_atomic(page);
- else
- hva = kmap(page);
+ hva = kmap(page);
#ifdef CONFIG_HAS_IOMEM
- } else if (!atomic) {
- hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
} else {
- return -EINVAL;
+ hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
#endif
}
@@ -2601,27 +2607,9 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
return 0;
}
-
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
- struct gfn_to_pfn_cache *cache, bool atomic)
-{
- return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
- cache, atomic);
-}
-EXPORT_SYMBOL_GPL(kvm_map_gfn);
-
-int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-{
- return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
- NULL, false);
-}
EXPORT_SYMBOL_GPL(kvm_vcpu_map);
-static void __kvm_unmap_gfn(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- struct kvm_host_map *map,
- struct gfn_to_pfn_cache *cache,
- bool dirty, bool atomic)
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
{
if (!map)
return;
@@ -2629,45 +2617,21 @@ static void __kvm_unmap_gfn(struct kvm *kvm,
if (!map->hva)
return;
- if (map->page != KVM_UNMAPPED_PAGE) {
- if (atomic)
- kunmap_atomic(map->hva);
- else
- kunmap(map->page);
- }
+ if (map->page != KVM_UNMAPPED_PAGE)
+ kunmap(map->page);
#ifdef CONFIG_HAS_IOMEM
- else if (!atomic)
- memunmap(map->hva);
else
- WARN_ONCE(1, "Unexpected unmapping in atomic context");
+ memunmap(map->hva);
#endif
if (dirty)
- mark_page_dirty_in_slot(kvm, memslot, map->gfn);
+ kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
- if (cache)
- cache->dirty |= dirty;
- else
- kvm_release_pfn(map->pfn, dirty, NULL);
+ kvm_release_pfn(map->pfn, dirty);
map->hva = NULL;
map->page = NULL;
}
-
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
- struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
-{
- __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
- cache, dirty, atomic);
- return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
-
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-{
- __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
- map, NULL, dirty, false);
-}
EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
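
With the gfn_to_pfn_cache plumbing removed, kvm_vcpu_map()/kvm_vcpu_unmap() is the remaining short-term host-mapping API here. A hedged sketch of typical usage, assuming a hypothetical helper that writes one u32 into a guest page (the gpa and value are illustrative):

#include <linux/kvm_host.h>

static int example_poke_guest_page(struct kvm_vcpu *vcpu, gpa_t gpa, u32 val)
{
	struct kvm_host_map map;
	int r;

	r = kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map);
	if (r)
		return r;

	*(u32 *)(map.hva + offset_in_page(gpa)) = val;

	/* 'true' marks the page dirty before the pfn is released. */
	kvm_vcpu_unmap(vcpu, &map, true);
	return 0;
}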
@@ -2967,7 +2931,8 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
int r;
gpa_t gpa = ghc->gpa + offset;
- BUG_ON(len + offset > ghc->len);
+ if (WARN_ON_ONCE(len + offset > ghc->len))
+ return -EINVAL;
if (slots->generation != ghc->generation) {
if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
@@ -3004,7 +2969,8 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
int r;
gpa_t gpa = ghc->gpa + offset;
- BUG_ON(len + offset > ghc->len);
+ if (WARN_ON_ONCE(len + offset > ghc->len))
+ return -EINVAL;
if (slots->generation != ghc->generation) {
if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
@@ -3134,15 +3100,19 @@ out:
static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{
- unsigned int old, val, shrink;
+ unsigned int old, val, shrink, grow_start;
old = val = vcpu->halt_poll_ns;
shrink = READ_ONCE(halt_poll_ns_shrink);
+ grow_start = READ_ONCE(halt_poll_ns_grow_start);
if (shrink == 0)
val = 0;
else
val /= shrink;
+ if (val < grow_start)
+ val = 0;
+
vcpu->halt_poll_ns = val;
trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
}
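
The new clamp resets halt_poll_ns to 0 once shrinking would take it below halt_poll_ns_grow_start, instead of letting it decay in small steps. A standalone sketch of the arithmetic (the numbers are illustrative; the threshold is the halt_poll_ns_grow_start module parameter):

#include <stdio.h>

static unsigned int shrink_halt_poll(unsigned int val, unsigned int shrink,
				     unsigned int grow_start)
{
	if (shrink == 0)
		val = 0;
	else
		val /= shrink;

	if (val < grow_start)
		val = 0;

	return val;
}

int main(void)
{
	/* 20000ns halved is 10000ns, not below grow_start, so it is kept... */
	printf("%u\n", shrink_halt_poll(20000, 2, 10000));	/* 10000 */
	/* ...but 10000ns halved is 5000ns, below grow_start, so reset to 0. */
	printf("%u\n", shrink_halt_poll(10000, 2, 10000));	/* 0 */
	return 0;
}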
@@ -3290,16 +3260,24 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
*/
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
- int me;
- int cpu = vcpu->cpu;
+ int me, cpu;
if (kvm_vcpu_wake_up(vcpu))
return;
+ /*
+ * Note, the vCPU could get migrated to a different pCPU at any point
+ * after kvm_arch_vcpu_should_kick(), which could result in sending an
+ * IPI to the previous pCPU. But, that's ok because the purpose of the
+ * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
+ * vCPU also requires it to leave IN_GUEST_MODE.
+ */
me = get_cpu();
- if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
- if (kvm_arch_vcpu_should_kick(vcpu))
+ if (kvm_arch_vcpu_should_kick(vcpu)) {
+ cpu = READ_ONCE(vcpu->cpu);
+ if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
smp_send_reschedule(cpu);
+ }
put_cpu();
}
EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
@@ -3493,7 +3471,7 @@ static const struct vm_operations_struct kvm_vcpu_vm_ops = {
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
struct kvm_vcpu *vcpu = file->private_data;
- unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long pages = vma_pages(vma);
if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
@@ -3557,7 +3535,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
struct kvm_vcpu *vcpu;
struct page *page;
- if (id >= KVM_MAX_VCPU_ID)
+ if (id >= KVM_MAX_VCPU_IDS)
return -EINVAL;
mutex_lock(&kvm->lock);
@@ -3709,7 +3687,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
struct kvm_fpu *fpu = NULL;
struct kvm_sregs *kvm_sregs = NULL;
- if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged)
+ if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
return -EIO;
if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
@@ -3919,7 +3897,7 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
void __user *argp = compat_ptr(arg);
int r;
- if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged)
+ if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
return -EIO;
switch (ioctl) {
@@ -3985,7 +3963,7 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
{
struct kvm_device *dev = filp->private_data;
- if (dev->kvm->mm != current->mm || dev->kvm->vm_bugged)
+ if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
return -EIO;
switch (ioctl) {
@@ -4307,7 +4285,7 @@ static long kvm_vm_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
int r;
- if (kvm->mm != current->mm || kvm->vm_bugged)
+ if (kvm->mm != current->mm || kvm->vm_dead)
return -EIO;
switch (ioctl) {
case KVM_CREATE_VCPU:
@@ -4518,7 +4496,7 @@ static long kvm_vm_compat_ioctl(struct file *filp,
struct kvm *kvm = filp->private_data;
int r;
- if (kvm->mm != current->mm || kvm->vm_bugged)
+ if (kvm->mm != current->mm || kvm->vm_dead)
return -EIO;
switch (ioctl) {
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
@@ -5513,9 +5491,17 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
goto out_free_3;
}
+ for_each_possible_cpu(cpu) {
+ if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
+ GFP_KERNEL, cpu_to_node(cpu))) {
+ r = -ENOMEM;
+ goto out_free_4;
+ }
+ }
+
r = kvm_async_pf_init();
if (r)
- goto out_free;
+ goto out_free_5;
kvm_chardev_ops.owner = module;
kvm_vm_fops.owner = module;
@@ -5541,7 +5527,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
out_unreg:
kvm_async_pf_deinit();
-out_free:
+out_free_5:
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
+out_free_4:
kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
unregister_reboot_notifier(&kvm_reboot_notifier);
@@ -5561,8 +5550,12 @@ EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
+ int cpu;
+
debugfs_remove_recursive(kvm_debugfs_dir);
misc_deregister(&kvm_dev);
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
kmem_cache_destroy(kvm_vcpu_cache);
kvm_async_pf_deinit();
unregister_syscore_ops(&kvm_syscore_ops);
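
The per-CPU kick mask added above follows the usual lifecycle for per-CPU cpumask_var_t: allocate one mask per possible CPU (node-local) at init, and free them all both on the init error path and at exit; free_cpumask_var() on a zero-initialized, never-allocated per-CPU mask is a safe no-op. A self-contained sketch of the same pattern in a hypothetical module (names are illustrative):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(cpumask_var_t, example_mask);

static void example_free_masks(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(example_mask, cpu));
}

static int __init example_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!alloc_cpumask_var_node(&per_cpu(example_mask, cpu),
					    GFP_KERNEL, cpu_to_node(cpu))) {
			example_free_masks();
			return -ENOMEM;
		}
	}
	return 0;
}

static void __exit example_exit(void)
{
	example_free_masks();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");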