Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--	virt/kvm/kvm_main.c	170
1 file changed, 145 insertions(+), 25 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f69357a29688..5f260488e999 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -63,6 +63,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
+#include <linux/kvm_dirty_ring.h>
+
/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12
@@ -415,6 +417,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
+ kvm_dirty_ring_free(&vcpu->dirty_ring);
kvm_arch_vcpu_destroy(vcpu);
/*
@@ -1362,7 +1365,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* Allocate/free page dirty bitmap as needed */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
- else if (!new.dirty_bitmap) {
+ else if (!new.dirty_bitmap && !kvm->dirty_ring_size) {
r = kvm_alloc_dirty_bitmap(&new);
if (r)
return r;
@@ -1423,6 +1426,10 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
unsigned long n;
unsigned long any = 0;
+ /* Dirty ring tracking is mutually exclusive with dirty log tracking */
+ if (kvm->dirty_ring_size)
+ return -ENXIO;
+
*memslot = NULL;
*is_dirty = 0;
@@ -1484,6 +1491,10 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
unsigned long *dirty_bitmap_buffer;
bool flush;
+ /* Dirty ring tracking is mutually exclusive with dirty log tracking */
+ if (kvm->dirty_ring_size)
+ return -ENXIO;
+
as_id = log->slot >> 16;
id = (u16)log->slot;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
@@ -1592,6 +1603,10 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
unsigned long *dirty_bitmap_buffer;
bool flush;
+ /* Dirty ring tracking is mutually exclusive with dirty log tracking */
+ if (kvm->dirty_ring_size)
+ return -ENXIO;
+
as_id = log->slot >> 16;
id = (u16)log->slot;
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
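
The three -ENXIO checks above make the bitmap-based KVM_GET_DIRTY_LOG and
KVM_CLEAR_DIRTY_LOG paths refuse to run once a ring has been enabled. A
minimal userspace probe for that behavior might look like the sketch below;
vm_fd and slot are assumptions for illustration, not part of this patch:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns 1 when the bitmap API is disabled (ring active), 0 otherwise. */
static int dirty_bitmap_api_disabled(int vm_fd, __u32 slot)
{
	struct kvm_dirty_log log = { .slot = slot };

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0 && errno == ENXIO;
}
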
@@ -2196,7 +2211,8 @@ int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_map);
-static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+static void __kvm_unmap_gfn(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache,
bool dirty, bool atomic)
@@ -2221,7 +2237,7 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
#endif
if (dirty)
- mark_page_dirty_in_slot(memslot, map->gfn);
+ mark_page_dirty_in_slot(kvm, memslot, map->gfn);
if (cache)
cache->dirty |= dirty;
@@ -2235,7 +2251,7 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
{
- __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+ __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
cache, dirty, atomic);
return 0;
}
@@ -2243,8 +2259,8 @@ EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
{
- __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
- dirty, false);
+ __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
+ map, NULL, dirty, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
@@ -2418,7 +2434,8 @@ int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
-static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
+static int __kvm_write_guest_page(struct kvm *kvm,
+ struct kvm_memory_slot *memslot, gfn_t gfn,
const void *data, int offset, int len)
{
int r;
@@ -2430,7 +2447,7 @@ static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
r = __copy_to_user((void __user *)addr + offset, data, len);
if (r)
return -EFAULT;
- mark_page_dirty_in_slot(memslot, gfn);
+ mark_page_dirty_in_slot(kvm, memslot, gfn);
return 0;
}
@@ -2439,7 +2456,7 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
{
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
- return __kvm_write_guest_page(slot, gfn, data, offset, len);
+ return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
@@ -2448,7 +2465,7 @@ int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- return __kvm_write_guest_page(slot, gfn, data, offset, len);
+ return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
@@ -2567,7 +2584,7 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
if (r)
return -EFAULT;
- mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
+ mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
return 0;
}
@@ -2616,23 +2633,16 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
-int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
-{
- const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
-
- return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
-}
-EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
-
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
+ const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
int offset = offset_in_page(gpa);
int ret;
while ((seg = next_segment(len, offset)) != 0) {
- ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
+ ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
if (ret < 0)
return ret;
offset = 0;
@@ -2643,12 +2653,19 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
-void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn)
+void mark_page_dirty_in_slot(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ gfn_t gfn)
{
- if (memslot && memslot->dirty_bitmap) {
+ if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
+ u32 slot = (memslot->as_id << 16) | memslot->id;
- set_bit_le(rel_gfn, memslot->dirty_bitmap);
+ if (kvm->dirty_ring_size)
+ kvm_dirty_ring_push(kvm_dirty_ring_get(kvm),
+ slot, rel_gfn);
+ else
+ set_bit_le(rel_gfn, memslot->dirty_bitmap);
}
}
EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
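
With this hunk, mark_page_dirty_in_slot() publishes a (slot, offset) pair
into the per-vcpu ring when kvm->dirty_ring_size is set, and falls back to
the bitmap otherwise. On the other side, userspace walks the mmap()ed ring
and collects the published entries. Below is a hedged sketch of such a
harvest loop, assuming the uapi layout of struct kvm_dirty_gfn
(flags/slot/offset) and a hypothetical handle_dirty_gfn() consumer that is
not part of this patch:

#include <stdint.h>
#include <linux/kvm.h>

#ifndef KVM_DIRTY_GFN_F_DIRTY	/* provided by matching uapi headers */
#define KVM_DIRTY_GFN_F_DIRTY	(1 << 0)
#define KVM_DIRTY_GFN_F_RESET	(1 << 1)
#endif

/* Hypothetical consumer callback, supplied by the VMM. */
extern void handle_dirty_gfn(uint32_t slot, uint64_t offset);

/* Walk the ring from the reader cursor *fetch, collect published
 * entries, and flag each one for recycling by KVM_RESET_DIRTY_RINGS.
 * nentries is dirty_ring_size / sizeof(struct kvm_dirty_gfn). */
static uint32_t harvest_dirty_gfns(struct kvm_dirty_gfn *ring,
				   uint32_t nentries, uint32_t *fetch)
{
	uint32_t count = 0;

	for (;;) {
		struct kvm_dirty_gfn *e = &ring[*fetch % nentries];

		/* Stop at the first entry the kernel has not published. */
		if (!(__atomic_load_n(&e->flags, __ATOMIC_ACQUIRE) &
		      KVM_DIRTY_GFN_F_DIRTY))
			break;

		/* e->slot mirrors the push side, (as_id << 16) | slot id;
		 * e->offset is the gfn offset within that memslot. */
		handle_dirty_gfn(e->slot, e->offset);

		e->flags |= KVM_DIRTY_GFN_F_RESET;
		(*fetch)++;
		count++;
	}
	return count;
}
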
@@ -2658,7 +2675,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
struct kvm_memory_slot *memslot;
memslot = gfn_to_memslot(kvm, gfn);
- mark_page_dirty_in_slot(memslot, gfn);
+ mark_page_dirty_in_slot(kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(mark_page_dirty);
@@ -2667,7 +2684,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
struct kvm_memory_slot *memslot;
memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- mark_page_dirty_in_slot(memslot, gfn);
+ mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
@@ -3008,6 +3025,17 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
+static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
+{
+#if KVM_DIRTY_LOG_PAGE_OFFSET > 0
+ return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
+ (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
+ kvm->dirty_ring_size / PAGE_SIZE);
+#else
+ return false;
+#endif
+}
+
static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
{
struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
@@ -3023,6 +3051,10 @@ static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
+ else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
+ page = kvm_dirty_ring_get_page(
+ &vcpu->dirty_ring,
+ vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
else
return kvm_arch_vcpu_fault(vcpu, vmf);
get_page(page);
@@ -3036,6 +3068,14 @@ static const struct vm_operations_struct kvm_vcpu_vm_ops = {
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct kvm_vcpu *vcpu = file->private_data;
+ unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+ if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
+ kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
+ ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
+ return -EINVAL;
+
vma->vm_ops = &kvm_vcpu_vm_ops;
return 0;
}
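
kvm_vcpu_fault() exposes the ring pages at KVM_DIRTY_LOG_PAGE_OFFSET on the
vcpu fd, and kvm_vcpu_mmap() rejects executable or non-shared mappings of
that range. A sketch of the matching userspace mapping, assuming an
architecture whose uapi headers define a nonzero KVM_DIRTY_LOG_PAGE_OFFSET
(e.g. x86):

#include <stdint.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

/* Map one vcpu's dirty ring; ring_bytes is the size that was enabled
 * via KVM_CAP_DIRTY_LOG_RING. PROT_EXEC or MAP_PRIVATE would get
 * -EINVAL from the kvm_vcpu_mmap() check above. */
static struct kvm_dirty_gfn *map_dirty_ring(int vcpu_fd, uint32_t ring_bytes)
{
	off_t offset = (off_t)KVM_DIRTY_LOG_PAGE_OFFSET * sysconf(_SC_PAGESIZE);
	void *ring = mmap(NULL, ring_bytes, PROT_READ | PROT_WRITE,
			  MAP_SHARED, vcpu_fd, offset);

	return ring == MAP_FAILED ? NULL : (struct kvm_dirty_gfn *)ring;
}
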
@@ -3129,6 +3169,13 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
if (r)
goto vcpu_free_run_page;
+ if (kvm->dirty_ring_size) {
+ r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
+ id, kvm->dirty_ring_size);
+ if (r)
+ goto arch_vcpu_destroy;
+ }
+
mutex_lock(&kvm->lock);
if (kvm_get_vcpu_by_id(kvm, id)) {
r = -EEXIST;
@@ -3162,6 +3209,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
+ kvm_dirty_ring_free(&vcpu->dirty_ring);
+arch_vcpu_destroy:
kvm_arch_vcpu_destroy(vcpu);
vcpu_free_run_page:
free_page((unsigned long)vcpu->run);
@@ -3634,12 +3683,78 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#endif
case KVM_CAP_NR_MEMSLOTS:
return KVM_USER_MEM_SLOTS;
+ case KVM_CAP_DIRTY_LOG_RING:
+#if KVM_DIRTY_LOG_PAGE_OFFSET > 0
+ return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
+#else
+ return 0;
+#endif
default:
break;
}
return kvm_vm_ioctl_check_extension(kvm, arg);
}
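
The new KVM_CAP_DIRTY_LOG_RING case means KVM_CHECK_EXTENSION reports the
maximum ring size in bytes (KVM_DIRTY_RING_MAX_ENTRIES entries), or 0 when
the kernel or architecture lacks support; roughly:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns the per-vcpu ring size cap in bytes, or 0 without support. */
static int dirty_ring_max_bytes(int kvm_fd)
{
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DIRTY_LOG_RING);
}
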
+static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
+{
+ int r;
+
+ if (!KVM_DIRTY_LOG_PAGE_OFFSET)
+ return -EINVAL;
+
+ /* the size should be a power of 2 */
+ if (!size || (size & (size - 1)))
+ return -EINVAL;
+
+ /* Must be large enough for the reserved entries and at least one page */
+ if (size < kvm_dirty_ring_get_rsvd_entries() *
+ sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
+ return -EINVAL;
+
+ if (size > KVM_DIRTY_RING_MAX_ENTRIES *
+ sizeof(struct kvm_dirty_gfn))
+ return -E2BIG;
+
+ /* We only allow it to be set once */
+ if (kvm->dirty_ring_size)
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+
+ if (kvm->created_vcpus) {
+ /* We don't allow changing this value after vcpus are created */
+ r = -EINVAL;
+ } else {
+ kvm->dirty_ring_size = size;
+ r = 0;
+ }
+
+ mutex_unlock(&kvm->lock);
+ return r;
+}
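
Enabling goes through KVM_ENABLE_CAP on the VM fd before any vcpu exists;
per the checks above, the size must be a power of two, at least one page,
large enough for the reserved entries, and no bigger than the advertised
maximum. A sketch of the userspace side:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Enable a dirty ring of `bytes` bytes per vcpu; must happen before
 * the first KVM_CREATE_VCPU or the kernel returns -EINVAL. */
static int enable_dirty_ring(int vm_fd, uint32_t bytes)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_DIRTY_LOG_RING,
		.args = { bytes },
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
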
+
+static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+ int cleared = 0;
+
+ if (!kvm->dirty_ring_size)
+ return -EINVAL;
+
+ mutex_lock(&kvm->slots_lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
+
+ mutex_unlock(&kvm->slots_lock);
+
+ if (cleared)
+ kvm_flush_remote_tlbs(kvm);
+
+ return cleared;
+}
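
Once entries have been harvested and flagged KVM_DIRTY_GFN_F_RESET,
userspace calls KVM_RESET_DIRTY_RINGS on the VM fd to let the kernel
recycle them; the return value is the number of entries reset across all
vcpus, and a nonzero count also triggers a remote TLB flush. A sketch:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Recycle all harvested ring entries; returns the count reset. */
static int reset_dirty_rings(int vm_fd)
{
	return ioctl(vm_fd, KVM_RESET_DIRTY_RINGS);
}
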
+
int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
@@ -3670,6 +3785,8 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
kvm->max_halt_poll_ns = cap->args[0];
return 0;
}
+ case KVM_CAP_DIRTY_LOG_RING:
+ return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
default:
return kvm_vm_ioctl_enable_cap(kvm, cap);
}
@@ -3854,6 +3971,9 @@ static long kvm_vm_ioctl(struct file *filp,
case KVM_CHECK_EXTENSION:
r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
break;
+ case KVM_RESET_DIRTY_RINGS:
+ r = kvm_vm_ioctl_reset_dirty_pages(kvm);
+ break;
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}