author     Paolo Bonzini <pbonzini@redhat.com>  2015-05-19 17:01:50 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>  2015-05-28 11:43:33 +0300
commit     d9ef13c2b3983de8dd1373ef670799dbb6498122 (patch)
tree       8fa5aedbdb534029ca8a69d1b4608720fd90e817
parent     f36f3f2846b5578d62910ee0b6dbef59fdd1cfa4 (diff)
download   linux-d9ef13c2b3983de8dd1373ef670799dbb6498122.tar.xz
KVM: pass kvm_memory_slot to gfn_to_page_many_atomic
The memory slot is already available from gfn_to_memslot_dirty_bitmap.
Isn't it a shame to look it up again?  Plus, it makes
gfn_to_page_many_atomic agnostic of multiple VCPU address spaces.

Reviewed-by: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  arch/x86/kvm/mmu.c        6
-rw-r--r--  include/linux/kvm_host.h  4
-rw-r--r--  virt/kvm/kvm_main.c       6
3 files changed, 9 insertions, 7 deletions
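
To make the intent concrete, here is a minimal sketch of the calling pattern the patch enables. It is not part of the commit: prefetch_pages() is a hypothetical caller modeled on direct_pte_prefetch_many() below, and gfn_to_memslot_dirty_bitmap() is a helper static to arch/x86/kvm/mmu.c, so a caller like this would have to live in that file. The point is that the slot from the first lookup is handed straight to gfn_to_page_many_atomic() instead of being discarded and re-derived from (kvm, gfn):

/* Hypothetical caller, for illustration only; modeled on
 * direct_pte_prefetch_many() in arch/x86/kvm/mmu.c.
 */
static int prefetch_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
			  struct page **pages, int nr)
{
	struct kvm_memory_slot *slot;

	/* One lookup; with no_dirty_log == true it also refuses
	 * slots that have dirty logging enabled.
	 */
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
	if (!slot)
		return -1;

	/* Old signature re-derived the slot internally:
	 *	gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, nr);
	 * New signature reuses the slot from the lookup above:
	 */
	return gfn_to_page_many_atomic(slot, gfn, pages, nr);
}

Because the slot is passed in explicitly, the helper no longer needs a struct kvm at all, which is what makes it indifferent to which VCPU address space the gfn came from.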
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1bf2ae9ca521..6a7e5b6246b1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2728,15 +2728,17 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 				    u64 *start, u64 *end)
 {
 	struct page *pages[PTE_PREFETCH_NUM];
+	struct kvm_memory_slot *slot;
 	unsigned access = sp->role.access;
 	int i, ret;
 	gfn_t gfn;
 
 	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
-	if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))
+	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
+	if (!slot)
 		return -1;
 
-	ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
+	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
 	if (ret <= 0)
 		return -1;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9bd3bc16be87..a8bcbc9c6078 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -526,8 +526,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot);
 
-int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
-			    int nr_pages);
+int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+			    struct page **pages, int nr_pages);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9f67c942d8ee..c57f44216a4e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1428,13 +1428,13 @@ pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
 
-int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
-			    int nr_pages)
+int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
+			    struct page **pages, int nr_pages)
 {
 	unsigned long addr;
 	gfn_t entry;
 
-	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
+	addr = gfn_to_hva_many(slot, gfn, &entry);
 	if (kvm_is_error_hva(addr))
 		return -1;
 
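
For context, here is how the whole helper reads after the patch. The tail (the bounds check against the slot via the entry out-parameter of gfn_to_hva_many(), then the atomic get_user_pages variant) lies outside the hunk above; it is reconstructed from the v4.1-era tree, so treat it as a sketch rather than part of the diff:

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
{
	unsigned long addr;
	gfn_t entry;

	/* entry is set to the number of pages left in the slot from gfn. */
	addr = gfn_to_hva_many(slot, gfn, &entry);
	if (kvm_is_error_hva(addr))
		return -1;

	/* Refuse requests that would run off the end of the slot. */
	if (entry < nr_pages)
		return 0;

	/* Callers may be in atomic context, hence the fast,
	 * non-sleeping GUP variant.
	 */
	return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);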