author     David Matlack <dmatlack@google.com>        2023-01-26 21:40:23 +0300
committer  Sean Christopherson <seanjc@google.com>    2023-03-18 01:36:20 +0300
commit     9d4655da1a4c17f6691a6434303d9973017bf1ca (patch)
tree       4692706a5ef30a58a6bd8d279d5d70f6561f1695 /arch/x86/kvm/mmu
parent     8c63e8c2176552d5c003d7459609383d32bf47f3 (diff)
download   linux-9d4655da1a4c17f6691a6434303d9973017bf1ca.tar.xz
KVM: x86/mmu: Use gfn_t in kvm_flush_remote_tlbs_range()
Use gfn_t instead of u64 for kvm_flush_remote_tlbs_range()'s parameters,
since gfn_t is the standard type for GFNs throughout KVM. Opportunistically
rename pages to nr_pages to make its role even more obvious.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20230126184025.2294823-6-dmatlack@google.com
[sean: convert pages to gfn_t too, and rename]
Signed-off-by: Sean Christopherson <seanjc@google.com>
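For readers outside the KVM tree, a minimal standalone C sketch of the
before/after signatures. The stub declarations and the _old/_new suffixes are
illustration-only assumptions; gfn_t itself is a typedef of u64 in
include/linux/kvm_types.h, so the change documents intent rather than altering
the underlying representation.

    #include <stdint.h>

    typedef uint64_t u64;
    typedef u64 gfn_t;   /* guest frame number, mirroring include/linux/kvm_types.h */

    struct kvm;          /* opaque stand-in for the real struct kvm */

    /* Old signature: bare u64s do not say what units the values are in. */
    void kvm_flush_remote_tlbs_range_old(struct kvm *kvm, u64 start_gfn, u64 pages);

    /* New signature: gfn_t documents that both values are counted in guest frames. */
    void kvm_flush_remote_tlbs_range_new(struct kvm *kvm, gfn_t start_gfn,
                                         gfn_t nr_pages);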
Diffstat (limited to 'arch/x86/kvm/mmu')
-rw-r--r--   arch/x86/kvm/mmu/mmu.c            5
-rw-r--r--   arch/x86/kvm/mmu/mmu_internal.h   3
2 files changed, 5 insertions, 3 deletions
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b6635da53cb3..cc42fa097d5b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -261,13 +261,14 @@ static inline bool kvm_available_flush_tlb_with_range(void)
 	return kvm_x86_ops.tlb_remote_flush_with_range;
 }
 
-void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages)
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
+				 gfn_t nr_pages)
 {
 	struct kvm_tlb_range range;
 	int ret = -EOPNOTSUPP;
 
 	range.start_gfn = start_gfn;
-	range.pages = pages;
+	range.pages = nr_pages;
 
 	if (kvm_x86_ops.tlb_remote_flush_with_range)
 		ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, &range);
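For illustration only, a hypothetical caller (not part of this patch) showing
how the gfn_t-typed interface reads at a call site; the base_gfn/npages fields
of struct kvm_memory_slot from include/linux/kvm_host.h are assumed here.

    /*
     * Hypothetical example, not from this patch: flush the remote TLB entries
     * covering one entire memslot.  base_gfn is already a gfn_t and npages is
     * a page count, so both map directly onto the new parameters.
     */
    static void example_flush_memslot(struct kvm *kvm,
                                      const struct kvm_memory_slot *slot)
    {
            kvm_flush_remote_tlbs_range(kvm, slot->base_gfn, slot->npages);
    }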
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 4b2a1dc43db3..d39af5639ce9 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -170,7 +170,8 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 				    struct kvm_memory_slot *slot, u64 gfn,
 				    int min_level);
 
-void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
+				 gfn_t nr_pages);
 
 /* Flush the given page (huge or not) of guest memory. */
 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
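The hunk ends at the declaration of kvm_flush_remote_tlbs_gfn(). As a sketch of
how such a per-page helper plausibly wraps the range API (its body is not shown
in this diff; gfn_round_for_level() and KVM_PAGES_PER_HPAGE() are assumed from
the x86 MMU headers of this era):

    /*
     * Sketch only -- the real body lies outside this hunk.  Round the gfn down
     * to the first page of the (possibly huge) mapping at 'level', then flush
     * that many pages via the range helper changed above.
     */
    static inline void kvm_flush_remote_tlbs_gfn_sketch(struct kvm *kvm, gfn_t gfn,
                                                         int level)
    {
            kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
                                        KVM_PAGES_PER_HPAGE(level));
    }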