author     Yosry Ahmed <yosryahmed@google.com>      2022-08-23 03:46:37 +0300
committer  Sean Christopherson <seanjc@google.com>  2022-08-30 17:41:12 +0300
commit     43a063cab325ee7cc50349967e536b3cd4e57f03 (patch)
tree       ef1028659ec2f62930896e5d56eb54b0e46c612f
parent     ebc97a52b5d6cd5fb0c15a3fc9cdd6eb924646a1 (diff)
download   linux-43a063cab325ee7cc50349967e536b3cd4e57f03.tar.xz
KVM: x86/mmu: count KVM mmu usage in secondary pagetable stats.
Count the pages used by KVM mmu on x86 in memory stats under secondary
pagetable stats (e.g. "SecPageTables" in /proc/meminfo) to give better
visibility into the memory consumption of KVM mmu, in a similar way to how
normal user page tables are accounted.

Add the inner helper in common KVM; ARM will also use it to count stats in a
future commit.

Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Acked-by: Marc Zyngier <maz@kernel.org> # generic KVM changes
Link: https://lore.kernel.org/r/20220823004639.2387269-3-yosryahmed@google.com
Link: https://lore.kernel.org/r/20220823004639.2387269-4-yosryahmed@google.com
[sean: squash x86 usage to workaround modpost issues]
Signed-off-by: Sean Christopherson <seanjc@google.com>
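The commit message above mentions the "SecPageTables" field in /proc/meminfo.
As a quick way to observe the stat that this patch starts accounting, a
minimal userspace sketch (not part of the patch; the program and its parsing
details are assumptions for illustration) could read that field directly:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}

	/* Print only the secondary page table stat, e.g.
	 * "SecPageTables:      2048 kB".
	 */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "SecPageTables:", 14)) {
			fputs(line, stdout);
			break;
		}
	}

	fclose(f);
	return 0;
}

Running a KVM guest and re-reading the field should show the counter grow as
the guest's secondary page tables are built.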
-rw-r--r--  arch/x86/kvm/mmu/mmu.c      16
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c  12
-rw-r--r--  include/linux/kvm_host.h    13
3 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d25d55b1f0b5..32b60a6b83bd 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1665,6 +1665,18 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}
+static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ kvm_mod_used_mmu_pages(kvm, +1);
+ kvm_account_pgtable_pages((void *)sp->spt, +1);
+}
+
+static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ kvm_mod_used_mmu_pages(kvm, -1);
+ kvm_account_pgtable_pages((void *)sp->spt, -1);
+}
+
static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
{
MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
@@ -2122,7 +2134,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
*/
sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
list_add(&sp->link, &kvm->arch.active_mmu_pages);
- kvm_mod_used_mmu_pages(kvm, +1);
+ kvm_account_mmu_page(kvm, sp);
sp->gfn = gfn;
sp->role = role;
@@ -2456,7 +2468,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
list_add(&sp->link, invalid_list);
else
list_move(&sp->link, invalid_list);
- kvm_mod_used_mmu_pages(kvm, -1);
+ kvm_unaccount_mmu_page(kvm, sp);
} else {
/*
* Remove the active root from the active page list, the root
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index bf2ccf9debca..672f0432d777 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -372,6 +372,16 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
}
}
+static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ kvm_account_pgtable_pages((void *)sp->spt, +1);
+}
+
+static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ kvm_account_pgtable_pages((void *)sp->spt, -1);
+}
+
/**
* tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
*
@@ -384,6 +394,7 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
bool shared)
{
+ tdp_unaccount_mmu_page(kvm, sp);
if (shared)
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
else
@@ -1132,6 +1143,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
if (account_nx)
account_huge_nx_page(kvm, sp);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+ tdp_account_mmu_page(kvm, sp);
return 0;
}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f4519d3689e1..04c7e5f2f727 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2248,6 +2248,19 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+ mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
+
+/*
* This defines how many reserved entries we want to keep before we
* kick the vcpu to the userspace to avoid dirty ring full. This
* value can be tuned to higher if e.g. PML is enabled on the host.
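
The new kvm_account_pgtable_pages() helper above is the piece the commit
message says ARM will reuse later. As a hypothetical illustration only (this
is not the actual ARM patch; the function names below are invented), an
architecture could pair the helper around its own page-table page allocation
and free paths like so:

#include <linux/gfp.h>
#include <linux/kvm_host.h>

static void *arch_alloc_pgtable_page(void)
{
	void *virt = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);

	/* Account the page under NR_SECONDARY_PAGETABLE on success. */
	if (virt)
		kvm_account_pgtable_pages(virt, +1);
	return virt;
}

static void arch_free_pgtable_page(void *virt)
{
	/* Unaccount before the page goes back to the allocator. */
	kvm_account_pgtable_pages(virt, -1);
	free_page((unsigned long)virt);
}

This mirrors the x86 pattern in the patch, where kvm_account_mmu_page() /
kvm_unaccount_mmu_page() and tdp_account_mmu_page() / tdp_unaccount_mmu_page()
keep the increment and decrement strictly paired with shadow page creation and
teardown.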