From 982bae43f11c37b51d2f1961bb25ef7cac3746fa Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Wed, 3 Aug 2022 22:49:55 +0000
Subject: KVM: x86: Tag kvm_mmu_x86_module_init() with __init

Mark kvm_mmu_x86_module_init() with __init; the entire reason it exists
is to initialize variables when kvm.ko is loaded, i.e. it must never be
called after module initialization.

Fixes: 1d0e84806047 ("KVM: x86/mmu: Resolve nx_huge_pages when kvm.ko is loaded")
Cc: stable@vger.kernel.org
Reviewed-by: Kai Huang
Tested-by: Michael Roth
Signed-off-by: Sean Christopherson
Message-Id: <20220803224957.1285926-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu/mmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86/kvm/mmu/mmu.c')

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 06ac8c7cef67..300927fd7b53 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6698,7 +6698,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
  * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
  * its default value of -1 is technically undefined behavior for a boolean.
  */
-void kvm_mmu_x86_module_init(void)
+void __init kvm_mmu_x86_module_init(void)
 {
 	if (nx_huge_pages == -1)
 		__set_nx_huge_pages(get_nx_auto_mode());
--
cgit v1.2.3

From c3e0c8c2e8b17bae30d5978bc2decdd4098f0f99 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Wed, 3 Aug 2022 22:49:56 +0000
Subject: KVM: x86/mmu: Fully re-evaluate MMIO caching when SPTE masks change

Fully re-evaluate whether or not MMIO caching can be enabled when SPTE
masks change; simply clearing enable_mmio_caching when a configuration
isn't compatible with caching fails to handle the scenario where the
masks are updated, e.g. by VMX for EPT or by SVM to account for the
C-bit location, and toggle compatibility from false=>true.

Snapshot the original module param so that re-evaluating MMIO caching
preserves userspace's desire to allow caching.  Use a snapshot rather
than ignoring the module param after the initial check so that userspace
can still read enable_mmio_caching to see whether KVM is actually using
MMIO caching.

Fixes: 8b9e74bfbf8c ("KVM: x86/mmu: Use enable_mmio_caching to track if MMIO caching is enabled")
Reported-by: Michael Roth
Cc: Tom Lendacky
Cc: stable@vger.kernel.org
Tested-by: Michael Roth
Signed-off-by: Sean Christopherson
Reviewed-by: Kai Huang
Message-Id: <20220803224957.1285926-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu/mmu.c  |  4 ++++
 arch/x86/kvm/mmu/spte.c | 19 +++++++++++++++++++
 arch/x86/kvm/mmu/spte.h |  1 +
 3 files changed, 24 insertions(+)

(limited to 'arch/x86/kvm/mmu/mmu.c')

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 300927fd7b53..918ff23c0bc3 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6697,11 +6697,15 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 /*
  * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
  * its default value of -1 is technically undefined behavior for a boolean.
+ * Forward the module init call to SPTE code so that it too can handle module
+ * params that need to be resolved/snapshot.
  */
 void __init kvm_mmu_x86_module_init(void)
 {
 	if (nx_huge_pages == -1)
 		__set_nx_huge_pages(get_nx_auto_mode());
+
+	kvm_mmu_spte_module_init();
 }
 
 /*
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index 7314d27d57a4..66f76f5a15bd 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -20,6 +20,7 @@
 #include <asm/vmx.h>
 
 bool __read_mostly enable_mmio_caching = true;
+static bool __ro_after_init allow_mmio_caching;
 module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
 
 u64 __read_mostly shadow_host_writable_mask;
@@ -43,6 +44,18 @@ u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
 u8 __read_mostly shadow_phys_bits;
 
+void __init kvm_mmu_spte_module_init(void)
+{
+	/*
+	 * Snapshot userspace's desire to allow MMIO caching.  Whether or not
+	 * KVM can actually enable MMIO caching depends on vendor-specific
+	 * hardware capabilities and other module params that can't be resolved
+	 * until the vendor module is loaded, i.e. enable_mmio_caching can and
+	 * will change when the vendor module is (re)loaded.
+	 */
+	allow_mmio_caching = enable_mmio_caching;
+}
+
 static u64 generation_mmio_spte_mask(u64 gen)
 {
 	u64 mask;
@@ -340,6 +353,12 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
 	BUG_ON((u64)(unsigned)access_mask != access_mask);
 	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
 
+	/*
+	 * Reset to the original module param value to honor userspace's desire
+	 * to (dis)allow MMIO caching.  Update the param itself so that
+	 * userspace can see whether or not KVM is actually using MMIO caching.
+	 */
+	enable_mmio_caching = allow_mmio_caching;
 	if (!enable_mmio_caching)
 		mmio_value = 0;
 
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index cabe3fbb4f39..26b144ffd146 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -450,6 +450,7 @@ static inline u64 restore_acc_track_spte(u64 spte)
 
 u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);
 
+void __init kvm_mmu_spte_module_init(void);
 void kvm_mmu_reset_all_pte_masks(void);
 
 #endif
--
cgit v1.2.3

From 1685c0f32554a7f35962061d17155c58454f1cd2 Mon Sep 17 00:00:00 2001
From: Mingwei Zhang
Date: Sun, 7 Aug 2022 05:21:41 +0000
Subject: KVM: x86/mmu: rename trace function name for asynchronous page fault

Rename the tracepoint function from trace_kvm_async_pf_doublefault() to
trace_kvm_async_pf_repeated_fault() to make its purpose clear: double
faults have nothing to do with this tracepoint.

Asynchronous Page Fault (APF) is an artifact generated by KVM when it
cannot find a physical page to satisfy an EPT violation. KVM uses APF to
tell the guest OS to do something else, such as scheduling other guest
processes, to make forward progress. However, when another guest process
also touches a previously APFed page, KVM halts the vCPU instead of
generating a repeated APF to avoid wasting cycles.

Double fault (#DF) clearly has a different meaning and a different
consequence when triggered. A #DF requires two nested contributory
exceptions, not two page faults at the same address. A previous APF bug
indicates that APF may trigger a double fault in the guest [1], and this
tracepoint clearly has nothing to do with that, so renaming the function
is a reasonable choice.

No functional change intended.
[1] https://www.spinics.net/lists/kvm/msg214957.html

Signed-off-by: Mingwei Zhang
Message-Id: <20220807052141.69186-1-mizhang@google.com>
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu/mmu.c     | 2 +-
 include/trace/events/kvm.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/x86/kvm/mmu/mmu.c')

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 918ff23c0bc3..eccddb136954 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4164,7 +4164,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
 		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
-			trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
+			trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
 			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
 			return RET_PF_RETRY;
 		} else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) {
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 37e1e1a2d67d..3bd31ea23fee 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -282,7 +282,7 @@ DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
 
 	TP_ARGS(gva, gfn)
 );
 
-DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,
 
 	TP_PROTO(u64 gva, u64 gfn),
--
cgit v1.2.3
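
The snapshot-and-restore pattern from the second patch above generalizes
beyond KVM: keep the user-visible module param reflecting what the code is
actually doing, and keep a private __ro_after_init copy of what the user
originally asked for, so a later re-evaluation can start from the user's
intent rather than from a previously clamped value. Below is a minimal,
self-contained sketch of that pattern; it is not KVM code, and every
identifier in it (demo_caching, demo_hw_supports_caching(), and so on) is
hypothetical.

	// Illustrative only: a stand-alone sketch of the snapshot pattern,
	// not KVM code.  All names here are made up for the example.
	#include <linux/cache.h>
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/moduleparam.h>

	/* What the module is actually doing right now (visible via sysfs). */
	static bool enable_demo_caching = true;
	module_param_named(demo_caching, enable_demo_caching, bool, 0444);

	/* Userspace's original request, captured once at load time. */
	static bool __ro_after_init allow_demo_caching;

	static bool demo_hw_supports_caching(void)
	{
		/* Stand-in for a vendor/hardware capability check. */
		return true;
	}

	/*
	 * Re-evaluate caching whenever the relevant configuration changes.
	 * Start from the snapshot so that an earlier "incompatible" result
	 * doesn't stick after the configuration becomes compatible again.
	 */
	static void demo_reevaluate_caching(void)
	{
		enable_demo_caching = allow_demo_caching;

		if (!demo_hw_supports_caching())
			enable_demo_caching = false;
	}

	static int __init demo_init(void)
	{
		/* Snapshot the param before anything can clobber it. */
		allow_demo_caching = enable_demo_caching;

		demo_reevaluate_caching();
		return 0;
	}
	module_init(demo_init);

	static void __exit demo_exit(void) { }
	module_exit(demo_exit);

	MODULE_LICENSE("GPL");

In KVM's case the re-evaluation point is kvm_mmu_set_mmio_spte_mask(), which
runs when the SPTE masks change (e.g. when a vendor module is reloaded); the
stand-in capability check above merely plays that role in the sketch.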