From 258d985f6eb360c9c7aacd025d0dbc080a59423f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 2 Feb 2023 18:28:15 +0000 Subject: KVM: x86/mmu: Use EMULTYPE flag to track write #PFs to shadow pages Use a new EMULTYPE flag, EMULTYPE_WRITE_PF_TO_SP, to track page faults on self-changing writes to shadowed page tables instead of propagating that information to the emulator via a semi-persistent vCPU flag. Using a flag in "struct kvm_vcpu_arch" is confusing, especially as implemented, as it's not at all obvious that clearing the flag only when emulation actually occurs is correct. E.g. if KVM sets the flag and then retries the fault without ever getting to the emulator, the flag will be left set for future calls into the emulator. But because the flag is consumed if and only if both EMULTYPE_PF and EMULTYPE_ALLOW_RETRY_PF are set, and because EMULTYPE_ALLOW_RETRY_PF is deliberately not set for direct MMUs, emulated MMIO, or while L2 is active, KVM avoids false positives on a stale flag since FNAME(page_fault) is guaranteed to be run and refresh the flag before it's ultimately consumed by the tail end of reexecute_instruction(). Signed-off-by: Sean Christopherson Message-Id: <20230202182817.407394-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 37 ++++++++++++++++++++----------------- arch/x86/kvm/mmu/mmu.c | 5 +++-- arch/x86/kvm/mmu/mmu_internal.h | 12 +++++++++++- arch/x86/kvm/mmu/paging_tmpl.h | 4 +--- arch/x86/kvm/x86.c | 15 ++------------- 5 files changed, 37 insertions(+), 36 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 808c292ad3f4..a45de1118a42 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -947,23 +947,6 @@ struct kvm_vcpu_arch { u64 msr_kvm_poll_control; - /* - * Indicates the guest is trying to write a gfn that contains one or - * more of the PTEs used to translate the write itself, i.e. the access - * is changing its own translation in the guest page tables. KVM exits - * to userspace if emulation of the faulting instruction fails and this - * flag is set, as KVM cannot make forward progress. - * - * If emulation fails for a write to guest page tables, KVM unprotects - * (zaps) the shadow page for the target gfn and resumes the guest to - * retry the non-emulatable instruction (on hardware). Unprotecting the - * gfn doesn't allow forward progress for a self-changing access because - * doing so also zaps the translation for the gfn, i.e. retrying the - * instruction will hit a !PRESENT fault, which results in a new shadow - * page and sends KVM back to square one. - */ - bool write_fault_to_shadow_pgtable; - /* set at EPT violation at this point */ unsigned long exit_qualification; @@ -1907,6 +1890,25 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu); * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility * state and inject single-step #DBs after skipping * an instruction (after completing userspace I/O). + * + * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that + * is attempting to write a gfn that contains one or + * more of the PTEs used to translate the write itself, + * and the owning page table is being shadowed by KVM. + * If emulation of the faulting instruction fails and + * this flag is set, KVM will exit to userspace instead + * of retrying emulation as KVM cannot make forward + * progress. 
+ * + * If emulation fails for a write to guest page tables, + * KVM unprotects (zaps) the shadow page for the target + * gfn and resumes the guest to retry the non-emulatable + * instruction (on hardware). Unprotecting the gfn + * doesn't allow forward progress for a self-changing + * access because doing so also zaps the translation for + * the gfn, i.e. retrying the instruction will hit a + * !PRESENT fault, which results in a new shadow page + * and sends KVM back to square one. */ #define EMULTYPE_NO_DECODE (1 << 0) #define EMULTYPE_TRAP_UD (1 << 1) @@ -1916,6 +1918,7 @@ u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu); #define EMULTYPE_VMWARE_GP (1 << 5) #define EMULTYPE_PF (1 << 6) #define EMULTYPE_COMPLETE_USER_EXIT (1 << 7) +#define EMULTYPE_WRITE_PF_TO_SP (1 << 8) int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index c8ebe542c565..144c5a01cd77 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4203,7 +4203,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu)) return; - kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true); + kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL); } static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) @@ -5664,7 +5664,8 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err if (r == RET_PF_INVALID) { r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, - lower_32_bits(error_code), false); + lower_32_bits(error_code), false, + &emulation_type); if (KVM_BUG_ON(r == RET_PF_INVALID, vcpu->kvm)) return -EIO; } diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index cc58631e2336..2cbb155c686c 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -240,6 +240,13 @@ struct kvm_page_fault { kvm_pfn_t pfn; hva_t hva; bool map_writable; + + /* + * Indicates the guest is trying to write a gfn that contains one or + * more of the PTEs used to translate the write itself, i.e. the access + * is changing its own translation in the guest page tables. + */ + bool write_fault_to_shadow_pgtable; }; int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); @@ -273,7 +280,7 @@ enum { }; static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - u32 err, bool prefetch) + u32 err, bool prefetch, int *emulation_type) { struct kvm_page_fault fault = { .addr = cr2_or_gpa, @@ -312,6 +319,9 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, else r = vcpu->arch.mmu->page_fault(vcpu, &fault); + if (fault.write_fault_to_shadow_pgtable && emulation_type) + *emulation_type |= EMULTYPE_WRITE_PF_TO_SP; + /* * Similar to above, prefetch faults aren't truly spurious, and the * async #PF path doesn't do emulation. 
Do count faults that are fixed diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 57f0b75c80f9..5d2958299b4f 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -825,10 +825,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (r) return r; - vcpu->arch.write_fault_to_shadow_pgtable = false; - is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, - &walker, fault->user, &vcpu->arch.write_fault_to_shadow_pgtable); + &walker, fault->user, &fault->write_fault_to_shadow_pgtable); if (is_self_change_mapping) fault->max_level = PG_LEVEL_4K; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7713420abab0..ff7f398a0c6a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8463,7 +8463,6 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) } static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - bool write_fault_to_shadow_pgtable, int emulation_type) { gpa_t gpa = cr2_or_gpa; @@ -8534,7 +8533,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, * be fixed by unprotecting shadow page and it should * be reported to userspace. */ - return !write_fault_to_shadow_pgtable; + return !(emulation_type & EMULTYPE_WRITE_PF_TO_SP); } static bool retry_instruction(struct x86_emulate_ctxt *ctxt, @@ -8782,20 +8781,12 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int r; struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; bool writeback = true; - bool write_fault_to_spt; if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len))) return 1; vcpu->arch.l1tf_flush_l1d = true; - /* - * Clear write_fault_to_shadow_pgtable here to ensure it is - * never reused. - */ - write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; - vcpu->arch.write_fault_to_shadow_pgtable = false; - if (!(emulation_type & EMULTYPE_NO_DECODE)) { kvm_clear_exception_queue(vcpu); @@ -8816,7 +8807,6 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, return 1; } if (reexecute_instruction(vcpu, cr2_or_gpa, - write_fault_to_spt, emulation_type)) return 1; @@ -8895,8 +8885,7 @@ restart: return 1; if (r == EMULATION_FAILED) { - if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, - emulation_type)) + if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type)) return 1; return handle_emulation_failure(vcpu, emulation_type); -- cgit v1.2.3 From 39fda5d873eb1f59613ce49249ee2effea9f8e06 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Feb 2023 18:28:16 +0000 Subject: KVM: x86/mmu: Detect write #PF to shadow pages during FNAME(fetch) walk Move the detection of write #PF to shadow pages, i.e. a fault on a write to a page table that is being shadowed by KVM that is used to translate the write itself, from FNAME(is_self_change_mapping) to FNAME(fetch). There is no need to detect the self-referential write before kvm_faultin_pfn() as KVM does not consume EMULTYPE_WRITE_PF_TO_SP for accesses that resolve to "error or no-slot" pfns, i.e. KVM doesn't allow retrying MMIO accesses or writes to read-only memslots. Detecting the EMULTYPE_WRITE_PF_TO_SP scenario in FNAME(fetch) will allow dropping FNAME(is_self_change_mapping) entirely, as the hugepage interaction can be deferred to kvm_mmu_hugepage_adjust(). 
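For illustration, the detection itself reduces to comparing the gfn targeted by the write against the gfn of each guest page table visited during the walk. A minimal standalone sketch of that idea, using hypothetical names rather than KVM's actual structures:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* Hypothetical fault descriptor, loosely mirroring the fields discussed above. */
struct fault_info {
	gfn_t gfn;			/* gfn targeted by the faulting write */
	bool write;			/* the access is a write */
	bool write_fault_to_shadow_pgtable;
};

/*
 * If the write lands on any of the table gfns that were used to translate
 * the access, the guest is modifying its own translation.
 */
static void detect_self_referential_write(struct fault_info *fault,
					  const gfn_t *table_gfns, int nr_levels)
{
	int i;

	if (!fault->write)
		return;

	for (i = 0; i < nr_levels; i++) {
		if (table_gfns[i] == fault->gfn) {
			fault->write_fault_to_shadow_pgtable = true;
			break;
		}
	}
}
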
Cc: Huang Hang Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20221213125538.81209-1-jiangshanlai@gmail.com [sean: split to separate patch, write changelog] Signed-off-by: Sean Christopherson Message-Id: <20230202182817.407394-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/paging_tmpl.h | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 5d2958299b4f..f57d9074fb9b 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -685,6 +685,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, if (sp != ERR_PTR(-EEXIST)) link_shadow_page(vcpu, it.sptep, sp); + + if (fault->write && table_gfn == fault->gfn) + fault->write_fault_to_shadow_pgtable = true; } kvm_mmu_hugepage_adjust(vcpu, fault); @@ -741,17 +744,13 @@ out_gpte_changed: * created when kvm establishes shadow page table that stop kvm using large * page size. Do it early can avoid unnecessary #PF and emulation. * - * @write_fault_to_shadow_pgtable will return true if the fault gfn is - * currently used as its page table. - * * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok * since the PDPT is always shadowed, that means, we can not use large page * size to map the gfn which is used as PDPT. */ static bool FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, - struct guest_walker *walker, bool user_fault, - bool *write_fault_to_shadow_pgtable) + struct guest_walker *walker, bool user_fault) { int level; gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1); @@ -765,7 +764,6 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1]; self_changed |= !(gfn & mask); - *write_fault_to_shadow_pgtable |= !gfn; } return self_changed; @@ -826,7 +824,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault return r; is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, - &walker, fault->user, &fault->write_fault_to_shadow_pgtable); + &walker, fault->user); if (is_self_change_mapping) fault->max_level = PG_LEVEL_4K; -- cgit v1.2.3 From 9a967700496feac1f006ffd97ffe2d82b6aac5ac Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 2 Feb 2023 18:28:17 +0000 Subject: KVM: x86/mmu: Remove FNAME(is_self_change_mapping) Drop FNAME(is_self_change_mapping) and instead rely on kvm_mmu_hugepage_adjust() to adjust the hugepage accordingly. Prior to commit 4cd071d13c5c ("KVM: x86/mmu: Move calls to thp_adjust() down a level"), the hugepage adjustment was done before allocating new shadow pages, i.e. failed to restrict the hugepage sizes if a new shadow page resulted in account_shadowed() changing the disallowed hugepage tracking. Removing FNAME(is_self_change_mapping) fixes a bug reported by Huang Hang where KVM unnecessarily forces a 4KiB page. FNAME(is_self_change_mapping) has a defect in that it blindly disables _all_ hugepage mappings rather than trying to reduce the size of the hugepage. If the guest is writing to a 1GiB page and the 1GiB is self-referential but a 2MiB page is not, then KVM can and should create a 2MiB mapping. Add a comment above the call to kvm_mmu_hugepage_adjust() to call out the new dependency on adjusting the hugepage size after walking indirect PTEs. 
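As a minimal standalone sketch of why deferring to the hugepage-adjust step is a strict improvement, the logic amounts to walking down from the largest candidate level and stopping at the first size that does not overlap a disallowed (shadowed) range, rather than unconditionally dropping to 4KiB. Hypothetical names are used throughout, and a caller-supplied predicate stands in for KVM's disallowed-hugepage tracking:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

enum map_level { LEVEL_4K = 1, LEVEL_2M = 2, LEVEL_1G = 3 };

/* Number of 4KiB pages covered by a mapping at @level (x86-style 9-bit steps). */
static gfn_t pages_per_level(enum map_level level)
{
	return (gfn_t)1 << ((level - 1) * 9);
}

/*
 * Return the largest level, capped at @max_level, at which @gfn can be
 * mapped.  @range_is_disallowed answers "does this range contain a gfn that
 * is being shadowed and therefore cannot be covered by a hugepage?".
 */
static enum map_level adjust_hugepage_level(gfn_t gfn, enum map_level max_level,
					    bool (*range_is_disallowed)(gfn_t base, gfn_t nr))
{
	enum map_level level;

	for (level = max_level; level > LEVEL_4K; level--) {
		gfn_t nr = pages_per_level(level);
		gfn_t base = gfn & ~(nr - 1);

		if (!range_is_disallowed(base, nr))
			return level;
	}

	return LEVEL_4K;
}
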
Reported-by: Huang Hang Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20221213125538.81209-1-jiangshanlai@gmail.com [sean: rework changelog after separating out the emulator change] Signed-off-by: Sean Christopherson Message-Id: <20230202182817.407394-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu/paging_tmpl.h | 51 ++++++------------------------------------ 1 file changed, 7 insertions(+), 44 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index f57d9074fb9b..a056f2773dd9 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -690,6 +690,12 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, fault->write_fault_to_shadow_pgtable = true; } + /* + * Adjust the hugepage size _after_ resolving indirect shadow pages. + * KVM doesn't support mapping hugepages into the guest for gfns that + * are being shadowed by KVM, i.e. allocating a new shadow page may + * affect the allowed hugepage size. + */ kvm_mmu_hugepage_adjust(vcpu, fault); trace_kvm_mmu_spte_requested(fault); @@ -734,41 +740,6 @@ out_gpte_changed: return RET_PF_RETRY; } - /* - * To see whether the mapped gfn can write its page table in the current - * mapping. - * - * It is the helper function of FNAME(page_fault). When guest uses large page - * size to map the writable gfn which is used as current page table, we should - * force kvm to use small page size to map it because new shadow page will be - * created when kvm establishes shadow page table that stop kvm using large - * page size. Do it early can avoid unnecessary #PF and emulation. - * - * Note: the PDPT page table is not checked for PAE-32 bit guest. It is ok - * since the PDPT is always shadowed, that means, we can not use large page - * size to map the gfn which is used as PDPT. - */ -static bool -FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, - struct guest_walker *walker, bool user_fault) -{ - int level; - gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1); - bool self_changed = false; - - if (!(walker->pte_access & ACC_WRITE_MASK || - (!is_cr0_wp(vcpu->arch.mmu) && !user_fault))) - return false; - - for (level = walker->level; level <= walker->max_level; level++) { - gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1]; - - self_changed |= !(gfn & mask); - } - - return self_changed; -} - /* * Page fault handler. 
There are several causes for a page fault: * - there is no shadow pte for the guest pte @@ -787,7 +758,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault { struct guest_walker walker; int r; - bool is_self_change_mapping; pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code); WARN_ON_ONCE(fault->is_tdp); @@ -812,6 +782,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault } fault->gfn = walker.gfn; + fault->max_level = walker.level; fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn); if (page_fault_handle_page_track(vcpu, fault)) { @@ -823,14 +794,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (r) return r; - is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, - &walker, fault->user); - - if (is_self_change_mapping) - fault->max_level = PG_LEVEL_4K; - else - fault->max_level = walker.level; - r = kvm_faultin_pfn(vcpu, fault, walker.pte_access); if (r != RET_PF_CONTINUE) return r; -- cgit v1.2.3 From 68ac4221497b9a54f32c452a774ae747da908a81 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Sat, 11 Feb 2023 00:35:32 +0000 Subject: KVM: nVMX: Move EVMCS1_SUPPORT_* macros to hyperv.c Move the macros that define the set of VMCS controls that are supported by eVMCS1 from hyperv.h to hyperv.c, i.e. make them "private". The macros should never be consumed directly by KVM at-large since the "final" set of supported controls depends on guest CPUID. No functional change intended. Reviewed-by: Vitaly Kuznetsov Signed-off-by: Sean Christopherson Message-Id: <20230211003534.564198-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/hyperv.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/vmx/hyperv.h | 105 ---------------------------------------------- 2 files changed, 105 insertions(+), 105 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c index 22daca752797..b6748055c586 100644 --- a/arch/x86/kvm/vmx/hyperv.c +++ b/arch/x86/kvm/vmx/hyperv.c @@ -13,6 +13,111 @@ #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK +/* + * Enlightened VMCSv1 doesn't support these: + * + * POSTED_INTR_NV = 0x00000002, + * GUEST_INTR_STATUS = 0x00000810, + * APIC_ACCESS_ADDR = 0x00002014, + * POSTED_INTR_DESC_ADDR = 0x00002016, + * EOI_EXIT_BITMAP0 = 0x0000201c, + * EOI_EXIT_BITMAP1 = 0x0000201e, + * EOI_EXIT_BITMAP2 = 0x00002020, + * EOI_EXIT_BITMAP3 = 0x00002022, + * GUEST_PML_INDEX = 0x00000812, + * PML_ADDRESS = 0x0000200e, + * VM_FUNCTION_CONTROL = 0x00002018, + * EPTP_LIST_ADDRESS = 0x00002024, + * VMREAD_BITMAP = 0x00002026, + * VMWRITE_BITMAP = 0x00002028, + * + * TSC_MULTIPLIER = 0x00002032, + * PLE_GAP = 0x00004020, + * PLE_WINDOW = 0x00004022, + * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, + * + * Currently unsupported in KVM: + * GUEST_IA32_RTIT_CTL = 0x00002814, + */ +#define EVMCS1_SUPPORTED_PINCTRL \ + (PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \ + PIN_BASED_EXT_INTR_MASK | \ + PIN_BASED_NMI_EXITING | \ + PIN_BASED_VIRTUAL_NMIS) + +#define EVMCS1_SUPPORTED_EXEC_CTRL \ + (CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \ + CPU_BASED_HLT_EXITING | \ + CPU_BASED_CR3_LOAD_EXITING | \ + CPU_BASED_CR3_STORE_EXITING | \ + CPU_BASED_UNCOND_IO_EXITING | \ + CPU_BASED_MOV_DR_EXITING | \ + CPU_BASED_USE_TSC_OFFSETTING | \ + CPU_BASED_MWAIT_EXITING | \ + CPU_BASED_MONITOR_EXITING | \ + CPU_BASED_INVLPG_EXITING | \ + CPU_BASED_RDPMC_EXITING | \ + CPU_BASED_INTR_WINDOW_EXITING | \ + 
CPU_BASED_CR8_LOAD_EXITING | \ + CPU_BASED_CR8_STORE_EXITING | \ + CPU_BASED_RDTSC_EXITING | \ + CPU_BASED_TPR_SHADOW | \ + CPU_BASED_USE_IO_BITMAPS | \ + CPU_BASED_MONITOR_TRAP_FLAG | \ + CPU_BASED_USE_MSR_BITMAPS | \ + CPU_BASED_NMI_WINDOW_EXITING | \ + CPU_BASED_PAUSE_EXITING | \ + CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) + +#define EVMCS1_SUPPORTED_2NDEXEC \ + (SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \ + SECONDARY_EXEC_WBINVD_EXITING | \ + SECONDARY_EXEC_ENABLE_VPID | \ + SECONDARY_EXEC_ENABLE_EPT | \ + SECONDARY_EXEC_UNRESTRICTED_GUEST | \ + SECONDARY_EXEC_DESC | \ + SECONDARY_EXEC_ENABLE_RDTSCP | \ + SECONDARY_EXEC_ENABLE_INVPCID | \ + SECONDARY_EXEC_XSAVES | \ + SECONDARY_EXEC_RDSEED_EXITING | \ + SECONDARY_EXEC_RDRAND_EXITING | \ + SECONDARY_EXEC_TSC_SCALING | \ + SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \ + SECONDARY_EXEC_PT_USE_GPA | \ + SECONDARY_EXEC_PT_CONCEAL_VMX | \ + SECONDARY_EXEC_BUS_LOCK_DETECTION | \ + SECONDARY_EXEC_NOTIFY_VM_EXITING | \ + SECONDARY_EXEC_ENCLS_EXITING) + +#define EVMCS1_SUPPORTED_3RDEXEC (0ULL) + +#define EVMCS1_SUPPORTED_VMEXIT_CTRL \ + (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | \ + VM_EXIT_SAVE_DEBUG_CONTROLS | \ + VM_EXIT_ACK_INTR_ON_EXIT | \ + VM_EXIT_HOST_ADDR_SPACE_SIZE | \ + VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \ + VM_EXIT_SAVE_IA32_PAT | \ + VM_EXIT_LOAD_IA32_PAT | \ + VM_EXIT_SAVE_IA32_EFER | \ + VM_EXIT_LOAD_IA32_EFER | \ + VM_EXIT_CLEAR_BNDCFGS | \ + VM_EXIT_PT_CONCEAL_PIP | \ + VM_EXIT_CLEAR_IA32_RTIT_CTL) + +#define EVMCS1_SUPPORTED_VMENTRY_CTRL \ + (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | \ + VM_ENTRY_LOAD_DEBUG_CONTROLS | \ + VM_ENTRY_IA32E_MODE | \ + VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \ + VM_ENTRY_LOAD_IA32_PAT | \ + VM_ENTRY_LOAD_IA32_EFER | \ + VM_ENTRY_LOAD_BNDCFGS | \ + VM_ENTRY_PT_CONCEAL_PIP | \ + VM_ENTRY_LOAD_IA32_RTIT_CTL) + +#define EVMCS1_SUPPORTED_VMFUNC (0) + DEFINE_STATIC_KEY_FALSE(enable_evmcs); #define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x) diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h index 78d17667e7ec..1299143d00df 100644 --- a/arch/x86/kvm/vmx/hyperv.h +++ b/arch/x86/kvm/vmx/hyperv.h @@ -22,111 +22,6 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs); #define KVM_EVMCS_VERSION 1 -/* - * Enlightened VMCSv1 doesn't support these: - * - * POSTED_INTR_NV = 0x00000002, - * GUEST_INTR_STATUS = 0x00000810, - * APIC_ACCESS_ADDR = 0x00002014, - * POSTED_INTR_DESC_ADDR = 0x00002016, - * EOI_EXIT_BITMAP0 = 0x0000201c, - * EOI_EXIT_BITMAP1 = 0x0000201e, - * EOI_EXIT_BITMAP2 = 0x00002020, - * EOI_EXIT_BITMAP3 = 0x00002022, - * GUEST_PML_INDEX = 0x00000812, - * PML_ADDRESS = 0x0000200e, - * VM_FUNCTION_CONTROL = 0x00002018, - * EPTP_LIST_ADDRESS = 0x00002024, - * VMREAD_BITMAP = 0x00002026, - * VMWRITE_BITMAP = 0x00002028, - * - * TSC_MULTIPLIER = 0x00002032, - * PLE_GAP = 0x00004020, - * PLE_WINDOW = 0x00004022, - * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E, - * - * Currently unsupported in KVM: - * GUEST_IA32_RTIT_CTL = 0x00002814, - */ -#define EVMCS1_SUPPORTED_PINCTRL \ - (PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \ - PIN_BASED_EXT_INTR_MASK | \ - PIN_BASED_NMI_EXITING | \ - PIN_BASED_VIRTUAL_NMIS) - -#define EVMCS1_SUPPORTED_EXEC_CTRL \ - (CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \ - CPU_BASED_HLT_EXITING | \ - CPU_BASED_CR3_LOAD_EXITING | \ - CPU_BASED_CR3_STORE_EXITING | \ - CPU_BASED_UNCOND_IO_EXITING | \ - CPU_BASED_MOV_DR_EXITING | \ - CPU_BASED_USE_TSC_OFFSETTING | \ - CPU_BASED_MWAIT_EXITING | \ - CPU_BASED_MONITOR_EXITING | \ - CPU_BASED_INVLPG_EXITING | \ - CPU_BASED_RDPMC_EXITING | \ - 
CPU_BASED_INTR_WINDOW_EXITING | \ - CPU_BASED_CR8_LOAD_EXITING | \ - CPU_BASED_CR8_STORE_EXITING | \ - CPU_BASED_RDTSC_EXITING | \ - CPU_BASED_TPR_SHADOW | \ - CPU_BASED_USE_IO_BITMAPS | \ - CPU_BASED_MONITOR_TRAP_FLAG | \ - CPU_BASED_USE_MSR_BITMAPS | \ - CPU_BASED_NMI_WINDOW_EXITING | \ - CPU_BASED_PAUSE_EXITING | \ - CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) - -#define EVMCS1_SUPPORTED_2NDEXEC \ - (SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \ - SECONDARY_EXEC_WBINVD_EXITING | \ - SECONDARY_EXEC_ENABLE_VPID | \ - SECONDARY_EXEC_ENABLE_EPT | \ - SECONDARY_EXEC_UNRESTRICTED_GUEST | \ - SECONDARY_EXEC_DESC | \ - SECONDARY_EXEC_ENABLE_RDTSCP | \ - SECONDARY_EXEC_ENABLE_INVPCID | \ - SECONDARY_EXEC_XSAVES | \ - SECONDARY_EXEC_RDSEED_EXITING | \ - SECONDARY_EXEC_RDRAND_EXITING | \ - SECONDARY_EXEC_TSC_SCALING | \ - SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \ - SECONDARY_EXEC_PT_USE_GPA | \ - SECONDARY_EXEC_PT_CONCEAL_VMX | \ - SECONDARY_EXEC_BUS_LOCK_DETECTION | \ - SECONDARY_EXEC_NOTIFY_VM_EXITING | \ - SECONDARY_EXEC_ENCLS_EXITING) - -#define EVMCS1_SUPPORTED_3RDEXEC (0ULL) - -#define EVMCS1_SUPPORTED_VMEXIT_CTRL \ - (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | \ - VM_EXIT_SAVE_DEBUG_CONTROLS | \ - VM_EXIT_ACK_INTR_ON_EXIT | \ - VM_EXIT_HOST_ADDR_SPACE_SIZE | \ - VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \ - VM_EXIT_SAVE_IA32_PAT | \ - VM_EXIT_LOAD_IA32_PAT | \ - VM_EXIT_SAVE_IA32_EFER | \ - VM_EXIT_LOAD_IA32_EFER | \ - VM_EXIT_CLEAR_BNDCFGS | \ - VM_EXIT_PT_CONCEAL_PIP | \ - VM_EXIT_CLEAR_IA32_RTIT_CTL) - -#define EVMCS1_SUPPORTED_VMENTRY_CTRL \ - (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | \ - VM_ENTRY_LOAD_DEBUG_CONTROLS | \ - VM_ENTRY_IA32E_MODE | \ - VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \ - VM_ENTRY_LOAD_IA32_PAT | \ - VM_ENTRY_LOAD_IA32_EFER | \ - VM_ENTRY_LOAD_BNDCFGS | \ - VM_ENTRY_PT_CONCEAL_PIP | \ - VM_ENTRY_LOAD_IA32_RTIT_CTL) - -#define EVMCS1_SUPPORTED_VMFUNC (0) - struct evmcs_field { u16 offset; u16 clean_field; -- cgit v1.2.3 From 19f10315fd53599d8186aa46a0363b233292eb3d Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Sat, 11 Feb 2023 00:35:33 +0000 Subject: KVM: VMX: Stub out enable_evmcs static key for CONFIG_HYPERV=n Wrap enable_evmcs in a helper and stub it out when CONFIG_HYPERV=n in order to eliminate the static branch nop placeholders. clang-14 is clever enough to elide the nop, but gcc-12 is not. Stubbing out the key reduces the size of kvm-intel.ko by ~7.5% (200KiB) when compiled with gcc-12 (there are a _lot_ of VMCS accesses throughout KVM). 
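The shape of the change is the usual config-gated static-key wrapper. A minimal sketch of the pattern with generic names (CONFIG_MY_FEATURE, my_feature_key and my_feature_enabled() are placeholders, not the eVMCS code itself):

#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/jump_label.h>

#if IS_ENABLED(CONFIG_MY_FEATURE)

DECLARE_STATIC_KEY_FALSE(my_feature_key);

static __always_inline bool my_feature_enabled(void)
{
	/* Patched to a direct branch at runtime; callers pay at most a nop. */
	return static_branch_unlikely(&my_feature_key);
}

#else /* !IS_ENABLED(CONFIG_MY_FEATURE) */

/*
 * A constant "false" lets the compiler drop every
 * "if (my_feature_enabled())" block outright, nop placeholders included.
 */
static __always_inline bool my_feature_enabled(void) { return false; }

#endif
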
Reviewed-by: Vitaly Kuznetsov Signed-off-by: Sean Christopherson Message-Id: <20230211003534.564198-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/hyperv.c | 4 ++-- arch/x86/kvm/vmx/hyperv.h | 10 ++++++++-- arch/x86/kvm/vmx/vmx.c | 15 +++++++-------- arch/x86/kvm/vmx/vmx_ops.h | 22 +++++++++++----------- 4 files changed, 28 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c index b6748055c586..274fbd38c64e 100644 --- a/arch/x86/kvm/vmx/hyperv.c +++ b/arch/x86/kvm/vmx/hyperv.c @@ -118,8 +118,6 @@ #define EVMCS1_SUPPORTED_VMFUNC (0) -DEFINE_STATIC_KEY_FALSE(enable_evmcs); - #define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x) #define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \ {EVMCS1_OFFSET(name), clean_field} @@ -611,6 +609,8 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12) } #if IS_ENABLED(CONFIG_HYPERV) +DEFINE_STATIC_KEY_FALSE(enable_evmcs); + /* * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption * is: in case a feature has corresponding fields in eVMCS described and it was diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h index 1299143d00df..a54a2fdf0a5b 100644 --- a/arch/x86/kvm/vmx/hyperv.h +++ b/arch/x86/kvm/vmx/hyperv.h @@ -16,8 +16,6 @@ struct vmcs_config; -DECLARE_STATIC_KEY_FALSE(enable_evmcs); - #define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs)) #define KVM_EVMCS_VERSION 1 @@ -69,6 +67,13 @@ static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs, #if IS_ENABLED(CONFIG_HYPERV) +DECLARE_STATIC_KEY_FALSE(enable_evmcs); + +static __always_inline bool kvm_is_using_evmcs(void) +{ + return static_branch_unlikely(&enable_evmcs); +} + static __always_inline int get_evmcs_offset(unsigned long field, u16 *clean_field) { @@ -158,6 +163,7 @@ static inline void evmcs_load(u64 phys_addr) void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf); #else /* !IS_ENABLED(CONFIG_HYPERV) */ +static __always_inline bool kvm_is_using_evmcs(void) { return false; } static __always_inline void evmcs_write64(unsigned long field, u64 value) {} static __always_inline void evmcs_write32(unsigned long field, u32 value) {} static __always_inline void evmcs_write16(unsigned long field, u16 value) {} diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d2d6e1b6c788..09e07fb83c37 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -595,7 +595,7 @@ static void hv_reset_evmcs(void) { struct hv_vp_assist_page *vp_ap; - if (!static_branch_unlikely(&enable_evmcs)) + if (!kvm_is_using_evmcs()) return; /* @@ -2816,8 +2816,7 @@ static int vmx_hardware_enable(void) * This can happen if we hot-added a CPU but failed to allocate * VP assist page for it. */ - if (static_branch_unlikely(&enable_evmcs) && - !hv_get_vp_assist_page(cpu)) + if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu)) return -EFAULT; intel_pt_handle_vmx(1); @@ -2869,7 +2868,7 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags) memset(vmcs, 0, vmcs_config.size); /* KVM supports Enlightened VMCS v1 only */ - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) vmcs->hdr.revision_id = KVM_EVMCS_VERSION; else vmcs->hdr.revision_id = vmcs_config.revision_id; @@ -2964,7 +2963,7 @@ static __init int alloc_kvm_area(void) * still be marked with revision_id reported by * physical CPU. 
*/ - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) vmcs->hdr.revision_id = vmcs_config.revision_id; per_cpu(vmxarea, cpu) = vmcs; @@ -3931,7 +3930,7 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx) * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR * bitmap has changed. */ - if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs)) { + if (kvm_is_using_evmcs()) { struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs; if (evmcs->hv_enlightenments_control.msr_bitmap) @@ -7310,7 +7309,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx)); /* All fields are clean at this point */ - if (static_branch_unlikely(&enable_evmcs)) { + if (kvm_is_using_evmcs()) { current_evmcs->hv_clean_fields |= HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; @@ -7440,7 +7439,7 @@ static int vmx_vcpu_create(struct kvm_vcpu *vcpu) * feature only for vmcs01, KVM currently isn't equipped to realize any * performance benefits from enabling it for vmcs02. */ - if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) && + if (kvm_is_using_evmcs() && (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) { struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs; diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h index db95bde52998..ce47dc265f89 100644 --- a/arch/x86/kvm/vmx/vmx_ops.h +++ b/arch/x86/kvm/vmx/vmx_ops.h @@ -147,7 +147,7 @@ do_exception: static __always_inline u16 vmcs_read16(unsigned long field) { vmcs_check16(field); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return evmcs_read16(field); return __vmcs_readl(field); } @@ -155,7 +155,7 @@ static __always_inline u16 vmcs_read16(unsigned long field) static __always_inline u32 vmcs_read32(unsigned long field) { vmcs_check32(field); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return evmcs_read32(field); return __vmcs_readl(field); } @@ -163,7 +163,7 @@ static __always_inline u32 vmcs_read32(unsigned long field) static __always_inline u64 vmcs_read64(unsigned long field) { vmcs_check64(field); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return evmcs_read64(field); #ifdef CONFIG_X86_64 return __vmcs_readl(field); @@ -175,7 +175,7 @@ static __always_inline u64 vmcs_read64(unsigned long field) static __always_inline unsigned long vmcs_readl(unsigned long field) { vmcs_checkl(field); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return evmcs_read64(field); return __vmcs_readl(field); } @@ -222,7 +222,7 @@ static __always_inline void __vmcs_writel(unsigned long field, unsigned long val static __always_inline void vmcs_write16(unsigned long field, u16 value) { vmcs_check16(field); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return evmcs_write16(field, value); __vmcs_writel(field, value); @@ -231,7 +231,7 @@ static __always_inline void vmcs_write16(unsigned long field, u16 value) static __always_inline void vmcs_write32(unsigned long field, u32 value) { vmcs_check32(field); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return evmcs_write32(field, value); __vmcs_writel(field, value); @@ -240,7 +240,7 @@ static __always_inline void vmcs_write32(unsigned long field, u32 value) static __always_inline void vmcs_write64(unsigned long field, u64 value) { vmcs_check64(field); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return 
evmcs_write64(field, value); __vmcs_writel(field, value); @@ -252,7 +252,7 @@ static __always_inline void vmcs_write64(unsigned long field, u64 value) static __always_inline void vmcs_writel(unsigned long field, unsigned long value) { vmcs_checkl(field); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return evmcs_write64(field, value); __vmcs_writel(field, value); @@ -262,7 +262,7 @@ static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, "vmcs_clear_bits does not support 64-bit fields"); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return evmcs_write32(field, evmcs_read32(field) & ~mask); __vmcs_writel(field, __vmcs_readl(field) & ~mask); @@ -272,7 +272,7 @@ static __always_inline void vmcs_set_bits(unsigned long field, u32 mask) { BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000, "vmcs_set_bits does not support 64-bit fields"); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return evmcs_write32(field, evmcs_read32(field) | mask); __vmcs_writel(field, __vmcs_readl(field) | mask); @@ -289,7 +289,7 @@ static inline void vmcs_load(struct vmcs *vmcs) { u64 phys_addr = __pa(vmcs); - if (static_branch_unlikely(&enable_evmcs)) + if (kvm_is_using_evmcs()) return evmcs_load(phys_addr); vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr); -- cgit v1.2.3 From fbc722aac1ce66960de50c0f488b6ff865a41d74 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Sat, 11 Feb 2023 00:35:34 +0000 Subject: KVM: VMX: Rename "KVM is using eVMCS" static key to match its wrapper Rename enable_evmcs to __kvm_is_using_evmcs to match its wrapper, and to avoid confusion with enabling eVMCS for nested virtualization, i.e. have "enable eVMCS" be reserved for "enable eVMCS support for L1". No functional change intended. 
Signed-off-by: Sean Christopherson Message-Id: <20230211003534.564198-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/hyperv.c | 2 +- arch/x86/kvm/vmx/hyperv.h | 4 ++-- arch/x86/kvm/vmx/vmx.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c index 274fbd38c64e..79450e1ed7cf 100644 --- a/arch/x86/kvm/vmx/hyperv.c +++ b/arch/x86/kvm/vmx/hyperv.c @@ -609,7 +609,7 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12) } #if IS_ENABLED(CONFIG_HYPERV) -DEFINE_STATIC_KEY_FALSE(enable_evmcs); +DEFINE_STATIC_KEY_FALSE(__kvm_is_using_evmcs); /* * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h index a54a2fdf0a5b..9623fe1651c4 100644 --- a/arch/x86/kvm/vmx/hyperv.h +++ b/arch/x86/kvm/vmx/hyperv.h @@ -67,11 +67,11 @@ static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs, #if IS_ENABLED(CONFIG_HYPERV) -DECLARE_STATIC_KEY_FALSE(enable_evmcs); +DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs); static __always_inline bool kvm_is_using_evmcs(void) { - return static_branch_unlikely(&enable_evmcs); + return static_branch_unlikely(&__kvm_is_using_evmcs); } static __always_inline int get_evmcs_offset(unsigned long field, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 09e07fb83c37..f777509ecf17 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -579,7 +579,7 @@ static __init void hv_init_evmcs(void) if (enlightened_vmcs) { pr_info("Using Hyper-V Enlightened VMCS\n"); - static_branch_enable(&enable_evmcs); + static_branch_enable(&__kvm_is_using_evmcs); } if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) -- cgit v1.2.3 From a807b78ad04b2eaa348f52f5cc7702385b6de1ee Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Wed, 1 Feb 2023 08:29:03 -0500 Subject: kvm: vmx: Add IA32_FLUSH_CMD guest support Expose IA32_FLUSH_CMD to the guest if the guest CPUID enumerates support for this MSR. As with IA32_PRED_CMD, permission for unintercepted writes to this MSR will be granted to the guest after the first non-zero write. 
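Conceptually, the flow for this class of write-only "command" MSRs is: reject the write if the guest cannot see the MSR or sets reserved bits, treat a zero write as a nop, and after the first real command stop intercepting so later writes go straight to hardware. A minimal standalone sketch with hypothetical helper names (the actual handling is the vmx_set_msr_ia32_cmd() helper added in the diff below):

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical handler for a guest write to a write-only command MSR such as
 * IA32_FLUSH_CMD.  @valid_bits is the set of command bits supported by the
 * host (e.g. L1D_FLUSH), @guest_has_feat reflects guest CPUID.  Returns
 * non-zero to signal that the write should fault.
 */
static int cmd_msr_write(uint64_t data, uint64_t valid_bits,
			 bool host_initiated, bool guest_has_feat,
			 bool host_has_feat, bool *stop_intercepting)
{
	if (!host_initiated && !guest_has_feat)
		return 1;		/* MSR not enumerated to the guest */

	if (data & ~valid_bits)
		return 1;		/* reserved bits set */

	if (!host_has_feat)
		return 1;		/* host CPU can't execute the command */

	if (!data)
		return 0;		/* writing zero is a legal nop */

	/*
	 * Issue the command on the guest's behalf (elided here), then grant
	 * the guest unintercepted access for all future writes.
	 */
	*stop_intercepting = true;
	return 0;
}
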
Co-developed-by: Jim Mattson Signed-off-by: Jim Mattson Signed-off-by: Emanuele Giuseppe Esposito Message-Id: <20230201132905.549148-2-eesposit@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/nested.c | 3 +++ arch/x86/kvm/vmx/vmx.c | 68 ++++++++++++++++++++++++++++++----------------- 2 files changed, 46 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 1bc2b80273c9..f63b28f46a71 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -654,6 +654,9 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, MSR_IA32_PRED_CMD, MSR_TYPE_W); + nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, + MSR_IA32_FLUSH_CMD, MSR_TYPE_W); + kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false); vmx->nested.force_msr_bitmap_recalc = false; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index f777509ecf17..d7bf14abdba1 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2133,6 +2133,39 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated return debugctl; } +static int vmx_set_msr_ia32_cmd(struct kvm_vcpu *vcpu, + struct msr_data *msr_info, + bool guest_has_feat, u64 cmd, + int x86_feature_bit) +{ + if (!msr_info->host_initiated && !guest_has_feat) + return 1; + + if (!(msr_info->data & ~cmd)) + return 1; + if (!boot_cpu_has(x86_feature_bit)) + return 1; + if (!msr_info->data) + return 0; + + wrmsrl(msr_info->index, cmd); + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_vmx_prepare_msr_bitmap. We should not touch the + * vmcs02.msr_bitmap here since it gets completely overwritten + * in the merging. + */ + vmx_disable_intercept_for_msr(vcpu, msr_info->index, MSR_TYPE_W); + + return 0; +} + /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. @@ -2286,31 +2319,16 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; goto find_uret_msr; case MSR_IA32_PRED_CMD: - if (!msr_info->host_initiated && - !guest_has_pred_cmd_msr(vcpu)) - return 1; - - if (data & ~PRED_CMD_IBPB) - return 1; - if (!boot_cpu_has(X86_FEATURE_IBPB)) - return 1; - if (!data) - break; - - wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); - - /* - * For non-nested: - * When it's written (to non-zero) for the first time, pass - * it through. - * - * For nested: - * The handling of the MSR bitmap for L2 guests is done in - * nested_vmx_prepare_msr_bitmap. We should not touch the - * vmcs02.msr_bitmap here since it gets completely overwritten - * in the merging. - */ - vmx_disable_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W); + ret = vmx_set_msr_ia32_cmd(vcpu, msr_info, + guest_has_pred_cmd_msr(vcpu), + PRED_CMD_IBPB, + X86_FEATURE_IBPB); + break; + case MSR_IA32_FLUSH_CMD: + ret = vmx_set_msr_ia32_cmd(vcpu, msr_info, + guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D), + L1D_FLUSH, + X86_FEATURE_FLUSH_L1D); break; case MSR_IA32_CR_PAT: if (!kvm_pat_valid(data)) -- cgit v1.2.3 From 723d5fb0ffe4c02bd4edf47ea02c02e454719f28 Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Wed, 1 Feb 2023 08:29:04 -0500 Subject: kvm: svm: Add IA32_FLUSH_CMD guest support Expose IA32_FLUSH_CMD to the guest if the guest CPUID enumerates support for this MSR. 
As with IA32_PRED_CMD, permission for unintercepted writes to this MSR will be granted to the guest after the first non-zero write. Signed-off-by: Emanuele Giuseppe Esposito Message-Id: <20230201132905.549148-3-eesposit@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 43 ++++++++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 252e7f37e4e2..70183d2271b5 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2869,6 +2869,28 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) return 0; } +static int svm_set_msr_ia32_cmd(struct kvm_vcpu *vcpu, struct msr_data *msr, + bool guest_has_feat, u64 cmd, + int x86_feature_bit) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if (!msr->host_initiated && !guest_has_feat) + return 1; + + if (!(msr->data & ~cmd)) + return 1; + if (!boot_cpu_has(x86_feature_bit)) + return 1; + if (!msr->data) + return 0; + + wrmsrl(msr->index, cmd); + set_msr_interception(vcpu, svm->msrpm, msr->index, 0, 1); + + return 0; +} + static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct vcpu_svm *svm = to_svm(vcpu); @@ -2943,19 +2965,14 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); break; case MSR_IA32_PRED_CMD: - if (!msr->host_initiated && - !guest_has_pred_cmd_msr(vcpu)) - return 1; - - if (data & ~PRED_CMD_IBPB) - return 1; - if (!boot_cpu_has(X86_FEATURE_IBPB)) - return 1; - if (!data) - break; - - wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); - set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); + r = svm_set_msr_ia32_cmd(vcpu, msr, + guest_has_pred_cmd_msr(vcpu), + PRED_CMD_IBPB, X86_FEATURE_IBPB); + break; + case MSR_IA32_FLUSH_CMD: + r = svm_set_msr_ia32_cmd(vcpu, msr, + guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D), + L1D_FLUSH, X86_FEATURE_FLUSH_L1D); break; case MSR_AMD64_VIRT_SPEC_CTRL: if (!msr->host_initiated && -- cgit v1.2.3 From 45cf86f26148e549c5ba4a8ab32a390e4bde216e Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Wed, 1 Feb 2023 08:29:05 -0500 Subject: kvm: x86: Advertise FLUSH_L1D to user space FLUSH_L1D was already added in 11e34e64e4103, but the feature is not visible to userspace yet. The bit definition: CPUID.(EAX=7,ECX=0):EDX[bit 28] If the feature is supported by the host, kvm should support it too so that userspace can choose whether to expose it to the guest or not. One disadvantage of not exposing it is that the guest will report a non existing vulnerability in /sys/devices/system/cpu/vulnerabilities/mmio_stale_data because the mitigation is present only if the guest supports (FLUSH_L1D and MD_CLEAR) or FB_CLEAR. Signed-off-by: Emanuele Giuseppe Esposito Message-Id: <20230201132905.549148-4-eesposit@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 599aebec2d52..9583a110cf5f 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -653,7 +653,7 @@ void kvm_set_cpu_caps(void) F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) | F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) | - F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) + F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D) ); /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. 
*/ -- cgit v1.2.3 From 67c48662e2b5539f0ba3d4151f4e7e84c9871324 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 8 Feb 2023 15:01:00 +0100 Subject: KVM: PPC: Standardize on "int" return types in the powerpc KVM code Most functions that are related to kvm_arch_vm_ioctl() already use "int" as return type to pass error values back to the caller. Some outlier functions use "long" instead for no good reason (they do not really require long values here). Let's standardize on "int" here to avoid casting the values back and forth between the two types. Reviewed-by: Nicholas Piggin Signed-off-by: Thomas Huth Message-Id: <20230208140105.655814-2-thuth@redhat.com> Signed-off-by: Paolo Bonzini --- arch/powerpc/include/asm/kvm_ppc.h | 14 +++++++------- arch/powerpc/kvm/book3s_64_mmu_hv.c | 14 +++++++------- arch/powerpc/kvm/book3s_64_vio.c | 4 ++-- arch/powerpc/kvm/book3s_hv.c | 6 +++--- arch/powerpc/kvm/book3s_pr.c | 4 ++-- 5 files changed, 21 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 6bef23d6d0e3..5b9f851d4035 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -156,7 +156,7 @@ extern void kvmppc_map_magic(struct kvm_vcpu *vcpu); extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order); extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info); -extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order); +extern int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order); extern void kvmppc_free_hpt(struct kvm_hpt_info *info); extern void kvmppc_rmap_reset(struct kvm *kvm); extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu, @@ -170,7 +170,7 @@ extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm); extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm); extern void kvmppc_setup_partition_table(struct kvm *kvm); -extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, +extern int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce_64 *args); #define kvmppc_ioba_validate(stt, ioba, npages) \ (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \ @@ -211,10 +211,10 @@ extern void kvmppc_bookehv_exit(void); extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu); extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *); -extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, - struct kvm_ppc_resize_hpt *rhpt); -extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, +extern int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, struct kvm_ppc_resize_hpt *rhpt); +extern int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, + struct kvm_ppc_resize_hpt *rhpt); int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); @@ -286,8 +286,8 @@ struct kvmppc_ops { int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu); - long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl, - unsigned long arg); + int (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl, + unsigned long arg); int (*hcall_implemented)(unsigned long hcall); int (*irq_bypass_add_producer)(struct irq_bypass_consumer *, struct irq_bypass_producer *); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 7006bcbc2e37..1f4896de58ca 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -124,9 
+124,9 @@ void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info) info->virt, (long)info->order, kvm->arch.lpid); } -long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) +int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) { - long err = -EBUSY; + int err = -EBUSY; struct kvm_hpt_info info; mutex_lock(&kvm->arch.mmu_setup_lock); @@ -1468,8 +1468,8 @@ static void resize_hpt_prepare_work(struct work_struct *work) mutex_unlock(&kvm->arch.mmu_setup_lock); } -long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, - struct kvm_ppc_resize_hpt *rhpt) +int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, + struct kvm_ppc_resize_hpt *rhpt) { unsigned long flags = rhpt->flags; unsigned long shift = rhpt->shift; @@ -1534,13 +1534,13 @@ static void resize_hpt_boot_vcpu(void *opaque) /* Nothing to do, just force a KVM exit */ } -long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, - struct kvm_ppc_resize_hpt *rhpt) +int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, + struct kvm_ppc_resize_hpt *rhpt) { unsigned long flags = rhpt->flags; unsigned long shift = rhpt->shift; struct kvm_resize_hpt *resize; - long ret; + int ret; if (flags != 0 || kvm_is_radix(kvm)) return -EINVAL; diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 95e738ef9062..93b695b289e9 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -288,8 +288,8 @@ static const struct file_operations kvm_spapr_tce_fops = { .release = kvm_spapr_tce_release, }; -long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, - struct kvm_create_spapr_tce_64 *args) +int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, + struct kvm_create_spapr_tce_64 *args) { struct kvmppc_spapr_tce_table *stt = NULL; struct kvmppc_spapr_tce_table *siter; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 6ba68dd6190b..cd139a1edc67 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -5779,12 +5779,12 @@ static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons, } #endif -static long kvm_arch_vm_ioctl_hv(struct file *filp, - unsigned int ioctl, unsigned long arg) +static int kvm_arch_vm_ioctl_hv(struct file *filp, + unsigned int ioctl, unsigned long arg) { struct kvm *kvm __maybe_unused = filp->private_data; void __user *argp = (void __user *)arg; - long r; + int r; switch (ioctl) { diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 9fc4dd8f66eb..5908b514bfb6 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -2042,8 +2042,8 @@ static int kvmppc_core_check_processor_compat_pr(void) return 0; } -static long kvm_arch_vm_ioctl_pr(struct file *filp, - unsigned int ioctl, unsigned long arg) +static int kvm_arch_vm_ioctl_pr(struct file *filp, + unsigned int ioctl, unsigned long arg) { return -ENOTTY; } -- cgit v1.2.3 From 71fb165e23e3b66e68ca7baab6c317c2663c32d5 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 8 Feb 2023 15:01:01 +0100 Subject: KVM: s390: Use "int" as return type for kvm_s390_get/set_skeys() These two functions only return normal integers, so it does not make sense to declare the return type as "long" here. 
Reviewed-by: Claudio Imbrenda Signed-off-by: Thomas Huth Message-Id: <20230208140105.655814-3-thuth@redhat.com> Signed-off-by: Paolo Bonzini --- arch/s390/kvm/kvm-s390.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 39b36562c043..7039abda6d49 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1989,7 +1989,7 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) return ret; } -static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) +static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) { uint8_t *keys; uint64_t hva; @@ -2037,7 +2037,7 @@ static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) return r; } -static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) +static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) { uint8_t *keys; uint64_t hva; -- cgit v1.2.3 From c5edd753a0bd6243a597f5199c227a50457ee179 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 8 Feb 2023 15:01:02 +0100 Subject: KVM: x86: Remove the KVM_GET_NR_MMU_PAGES ioctl The KVM_GET_NR_MMU_PAGES ioctl is quite questionable on 64-bit hosts since it fails to return the full 64 bits of the value that can be set with the corresponding KVM_SET_NR_MMU_PAGES call. Its "long" return value is truncated into an "int" in the kvm_arch_vm_ioctl() function. Since this ioctl also never has been used by userspace applications (QEMU, Google's internal VMM, kvmtool and CrosVM have been checked), it's likely the best if we remove this badly designed ioctl before anybody really tries to use it. Signed-off-by: Thomas Huth Reviewed-by: Sean Christopherson Message-Id: <20230208140105.655814-4-thuth@redhat.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 8 -------- include/uapi/linux/kvm.h | 2 +- tools/include/uapi/linux/kvm.h | 2 +- 3 files changed, 2 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ff7f398a0c6a..4282daeaee84 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6021,11 +6021,6 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, return 0; } -static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) -{ - return kvm->arch.n_max_mmu_pages; -} - static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { struct kvm_pic *pic = kvm->arch.vpic; @@ -6711,9 +6706,6 @@ set_identity_unlock: case KVM_SET_NR_MMU_PAGES: r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); break; - case KVM_GET_NR_MMU_PAGES: - r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); - break; case KVM_CREATE_IRQCHIP: { mutex_lock(&kvm->lock); diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index d77aef872a0a..4003a166328c 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1451,7 +1451,7 @@ struct kvm_vfio_spapr_tce { #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) -#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) +#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) /* deprecated */ #define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \ struct kvm_userspace_memory_region) #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index d77aef872a0a..4003a166328c 100644 --- a/tools/include/uapi/linux/kvm.h +++ 
b/tools/include/uapi/linux/kvm.h @@ -1451,7 +1451,7 @@ struct kvm_vfio_spapr_tce { #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) -#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) +#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) /* deprecated */ #define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \ struct kvm_userspace_memory_region) #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) -- cgit v1.2.3 From 2def950c63e3f976af87a2606dabe0c9e21c605b Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 8 Feb 2023 15:01:03 +0100 Subject: KVM: arm64: Limit length in kvm_vm_ioctl_mte_copy_tags() to INT_MAX In case of success, this function returns the amount of handled bytes. However, this does not work for large values: The function is called from kvm_arch_vm_ioctl() (which still returns a long), which in turn is called from kvm_vm_ioctl() in virt/kvm/kvm_main.c. And that function stores the return value in an "int r" variable. So the upper 32-bits of the "long" return value are lost there. KVM ioctl functions should only return "int" values, so let's limit the amount of bytes that can be requested here to INT_MAX to avoid the problem with the truncated return value. We can then also change the return type of the function to "int" to make it clearer that it is not possible to return a "long" here. Fixes: f0376edb1ddc ("KVM: arm64: Add ioctl to fetch/store tags in a guest") Signed-off-by: Thomas Huth Reviewed-by: Cornelia Huck Reviewed-by: Gavin Shan Reviewed-by: Steven Price Message-Id: <20230208140105.655814-5-thuth@redhat.com> Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.rst | 3 ++- arch/arm64/include/asm/kvm_host.h | 4 ++-- arch/arm64/kvm/guest.c | 8 ++++++-- 3 files changed, 10 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 62de0768d6aa..48fad6556822 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -5645,7 +5645,8 @@ with the KVM_XEN_VCPU_GET_ATTR ioctl. }; Copies Memory Tagging Extension (MTE) tags to/from guest tag memory. The -``guest_ipa`` and ``length`` fields must be ``PAGE_SIZE`` aligned. The ``addr`` +``guest_ipa`` and ``length`` fields must be ``PAGE_SIZE`` aligned. +``length`` must not be bigger than 2^31 - PAGE_SIZE bytes. The ``addr`` field must point to a buffer which the tags will be copied to or from. 
``flags`` specifies the direction of copy, either ``KVM_ARM_TAGS_TO_GUEST`` or diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index bcd774d74f34..6f7b218a681f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1005,8 +1005,8 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); -long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, - struct kvm_arm_copy_mte_tags *copy_tags); +int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, + struct kvm_arm_copy_mte_tags *copy_tags); /* Guest/host FPSIMD coordination helpers */ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 07444fa22888..26a2ebc465ea 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -1019,8 +1019,8 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, return ret; } -long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, - struct kvm_arm_copy_mte_tags *copy_tags) +int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, + struct kvm_arm_copy_mte_tags *copy_tags) { gpa_t guest_ipa = copy_tags->guest_ipa; size_t length = copy_tags->length; @@ -1041,6 +1041,10 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK) return -EINVAL; + /* Lengths above INT_MAX cannot be represented in the return value */ + if (length > INT_MAX) + return -EINVAL; + gfn = gpa_to_gfn(guest_ipa); mutex_lock(&kvm->slots_lock); -- cgit v1.2.3 From d8708b80fa0e6e21bc0c9e7276ad0bccef73b6e7 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Wed, 8 Feb 2023 15:01:05 +0100 Subject: KVM: Change return type of kvm_arch_vm_ioctl() to "int" All kvm_arch_vm_ioctl() implementations now only deal with "int" types as return values, so we can change the return type of these functions to use "int" instead of "long". 
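The whole series is guarding against a silent narrowing conversion, which is easy to demonstrate in isolation. A minimal standalone example (not KVM code):

#include <limits.h>
#include <stdio.h>

/* A "long" handler that reports how many bytes it processed. */
static long handle_request(long requested_bytes)
{
	return requested_bytes;		/* success: amount handled */
}

int main(void)
{
	/* The generic caller stores the result in an int, as kvm_vm_ioctl() does. */
	int r = handle_request((long)INT_MAX + 4096);

	/* On LP64 the upper 32 bits are lost and r comes back negative. */
	printf("truncated result: %d\n", r);
	return 0;
}
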
Signed-off-by: Thomas Huth Acked-by: Anup Patel Message-Id: <20230208140105.655814-7-thuth@redhat.com> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arm.c | 3 +-- arch/mips/kvm/mips.c | 4 ++-- arch/powerpc/kvm/powerpc.c | 5 ++--- arch/riscv/kvm/vm.c | 3 +-- arch/s390/kvm/kvm-s390.c | 3 +-- arch/x86/kvm/x86.c | 3 +-- include/linux/kvm_host.h | 3 +-- 7 files changed, 9 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3bd732eaf087..a43e1cb3b7e9 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1439,8 +1439,7 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, } } -long kvm_arch_vm_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 36c8991b5d39..884be4ef99dc 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -993,9 +993,9 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, kvm_flush_remote_tlbs(kvm); } -long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { - long r; + int r; switch (ioctl) { default: diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 4c5405fc5538..c0bac9cf2d87 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -2371,12 +2371,11 @@ static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp) } #endif -long kvm_arch_vm_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm __maybe_unused = filp->private_data; void __user *argp = (void __user *)arg; - long r; + int r; switch (ioctl) { case KVM_PPC_GET_PVINFO: { diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c index 65a964d7e70d..c13130ab459a 100644 --- a/arch/riscv/kvm/vm.c +++ b/arch/riscv/kvm/vm.c @@ -87,8 +87,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) return r; } -long kvm_arch_vm_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { return -EINVAL; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 7039abda6d49..4c3edccb9911 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2898,8 +2898,7 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop) } } -long kvm_arch_vm_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4282daeaee84..237c483b1230 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6667,8 +6667,7 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp) return 0; } -long kvm_arch_vm_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 8ada23756b0e..90edc16d37e5 100644 --- a/include/linux/kvm_host.h +++ 
b/include/linux/kvm_host.h @@ -1397,8 +1397,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status); int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); -long kvm_arch_vm_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg); +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); -- cgit v1.2.3 From 753b43c9d1b73337610861a6e644f8df3635d656 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Feb 2023 23:41:07 +0800 Subject: KVM: x86/mmu: Use 64-bit address to invalidate to fix a subtle bug FNAME(invlpg)() and kvm_mmu_invalidate_gva() take a gva_t, i.e. unsigned long, as the type of the address to invalidate. On 32-bit kernels, the upper 32 bits of the GPA will get dropped when an L2 GPA address is invalidated in the shadowed nested TDP MMU. Convert it to u64 to fix the problem. Reported-by: Sean Christopherson Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216154115.710033-2-jiangshanlai@gmail.com [sean: tweak changelog] Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 6 +++--- arch/x86/kvm/mmu/mmu.c | 16 ++++++++-------- arch/x86/kvm/mmu/paging_tmpl.h | 7 ++++--- arch/x86/kvm/x86.c | 4 ++-- 4 files changed, 17 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a45de1118a42..214636554675 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -441,7 +441,7 @@ struct kvm_mmu { struct x86_exception *exception); int (*sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp); - void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa); + void (*invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa); struct kvm_mmu_root_info root; union kvm_cpu_role cpu_role; union kvm_mmu_page_role root_role; @@ -2044,8 +2044,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); -void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gva_t gva, hpa_t root_hpa); +void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + u64 addr, hpa_t root_hpa); void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid); void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 144c5a01cd77..edad1a4828dc 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5707,25 +5707,25 @@ emulate: } EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); -void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - gva_t gva, hpa_t root_hpa) +void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + u64 addr, hpa_t root_hpa) { int i; /* It's actually a GPA for vcpu->arch.guest_mmu. */ if (mmu != &vcpu->arch.guest_mmu) { /* INVLPG on a non-canonical address is a NOP according to the SDM. 
*/ - if (is_noncanonical_address(gva, vcpu)) + if (is_noncanonical_address(addr, vcpu)) return; - static_call(kvm_x86_flush_tlb_gva)(vcpu, gva); + static_call(kvm_x86_flush_tlb_gva)(vcpu, addr); } if (!mmu->invlpg) return; if (root_hpa == INVALID_PAGE) { - mmu->invlpg(vcpu, gva, mmu->root.hpa); + mmu->invlpg(vcpu, addr, mmu->root.hpa); /* * INVLPG is required to invalidate any global mappings for the VA, @@ -5740,15 +5740,15 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, */ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) if (VALID_PAGE(mmu->prev_roots[i].hpa)) - mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); + mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa); } else { - mmu->invlpg(vcpu, gva, root_hpa); + mmu->invlpg(vcpu, addr, root_hpa); } } void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { - kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE); + kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE); ++vcpu->stat.invlpg; } EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index a056f2773dd9..0a9c11c24195 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -846,7 +846,8 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t); } -static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) +/* Note, @addr is a GPA when invlpg() invalidates an L2 GPA translation in shadowed TDP */ +static void FNAME(invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa) { struct kvm_shadow_walk_iterator iterator; struct kvm_mmu_page *sp; @@ -854,7 +855,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) int level; u64 *sptep; - vcpu_clear_mmio_info(vcpu, gva); + vcpu_clear_mmio_info(vcpu, addr); /* * No need to check return value here, rmap_can_add() can @@ -868,7 +869,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) } write_lock(&vcpu->kvm->mmu_lock); - for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) { + for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) { level = iterator.level; sptep = iterator.sptep; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 237c483b1230..0b6b587d7914 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -802,8 +802,8 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, */ if ((fault->error_code & PFERR_PRESENT_MASK) && !(fault->error_code & PFERR_RSVD_MASK)) - kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address, - fault_mmu->root.hpa); + kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address, + fault_mmu->root.hpa); fault_mmu->inject_page_fault(vcpu, fault); } -- cgit v1.2.3 From 90e444702a7c2e5d5806260735476d9bba0b598d Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Feb 2023 23:41:08 +0800 Subject: KVM: x86/mmu: Move the check in FNAME(sync_page) as kvm_sync_page_check() Prepare to check mmu->sync_page pointer before calling it. 
Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216154115.710033-3-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 43 +++++++++++++++++++++++++++++++++++++++++- arch/x86/kvm/mmu/paging_tmpl.h | 27 -------------------------- 2 files changed, 42 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index edad1a4828dc..6749fa4794a4 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1914,10 +1914,51 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp) &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \ if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else +static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +{ + union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role; + + /* + * Ignore various flags when verifying that it's safe to sync a shadow + * page using the current MMU context. + * + * - level: not part of the overall MMU role and will never match as the MMU's + * level tracks the root level + * - access: updated based on the new guest PTE + * - quadrant: not part of the overall MMU role (similar to level) + */ + const union kvm_mmu_page_role sync_role_ign = { + .level = 0xf, + .access = 0x7, + .quadrant = 0x3, + .passthrough = 0x1, + }; + + /* + * Direct pages can never be unsync, and KVM should never attempt to + * sync a shadow page for a different MMU context, e.g. if the role + * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the + * reserved bits checks will be wrong, etc... + */ + if (WARN_ON_ONCE(sp->role.direct || + (sp->role.word ^ root_role.word) & ~sync_role_ign.word)) + return false; + + return true; +} + +static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +{ + if (!kvm_sync_page_check(vcpu, sp)) + return -1; + + return vcpu->arch.mmu->sync_page(vcpu, sp); +} + static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list) { - int ret = vcpu->arch.mmu->sync_page(vcpu, sp); + int ret = __kvm_sync_page(vcpu, sp); if (ret < 0) kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 0a9c11c24195..abb1d325ad30 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -943,38 +943,11 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, */ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { - union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role; int i; bool host_writable; gpa_t first_pte_gpa; bool flush = false; - /* - * Ignore various flags when verifying that it's safe to sync a shadow - * page using the current MMU context. - * - * - level: not part of the overall MMU role and will never match as the MMU's - * level tracks the root level - * - access: updated based on the new guest PTE - * - quadrant: not part of the overall MMU role (similar to level) - */ - const union kvm_mmu_page_role sync_role_ign = { - .level = 0xf, - .access = 0x7, - .quadrant = 0x3, - .passthrough = 0x1, - }; - - /* - * Direct pages can never be unsync, and KVM should never attempt to - * sync a shadow page for a different MMU context, e.g. if the role - * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the - * reserved bits checks will be wrong, etc... 
- */ - if (WARN_ON_ONCE(sp->role.direct || - (sp->role.word ^ root_role.word) & ~sync_role_ign.word)) - return -1; - first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { -- cgit v1.2.3 From 51dddf6c49b9f60341a80c4947f3be67b3d50dc0 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Feb 2023 23:41:09 +0800 Subject: KVM: x86/mmu: Check mmu->sync_page pointer in kvm_sync_page_check() Assert that mmu->sync_page is non-NULL as part of the sanity checks performed before attempting to sync a shadow page. Explicitly checking mmu->sync_page is all but guaranteed to be redundant with the existing sanity check that the MMU is indirect, but the cost is negligible, and the explicit check also serves as documentation. Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216154115.710033-4-jiangshanlai@gmail.com [sean: increase verbosity of changelog] Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 6749fa4794a4..9d31724d26ad 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1940,7 +1940,7 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the * reserved bits checks will be wrong, etc... */ - if (WARN_ON_ONCE(sp->role.direct || + if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_page || (sp->role.word ^ root_role.word) & ~sync_role_ign.word)) return false; -- cgit v1.2.3 From 8ef228c20cae89c701c1ef7b8b8a84d6925b3575 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Feb 2023 23:41:10 +0800 Subject: KVM: x86/mmu: Set mmu->sync_page as NULL for direct paging mmu->sync_page for direct paging is never called. And both mmu->sync_page and mm->invlpg only make sense in shadow paging. Setting mmu->sync_page as NULL for direct paging makes it consistent with mm->invlpg which is set NULL for the case. 
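For illustration only, a small self-contained sketch (simplified types, not the real KVM structures) of the optional-hook convention this relies on: a NULL callback means "nothing is shadowed, nothing to sync", mirroring how a NULL ->invlpg is already treated by its callers.

#include <stdio.h>

struct toy_mmu {
        /* NULL for direct paging, which never creates unsync shadow pages. */
        int (*sync_page)(struct toy_mmu *mmu);
};

static int toy_sync_page(struct toy_mmu *mmu)
{
        if (!mmu->sync_page)            /* direct MMU: nothing to sync */
                return 0;

        return mmu->sync_page(mmu);
}

int main(void)
{
        struct toy_mmu direct_mmu = { .sync_page = NULL };

        printf("direct sync returns %d\n", toy_sync_page(&direct_mmu));
        return 0;
}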
Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216154115.710033-5-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 9d31724d26ad..7fd824cf0a25 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1789,12 +1789,6 @@ static void mark_unsync(u64 *spte) kvm_mmu_mark_parents_unsync(sp); } -static int nonpaging_sync_page(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp) -{ - return -1; -} - #define KVM_PAGE_ARRAY_NR 16 struct kvm_mmu_pages { @@ -4510,7 +4504,7 @@ static void nonpaging_init_context(struct kvm_mmu *context) { context->page_fault = nonpaging_page_fault; context->gva_to_gpa = nonpaging_gva_to_gpa; - context->sync_page = nonpaging_sync_page; + context->sync_page = NULL; context->invlpg = NULL; } @@ -5198,7 +5192,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, context->cpu_role.as_u64 = cpu_role.as_u64; context->root_role.word = root_role.word; context->page_fault = kvm_tdp_page_fault; - context->sync_page = nonpaging_sync_page; + context->sync_page = NULL; context->invlpg = NULL; context->get_guest_pgd = get_cr3; context->get_pdptr = kvm_pdptr_read; -- cgit v1.2.3 From c3c6c9fc5d24bcafbbeda2edb521b70f5df052b7 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Feb 2023 23:41:11 +0800 Subject: KVM: x86/mmu: Move the code out of FNAME(sync_page)'s loop body into mmu.c Rename mmu->sync_page to mmu->sync_spte and move the code out of FNAME(sync_page)'s loop body into mmu.c. No functionalities change intended. Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216154115.710033-6-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 4 +- arch/x86/kvm/mmu/mmu.c | 34 +++++++++--- arch/x86/kvm/mmu/paging_tmpl.h | 114 +++++++++++++++++----------------------- 3 files changed, 76 insertions(+), 76 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 214636554675..b7526c15501e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -439,8 +439,8 @@ struct kvm_mmu { gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t gva_or_gpa, u64 access, struct x86_exception *exception); - int (*sync_page)(struct kvm_vcpu *vcpu, - struct kvm_mmu_page *sp); + int (*sync_spte)(struct kvm_vcpu *vcpu, + struct kvm_mmu_page *sp, int i); void (*invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa); struct kvm_mmu_root_info root; union kvm_cpu_role cpu_role; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7fd824cf0a25..cb01e01e0404 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1934,7 +1934,7 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the * reserved bits checks will be wrong, etc... 
*/ - if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_page || + if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte || (sp->role.word ^ root_role.word) & ~sync_role_ign.word)) return false; @@ -1943,10 +1943,30 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { + int flush = 0; + int i; + if (!kvm_sync_page_check(vcpu, sp)) return -1; - return vcpu->arch.mmu->sync_page(vcpu, sp); + for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { + int ret = vcpu->arch.mmu->sync_spte(vcpu, sp, i); + + if (ret < -1) + return -1; + flush |= ret; + } + + /* + * Note, any flush is purely for KVM's correctness, e.g. when dropping + * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier + * unmap or dirty logging event doesn't fail to flush. The guest is + * responsible for flushing the TLB to ensure any changes in protection + * bits are recognized, i.e. until the guest flushes or page faults on + * a relevant address, KVM is architecturally allowed to let vCPUs use + * cached translations with the old protection bits. + */ + return flush; } static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, @@ -4504,7 +4524,7 @@ static void nonpaging_init_context(struct kvm_mmu *context) { context->page_fault = nonpaging_page_fault; context->gva_to_gpa = nonpaging_gva_to_gpa; - context->sync_page = NULL; + context->sync_spte = NULL; context->invlpg = NULL; } @@ -5095,7 +5115,7 @@ static void paging64_init_context(struct kvm_mmu *context) { context->page_fault = paging64_page_fault; context->gva_to_gpa = paging64_gva_to_gpa; - context->sync_page = paging64_sync_page; + context->sync_spte = paging64_sync_spte; context->invlpg = paging64_invlpg; } @@ -5103,7 +5123,7 @@ static void paging32_init_context(struct kvm_mmu *context) { context->page_fault = paging32_page_fault; context->gva_to_gpa = paging32_gva_to_gpa; - context->sync_page = paging32_sync_page; + context->sync_spte = paging32_sync_spte; context->invlpg = paging32_invlpg; } @@ -5192,7 +5212,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, context->cpu_role.as_u64 = cpu_role.as_u64; context->root_role.word = root_role.word; context->page_fault = kvm_tdp_page_fault; - context->sync_page = NULL; + context->sync_spte = NULL; context->invlpg = NULL; context->get_guest_pgd = get_cr3; context->get_pdptr = kvm_pdptr_read; @@ -5324,7 +5344,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->page_fault = ept_page_fault; context->gva_to_gpa = ept_gva_to_gpa; - context->sync_page = ept_sync_page; + context->sync_spte = ept_sync_spte; context->invlpg = ept_invlpg; update_permission_bitmask(context, true); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index abb1d325ad30..c07b2e7def37 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -937,87 +937,67 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, * can't change unless all sptes pointing to it are nuked first. 
* * Returns - * < 0: the sp should be zapped - * 0: the sp is synced and no tlb flushing is required - * > 0: the sp is synced and tlb flushing is required + * < 0: failed to sync spte + * 0: the spte is synced and no tlb flushing is required + * > 0: the spte is synced and tlb flushing is required */ -static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) +static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i) { - int i; bool host_writable; gpa_t first_pte_gpa; - bool flush = false; - - first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); - - for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { - u64 *sptep, spte; - struct kvm_memory_slot *slot; - unsigned pte_access; - pt_element_t gpte; - gpa_t pte_gpa; - gfn_t gfn; - - if (!sp->spt[i]) - continue; + u64 *sptep, spte; + struct kvm_memory_slot *slot; + unsigned pte_access; + pt_element_t gpte; + gpa_t pte_gpa; + gfn_t gfn; - pte_gpa = first_pte_gpa + i * sizeof(pt_element_t); + if (!sp->spt[i]) + return 0; - if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, - sizeof(pt_element_t))) - return -1; + first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); + pte_gpa = first_pte_gpa + i * sizeof(pt_element_t); - if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { - flush = true; - continue; - } + if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, + sizeof(pt_element_t))) + return -1; - gfn = gpte_to_gfn(gpte); - pte_access = sp->role.access; - pte_access &= FNAME(gpte_access)(gpte); - FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); + if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) + return 1; - if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access)) - continue; + gfn = gpte_to_gfn(gpte); + pte_access = sp->role.access; + pte_access &= FNAME(gpte_access)(gpte); + FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); - /* - * Drop the SPTE if the new protections would result in a RWX=0 - * SPTE or if the gfn is changing. The RWX=0 case only affects - * EPT with execute-only support, i.e. EPT without an effective - * "present" bit, as all other paging modes will create a - * read-only SPTE if pte_access is zero. - */ - if ((!pte_access && !shadow_present_mask) || - gfn != kvm_mmu_page_get_gfn(sp, i)) { - drop_spte(vcpu->kvm, &sp->spt[i]); - flush = true; - continue; - } + if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access)) + return 0; - /* Update the shadowed access bits in case they changed. */ - kvm_mmu_page_set_access(sp, i, pte_access); + /* + * Drop the SPTE if the new protections would result in a RWX=0 + * SPTE or if the gfn is changing. The RWX=0 case only affects + * EPT with execute-only support, i.e. EPT without an effective + * "present" bit, as all other paging modes will create a + * read-only SPTE if pte_access is zero. + */ + if ((!pte_access && !shadow_present_mask) || + gfn != kvm_mmu_page_get_gfn(sp, i)) { + drop_spte(vcpu->kvm, &sp->spt[i]); + return 1; + } - sptep = &sp->spt[i]; - spte = *sptep; - host_writable = spte & shadow_host_writable_mask; - slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); - make_spte(vcpu, sp, slot, pte_access, gfn, - spte_to_pfn(spte), spte, true, false, - host_writable, &spte); + /* Update the shadowed access bits in case they changed. 
*/ + kvm_mmu_page_set_access(sp, i, pte_access); - flush |= mmu_spte_update(sptep, spte); - } + sptep = &sp->spt[i]; + spte = *sptep; + host_writable = spte & shadow_host_writable_mask; + slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); + make_spte(vcpu, sp, slot, pte_access, gfn, + spte_to_pfn(spte), spte, true, false, + host_writable, &spte); - /* - * Note, any flush is purely for KVM's correctness, e.g. when dropping - * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier - * unmap or dirty logging event doesn't fail to flush. The guest is - * responsible for flushing the TLB to ensure any changes in protection - * bits are recognized, i.e. until the guest flushes or page faults on - * a relevant address, KVM is architecturally allowed to let vCPUs use - * cached translations with the old protection bits. - */ - return flush; + return mmu_spte_update(sptep, spte); } #undef pt_element_t -- cgit v1.2.3 From e6722d9211b2aa5f195267faaa6858004b4f42a0 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Feb 2023 23:41:12 +0800 Subject: KVM: x86/mmu: Reduce the update to the spte in FNAME(sync_spte) Sometimes when the guest updates its pagetable, it adds only new gptes to it without changing any existed one, so there is no point to update the sptes for these existed gptes. Also when the sptes for these unchanged gptes are updated, the AD bits are also removed since make_spte() is called with prefetch=true which might result unneeded TLB flushing. Just do nothing if the gpte's permissions are unchanged. Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216154115.710033-7-jiangshanlai@gmail.com [sean: expand comment to call out A/D bits] Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/paging_tmpl.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index c07b2e7def37..4047b8ff83d6 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -985,6 +985,14 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int drop_spte(vcpu->kvm, &sp->spt[i]); return 1; } + /* + * Do nothing if the permissions are unchanged. The existing SPTE is + * still, and prefetch_invalid_gpte() has verified that the A/D bits + * are set in the "new" gPTE, i.e. there is no danger of missing an A/D + * update due to A/D bits being set in the SPTE but not the gPTE. + */ + if (kvm_mmu_page_get_access(sp, i) == pte_access) + return 0; /* Update the shadowed access bits in case they changed. */ kvm_mmu_page_set_access(sp, i, pte_access); -- cgit v1.2.3 From f94db0c8b9fa50296d00e236e2416aea11186e18 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 16 Feb 2023 23:41:13 +0800 Subject: KVM: x86/mmu: Sanity check input to kvm_mmu_free_roots() Tweak KVM_MMU_ROOTS_ALL to precisely cover all current+previous root flags, and add a sanity in kvm_mmu_free_roots() to verify that the set of roots to free doesn't stray outside KVM_MMU_ROOTS_ALL. 
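A quick sketch (standalone userspace C, with the constants re-derived locally purely for illustration) of why the tweaked KVM_MMU_ROOTS_ALL is now a precise mask rather than ~0UL: bit 0 is the current root, the next KVM_MMU_NUM_PREV_ROOTS bits are the previous roots, and ROOTS_ALL is exactly their union, so the new WARN can flag any stray bits.

#include <assert.h>
#include <stdio.h>

#define BIT(n)                          (1UL << (n))
#define KVM_MMU_NUM_PREV_ROOTS          3
#define KVM_MMU_ROOT_CURRENT            BIT(0)
#define KVM_MMU_ROOT_PREVIOUS(i)        BIT(1 + (i))
#define KVM_MMU_ROOTS_ALL               (BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1)

int main(void)
{
        unsigned long all = KVM_MMU_ROOT_CURRENT;
        int i;

        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                all |= KVM_MMU_ROOT_PREVIOUS(i);

        /* ROOTS_ALL is precisely current + previous roots: 0xf here. */
        assert(all == KVM_MMU_ROOTS_ALL);
        printf("KVM_MMU_ROOTS_ALL = 0x%lx\n", KVM_MMU_ROOTS_ALL);
        return 0;
}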
Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216154115.710033-8-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 8 ++++---- arch/x86/kvm/mmu/mmu.c | 2 ++ 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b7526c15501e..c23c49e97ce2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -420,6 +420,10 @@ struct kvm_mmu_root_info { #define KVM_MMU_NUM_PREV_ROOTS 3 +#define KVM_MMU_ROOT_CURRENT BIT(0) +#define KVM_MMU_ROOT_PREVIOUS(i) BIT(1+i) +#define KVM_MMU_ROOTS_ALL (BIT(1 + KVM_MMU_NUM_PREV_ROOTS) - 1) + #define KVM_HAVE_MMU_RWLOCK struct kvm_mmu_page; @@ -1997,10 +2001,6 @@ static inline int __kvm_irq_line_state(unsigned long *irq_state, return !!(*irq_state); } -#define KVM_MMU_ROOT_CURRENT BIT(0) -#define KVM_MMU_ROOT_PREVIOUS(i) BIT(1+i) -#define KVM_MMU_ROOTS_ALL (~0UL) - int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level); void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index cb01e01e0404..ffc18f5a1adf 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3568,6 +3568,8 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, LIST_HEAD(invalid_list); bool free_active_root; + WARN_ON_ONCE(roots_to_free & ~KVM_MMU_ROOTS_ALL); + BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG); /* Before acquiring the MMU lock, see if we need to do any real work. */ -- cgit v1.2.3 From cd42853e9530fb64097cbe83abce10a34e68cdf4 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Feb 2023 23:41:14 +0800 Subject: kvm: x86/mmu: Use KVM_MMU_ROOT_XXX for kvm_mmu_invalidate_addr() The @root_hpa for kvm_mmu_invalidate_addr() is called with @mmu->root.hpa or INVALID_PAGE where @mmu->root.hpa is to invalidate gva for the current root (the same meaning as KVM_MMU_ROOT_CURRENT) and INVALID_PAGE is to invalidate gva for all roots (the same meaning as KVM_MMU_ROOTS_ALL). Change the argument type of kvm_mmu_invalidate_addr() and use KVM_MMU_ROOT_XXX instead so that we can reuse the function for kvm_mmu_invpcid_gva() and nested_ept_invalidate_addr() for invalidating gva for different set of roots. No fuctionalities changed. 
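To make the new calling convention concrete, a hedged sketch (simplified, non-KVM code; the TOY_* names and toy_invalidate_addr() are stand-ins) of how the old @root_hpa semantics map onto the roots bitmask: "mmu->root.hpa" becomes the current-root bit, and "INVALID_PAGE, i.e. everything" becomes the full set of current plus previous roots.

#include <stdio.h>

#define BIT(n)                  (1UL << (n))
#define TOY_NUM_PREV_ROOTS      3
#define TOY_ROOT_CURRENT        BIT(0)
#define TOY_ROOT_PREVIOUS(i)    BIT(1 + (i))
#define TOY_ROOTS_ALL           (BIT(1 + TOY_NUM_PREV_ROOTS) - 1)

/* Stand-in for kvm_mmu_invalidate_addr(): walk only the requested roots. */
static void toy_invalidate_addr(unsigned long addr, unsigned long roots)
{
        int i;

        if (roots & TOY_ROOT_CURRENT)
                printf("invalidate %#lx in the current root\n", addr);

        for (i = 0; i < TOY_NUM_PREV_ROOTS; i++)
                if (roots & TOY_ROOT_PREVIOUS(i))
                        printf("invalidate %#lx in previous root %d\n", addr, i);
}

int main(void)
{
        /* Old "mmu->root.hpa" caller: just the current root. */
        toy_invalidate_addr(0x1000, TOY_ROOT_CURRENT);

        /* Old "INVALID_PAGE" caller, e.g. INVLPG: every cached root. */
        toy_invalidate_addr(0x1000, TOY_ROOTS_ALL);
        return 0;
}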
Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216154115.710033-9-jiangshanlai@gmail.com [sean: massage comment slightly] Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/mmu/mmu.c | 38 +++++++++++++++++++------------------- arch/x86/kvm/x86.c | 2 +- 3 files changed, 21 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c23c49e97ce2..9a2c5925875b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2045,7 +2045,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva); void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - u64 addr, hpa_t root_hpa); + u64 addr, unsigned long roots); void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid); void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ffc18f5a1adf..a9cbd94ea858 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5765,10 +5765,12 @@ emulate: EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, - u64 addr, hpa_t root_hpa) + u64 addr, unsigned long roots) { int i; + WARN_ON_ONCE(roots & ~KVM_MMU_ROOTS_ALL); + /* It's actually a GPA for vcpu->arch.guest_mmu. */ if (mmu != &vcpu->arch.guest_mmu) { /* INVLPG on a non-canonical address is a NOP according to the SDM. */ @@ -5781,31 +5783,29 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, if (!mmu->invlpg) return; - if (root_hpa == INVALID_PAGE) { + if (roots & KVM_MMU_ROOT_CURRENT) mmu->invlpg(vcpu, addr, mmu->root.hpa); - /* - * INVLPG is required to invalidate any global mappings for the VA, - * irrespective of PCID. Since it would take us roughly similar amount - * of work to determine whether any of the prev_root mappings of the VA - * is marked global, or to just sync it blindly, so we might as well - * just always sync it. - * - * Mappings not reachable via the current cr3 or the prev_roots will be - * synced when switching to that cr3, so nothing needs to be done here - * for them. - */ - for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) - if (VALID_PAGE(mmu->prev_roots[i].hpa)) - mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa); - } else { - mmu->invlpg(vcpu, addr, root_hpa); + for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { + if ((roots & KVM_MMU_ROOT_PREVIOUS(i)) && + VALID_PAGE(mmu->prev_roots[i].hpa)) + mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa); } } void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { - kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE); + /* + * INVLPG is required to invalidate any global mappings for the VA, + * irrespective of PCID. Blindly sync all roots as it would take + * roughly the same amount of work/time to determine whether any of the + * previous roots have a global mapping. + * + * Mappings not reachable via the current or previous cached roots will + * be synced when switching to that new cr3, so nothing needs to be + * done here for them. 
+ */ + kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL); ++vcpu->stat.invlpg; } EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0b6b587d7914..e5a94d2228b0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -803,7 +803,7 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, if ((fault->error_code & PFERR_PRESENT_MASK) && !(fault->error_code & PFERR_RSVD_MASK)) kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address, - fault_mmu->root.hpa); + KVM_MMU_ROOT_CURRENT); fault_mmu->inject_page_fault(vcpu, fault); } -- cgit v1.2.3 From 9ebc3f51da6f526326b3d2c08c7f582e4ccabd11 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Thu, 16 Feb 2023 23:41:15 +0800 Subject: KVM: x86/mmu: Use kvm_mmu_invalidate_addr() in kvm_mmu_invpcid_gva() Use kvm_mmu_invalidate_addr() instead open calls to mmu->invlpg(). No functional change intended. Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216154115.710033-10-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index a9cbd94ea858..3ed8879e93ed 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5814,27 +5814,20 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) { struct kvm_mmu *mmu = vcpu->arch.mmu; - bool tlb_flush = false; + unsigned long roots = 0; uint i; - if (pcid == kvm_get_active_pcid(vcpu)) { - if (mmu->invlpg) - mmu->invlpg(vcpu, gva, mmu->root.hpa); - tlb_flush = true; - } + if (pcid == kvm_get_active_pcid(vcpu)) + roots |= KVM_MMU_ROOT_CURRENT; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { if (VALID_PAGE(mmu->prev_roots[i].hpa) && - pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) { - if (mmu->invlpg) - mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); - tlb_flush = true; - } + pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) + roots |= KVM_MMU_ROOT_PREVIOUS(i); } - if (tlb_flush) - static_call(kvm_x86_flush_tlb_gva)(vcpu, gva); - + if (roots) + kvm_mmu_invalidate_addr(vcpu, mmu, gva, roots); ++vcpu->stat.invlpg; /* -- cgit v1.2.3 From 2c86c444e2751a867bfa6a059a80ba67ef9d441a Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Fri, 17 Feb 2023 07:53:17 +0800 Subject: KVM: x86/mmu: Use kvm_mmu_invalidate_addr() in nested_ept_invalidate_addr() Use kvm_mmu_invalidate_addr() instead open calls to mmu->invlpg(). No functional change intended. 
Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216235321.735214-1-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 1 + arch/x86/kvm/vmx/nested.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 3ed8879e93ed..aafa431a3533 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5792,6 +5792,7 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa); } } +EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr); void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) { diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f63b28f46a71..c4ede229ed82 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -358,6 +358,7 @@ static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp) static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp, gpa_t addr) { + unsigned long roots = 0; uint i; struct kvm_mmu_root_info *cached_root; @@ -368,8 +369,10 @@ static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp, if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd, eptp)) - vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa); + roots |= KVM_MMU_ROOT_PREVIOUS(i); } + if (roots) + kvm_mmu_invalidate_addr(vcpu, vcpu->arch.mmu, addr, roots); } static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, -- cgit v1.2.3 From ed335278bd1282641748b2b0ca23291c457038ef Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Fri, 17 Feb 2023 07:53:18 +0800 Subject: KVM: x86/mmu: Allow the roots to be invalid in FNAME(invlpg) Don't assume the current root to be valid, just check it and remove the WARN(). Also move the code to check if the root is valid into FNAME(invlpg) to simplify the code. Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216235321.735214-2-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 3 +-- arch/x86/kvm/mmu/paging_tmpl.h | 4 +--- 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index aafa431a3533..32291a84fdc6 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5787,8 +5787,7 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, mmu->invlpg(vcpu, addr, mmu->root.hpa); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { - if ((roots & KVM_MMU_ROOT_PREVIOUS(i)) && - VALID_PAGE(mmu->prev_roots[i].hpa)) + if (roots & KVM_MMU_ROOT_PREVIOUS(i)) mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa); } } diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 4047b8ff83d6..0b4e3c9a3f05 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -863,10 +863,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa) */ mmu_topup_memory_caches(vcpu, true); - if (!VALID_PAGE(root_hpa)) { - WARN_ON(1); + if (!VALID_PAGE(root_hpa)) return; - } write_lock(&vcpu->kvm->mmu_lock); for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) { -- cgit v1.2.3 From 9fd4a4e3a3d9fc0306525d95bf3eca693d311406 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Fri, 17 Feb 2023 07:53:19 +0800 Subject: KVM: x86/mmu: Remove FNAME(invlpg) and use FNAME(sync_spte) to update vTLB instead. In hardware TLB, invalidating TLB entries means the translations are removed from the TLB. 
In KVM shadowed vTLB, the translations (combinations of shadow paging and hardware TLB) are generally maintained as long as they remain "clean" when the TLB of an address space (i.e. a PCID or all) is flushed with the help of write-protections, sp->unsync, and kvm_sync_page(), where "clean" in this context means that no updates to KVM's SPTEs are needed. However, FNAME(invlpg) always zaps/removes the vTLB if the shadow page is unsync, and thus triggers a remote flush even if the original vTLB entry is clean, i.e. is usable as-is. Besides this, FNAME(invlpg) is largely is a duplicate implementation of FNAME(sync_spte) to invalidate a vTLB entry. To address both issues, reuse FNAME(sync_spte) to share the code and slightly modify the semantics, i.e. keep the vTLB entry if it's "clean" and avoid remote TLB flush. Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216235321.735214-3-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 1 - arch/x86/kvm/mmu/mmu.c | 50 ++++++++++++++++++++++++------------ arch/x86/kvm/mmu/paging_tmpl.h | 57 ----------------------------------------- 3 files changed, 33 insertions(+), 75 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9a2c5925875b..991296a2f807 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -445,7 +445,6 @@ struct kvm_mmu { struct x86_exception *exception); int (*sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i); - void (*invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa); struct kvm_mmu_root_info root; union kvm_cpu_role cpu_role; union kvm_mmu_page_role root_role; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 32291a84fdc6..5bca99e50d1a 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1073,14 +1073,6 @@ static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level, return &slot->arch.rmap[level - PG_LEVEL_4K][idx]; } -static bool rmap_can_add(struct kvm_vcpu *vcpu) -{ - struct kvm_mmu_memory_cache *mc; - - mc = &vcpu->arch.mmu_pte_list_desc_cache; - return kvm_mmu_memory_cache_nr_free_objects(mc); -} - static void rmap_remove(struct kvm *kvm, u64 *spte) { struct kvm_memslots *slots; @@ -4527,7 +4519,6 @@ static void nonpaging_init_context(struct kvm_mmu *context) context->page_fault = nonpaging_page_fault; context->gva_to_gpa = nonpaging_gva_to_gpa; context->sync_spte = NULL; - context->invlpg = NULL; } static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd, @@ -5118,7 +5109,6 @@ static void paging64_init_context(struct kvm_mmu *context) context->page_fault = paging64_page_fault; context->gva_to_gpa = paging64_gva_to_gpa; context->sync_spte = paging64_sync_spte; - context->invlpg = paging64_invlpg; } static void paging32_init_context(struct kvm_mmu *context) @@ -5126,7 +5116,6 @@ static void paging32_init_context(struct kvm_mmu *context) context->page_fault = paging32_page_fault; context->gva_to_gpa = paging32_gva_to_gpa; context->sync_spte = paging32_sync_spte; - context->invlpg = paging32_invlpg; } static union kvm_cpu_role @@ -5215,7 +5204,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, context->root_role.word = root_role.word; context->page_fault = kvm_tdp_page_fault; context->sync_spte = NULL; - context->invlpg = NULL; context->get_guest_pgd = get_cr3; context->get_pdptr = kvm_pdptr_read; context->inject_page_fault = kvm_inject_page_fault; @@ -5347,7 +5335,6 @@ void 
kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, context->page_fault = ept_page_fault; context->gva_to_gpa = ept_gva_to_gpa; context->sync_spte = ept_sync_spte; - context->invlpg = ept_invlpg; update_permission_bitmask(context, true); context->pkru_mask = 0; @@ -5388,7 +5375,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, * L2 page tables are never shadowed, so there is no need to sync * SPTEs. */ - g_context->invlpg = NULL; + g_context->sync_spte = NULL; /* * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using @@ -5764,6 +5751,35 @@ emulate: } EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); +static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, + u64 addr, hpa_t root_hpa) +{ + struct kvm_shadow_walk_iterator iterator; + + vcpu_clear_mmio_info(vcpu, addr); + + if (!VALID_PAGE(root_hpa)) + return; + + write_lock(&vcpu->kvm->mmu_lock); + for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) { + struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep); + + if (sp->unsync) { + int ret = mmu->sync_spte(vcpu, sp, iterator.index); + + if (ret < 0) + mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL); + if (ret) + kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep); + } + + if (!sp->unsync_children) + break; + } + write_unlock(&vcpu->kvm->mmu_lock); +} + void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, unsigned long roots) { @@ -5780,15 +5796,15 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, static_call(kvm_x86_flush_tlb_gva)(vcpu, addr); } - if (!mmu->invlpg) + if (!mmu->sync_spte) return; if (roots & KVM_MMU_ROOT_CURRENT) - mmu->invlpg(vcpu, addr, mmu->root.hpa); + __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa); for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { if (roots & KVM_MMU_ROOT_PREVIOUS(i)) - mmu->invlpg(vcpu, addr, mmu->prev_roots[i].hpa); + __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa); } } EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 0b4e3c9a3f05..e9d97f9b2ec3 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -846,63 +846,6 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t); } -/* Note, @addr is a GPA when invlpg() invalidates an L2 GPA translation in shadowed TDP */ -static void FNAME(invlpg)(struct kvm_vcpu *vcpu, u64 addr, hpa_t root_hpa) -{ - struct kvm_shadow_walk_iterator iterator; - struct kvm_mmu_page *sp; - u64 old_spte; - int level; - u64 *sptep; - - vcpu_clear_mmio_info(vcpu, addr); - - /* - * No need to check return value here, rmap_can_add() can - * help us to skip pte prefetch later. 
- */ - mmu_topup_memory_caches(vcpu, true); - - if (!VALID_PAGE(root_hpa)) - return; - - write_lock(&vcpu->kvm->mmu_lock); - for_each_shadow_entry_using_root(vcpu, root_hpa, addr, iterator) { - level = iterator.level; - sptep = iterator.sptep; - - sp = sptep_to_sp(sptep); - old_spte = *sptep; - if (is_last_spte(old_spte, level)) { - pt_element_t gpte; - gpa_t pte_gpa; - - if (!sp->unsync) - break; - - pte_gpa = FNAME(get_level1_sp_gpa)(sp); - pte_gpa += spte_index(sptep) * sizeof(pt_element_t); - - mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL); - if (is_shadow_present_pte(old_spte)) - kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep); - - if (!rmap_can_add(vcpu)) - break; - - if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte, - sizeof(pt_element_t))) - break; - - FNAME(prefetch_gpte)(vcpu, sp, sptep, gpte, false); - } - - if (!sp->unsync_children) - break; - } - write_unlock(&vcpu->kvm->mmu_lock); -} - /* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t addr, u64 access, -- cgit v1.2.3 From 91ca7672dc7386c0d181ead2e03f4e65b73ca6b8 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Fri, 17 Feb 2023 07:53:20 +0800 Subject: kvm: x86/mmu: Remove @no_dirty_log from FNAME(prefetch_gpte) FNAME(prefetch_gpte) is always called with @no_dirty_log=true. Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216235321.735214-4-jiangshanlai@gmail.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/paging_tmpl.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index e9d97f9b2ec3..8ef67f76ee68 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -519,7 +519,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker, static bool FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, - u64 *spte, pt_element_t gpte, bool no_dirty_log) + u64 *spte, pt_element_t gpte) { struct kvm_memory_slot *slot; unsigned pte_access; @@ -535,8 +535,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, pte_access = sp->role.access & FNAME(gpte_access)(gpte); FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); - slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, - no_dirty_log && (pte_access & ACC_WRITE_MASK)); + slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, pte_access & ACC_WRITE_MASK); if (!slot) return false; @@ -605,7 +604,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, if (is_shadow_present_pte(*spte)) continue; - if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true)) + if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i])) break; } } -- cgit v1.2.3 From 19ace7d6ca15a4395dd294286fe253a233bbf20a Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Fri, 17 Feb 2023 07:53:21 +0800 Subject: KVM: x86/mmu: Skip calling mmu->sync_spte() when the spte is 0 Sync the spte only when the spte is set and avoid the indirect branch. 
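As a rough illustration of the pattern (simplified, not the actual KVM code; all toy_* names are invented): hoisting the cheap "is there anything here at all" test into a small wrapper lets every caller skip the indirect call entirely for empty entries.

#include <stdio.h>

#define TOY_ENTRIES     8

struct toy_page {
        unsigned long spt[TOY_ENTRIES];
        /* Indirect call, e.g. 32-bit vs. 64-bit vs. EPT flavours. */
        int (*sync_spte)(struct toy_page *sp, int i);
};

static int toy_sync_one(struct toy_page *sp, int i)
{
        return (int)(sp->spt[i] & 1);   /* pretend bit 0 means "needs a flush" */
}

/* Cheap check first; only take the indirect branch for populated entries. */
static int toy_sync_spte(struct toy_page *sp, int i)
{
        if (!sp->spt[i])
                return 0;

        return sp->sync_spte(sp, i);
}

int main(void)
{
        struct toy_page sp = { .spt = { [3] = 0x1UL }, .sync_spte = toy_sync_one };
        int i, flush = 0;

        for (i = 0; i < TOY_ENTRIES; i++)
                flush |= toy_sync_spte(&sp, i);

        printf("flush needed: %d\n", flush);
        return 0;
}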
Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230216235321.735214-5-jiangshanlai@gmail.com [sean: add wrapper instead of open coding each check] Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 12 ++++++++++-- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 5bca99e50d1a..9b036a961847 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -1933,6 +1933,14 @@ static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) return true; } +static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i) +{ + if (!sp->spt[i]) + return 0; + + return vcpu->arch.mmu->sync_spte(vcpu, sp, i); +} + static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { int flush = 0; @@ -1942,7 +1950,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) return -1; for (i = 0; i < SPTE_ENT_PER_PAGE; i++) { - int ret = vcpu->arch.mmu->sync_spte(vcpu, sp, i); + int ret = kvm_sync_spte(vcpu, sp, i); if (ret < -1) return -1; @@ -5766,7 +5774,7 @@ static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep); if (sp->unsync) { - int ret = mmu->sync_spte(vcpu, sp, iterator.index); + int ret = kvm_sync_spte(vcpu, sp, iterator.index); if (ret < 0) mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 8ef67f76ee68..03a9577329fc 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -892,7 +892,7 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int gpa_t pte_gpa; gfn_t gfn; - if (!sp->spt[i]) + if (WARN_ON_ONCE(!sp->spt[i])) return 0; first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); -- cgit v1.2.3 From 141705b78381ab1dcb52c84ebb396e434e86b624 Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Fri, 13 Jan 2023 20:29:10 +0800 Subject: KVM: x86/mmu: Track tail count in pte_list_desc to optimize guest fork() Rework "struct pte_list_desc" and pte_list_{add|remove} to track the tail count, i.e. number of PTEs in non-head descriptors, and to always keep all tail descriptors full so that adding a new entry and counting the number of entries is done in constant time instead of linear time. No visible performace is changed in tests. But pte_list_add() is no longer shown in the perf result for the COWed pages even the guest forks millions of tasks. Signed-off-by: Lai Jiangshan Link: https://lore.kernel.org/r/20230113122910.672417-1-jiangshanlai@gmail.com [sean: reword shortlog, tweak changelog, add lots of comments, add BUG_ON()] Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 109 +++++++++++++++++++++++++++++-------------------- 1 file changed, 65 insertions(+), 44 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 9b036a961847..9655656dce50 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -125,17 +125,31 @@ module_param(dbg, bool, 0644); #define PTE_LIST_EXT 14 /* - * Slight optimization of cacheline layout, by putting `more' and `spte_count' - * at the start; then accessing it will only use one single cacheline for - * either full (entries==PTE_LIST_EXT) case or entries<=6. 
+ * struct pte_list_desc is the core data structure used to implement a custom + * list for tracking a set of related SPTEs, e.g. all the SPTEs that map a + * given GFN when used in the context of rmaps. Using a custom list allows KVM + * to optimize for the common case where many GFNs will have at most a handful + * of SPTEs pointing at them, i.e. allows packing multiple SPTEs into a small + * memory footprint, which in turn improves runtime performance by exploiting + * cache locality. + * + * A list is comprised of one or more pte_list_desc objects (descriptors). + * Each individual descriptor stores up to PTE_LIST_EXT SPTEs. If a descriptor + * is full and a new SPTEs needs to be added, a new descriptor is allocated and + * becomes the head of the list. This means that by definitions, all tail + * descriptors are full. + * + * Note, the meta data fields are deliberately placed at the start of the + * structure to optimize the cacheline layout; accessing the descriptor will + * touch only a single cacheline so long as @spte_count<=6 (or if only the + * descriptors metadata is accessed). */ struct pte_list_desc { struct pte_list_desc *more; - /* - * Stores number of entries stored in the pte_list_desc. No need to be - * u64 but just for easier alignment. When PTE_LIST_EXT, means full. - */ - u64 spte_count; + /* The number of PTEs stored in _this_ descriptor. */ + u32 spte_count; + /* The number of PTEs stored in all tails of this descriptor. */ + u32 tail_count; u64 *sptes[PTE_LIST_EXT]; }; @@ -929,22 +943,25 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, desc->sptes[0] = (u64 *)rmap_head->val; desc->sptes[1] = spte; desc->spte_count = 2; + desc->tail_count = 0; rmap_head->val = (unsigned long)desc | 1; ++count; } else { rmap_printk("%p %llx many->many\n", spte, *spte); desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); - while (desc->spte_count == PTE_LIST_EXT) { - count += PTE_LIST_EXT; - if (!desc->more) { - desc->more = kvm_mmu_memory_cache_alloc(cache); - desc = desc->more; - desc->spte_count = 0; - break; - } - desc = desc->more; + count = desc->tail_count + desc->spte_count; + + /* + * If the previous head is full, allocate a new head descriptor + * as tail descriptors are always kept full. + */ + if (desc->spte_count == PTE_LIST_EXT) { + desc = kvm_mmu_memory_cache_alloc(cache); + desc->more = (struct pte_list_desc *)(rmap_head->val & ~1ul); + desc->spte_count = 0; + desc->tail_count = count; + rmap_head->val = (unsigned long)desc | 1; } - count += desc->spte_count; desc->sptes[desc->spte_count++] = spte; } return count; @@ -952,30 +969,44 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, - struct pte_list_desc *desc, int i, - struct pte_list_desc *prev_desc) + struct pte_list_desc *desc, int i) { - int j = desc->spte_count - 1; + struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); + int j = head_desc->spte_count - 1; - desc->sptes[i] = desc->sptes[j]; - desc->sptes[j] = NULL; - desc->spte_count--; - if (desc->spte_count) + /* + * The head descriptor should never be empty. A new head is added only + * when adding an entry and the previous head is full, and heads are + * removed (this flow) when they become empty. + */ + BUG_ON(j < 0); + + /* + * Replace the to-be-freed SPTE with the last valid entry from the head + * descriptor to ensure that tail descriptors are full at all times. 
+ * Note, this also means that tail_count is stable for each descriptor. + */ + desc->sptes[i] = head_desc->sptes[j]; + head_desc->sptes[j] = NULL; + head_desc->spte_count--; + if (head_desc->spte_count) return; - if (!prev_desc && !desc->more) + + /* + * The head descriptor is empty. If there are no tail descriptors, + * nullify the rmap head to mark the list as emtpy, else point the rmap + * head at the next descriptor, i.e. the new head. + */ + if (!head_desc->more) rmap_head->val = 0; else - if (prev_desc) - prev_desc->more = desc->more; - else - rmap_head->val = (unsigned long)desc->more | 1; - mmu_free_pte_list_desc(desc); + rmap_head->val = (unsigned long)head_desc->more | 1; + mmu_free_pte_list_desc(head_desc); } static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) { struct pte_list_desc *desc; - struct pte_list_desc *prev_desc; int i; if (!rmap_head->val) { @@ -991,16 +1022,13 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) } else { rmap_printk("%p many->many\n", spte); desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); - prev_desc = NULL; while (desc) { for (i = 0; i < desc->spte_count; ++i) { if (desc->sptes[i] == spte) { - pte_list_desc_remove_entry(rmap_head, - desc, i, prev_desc); + pte_list_desc_remove_entry(rmap_head, desc, i); return; } } - prev_desc = desc; desc = desc->more; } pr_err("%s: %p many->many\n", __func__, spte); @@ -1047,7 +1075,6 @@ out: unsigned int pte_list_count(struct kvm_rmap_head *rmap_head) { struct pte_list_desc *desc; - unsigned int count = 0; if (!rmap_head->val) return 0; @@ -1055,13 +1082,7 @@ unsigned int pte_list_count(struct kvm_rmap_head *rmap_head) return 1; desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); - - while (desc) { - count += desc->spte_count; - desc = desc->more; - } - - return count; + return desc->tail_count + desc->spte_count; } static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level, -- cgit v1.2.3 From 28e4b4597d65927f8147c493d66aa0fe006e364c Mon Sep 17 00:00:00 2001 From: David Matlack Date: Thu, 26 Jan 2023 10:40:21 -0800 Subject: KVM: x86/mmu: Collapse kvm_flush_remote_tlbs_with_{range,address}() together Collapse kvm_flush_remote_tlbs_with_range() and kvm_flush_remote_tlbs_with_address() into a single function. This eliminates some lines of code and a useless NULL check on the range struct. Opportunistically switch from ENOTSUPP to EOPNOTSUPP to make checkpatch happy. 
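Roughly, the collapsed shape is the familiar "try the optional ranged flush, fall back to a full flush" pattern; below is a hedged, self-contained sketch with stub hooks (toy_* names, not the real kvm_x86_ops), not the actual KVM implementation.

#include <errno.h>
#include <stdio.h>

struct toy_range {
        unsigned long start_gfn;
        unsigned long pages;
};

/* Optional backend hook; NULL when ranged remote flushes aren't supported. */
static int (*toy_flush_range_hook)(struct toy_range *range);

static void toy_flush_all(void)
{
        printf("full remote TLB flush\n");
}

static void toy_flush_range(unsigned long start_gfn, unsigned long pages)
{
        struct toy_range range = { .start_gfn = start_gfn, .pages = pages };
        int ret = -EOPNOTSUPP;

        if (toy_flush_range_hook)
                ret = toy_flush_range_hook(&range);

        if (ret)                        /* unsupported or failed: flush everything */
                toy_flush_all();
}

int main(void)
{
        toy_flush_range(0x100, 16);     /* no hook registered -> full flush */
        return 0;
}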
Signed-off-by: David Matlack Link: https://lore.kernel.org/r/20230126184025.2294823-4-dmatlack@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 9655656dce50..ed1df733f12a 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -261,27 +261,20 @@ static inline bool kvm_available_flush_tlb_with_range(void) return kvm_x86_ops.tlb_remote_flush_with_range; } -static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm, - struct kvm_tlb_range *range) -{ - int ret = -ENOTSUPP; - - if (range && kvm_x86_ops.tlb_remote_flush_with_range) - ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, range); - - if (ret) - kvm_flush_remote_tlbs(kvm); -} - void kvm_flush_remote_tlbs_with_address(struct kvm *kvm, u64 start_gfn, u64 pages) { struct kvm_tlb_range range; + int ret = -EOPNOTSUPP; range.start_gfn = start_gfn; range.pages = pages; - kvm_flush_remote_tlbs_with_range(kvm, &range); + if (kvm_x86_ops.tlb_remote_flush_with_range) + ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, &range); + + if (ret) + kvm_flush_remote_tlbs(kvm); } static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index); -- cgit v1.2.3 From 8c63e8c2176552d5c003d7459609383d32bf47f3 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Thu, 26 Jan 2023 10:40:22 -0800 Subject: KVM: x86/mmu: Rename kvm_flush_remote_tlbs_with_address() Rename kvm_flush_remote_tlbs_with_address() to kvm_flush_remote_tlbs_range(). This name is shorter, which reduces the number of callsites that need to be broken up across multiple lines, and more readable since it conveys a range of memory is being flushed rather than a single address. No functional change intended. Signed-off-by: David Matlack Link: https://lore.kernel.org/r/20230126184025.2294823-5-dmatlack@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 14 +++++--------- arch/x86/kvm/mmu/mmu_internal.h | 7 +++---- 2 files changed, 8 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ed1df733f12a..b6635da53cb3 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -261,8 +261,7 @@ static inline bool kvm_available_flush_tlb_with_range(void) return kvm_x86_ops.tlb_remote_flush_with_range; } -void kvm_flush_remote_tlbs_with_address(struct kvm *kvm, - u64 start_gfn, u64 pages) +void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages) { struct kvm_tlb_range range; int ret = -EOPNOTSUPP; @@ -5922,9 +5921,8 @@ slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot, if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { if (flush && flush_on_yield) { - kvm_flush_remote_tlbs_with_address(kvm, - start_gfn, - iterator.gfn - start_gfn + 1); + kvm_flush_remote_tlbs_range(kvm, start_gfn, + iterator.gfn - start_gfn + 1); flush = false; } cond_resched_rwlock_write(&kvm->mmu_lock); @@ -6279,8 +6277,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) } if (flush) - kvm_flush_remote_tlbs_with_address(kvm, gfn_start, - gfn_end - gfn_start); + kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start); kvm_mmu_invalidate_end(kvm, 0, -1ul); @@ -6669,8 +6666,7 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, * is observed by any other operation on the same memslot. 
*/ lockdep_assert_held(&kvm->slots_lock); - kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn, - memslot->npages); + kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages); } void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 2cbb155c686c..4b2a1dc43db3 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -170,14 +170,13 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn, int min_level); -void kvm_flush_remote_tlbs_with_address(struct kvm *kvm, - u64 start_gfn, u64 pages); +void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages); /* Flush the given page (huge or not) of guest memory. */ static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level) { - kvm_flush_remote_tlbs_with_address(kvm, gfn_round_for_level(gfn, level), - KVM_PAGES_PER_HPAGE(level)); + kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level), + KVM_PAGES_PER_HPAGE(level)); } unsigned int pte_list_count(struct kvm_rmap_head *rmap_head); -- cgit v1.2.3 From 9d4655da1a4c17f6691a6434303d9973017bf1ca Mon Sep 17 00:00:00 2001 From: David Matlack Date: Thu, 26 Jan 2023 10:40:23 -0800 Subject: KVM: x86/mmu: Use gfn_t in kvm_flush_remote_tlbs_range() Use gfn_t instead of u64 for kvm_flush_remote_tlbs_range()'s parameters, since gfn_t is the standard type for GFNs throughout KVM. Opportunistically rename pages to nr_pages to make its role even more obvious. No functional change intended. Signed-off-by: David Matlack Link: https://lore.kernel.org/r/20230126184025.2294823-6-dmatlack@google.com [sean: convert pages to gfn_t too, and rename] Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 5 +++-- arch/x86/kvm/mmu/mmu_internal.h | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index b6635da53cb3..cc42fa097d5b 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -261,13 +261,14 @@ static inline bool kvm_available_flush_tlb_with_range(void) return kvm_x86_ops.tlb_remote_flush_with_range; } -void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages) +void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, + gfn_t nr_pages) { struct kvm_tlb_range range; int ret = -EOPNOTSUPP; range.start_gfn = start_gfn; - range.pages = pages; + range.pages = nr_pages; if (kvm_x86_ops.tlb_remote_flush_with_range) ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, &range); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index 4b2a1dc43db3..d39af5639ce9 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -170,7 +170,8 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn, int min_level); -void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages); +void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, + gfn_t nr_pages); /* Flush the given page (huge or not) of guest memory. 
*/ static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level) -- cgit v1.2.3 From 727ae377013249c05359f84975d21f3d13c70907 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 2 Feb 2023 18:27:49 +0000 Subject: KVM: x86/mmu: Rename slot rmap walkers to add clarity and clean up code Replace "slot_handle_level" with "walk_slot_rmaps" to better capture what the helpers are doing, and to slightly shorten the function names so that each function's return type and attributes can be placed on the same line as the function declaration. No functional change intended. Link: https://lore.kernel.org/mm-commits/CAHk-=wjS-Jg7sGMwUPpDsjv392nDOOs0CtUtVkp=S6Q7JzFJRw@mail.gmail.com Signed-off-by: Ben Gardon Link: https://lore.kernel.org/r/20230202182809.1929122-2-bgardon@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 66 +++++++++++++++++++++++++------------------------- 1 file changed, 33 insertions(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index cc42fa097d5b..88aba0d8e9e4 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5902,23 +5902,24 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, EXPORT_SYMBOL_GPL(kvm_configure_mmu); /* The return value indicates if tlb flush on all vcpus is needed. */ -typedef bool (*slot_level_handler) (struct kvm *kvm, +typedef bool (*slot_rmaps_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot); /* The caller should hold mmu-lock before calling this function. */ -static __always_inline bool -slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot, - slot_level_handler fn, int start_level, int end_level, - gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield, - bool flush) +static __always_inline bool __walk_slot_rmaps(struct kvm *kvm, + const struct kvm_memory_slot *slot, + slot_rmaps_handler fn, + int start_level, int end_level, + gfn_t start_gfn, gfn_t end_gfn, + bool flush_on_yield, bool flush) { struct slot_rmap_walk_iterator iterator; - for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn, + for_each_slot_rmap_range(slot, start_level, end_level, start_gfn, end_gfn, &iterator) { if (iterator.rmap) - flush |= fn(kvm, iterator.rmap, memslot); + flush |= fn(kvm, iterator.rmap, slot); if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) { if (flush && flush_on_yield) { @@ -5933,23 +5934,23 @@ slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot, return flush; } -static __always_inline bool -slot_handle_level(struct kvm *kvm, const struct kvm_memory_slot *memslot, - slot_level_handler fn, int start_level, int end_level, - bool flush_on_yield) +static __always_inline bool walk_slot_rmaps(struct kvm *kvm, + const struct kvm_memory_slot *slot, + slot_rmaps_handler fn, + int start_level, int end_level, + bool flush_on_yield) { - return slot_handle_level_range(kvm, memslot, fn, start_level, - end_level, memslot->base_gfn, - memslot->base_gfn + memslot->npages - 1, - flush_on_yield, false); + return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level, + slot->base_gfn, slot->base_gfn + slot->npages - 1, + flush_on_yield, false); } -static __always_inline bool -slot_handle_level_4k(struct kvm *kvm, const struct kvm_memory_slot *memslot, - slot_level_handler fn, bool flush_on_yield) +static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm, + const struct kvm_memory_slot *slot, + slot_rmaps_handler fn, + bool 
flush_on_yield) { - return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K, - PG_LEVEL_4K, flush_on_yield); + return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield); } static void free_mmu_pages(struct kvm_mmu *mmu) @@ -6244,9 +6245,9 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e if (WARN_ON_ONCE(start >= end)) continue; - flush = slot_handle_level_range(kvm, memslot, __kvm_zap_rmap, - PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, - start, end - 1, true, flush); + flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap, + PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, + start, end - 1, true, flush); } } @@ -6298,8 +6299,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, { if (kvm_memslots_have_rmaps(kvm)) { write_lock(&kvm->mmu_lock); - slot_handle_level(kvm, memslot, slot_rmap_write_protect, - start_level, KVM_MAX_HUGEPAGE_LEVEL, false); + walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect, + start_level, KVM_MAX_HUGEPAGE_LEVEL, false); write_unlock(&kvm->mmu_lock); } @@ -6534,10 +6535,9 @@ static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm, * all the way to the target level. There's no need to split pages * already at the target level. */ - for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--) { - slot_handle_level_range(kvm, slot, shadow_mmu_try_split_huge_pages, - level, level, start, end - 1, true, false); - } + for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--) + __walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages, + level, level, start, end - 1, true, false); } /* Must be called with the mmu_lock held in write-mode. */ @@ -6635,8 +6635,8 @@ static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm, * Note, use KVM_MAX_HUGEPAGE_LEVEL - 1 since there's no need to zap * pages that are already mapped at the maximum hugepage level. */ - if (slot_handle_level(kvm, slot, kvm_mmu_zap_collapsible_spte, - PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true)) + if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte, + PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true)) kvm_arch_flush_remote_tlbs_memslot(kvm, slot); } @@ -6679,7 +6679,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, * Clear dirty bits only on 4k SPTEs since the legacy MMU only * support dirty logging at a 4k granularity. */ - slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false); + walk_slot_rmaps_4k(kvm, memslot, __rmap_clear_dirty, false); write_unlock(&kvm->mmu_lock); } -- cgit v1.2.3 From eddd9e8302de77cea52b3b2aaa3e1db19e680dd0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 2 Feb 2023 18:27:50 +0000 Subject: KVM: x86/mmu: Replace comment with an actual lockdep assertion on mmu_lock Assert that mmu_lock is held for write in __walk_slot_rmaps() instead of hoping the function comment will magically prevent introducing bugs. Signed-off-by: Ben Gardon Link: https://lore.kernel.org/r/20230202182809.1929122-3-bgardon@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 88aba0d8e9e4..bda2814dd158 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5906,7 +5906,6 @@ typedef bool (*slot_rmaps_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot); -/* The caller should hold mmu-lock before calling this function. 
*/ static __always_inline bool __walk_slot_rmaps(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, @@ -5916,6 +5915,8 @@ static __always_inline bool __walk_slot_rmaps(struct kvm *kvm, { struct slot_rmap_walk_iterator iterator; + lockdep_assert_held_write(&kvm->mmu_lock); + for_each_slot_rmap_range(slot, start_level, end_level, start_gfn, end_gfn, &iterator) { if (iterator.rmap) -- cgit v1.2.3 From f3d90f901d18749dca096719540a075f59240051 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 2 Feb 2023 18:27:51 +0000 Subject: KVM: x86/mmu: Clean up mmu.c functions that put return type on separate line Adjust a variety of functions in mmu.c to put the function return type on the same line as the function declaration. As stated in the Linus specification: But the "on their own line" is complete garbage to begin with. That will NEVER be a kernel rule. We should never have a rule that assumes things are so long that they need to be on multiple lines. We don't put function return types on their own lines either, even if some other projects have that rule (just to get function names at the beginning of lines or some other odd reason). Leave the functions generated by BUILD_MMU_ROLE_REGS_ACCESSOR() as-is, that code is basically illegible no matter how it's formatted. No functional change intended. Link: https://lore.kernel.org/mm-commits/CAHk-=wjS-Jg7sGMwUPpDsjv392nDOOs0CtUtVkp=S6Q7JzFJRw@mail.gmail.com Signed-off-by: Ben Gardon Link: https://lore.kernel.org/r/20230202182809.1929122-4-bgardon@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 57 ++++++++++++++++++++++++-------------------------- 1 file changed, 27 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index bda2814dd158..b2124962e5a8 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -895,9 +895,9 @@ static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) untrack_possible_nx_huge_page(kvm, sp); } -static struct kvm_memory_slot * -gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, - bool no_dirty_log) +static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, + gfn_t gfn, + bool no_dirty_log) { struct kvm_memory_slot *slot; @@ -960,9 +960,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, return count; } -static void -pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, - struct pte_list_desc *desc, int i) +static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, + struct pte_list_desc *desc, int i) { struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); int j = head_desc->spte_count - 1; @@ -1510,8 +1509,8 @@ struct slot_rmap_walk_iterator { struct kvm_rmap_head *end_rmap; }; -static void -rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) +static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, + int level) { iterator->level = level; iterator->gfn = iterator->start_gfn; @@ -1519,10 +1518,10 @@ rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level) iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot); } -static void -slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, - const struct kvm_memory_slot *slot, int start_level, - int end_level, gfn_t start_gfn, gfn_t end_gfn) +static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, + const struct kvm_memory_slot *slot, 
+ int start_level, int end_level, + gfn_t start_gfn, gfn_t end_gfn) { iterator->slot = slot; iterator->start_level = start_level; @@ -3373,9 +3372,9 @@ static bool page_fault_can_be_fast(struct kvm_page_fault *fault) * Returns true if the SPTE was fixed successfully. Otherwise, * someone else modified the SPTE from its original value. */ -static bool -fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, - u64 *sptep, u64 old_spte, u64 new_spte) +static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, + struct kvm_page_fault *fault, + u64 *sptep, u64 old_spte, u64 new_spte) { /* * Theoretically we could also set dirty bit (and flush TLB) here in @@ -4708,10 +4707,9 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, #include "paging_tmpl.h" #undef PTTYPE -static void -__reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check, - u64 pa_bits_rsvd, int level, bool nx, bool gbpages, - bool pse, bool amd) +static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check, + u64 pa_bits_rsvd, int level, bool nx, + bool gbpages, bool pse, bool amd) { u64 gbpages_bit_rsvd = 0; u64 nonleaf_bit8_rsvd = 0; @@ -4824,9 +4822,9 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu, guest_cpuid_is_amd_or_hygon(vcpu)); } -static void -__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, - u64 pa_bits_rsvd, bool execonly, int huge_page_level) +static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, + u64 pa_bits_rsvd, bool execonly, + int huge_page_level) { u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51); u64 large_1g_rsvd = 0, large_2m_rsvd = 0; @@ -4926,8 +4924,7 @@ static inline bool boot_cpu_is_amd(void) * the direct page table on host, use as much mmu features as * possible, however, kvm currently does not do execution-protection. */ -static void -reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context) +static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context) { struct rsvd_bits_validate *shadow_zero_check; int i; @@ -5140,8 +5137,8 @@ static void paging32_init_context(struct kvm_mmu *context) context->sync_spte = paging32_sync_spte; } -static union kvm_cpu_role -kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs) +static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu, + const struct kvm_mmu_role_regs *regs) { union kvm_cpu_role role = {0}; @@ -6750,8 +6747,8 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) } } -static unsigned long -mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) +static unsigned long mmu_shrink_scan(struct shrinker *shrink, + struct shrink_control *sc) { struct kvm *kvm; int nr_to_scan = sc->nr_to_scan; @@ -6809,8 +6806,8 @@ unlock: return freed; } -static unsigned long -mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) +static unsigned long mmu_shrink_count(struct shrinker *shrink, + struct shrink_control *sc) { return percpu_counter_read_positive(&kvm_total_used_mmu_pages); } -- cgit v1.2.3 From ad36aab37ae4f2ca898a809454020c5acc23b47c Mon Sep 17 00:00:00 2001 From: Yu Zhang Date: Thu, 19 Jan 2023 22:19:45 +0800 Subject: KVM: nVMX: Remove outdated comments in nested_vmx_setup_ctls_msrs() nested_vmx_setup_ctls_msrs() initializes the vmcs_conf.nested, which stores the global VMX MSR configurations when nested is supported, regardless of any particular CPUID settings for one VM. 
Commit 6defc591846d ("KVM: nVMX: include conditional controls in /dev/kvm KVM_GET_MSRS") added the some feature flags for secondary proc-based controls, so that those features can be available in KVM_GET_MSRS. Yet this commit did not remove the obsolete comments in nested_vmx_setup_ctls_msrs(). Just fix the comments, and no functional change intended. Fixes: 6defc591846d ("KVM: nVMX: include conditional controls in /dev/kvm KVM_GET_MSRS") Signed-off-by: Yu Zhang Link: https://lore.kernel.org/r/20230119141946.585610-1-yu.c.zhang@linux.intel.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/nested.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f63b28f46a71..d778fa304f51 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6866,11 +6866,6 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) msrs->procbased_ctls_low &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); - /* - * secondary cpu-based controls. Do not include those that - * depend on CPUID bits, they are added later by - * vmx_vcpu_after_set_cpuid. - */ msrs->secondary_ctls_low = 0; msrs->secondary_ctls_high = vmcs_conf->cpu_based_2nd_exec_ctrl; -- cgit v1.2.3 From f6cde92083dec5cf424504d7029acdffbe5beed8 Mon Sep 17 00:00:00 2001 From: Yu Zhang Date: Thu, 19 Jan 2023 22:19:46 +0800 Subject: KVM: nVMX: Add helpers to setup VMX control msr configs nested_vmx_setup_ctls_msrs() is used to set up the various VMX MSR controls for nested VMX. But it is a bit lengthy, just add helpers to setup the configuration of VMX MSRs. Suggested-by: Sean Christopherson Signed-off-by: Yu Zhang Link: https://lore.kernel.org/r/20230119141946.585610-2-yu.c.zhang@linux.intel.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/nested.c | 107 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 74 insertions(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d778fa304f51..dab529cd799c 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -6753,36 +6753,9 @@ static u64 nested_vmx_calc_vmcs_enum_msr(void) return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT; } -/* - * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be - * returned for the various VMX controls MSRs when nested VMX is enabled. - * The same values should also be used to verify that vmcs12 control fields are - * valid during nested entry from L1 to L2. - * Each of these control msrs has a low and high 32-bit half: A low bit is on - * if the corresponding bit in the (32-bit) control field *must* be on, and a - * bit in the high half is on if the corresponding bit in the control field - * may be on. See also vmx_control_verify(). - */ -void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) +static void nested_vmx_setup_pinbased_ctls(struct vmcs_config *vmcs_conf, + struct nested_vmx_msrs *msrs) { - struct nested_vmx_msrs *msrs = &vmcs_conf->nested; - - /* - * Note that as a general rule, the high half of the MSRs (bits in - * the control fields which may be 1) should be initialized by the - * intersection of the underlying hardware's MSR (i.e., features which - * can be supported) and the list of features we want to expose - - * because they are known to be properly supported in our code. 
- * Also, usually, the low half of the MSRs (bits which must be 1) can - * be set to 0, meaning that L1 may turn off any of these bits. The - * reason is that if one of these bits is necessary, it will appear - * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control - * fields of vmcs01 and vmcs02, will turn these bits off - and - * nested_vmx_l1_wants_exit() will not pass related exits to L1. - * These rules have exceptions below. - */ - - /* pin-based controls */ msrs->pinbased_ctls_low = PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; @@ -6795,8 +6768,11 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) msrs->pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | PIN_BASED_VMX_PREEMPTION_TIMER; +} - /* exit controls */ +static void nested_vmx_setup_exit_ctls(struct vmcs_config *vmcs_conf, + struct nested_vmx_msrs *msrs) +{ msrs->exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; @@ -6815,8 +6791,11 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) /* We support free control of debug control saving. */ msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; +} - /* entry controls */ +static void nested_vmx_setup_entry_ctls(struct vmcs_config *vmcs_conf, + struct nested_vmx_msrs *msrs) +{ msrs->entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; @@ -6832,8 +6811,11 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) /* We support free control of debug control loading. */ msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; +} - /* cpu-based controls */ +static void nested_vmx_setup_cpubased_ctls(struct vmcs_config *vmcs_conf, + struct nested_vmx_msrs *msrs) +{ msrs->procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; @@ -6865,7 +6847,12 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) /* We support free control of CR3 access interception. */ msrs->procbased_ctls_low &= ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); +} +static void nested_vmx_setup_secondary_ctls(u32 ept_caps, + struct vmcs_config *vmcs_conf, + struct nested_vmx_msrs *msrs) +{ msrs->secondary_ctls_low = 0; msrs->secondary_ctls_high = vmcs_conf->cpu_based_2nd_exec_ctrl; @@ -6943,8 +6930,11 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) if (enable_sgx) msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING; +} - /* miscellaneous data */ +static void nested_vmx_setup_misc_data(struct vmcs_config *vmcs_conf, + struct nested_vmx_msrs *msrs) +{ msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA; msrs->misc_low |= MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | @@ -6952,7 +6942,10 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) VMX_MISC_ACTIVITY_HLT | VMX_MISC_ACTIVITY_WAIT_SIPI; msrs->misc_high = 0; +} +static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs) +{ /* * This MSR reports some information about VMX support. We * should return information about the VMX we emulate for the @@ -6967,7 +6960,10 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) if (cpu_has_vmx_basic_inout()) msrs->basic |= VMX_BASIC_INOUT; +} +static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs) +{ /* * These MSRs specify bits which the guest must keep fixed on * while L1 is in VMXON mode (in L1's root mode, or running an L2). 
@@ -6984,6 +6980,51 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) if (vmx_umip_emulated()) msrs->cr4_fixed1 |= X86_CR4_UMIP; +} + +/* + * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be + * returned for the various VMX controls MSRs when nested VMX is enabled. + * The same values should also be used to verify that vmcs12 control fields are + * valid during nested entry from L1 to L2. + * Each of these control msrs has a low and high 32-bit half: A low bit is on + * if the corresponding bit in the (32-bit) control field *must* be on, and a + * bit in the high half is on if the corresponding bit in the control field + * may be on. See also vmx_control_verify(). + */ +void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps) +{ + struct nested_vmx_msrs *msrs = &vmcs_conf->nested; + + /* + * Note that as a general rule, the high half of the MSRs (bits in + * the control fields which may be 1) should be initialized by the + * intersection of the underlying hardware's MSR (i.e., features which + * can be supported) and the list of features we want to expose - + * because they are known to be properly supported in our code. + * Also, usually, the low half of the MSRs (bits which must be 1) can + * be set to 0, meaning that L1 may turn off any of these bits. The + * reason is that if one of these bits is necessary, it will appear + * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control + * fields of vmcs01 and vmcs02, will turn these bits off - and + * nested_vmx_l1_wants_exit() will not pass related exits to L1. + * These rules have exceptions below. + */ + nested_vmx_setup_pinbased_ctls(vmcs_conf, msrs); + + nested_vmx_setup_exit_ctls(vmcs_conf, msrs); + + nested_vmx_setup_entry_ctls(vmcs_conf, msrs); + + nested_vmx_setup_cpubased_ctls(vmcs_conf, msrs); + + nested_vmx_setup_secondary_ctls(ept_caps, vmcs_conf, msrs); + + nested_vmx_setup_misc_data(vmcs_conf, msrs); + + nested_vmx_setup_basic(msrs); + + nested_vmx_setup_cr_fixed(msrs); msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr(); } -- cgit v1.2.3 From 2fdcc1b324189b5fb20655baebd40cd82e2bdf0c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 22 Mar 2023 02:37:26 +0100 Subject: KVM: x86/mmu: Avoid indirect call for get_cr3 Most of the time, calls to get_guest_pgd result in calling kvm_read_cr3 (the exception is only nested TDP). Hardcode the default instead of using the get_cr3 function, avoiding a retpoline if they are enabled. 
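The trick is plain function-pointer devirtualization: compare the pointer against the one implementation that is almost always installed and, when it matches, call that implementation directly so retpoline builds skip the indirect branch on the hot path. Below is a minimal stand-alone sketch of the pattern with invented names (it is not the kernel code); the rare case keeps the ordinary indirect call.

#include <stdio.h>

struct mmu_ctx {
	unsigned long (*get_guest_pgd)(struct mmu_ctx *ctx);
	unsigned long cr3;
};

/* The overwhelmingly common implementation. */
static unsigned long get_cr3_direct(struct mmu_ctx *ctx)
{
	return ctx->cr3;
}

static unsigned long mmu_get_guest_pgd(struct mmu_ctx *ctx)
{
	/* Hot path: known target, direct call, no retpoline thunk. */
	if (ctx->get_guest_pgd == get_cr3_direct)
		return get_cr3_direct(ctx);

	/* Rare path (nested TDP in the real code): indirect call. */
	return ctx->get_guest_pgd(ctx);
}

int main(void)
{
	struct mmu_ctx ctx = { .get_guest_pgd = get_cr3_direct, .cr3 = 0x1000 };

	printf("guest pgd = %#lx\n", mmu_get_guest_pgd(&ctx));
	return 0;
}

In the kernel the shortcut is additionally gated on IS_ENABLED(CONFIG_RETPOLINE), as the extra comparison is pure overhead when indirect calls are cheap.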
Signed-off-by: Paolo Bonzini Signed-off-by: Mathias Krause Link: https://lore.kernel.org/r/20230322013731.102955-2-minipli@grsecurity.net Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/mmu.c | 31 ++++++++++++++++++++----------- arch/x86/kvm/mmu/paging_tmpl.h | 2 +- 2 files changed, 21 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index b2124962e5a8..4c874d4ec68f 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -256,6 +256,20 @@ static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu) return regs; } +static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu) +{ + return kvm_read_cr3(vcpu); +} + +static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu, + struct kvm_mmu *mmu) +{ + if (IS_ENABLED(CONFIG_RETPOLINE) && mmu->get_guest_pgd == get_guest_cr3) + return kvm_read_cr3(vcpu); + + return mmu->get_guest_pgd(vcpu); +} + static inline bool kvm_available_flush_tlb_with_range(void) { return kvm_x86_ops.tlb_remote_flush_with_range; @@ -3801,7 +3815,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) int quadrant, i, r; hpa_t root; - root_pgd = mmu->get_guest_pgd(vcpu); + root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu); root_gfn = root_pgd >> PAGE_SHIFT; if (mmu_check_root(vcpu, root_gfn)) @@ -4251,7 +4265,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, arch.token = alloc_apf_token(vcpu); arch.gfn = gfn; arch.direct_map = vcpu->arch.mmu->root_role.direct; - arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); + arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu); return kvm_setup_async_pf(vcpu, cr2_or_gpa, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); @@ -4270,7 +4284,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) return; if (!vcpu->arch.mmu->root_role.direct && - work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu)) + work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu)) return; kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true, NULL); @@ -4673,11 +4687,6 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd) } EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd); -static unsigned long get_cr3(struct kvm_vcpu *vcpu) -{ - return kvm_read_cr3(vcpu); -} - static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, unsigned int access) { @@ -5223,7 +5232,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, context->root_role.word = root_role.word; context->page_fault = kvm_tdp_page_fault; context->sync_spte = NULL; - context->get_guest_pgd = get_cr3; + context->get_guest_pgd = get_guest_cr3; context->get_pdptr = kvm_pdptr_read; context->inject_page_fault = kvm_inject_page_fault; @@ -5372,7 +5381,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu, kvm_init_shadow_mmu(vcpu, cpu_role); - context->get_guest_pgd = get_cr3; + context->get_guest_pgd = get_guest_cr3; context->get_pdptr = kvm_pdptr_read; context->inject_page_fault = kvm_inject_page_fault; } @@ -5386,7 +5395,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, return; g_context->cpu_role.as_u64 = new_mode.as_u64; - g_context->get_guest_pgd = get_cr3; + g_context->get_guest_pgd = get_guest_cr3; g_context->get_pdptr = kvm_pdptr_read; g_context->inject_page_fault = kvm_inject_page_fault; diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 03a9577329fc..0662e0278e70 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -324,7 +324,7 @@ static int 
FNAME(walk_addr_generic)(struct guest_walker *walker, trace_kvm_mmu_pagetable_walk(addr, access); retry_walk: walker->level = mmu->cpu_role.base.level; - pte = mmu->get_guest_pgd(vcpu); + pte = kvm_mmu_get_guest_pgd(vcpu, mmu); have_ad = PT_HAVE_ACCESSED_DIRTY(mmu); #if PTTYPE == 64 -- cgit v1.2.3 From 50f13998451effea5c5fdc70fe576f8b435d6224 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Wed, 22 Mar 2023 02:37:30 +0100 Subject: KVM: x86/mmu: Fix comment typo Fix a small comment typo in make_spte(). Signed-off-by: Mathias Krause Link: https://lore.kernel.org/r/20230322013731.102955-6-minipli@grsecurity.net Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/spte.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index c15bfca3ed15..cf2c6426a6fc 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -164,7 +164,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, /* * For simplicity, enforce the NX huge page mitigation even if not * strictly necessary. KVM could ignore the mitigation if paging is - * disabled in the guest, as the guest doesn't have an page tables to + * disabled in the guest, as the guest doesn't have any page tables to * abuse. But to safely ignore the mitigation, KVM would have to * ensure a new MMU is loaded (or all shadow pages zapped) when CR0.PG * is toggled on, and that's a net negative for performance when TDP is -- cgit v1.2.3 From 01b31714bd90be2784f7145bf93b7f78f3d081e1 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Wed, 22 Mar 2023 02:37:27 +0100 Subject: KVM: x86: Do not unload MMU roots when only toggling CR0.WP with TDP enabled There is no need to unload the MMU roots with TDP enabled when only CR0.WP has changed -- the paging structures are still valid, only the permission bitmap needs to be updated. One heavy user of toggling CR0.WP is grsecurity's KERNEXEC feature to implement kernel W^X. The optimization brings a huge performance gain for this case as the following micro-benchmark running 'ssdd 10 50000' from rt-tests[1] on a grsecurity L1 VM shows (runtime in seconds, lower is better): legacy TDP shadow kvm-x86/next@d8708b 8.43s 9.45s 70.3s +patch 5.39s 5.63s 70.2s For legacy MMU this is ~36% faster, for TDP MMU even ~40% faster. Also TDP and legacy MMU now both have a similar runtime which vanishes the need to disable TDP MMU for grsecurity. Shadow MMU sees no measurable difference and is still slow, as expected. [1] https://git.kernel.org/pub/scm/utils/rt-tests/rt-tests.git Signed-off-by: Mathias Krause Link: https://lore.kernel.org/r/20230322013731.102955-3-minipli@grsecurity.net Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 237c483b1230..c6d909778b2c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -906,6 +906,18 @@ EXPORT_SYMBOL_GPL(load_pdptrs); void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0) { + /* + * CR0.WP is incorporated into the MMU role, but only for non-nested, + * indirect shadow MMUs. If TDP is enabled, the MMU's metadata needs + * to be updated, e.g. so that emulating guest translations does the + * right thing, but there's no need to unload the root as CR0.WP + * doesn't affect SPTEs. 
+ */ + if (tdp_enabled && (cr0 ^ old_cr0) == X86_CR0_WP) { + kvm_init_mmu(vcpu); + return; + } + if ((cr0 ^ old_cr0) & X86_CR0_PG) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); -- cgit v1.2.3 From e40bcf9f3a187126ce74a05d3b177fc16874446f Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Wed, 22 Mar 2023 02:37:28 +0100 Subject: KVM: x86: Ignore CR0.WP toggles in non-paging mode If paging is disabled, there are no permission bits to emulate. Micro-optimize this case to avoid unnecessary work. Suggested-by: Sean Christopherson Signed-off-by: Mathias Krause Link: https://lore.kernel.org/r/20230322013731.102955-4-minipli@grsecurity.net Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c6d909778b2c..8a66ac7a4878 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -908,14 +908,20 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon { /* * CR0.WP is incorporated into the MMU role, but only for non-nested, - * indirect shadow MMUs. If TDP is enabled, the MMU's metadata needs - * to be updated, e.g. so that emulating guest translations does the - * right thing, but there's no need to unload the root as CR0.WP - * doesn't affect SPTEs. + * indirect shadow MMUs. If paging is disabled, no updates are needed + * as there are no permission bits to emulate. If TDP is enabled, the + * MMU's metadata needs to be updated, e.g. so that emulating guest + * translations does the right thing, but there's no need to unload the + * root as CR0.WP doesn't affect SPTEs. */ - if (tdp_enabled && (cr0 ^ old_cr0) == X86_CR0_WP) { - kvm_init_mmu(vcpu); - return; + if ((cr0 ^ old_cr0) == X86_CR0_WP) { + if (!(cr0 & X86_CR0_PG)) + return; + + if (tdp_enabled) { + kvm_init_mmu(vcpu); + return; + } } if ((cr0 ^ old_cr0) & X86_CR0_PG) { -- cgit v1.2.3 From 74cdc836919bf34684ef66f995273f35e2189daf Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Wed, 22 Mar 2023 02:37:29 +0100 Subject: KVM: x86: Make use of kvm_read_cr*_bits() when testing bits Make use of the kvm_read_cr{0,4}_bits() helper functions when we only want to know the state of certain bits instead of the whole register. This not only makes the intent cleaner, it also avoids a potential VMREAD in case the tested bits aren't guest owned. 
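The saving comes from how the masked readers are built: only a request that touches guest-owned (i.e. possibly stale) bits forces a refresh from hardware, everything else is answered from the software cache. The sketch below models just that decision, with invented names and a printf standing in for the VMREAD; it illustrates the idea rather than the kvm_cache_regs.h code.

#include <stdbool.h>
#include <stdio.h>

struct vcpu_sketch {
	unsigned long cr0_cache;	/* software copy of guest CR0 */
	unsigned long guest_owned;	/* bits hardware may change behind our back */
	bool cache_valid;
};

static void refresh_from_hw(struct vcpu_sketch *v)
{
	printf("expensive hardware read (VMREAD stand-in)\n");
	/* v->cr0_cache = <read from hardware>; */
	v->cache_valid = true;
}

static unsigned long read_cr0_bits(struct vcpu_sketch *v, unsigned long mask)
{
	if ((mask & v->guest_owned) && !v->cache_valid)
		refresh_from_hw(v);

	return v->cr0_cache & mask;
}

int main(void)
{
	struct vcpu_sketch v = {
		.cr0_cache   = (1UL << 0) | (1UL << 31),
		.guest_owned = 1UL << 3,		/* say, CR0.TS */
		.cache_valid = false,
	};

	/* Bit 0 is host owned: served from the cache, no refresh. */
	printf("PE: %d\n", read_cr0_bits(&v, 1UL << 0) != 0);

	/* Bit 3 is guest owned: triggers the refresh first. */
	printf("TS: %d\n", read_cr0_bits(&v, 1UL << 3) != 0);
	return 0;
}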
Signed-off-by: Mathias Krause Link: https://lore.kernel.org/r/20230322013731.102955-5-minipli@grsecurity.net Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 4 ++-- arch/x86/kvm/vmx/vmx.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 612e6c70ce2e..f4aa170b5b97 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -540,9 +540,9 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) if (!pmc) return 1; - if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) && + if (!(kvm_read_cr4_bits(vcpu, X86_CR4_PCE)) && (static_call(kvm_x86_get_cpl)(vcpu) != 0) && - (kvm_read_cr0(vcpu) & X86_CR0_PE)) + (kvm_read_cr0_bits(vcpu, X86_CR0_PE))) return 1; *data = pmc_read_counter(pmc) & mask; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d7bf14abdba1..8fc1a0c7856f 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5517,7 +5517,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) break; case 3: /* lmsw */ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; - trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); + trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu, ~0xful) | val)); kvm_lmsw(vcpu, val); return kvm_skip_emulated_instruction(vcpu); @@ -7575,7 +7575,7 @@ static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT; - if (kvm_read_cr0(vcpu) & X86_CR0_CD) { + if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) { if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) cache = MTRR_TYPE_WRBACK; else -- cgit v1.2.3 From fb509f76acc8d42bed11bca308404f81c2be856a Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Wed, 22 Mar 2023 02:37:31 +0100 Subject: KVM: VMX: Make CR0.WP a guest owned bit Guests like grsecurity that make heavy use of CR0.WP to implement kernel level W^X will suffer from the implied VMEXITs. With EPT there is no need to intercept a guest change of CR0.WP, so simply make it a guest owned bit if we can do so. This implies that a read of a guest's CR0.WP bit might need a VMREAD. However, the only potentially affected user seems to be kvm_init_mmu() which is a heavy operation to begin with. But also most callers already cache the full value of CR0 anyway, so no additional VMREAD is needed. The only exception is nested_vmx_load_cr3(). This change is VMX-specific, as SVM has no such fine grained control register intercept control. 
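To make the cost model concrete, below is a user-space sketch of the guest-side pattern that motivates the change: a W^X style "clear WP, write read-only data, restore WP" sequence. The CR0 accessors are stubs over a plain variable rather than the privileged instructions, so this only illustrates the access pattern; with CR0.WP guest owned, neither write_cr0() in such a sequence traps to L0 any more.

#include <stdio.h>

#define X86_CR0_WP	(1UL << 16)

/* Stubs standing in for the privileged mov to/from CR0 instructions. */
static unsigned long fake_cr0 = X86_CR0_WP;
static unsigned long read_cr0(void)		{ return fake_cr0; }
static void write_cr0(unsigned long val)	{ fake_cr0 = val; }

static unsigned long ro_data = 1;	/* pretend this lives in a read-only mapping */

static void patch_ro_data(unsigned long val)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* used to be a VM-exit per toggle */
	ro_data = val;
	write_cr0(cr0);			/* used to be a VM-exit per toggle */
}

int main(void)
{
	patch_ro_data(42);
	printf("ro_data=%lu, CR0.WP=%lu\n", ro_data, (read_cr0() >> 16) & 1);
	return 0;
}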
Suggested-by: Sean Christopherson Signed-off-by: Mathias Krause Link: https://lore.kernel.org/r/20230322013731.102955-7-minipli@grsecurity.net Co-developed-by: Sean Christopherson Signed-off-by: Sean Christopherson --- arch/x86/kvm/kvm_cache_regs.h | 2 +- arch/x86/kvm/vmx/nested.c | 4 ++-- arch/x86/kvm/vmx/vmx.c | 2 +- arch/x86/kvm/vmx/vmx.h | 18 ++++++++++++++++++ 4 files changed, 22 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 4c91f626c058..e50d353b5c1c 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -4,7 +4,7 @@ #include -#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS +#define KVM_POSSIBLE_CR0_GUEST_BITS (X86_CR0_TS | X86_CR0_WP) #define KVM_POSSIBLE_CR4_GUEST_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f63b28f46a71..61d940fc91ba 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -4481,7 +4481,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, * CR0_GUEST_HOST_MASK is already set in the original vmcs01 * (KVM doesn't change it); */ - vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; + vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); vmx_set_cr0(vcpu, vmcs12->host_cr0); /* Same as above - no reason to call set_cr4_guest_host_mask(). */ @@ -4632,7 +4632,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) */ vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); - vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; + vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 8fc1a0c7856f..e501f6864a72 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4790,7 +4790,7 @@ static void init_vmcs(struct vcpu_vmx *vmx) /* 22.2.1, 20.8.1 */ vm_entry_controls_set(vmx, vmx_vmentry_ctrl()); - vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; + vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits(); vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits); set_cr4_guest_host_mask(vmx); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 2acdc54bc34b..423e9d3c9c40 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -640,6 +640,24 @@ BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64) (1 << VCPU_EXREG_EXIT_INFO_1) | \ (1 << VCPU_EXREG_EXIT_INFO_2)) +static inline unsigned long vmx_l1_guest_owned_cr0_bits(void) +{ + unsigned long bits = KVM_POSSIBLE_CR0_GUEST_BITS; + + /* + * CR0.WP needs to be intercepted when KVM is shadowing legacy paging + * in order to construct shadow PTEs with the correct protections. + * Note! CR0.WP technically can be passed through to the guest if + * paging is disabled, but checking CR0.PG would generate a cyclical + * dependency of sorts due to forcing the caller to ensure CR0 holds + * the correct value prior to determining which CR0 bits can be owned + * by L1. Keep it simple and limit the optimization to EPT. 
+ */ + if (!enable_ept) + bits &= ~X86_CR0_WP; + return bits; +} + static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm) { return container_of(kvm, struct kvm_vmx, kvm); -- cgit v1.2.3 From 0c928ff26bd68d9638444601c0cbf4e4f978b752 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 22 Mar 2023 09:55:20 -0700 Subject: KVM: SVM: Fix benign "bool vs. int" comparison in svm_set_cr0() Explicitly convert the return from is_paging() to a bool when comparing against old_paging, which is also a boolean. is_paging() sneakily uses kvm_read_cr0_bits() and returns an int, i.e. returns X86_CR0_PG or 0, not 1 or 0. Luckily, the bug is benign as it only results in a false positive, not a false negative, i.e. only causes a spurious refresh of CR4 when paging is enabled in both the old and new. Cc: Maxim Levitsky Fixes: c53bbe2145f5 ("KVM: x86: SVM: don't passthrough SMAP/SMEP/PKE bits in !NPT && !gCR0.PG case") Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 70183d2271b5..19794a8ea28c 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1802,7 +1802,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!npt_enabled) { hcr0 |= X86_CR0_PG | X86_CR0_WP; - if (old_paging != is_paging(vcpu)) + if (old_paging != !!is_paging(vcpu)) svm_set_cr4(vcpu, kvm_read_cr4(vcpu)); } -- cgit v1.2.3 From 607475cfa0f753862c0030de836875fe4ac5cf8a Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 22 Mar 2023 12:58:21 +0800 Subject: KVM: x86: Add helpers to query individual CR0/CR4 bits Add helpers to check if a specific CR0/CR4 bit is set to avoid a plethora of implicit casts from the "unsigned long" return of kvm_read_cr*_bits(), and to make each caller's intent more obvious. Defer converting helpers that do truly ugly casts from "unsigned long" to "int", e.g. is_pse(), to a future commit so that their conversion is more isolated. Opportunistically drop the superfluous pcid_enabled from kvm_set_cr3(); the local variable is used only once, immediately after its declaration. 
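The shape of the new helpers is easy to reproduce outside the kernel; the GNU-C sketch below (invented macro names, not KVM's API) shows the essential parts: a masked read reduced to a bool plus a compile-time check that the caller passed exactly one bit, so the boolean conversion can never silently discard information.

#include <stdbool.h>
#include <stdio.h>

#define IS_POW2(x)	((x) != 0 && ((x) & ((x) - 1)) == 0)

#define X86_CR4_PCIDE	(1UL << 17)

struct regs {
	unsigned long cr4;
};

/* Statement expression (GNU C, as used throughout the kernel). */
#define CR4_BIT_SET(r, bit)						\
({									\
	_Static_assert(IS_POW2(bit), "exactly one bit, please");	\
	((r)->cr4 & (bit)) != 0;					\
})

int main(void)
{
	struct regs r = { .cr4 = X86_CR4_PCIDE };

	printf("PCIDE: %d\n", CR4_BIT_SET(&r, X86_CR4_PCIDE));
	/* CR4_BIT_SET(&r, 0x3) would fail to build, like BUILD_BUG_ON(). */
	return 0;
}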
Suggested-by: Sean Christopherson Signed-off-by: Binbin Wu Link: https://lore.kernel.org/r/20230322045824.22970-2-binbin.wu@linux.intel.com [sean: move "obvious" conversions to this commit, massage changelog] Signed-off-by: Sean Christopherson --- arch/x86/kvm/cpuid.c | 4 ++-- arch/x86/kvm/kvm_cache_regs.h | 16 ++++++++++++++++ arch/x86/kvm/mmu.h | 2 +- arch/x86/kvm/pmu.c | 4 ++-- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/vmx/vmx.c | 2 +- arch/x86/kvm/x86.c | 22 ++++++++++------------ arch/x86/kvm/x86.h | 4 ++-- 8 files changed, 35 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 9583a110cf5f..1ad3bde72526 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -266,7 +266,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e /* Update OSXSAVE bit */ if (boot_cpu_has(X86_FEATURE_XSAVE)) cpuid_entry_change(best, X86_FEATURE_OSXSAVE, - kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)); + kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)); cpuid_entry_change(best, X86_FEATURE_APIC, vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE); @@ -275,7 +275,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e best = cpuid_entry2_find(entries, nent, 7, 0); if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) cpuid_entry_change(best, X86_FEATURE_OSPKE, - kvm_read_cr4_bits(vcpu, X86_CR4_PKE)); + kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)); best = cpuid_entry2_find(entries, nent, 0xD, 0); if (best) diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index e50d353b5c1c..75eae9c4998a 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -157,6 +157,14 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) return vcpu->arch.cr0 & mask; } +static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu, + unsigned long cr0_bit) +{ + BUILD_BUG_ON(!is_power_of_2(cr0_bit)); + + return !!kvm_read_cr0_bits(vcpu, cr0_bit); +} + static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu) { return kvm_read_cr0_bits(vcpu, ~0UL); @@ -171,6 +179,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) return vcpu->arch.cr4 & mask; } +static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, + unsigned long cr4_bit) +{ + BUILD_BUG_ON(!is_power_of_2(cr4_bit)); + + return !!kvm_read_cr4_bits(vcpu, cr4_bit); +} + static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu) { if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 168c46fd8dd1..89f532516a45 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -132,7 +132,7 @@ static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3) { BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0); - return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE) + return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE) ? 
cr3 & X86_CR3_PCID_MASK : 0; } diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index f4aa170b5b97..bb1733bb5bf9 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -540,9 +540,9 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data) if (!pmc) return 1; - if (!(kvm_read_cr4_bits(vcpu, X86_CR4_PCE)) && + if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) && (static_call(kvm_x86_get_cpl)(vcpu) != 0) && - (kvm_read_cr0_bits(vcpu, X86_CR0_PE))) + kvm_is_cr0_bit_set(vcpu, X86_CR0_PE)) return 1; *data = pmc_read_counter(pmc) & mask; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 61d940fc91ba..06b2c24d5a74 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -5154,7 +5154,7 @@ static int handle_vmxon(struct kvm_vcpu *vcpu) * does force CR0.PE=1, but only to also force VM86 in order to emulate * Real Mode, and so there's no need to check CR0.PE manually. */ - if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { + if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_VMXE)) { kvm_queue_exception(vcpu, UD_VECTOR); return 1; } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e501f6864a72..c3ef74562158 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5180,7 +5180,7 @@ bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu) if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) return true; - return vmx_get_cpl(vcpu) == 3 && kvm_read_cr0_bits(vcpu, X86_CR0_AM) && + return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) && (kvm_get_rflags(vcpu) & X86_EFLAGS_AC); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8a66ac7a4878..3fab192862d4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -841,7 +841,7 @@ bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) { - if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE)) + if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE)) return true; kvm_queue_exception(vcpu, UD_VECTOR); @@ -983,7 +983,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) return 1; if (!(cr0 & X86_CR0_PG) && - (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))) + (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))) return 1; static_call(kvm_x86_set_cr0)(vcpu, cr0); @@ -1005,7 +1005,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) if (vcpu->arch.guest_state_protected) return; - if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { + if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) { if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); @@ -1019,7 +1019,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) if (static_cpu_has(X86_FEATURE_PKU) && vcpu->arch.pkru != vcpu->arch.host_pkru && ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || - kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) + kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) write_pkru(vcpu->arch.pkru); #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ } @@ -1033,14 +1033,14 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS if (static_cpu_has(X86_FEATURE_PKU) && ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || - kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) { + kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) { vcpu->arch.pkru = rdpkru(); if (vcpu->arch.pkru != vcpu->arch.host_pkru) write_pkru(vcpu->arch.host_pkru); } #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ - if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { + if (kvm_is_cr4_bit_set(vcpu, 
X86_CR4_OSXSAVE)) { if (vcpu->arch.xcr0 != host_xcr0) xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); @@ -1245,7 +1245,7 @@ static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB * with PCIDE=0. */ - if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) + if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) return; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) @@ -1260,9 +1260,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) bool skip_tlb_flush = false; unsigned long pcid = 0; #ifdef CONFIG_X86_64 - bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); - - if (pcid_enabled) { + if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) { skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH; cr3 &= ~X86_CR3_PCID_NOFLUSH; pcid = cr3 & X86_CR3_PCID_MASK; @@ -5051,7 +5049,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, return 0; if (mce->status & MCI_STATUS_UC) { if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || - !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { + !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 0; } @@ -13254,7 +13252,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) return 1; } - pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); + pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE); switch (type) { case INVPCID_TYPE_INDIV_ADDR: diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index a8167b47b8c8..6d68c6772105 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -123,7 +123,7 @@ static inline bool kvm_exception_is_soft(unsigned int nr) static inline bool is_protmode(struct kvm_vcpu *vcpu) { - return kvm_read_cr0_bits(vcpu, X86_CR0_PE); + return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE); } static inline int is_long_mode(struct kvm_vcpu *vcpu) @@ -193,7 +193,7 @@ static inline bool is_pae_paging(struct kvm_vcpu *vcpu) static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu) { - return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48; + return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48; } static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu) -- cgit v1.2.3 From bede6eb4db19424477c36dace426ae12255f4a0d Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 22 Mar 2023 12:58:22 +0800 Subject: KVM: x86: Use boolean return value for is_{pae,pse,paging}() Convert is_{pae,pse,paging}() to use kvm_is_cr{0,4}_bit_set() and return bools. Returning an "int" requires not one, but two implicit casts, first from "unsigned long" to "int", and then again to a "bool". Both casts are more than a bit dangerous; the ulong=>int casts would drop a bit on 64-bit kernels _if_ the bits in question weren't in the lower 32 bits, and the int=>bool cast can result in false negatives/positives, e.g. see commit 0c928ff26bd6 ("KVM: SVM: Fix benign "bool vs. int" comparison in svm_set_cr0()"). 
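Both hazards are easy to reproduce in plain C. The program below uses made-up flag values (not the real CR bits) and relies on the usual implementation-defined truncating conversion produced by gcc/clang on x86-64; it is a demonstration of the failure modes described above, nothing more.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_PG		(1UL << 31)	/* top bit of the low 32 */
#define FLAG_HIGH	(1UL << 40)	/* only representable on 64-bit */

static int  flag_set_int(unsigned long reg, unsigned long mask)  { return reg & mask; }
static bool flag_set_bool(unsigned long reg, unsigned long mask) { return reg & mask; }

int main(void)
{
	unsigned long reg = FLAG_PG | FLAG_HIGH;
	bool was_set = true;

	/* Hazard 1: the ulong->int conversion drops bits above bit 31, so a
	 * set flag reads back as clear. */
	printf("FLAG_HIGH via int : %d\n", flag_set_int(reg, FLAG_HIGH) != 0);
	printf("FLAG_HIGH via bool: %d\n", flag_set_bool(reg, FLAG_HIGH));

	/* Hazard 2: comparing the raw int against a bool mis-fires even though
	 * both sides mean "set" -- the false positive fixed for is_paging(). */
	if (was_set != flag_set_int(reg, FLAG_PG))
		printf("int : spurious 'paging changed'\n");
	if (was_set != flag_set_bool(reg, FLAG_PG))
		printf("bool: never reached\n");
	return 0;
}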
Suggested-by: Sean Christopherson Signed-off-by: Binbin Wu Link: https://lore.kernel.org/r/20230322045824.22970-3-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/x86.h | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 19794a8ea28c..70183d2271b5 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1802,7 +1802,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!npt_enabled) { hcr0 |= X86_CR0_PG | X86_CR0_WP; - if (old_paging != !!is_paging(vcpu)) + if (old_paging != is_paging(vcpu)) svm_set_cr4(vcpu, kvm_read_cr4(vcpu)); } diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 6d68c6772105..577b82358529 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -171,19 +171,19 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; } -static inline int is_pae(struct kvm_vcpu *vcpu) +static inline bool is_pae(struct kvm_vcpu *vcpu) { - return kvm_read_cr4_bits(vcpu, X86_CR4_PAE); + return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE); } -static inline int is_pse(struct kvm_vcpu *vcpu) +static inline bool is_pse(struct kvm_vcpu *vcpu) { - return kvm_read_cr4_bits(vcpu, X86_CR4_PSE); + return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE); } -static inline int is_paging(struct kvm_vcpu *vcpu) +static inline bool is_paging(struct kvm_vcpu *vcpu) { - return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG)); + return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG)); } static inline bool is_pae_paging(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 627778bfcfa1ff81acfe863dcc4316edd0a51276 Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 22 Mar 2023 12:58:23 +0800 Subject: KVM: SVM: Use kvm_is_cr4_bit_set() to query SMAP/SMEP in "can emulate" Use kvm_is_cr4_bit_set() to query SMAP and SMEP when determining whether or not AMD's SMAP+SEV errata prevents KVM from emulating an instruction. This eliminates an implicit cast from ulong to bool and makes the code slightly more readable. Note, any overhead from making multiple calls to kvm_read_cr4_bits() is negligible, not to mention the code is question is encountered only in rare situations, i.e. is not a remotely hot path. Suggested-by: Sean Christopherson Signed-off-by: Binbin Wu Link: https://lore.kernel.org/r/20230322045824.22970-4-binbin.wu@linux.intel.com [sean: keep local smap/smep variables, massage changelog] Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 70183d2271b5..9c707dd7349a 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4545,7 +4545,6 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, void *insn, int insn_len) { bool smep, smap, is_user; - unsigned long cr4; u64 error_code; /* Emulation is always possible when KVM has access to all guest state. 
*/ @@ -4637,9 +4636,8 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK)) goto resume_guest; - cr4 = kvm_read_cr4(vcpu); - smep = cr4 & X86_CR4_SMEP; - smap = cr4 & X86_CR4_SMAP; + smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP); + smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP); is_user = svm_get_cpl(vcpu) == 3; if (smap && (!smep || is_user)) { pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n"); -- cgit v1.2.3 From 5faaffab5ba8cd4f3781b90cccd47440b54aea5b Mon Sep 17 00:00:00 2001 From: Santosh Shukla Date: Mon, 27 Feb 2023 14:10:06 +0530 Subject: KVM: nSVM: Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting VINTR Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting virtual interrupts in order to request an interrupt window, as KVM has usurped vmcb02's int_ctl. If an interrupt window opens before the next VM-Exit, svm_clear_vintr() will restore vmcb12's int_ctl. If no window opens, V_IRQ will be correctly preserved in vmcb12's int_ctl (because it was never recognized while L2 was running). Suggested-by: Sean Christopherson Link: https://lkml.kernel.org/r/Y9hybI65So5X2LFg%40google.com Signed-off-by: Santosh Shukla Link: https://lore.kernel.org/r/20230227084016.3368-2-santosh.shukla@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/nested.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 05d38944a6c0..a77055d96e31 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -416,18 +416,17 @@ void nested_sync_control_from_vmcb02(struct vcpu_svm *svm) /* Only a few fields of int_ctl are written by the processor. */ mask = V_IRQ_MASK | V_TPR_MASK; - if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) && - svm_is_intercept(svm, INTERCEPT_VINTR)) { - /* - * In order to request an interrupt window, L0 is usurping - * svm->vmcb->control.int_ctl and possibly setting V_IRQ - * even if it was clear in L1's VMCB. Restoring it would be - * wrong. However, in this case V_IRQ will remain true until - * interrupt_window_interception calls svm_clear_vintr and - * restores int_ctl. We can just leave it aside. - */ + /* + * Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting + * virtual interrupts in order to request an interrupt window, as KVM + * has usurped vmcb02's int_ctl. If an interrupt window opens before + * the next VM-Exit, svm_clear_vintr() will restore vmcb12's int_ctl. + * If no window opens, V_IRQ will be correctly preserved in vmcb12's + * int_ctl (because it was never recognized while L2 was running). + */ + if (svm_is_intercept(svm, INTERCEPT_VINTR) && + !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts)) mask &= ~V_IRQ_MASK; - } if (nested_vgif_enabled(svm)) mask |= V_GIF_MASK; -- cgit v1.2.3 From 7334ede457c6f1f7a26344ed25928f63f9a53a8d Mon Sep 17 00:00:00 2001 From: Santosh Shukla Date: Mon, 27 Feb 2023 14:10:07 +0530 Subject: KVM: nSVM: Disable intercept of VINTR if saved L1 host RFLAGS.IF is 0 Disable intercept of virtual interrupts (used to detect interrupt windows) if the saved host (L1) RFLAGS.IF is '0', as the effective RFLAGS.IF for L1 interrupts will never be set while L2 is running (L2's RFLAGS.IF doesn't affect L1 IRQs when virtual interrupts are enabled). 
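
As a conceptual summary of the rule above, here is a small standalone sketch, not the kernel implementation; the structure and field names are simplified stand-ins for the relevant vmcb01/vmcb12 state consulted by recalc_intercepts() in the hunk below.

  #include <stdbool.h>

  /* Simplified stand-in for the bits of vmcb state that matter here. */
  struct nested_run_state {
          bool v_intr_masking;    /* vmcb12 int_ctl has V_INTR_MASKING set */
          bool l1_rflags_if;      /* vmcb01 save.RFLAGS.IF at the time of VMRUN */
  };

  /*
   * A VINTR (interrupt window) intercept is only worth keeping if an L1
   * interrupt window can actually open while L2 is running.
   */
  bool keep_vintr_intercept(const struct nested_run_state *s)
  {
          if (!s->v_intr_masking)
                  return true;

          /*
           * With virtual interrupt masking, L1's effective RFLAGS.IF is
           * frozen at its value on VMRUN; if it was 0, no window can open
           * until the next VM-Exit, so the intercept is pointless.
           */
          return s->l1_rflags_if;
  }
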
Suggested-by: Sean Christopherson Link: https://lkml.kernel.org/r/Y9hybI65So5X2LFg%40google.com Signed-off-by: Santosh Shukla Link: https://lore.kernel.org/r/20230227084016.3368-3-santosh.shukla@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/nested.c | 15 ++++++++++----- arch/x86/kvm/svm/svm.c | 10 ++++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index a77055d96e31..a0a525758ed9 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -139,13 +139,18 @@ void recalc_intercepts(struct vcpu_svm *svm) if (g->int_ctl & V_INTR_MASKING_MASK) { /* - * Once running L2 with HF_VINTR_MASK, EFLAGS.IF and CR8 - * does not affect any interrupt we may want to inject; - * therefore, writes to CR8 are irrelevant to L0, as are - * interrupt window vmexits. + * If L2 is active and V_INTR_MASKING is enabled in vmcb12, + * disable intercept of CR8 writes as L2's CR8 does not affect + * any interrupt KVM may want to inject. + * + * Similarly, disable intercept of virtual interrupts (used to + * detect interrupt windows) if the saved RFLAGS.IF is '0', as + * the effective RFLAGS.IF for L1 interrupts will never be set + * while L2 is running (L2's RFLAGS.IF doesn't affect L1 IRQs). */ vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE); - vmcb_clr_intercept(c, INTERCEPT_VINTR); + if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)) + vmcb_clr_intercept(c, INTERCEPT_VINTR); } /* diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 70183d2271b5..e347225b3914 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1583,6 +1583,16 @@ static void svm_set_vintr(struct vcpu_svm *svm) svm_set_intercept(svm, INTERCEPT_VINTR); + /* + * Recalculating intercepts may have cleared the VINTR intercept. If + * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF + * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN. + * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as + * interrupts will never be unblocked while L2 is running. + */ + if (!svm_is_intercept(svm, INTERCEPT_VINTR)) + return; + /* * This is just a dummy VINTR to actually cause a vmexit to happen. * Actual injection of virtual interrupts happens through EVENTINJ. -- cgit v1.2.3 From 5d1ec4565200f461943990f0988932ca86823087 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky Date: Mon, 27 Feb 2023 14:10:08 +0530 Subject: KVM: nSVM: Raise event on nested VM exit if L1 doesn't intercept IRQs If L1 doesn't intercept interrupts, then KVM will use vmcb02's V_IRQ to detect an interrupt window for L1 IRQs. On a subsequent nested VM-Exit, KVM might need to copy the current V_IRQ from vmcb02 to vmcb01 to continue waiting for an interrupt window, i.e. if there is still a pending IRQ for L1. Raise KVM_REQ_EVENT on nested exit if L1 isn't intercepting IRQs to ensure that KVM will re-enable interrupt window detection if needed. Note that this is a theoretical bug because KVM already raises KVM_REQ_EVENT on each nested VM exit, because the nested VM exit resets RFLAGS and kvm_set_rflags() raises the KVM_REQ_EVENT unconditionally. Explicitly raise KVM_REQ_EVENT for the interrupt window case to avoid having an unnecessary dependency on kvm_set_rflags(), and to document the scenario. 
Signed-off-by: Maxim Levitsky [santosh: reworded description as per Sean's v2 comment] Signed-off-by: Santosh Shukla Link: https://lore.kernel.org/r/20230227084016.3368-4-santosh.shukla@amd.com [sean: further massage changelog and comment] Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/nested.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index a0a525758ed9..8300fd31878d 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -1025,6 +1025,28 @@ int nested_svm_vmexit(struct vcpu_svm *svm) svm_switch_vmcb(svm, &svm->vmcb01); + /* + * Rules for synchronizing int_ctl bits from vmcb02 to vmcb01: + * + * V_IRQ, V_IRQ_VECTOR, V_INTR_PRIO_MASK, V_IGN_TPR: If L1 doesn't + * intercept interrupts, then KVM will use vmcb02's V_IRQ (and related + * flags) to detect interrupt windows for L1 IRQs (even if L1 uses + * virtual interrupt masking). Raise KVM_REQ_EVENT to ensure that + * KVM re-requests an interrupt window if necessary, which implicitly + * copies this bits from vmcb02 to vmcb01. + * + * V_TPR: If L1 doesn't use virtual interrupt masking, then L1's vTPR + * is stored in vmcb02, but its value doesn't need to be copied from/to + * vmcb01 because it is copied from/to the virtual APIC's TPR register + * on each VM entry/exit. + * + * V_GIF: If nested vGIF is not used, KVM uses vmcb02's V_GIF for L1's + * V_GIF. However, GIF is architecturally clear on each VM exit, thus + * there is no need to copy V_GIF from vmcb02 to vmcb01. + */ + if (!nested_exit_on_intr(svm)) + kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); + if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { svm_copy_lbrs(vmcb12, vmcb02); svm_update_lbrv(vcpu); -- cgit v1.2.3 From 772f254d4d5687ee349cbe443959bc01304880d6 Mon Sep 17 00:00:00 2001 From: Maxim Levitsky Date: Mon, 27 Feb 2023 14:10:09 +0530 Subject: KVM: SVM: add wrappers to enable/disable IRET interception SEV-ES guests don't use IRET interception for the detection of an end of a NMI. Therefore it makes sense to create a wrapper to avoid repeating the check for the SEV-ES. No functional change is intended. 
Suggested-by: Sean Christopherson Signed-off-by: Maxim Levitsky [Renamed iret intercept API of style svm_{clr,set}_iret_intercept()] Signed-off-by: Santosh Shukla Link: https://lore.kernel.org/r/20230227084016.3368-5-santosh.shukla@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index e347225b3914..69ace08ab1e4 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2490,16 +2490,29 @@ static int task_switch_interception(struct kvm_vcpu *vcpu) has_error_code, error_code); } +static void svm_clr_iret_intercept(struct vcpu_svm *svm) +{ + if (!sev_es_guest(svm->vcpu.kvm)) + svm_clr_intercept(svm, INTERCEPT_IRET); +} + +static void svm_set_iret_intercept(struct vcpu_svm *svm) +{ + if (!sev_es_guest(svm->vcpu.kvm)) + svm_set_intercept(svm, INTERCEPT_IRET); +} + static int iret_interception(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); ++vcpu->stat.nmi_window_exits; svm->awaiting_iret_completion = true; - if (!sev_es_guest(vcpu->kvm)) { - svm_clr_intercept(svm, INTERCEPT_IRET); + + svm_clr_iret_intercept(svm); + if (!sev_es_guest(vcpu->kvm)) svm->nmi_iret_rip = kvm_rip_read(vcpu); - } + kvm_make_request(KVM_REQ_EVENT, vcpu); return 1; } @@ -3508,8 +3521,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu) return; svm->nmi_masked = true; - if (!sev_es_guest(vcpu->kvm)) - svm_set_intercept(svm, INTERCEPT_IRET); + svm_set_iret_intercept(svm); ++vcpu->stat.nmi_injections; } @@ -3649,12 +3661,10 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) if (masked) { svm->nmi_masked = true; - if (!sev_es_guest(vcpu->kvm)) - svm_set_intercept(svm, INTERCEPT_IRET); + svm_set_iret_intercept(svm); } else { svm->nmi_masked = false; - if (!sev_es_guest(vcpu->kvm)) - svm_clr_intercept(svm, INTERCEPT_IRET); + svm_clr_iret_intercept(svm); } } -- cgit v1.2.3 From 2cb9317377caaec647d7485bc53ab33a0b54f27c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 27 Feb 2023 14:10:10 +0530 Subject: KVM: x86: Raise an event request when processing NMIs if an NMI is pending Don't raise KVM_REQ_EVENT if no NMIs are pending at the end of process_nmi(). Finishing process_nmi() without a pending NMI will become much more likely when KVM gains support for AMD's vNMI, which allows pending vNMIs in hardware, i.e. doesn't require explicit injection. 
Signed-off-by: Santosh Shukla Link: https://lore.kernel.org/r/20230227084016.3368-6-santosh.shukla@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 237c483b1230..3c995483b405 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10150,7 +10150,9 @@ static void process_nmi(struct kvm_vcpu *vcpu) vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); - kvm_make_request(KVM_REQ_EVENT, vcpu); + + if (vcpu->arch.nmi_pending) + kvm_make_request(KVM_REQ_EVENT, vcpu); } void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, -- cgit v1.2.3 From 400fee8c9b2df61721de8103a4054247bea79fd0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 27 Feb 2023 14:10:11 +0530 Subject: KVM: x86: Tweak the code and comment related to handling concurrent NMIs Tweak the code and comment that deals with concurrent NMIs to explicitly call out that x86 allows exactly one pending NMI, but that KVM needs to temporarily allow two pending NMIs in order to workaround the fact that the target vCPU cannot immediately recognize an incoming NMI, unlike bare metal. No functional change intended. Signed-off-by: Santosh Shukla Link: https://lore.kernel.org/r/20230227084016.3368-7-santosh.shukla@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3c995483b405..e317559df489 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10138,15 +10138,22 @@ out: static void process_nmi(struct kvm_vcpu *vcpu) { - unsigned limit = 2; + unsigned int limit; /* - * x86 is limited to one NMI running, and one NMI pending after it. - * If an NMI is already in progress, limit further NMIs to just one. - * Otherwise, allow two (and we'll inject the first one immediately). + * x86 is limited to one NMI pending, but because KVM can't react to + * incoming NMIs as quickly as bare metal, e.g. if the vCPU is + * scheduled out, KVM needs to play nice with two queued NMIs showing + * up at the same time. To handle this scenario, allow two NMIs to be + * (temporarily) pending so long as NMIs are not blocked and KVM is not + * waiting for a previous NMI injection to complete (which effectively + * blocks NMIs). KVM will immediately inject one of the two NMIs, and + * will request an NMI window to handle the second NMI. */ if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) limit = 1; + else + limit = 2; vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); -- cgit v1.2.3 From ab2ee212a57bf136b896a47ce37da38a1fdbdc36 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 27 Feb 2023 14:10:12 +0530 Subject: KVM: x86: Save/restore all NMIs when multiple NMIs are pending Save all pending NMIs in KVM_GET_VCPU_EVENTS, and queue KVM_REQ_NMI if one or more NMIs are pending after KVM_SET_VCPU_EVENTS in order to re-evaluate pending NMIs with respect to NMI blocking. KVM allows multiple NMIs to be pending in order to faithfully emulate bare metal handling of simultaneous NMIs (on bare metal, truly simultaneous NMIs are impossible, i.e. one will always arrive first and be consumed). Support for simultaneous NMIs botched the save/restore though. 
KVM only saves one pending NMI, but allows userspace to restore 255 pending NMIs as kvm_vcpu_events.nmi.pending is a u8, and KVM's internal state is stored in an unsigned int. Fixes: 7460fb4a3400 ("KVM: Fix simultaneous NMIs") Signed-off-by: Santosh Shukla Link: https://lore.kernel.org/r/20230227084016.3368-8-santosh.shukla@amd.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e317559df489..f18f740f75d8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5125,7 +5125,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); events->nmi.injected = vcpu->arch.nmi_injected; - events->nmi.pending = vcpu->arch.nmi_pending != 0; + events->nmi.pending = vcpu->arch.nmi_pending; events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); /* events->sipi_vector is never valid when reporting to user space */ @@ -5212,8 +5212,11 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; - if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) + if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) { vcpu->arch.nmi_pending = events->nmi.pending; + if (vcpu->arch.nmi_pending) + kvm_make_request(KVM_REQ_NMI, vcpu); + } static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && -- cgit v1.2.3 From 3763bf58029f3459d7187f687eadfc8eb08e5ca8 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 22 Mar 2023 11:33:08 -0700 Subject: x86/cpufeatures: Redefine synthetic virtual NMI bit as AMD's "real" vNMI The existing X86_FEATURE_VNMI is a synthetic feature flag that exists purely to maintain /proc/cpuinfo's ABI, the "real" Intel vNMI feature flag is tracked as VMX_FEATURE_VIRTUAL_NMIS, as the feature is enumerated through VMX MSRs, not CPUID. AMD is also gaining virtual NMI support, but in true VMX vs. SVM form, enumerates support through CPUID, i.e. wants to add real feature flag for vNMI. Redefine the syntheic X86_FEATURE_VNMI to AMD's real CPUID bit to avoid having both X86_FEATURE_VNMI and e.g. X86_FEATURE_AMD_VNMI. 
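
Because AMD enumerates vNMI through CPUID rather than through VMX MSRs, userspace can probe the host for it directly. The sketch below is illustrative only and assumes the mapping implied by the word-15 placement in the hunk that follows, i.e. that word 15 is CPUID Fn8000_000A EDX, making vNMI bit 25 of that register.

  #include <cpuid.h>
  #include <stdio.h>

  int main(void)
  {
          unsigned int eax, ebx, ecx, edx;

          if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) ||
              eax < 0x8000000a) {
                  puts("SVM feature leaf not present");
                  return 0;
          }

          /* Assumed: vNMI is enumerated in CPUID Fn8000_000A EDX[25]. */
          __get_cpuid_count(0x8000000a, 0, &eax, &ebx, &ecx, &edx);
          printf("vNMI %ssupported by this CPU\n",
                 (edx & (1u << 25)) ? "" : "not ");
          return 0;
  }
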
Signed-off-by: Sean Christopherson --- arch/x86/include/asm/cpufeatures.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 73c9672c123b..ced9e1832589 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -226,10 +226,9 @@ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ -#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ -#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ -#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ -#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ +#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* Intel FlexPriority */ +#define X86_FEATURE_EPT ( 8*32+ 2) /* Intel Extended Page Table */ +#define X86_FEATURE_VPID ( 8*32+ 3) /* Intel Virtual Processor ID */ #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */ #define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ @@ -369,6 +368,7 @@ #define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */ #define X86_FEATURE_X2AVIC (15*32+18) /* Virtual x2apic */ #define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* Virtual SPEC_CTRL */ +#define X86_FEATURE_VNMI (15*32+25) /* Virtual NMI */ #define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */ -- cgit v1.2.3 From 68f7c82ab1b8c7057b0c241907ff7906c7407e6d Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 22 Mar 2023 12:58:24 +0800 Subject: KVM: x86: Change return type of is_long_mode() to bool Change return type of is_long_mode() to bool to avoid implicit cast, as literally every user of is_long_mode() treats its return value as a boolean. Signed-off-by: Binbin Wu Link: https://lore.kernel.org/r/20230322045824.22970-5-binbin.wu@linux.intel.com Reviewed-by: Kai Huang Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 577b82358529..203fb6640b5b 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -126,12 +126,12 @@ static inline bool is_protmode(struct kvm_vcpu *vcpu) return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE); } -static inline int is_long_mode(struct kvm_vcpu *vcpu) +static inline bool is_long_mode(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 - return vcpu->arch.efer & EFER_LMA; + return !!(vcpu->arch.efer & EFER_LMA); #else - return 0; + return false; #endif } -- cgit v1.2.3 From 1c4522ab13b163173971d8b0c0eb0b253b1b7f28 Mon Sep 17 00:00:00 2001 From: Santosh Shukla Date: Mon, 27 Feb 2023 14:10:14 +0530 Subject: KVM: SVM: Add definitions for new bits in VMCB::int_ctrl related to vNMI Add defines for three new bits in VMVC::int_ctrl that are part of SVM's Virtual NMI (vNMI) support: V_NMI_PENDING_MASK(11) - Virtual NMI is pending V_NMI_BLOCKING_MASK(12) - Virtual NMI is masked V_NMI_ENABLE_MASK(26) - Enable NMI virtualization To "inject" an NMI, the hypervisor (KVM) sets V_NMI_PENDING. When the CPU services the pending vNMI, hardware clears V_NMI_PENDING and sets V_NMI_BLOCKING, e.g. to indicate that the vCPU is handling an NMI. Hardware clears V_NMI_BLOCKING upon successful execution of IRET, or if a VM-Exit occurs while delivering the virtual NMI. 
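
A compact way to read the three bits is as a small hardware state machine. The following standalone sketch models the behavior described above on a fake int_ctl word; it is illustrative only (neither hardware nor KVM code), though the bit positions match the defines added below.

  #include <assert.h>
  #include <stdint.h>

  #define V_NMI_PENDING_MASK  (1u << 11)
  #define V_NMI_BLOCKING_MASK (1u << 12)
  #define V_NMI_ENABLE_MASK   (1u << 26)

  /* Hypervisor "injects" an NMI simply by marking it pending. */
  void vnmi_inject(uint32_t *int_ctl)
  {
          *int_ctl |= V_NMI_PENDING_MASK;
  }

  /* Model of the CPU servicing a pending vNMI. */
  void vnmi_hw_service(uint32_t *int_ctl)
  {
          if (!(*int_ctl & V_NMI_ENABLE_MASK) || !(*int_ctl & V_NMI_PENDING_MASK))
                  return;
          if (*int_ctl & V_NMI_BLOCKING_MASK)     /* NMIs masked, stays pending */
                  return;
          *int_ctl &= ~V_NMI_PENDING_MASK;
          *int_ctl |= V_NMI_BLOCKING_MASK;        /* vCPU is now handling an NMI */
  }

  /* Model of a successful IRET (or a VM-Exit during vNMI delivery). */
  void vnmi_hw_iret(uint32_t *int_ctl)
  {
          *int_ctl &= ~V_NMI_BLOCKING_MASK;
  }

  int main(void)
  {
          uint32_t int_ctl = V_NMI_ENABLE_MASK;

          vnmi_inject(&int_ctl);
          vnmi_hw_service(&int_ctl);
          assert(!(int_ctl & V_NMI_PENDING_MASK) && (int_ctl & V_NMI_BLOCKING_MASK));
          vnmi_hw_iret(&int_ctl);
          assert(!(int_ctl & V_NMI_BLOCKING_MASK));
          return 0;
  }
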
Reviewed-by: Maxim Levitsky Signed-off-by: Santosh Shukla Link: https://lore.kernel.org/r/20230227084016.3368-10-santosh.shukla@amd.com [sean: massage changelog] Signed-off-by: Sean Christopherson --- arch/x86/include/asm/svm.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 770dcf75eaa9..25306a420b84 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -183,6 +183,12 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define V_GIF_SHIFT 9 #define V_GIF_MASK (1 << V_GIF_SHIFT) +#define V_NMI_PENDING_SHIFT 11 +#define V_NMI_PENDING_MASK (1 << V_NMI_PENDING_SHIFT) + +#define V_NMI_BLOCKING_SHIFT 12 +#define V_NMI_BLOCKING_MASK (1 << V_NMI_BLOCKING_SHIFT) + #define V_INTR_PRIO_SHIFT 16 #define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT) @@ -197,6 +203,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area { #define V_GIF_ENABLE_SHIFT 25 #define V_GIF_ENABLE_MASK (1 << V_GIF_ENABLE_SHIFT) +#define V_NMI_ENABLE_SHIFT 26 +#define V_NMI_ENABLE_MASK (1 << V_NMI_ENABLE_SHIFT) + #define AVIC_ENABLE_SHIFT 31 #define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT) -- cgit v1.2.3 From bdedff263132c862924f5cad96f0e82eeeb4e2e6 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 22 Mar 2023 17:40:16 -0700 Subject: KVM: x86: Route pending NMIs from userspace through process_nmi() Use the asynchronous NMI queue to handle pending NMIs coming in from userspace during KVM_SET_VCPU_EVENTS so that all of KVM's logic for handling multiple NMIs goes through process_nmi(). This will simplify supporting SVM's upcoming "virtual NMI" functionality, which will need changes KVM manages pending NMIs. Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f18f740f75d8..befabb4678f9 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5213,9 +5213,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) { - vcpu->arch.nmi_pending = events->nmi.pending; - if (vcpu->arch.nmi_pending) - kvm_make_request(KVM_REQ_NMI, vcpu); + vcpu->arch.nmi_pending = 0; + atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending); + kvm_make_request(KVM_REQ_NMI, vcpu); } static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); -- cgit v1.2.3 From fa4c027a7956f5e07697bfcb580d25eeb8471257 Mon Sep 17 00:00:00 2001 From: Santosh Shukla Date: Mon, 27 Feb 2023 14:10:15 +0530 Subject: KVM: x86: Add support for SVM's Virtual NMI Add support for SVM's Virtual NMIs implementation, which adds proper tracking of virtual NMI blocking, and an intr_ctrl flag that software can set to mark a virtual NMI as pending. Pending virtual NMIs are serviced by hardware if/when virtual NMIs become unblocked, i.e. act more or less like real NMIs. Introduce two new kvm_x86_ops callbacks so to support SVM's vNMI, as KVM needs to treat a pending vNMI as partially injected. Specifically, if two NMIs (for L1) arrive concurrently in KVM's software model, KVM's ABI is to inject one and pend the other. Without vNMI, KVM manually tracks the pending NMI and uses NMI windows to detect when the NMI should be injected. With vNMI, the pending NMI is simply stuffed into the VMCB and handed off to hardware. 
This means that KVM needs to be able to set a vNMI pending on-demand, and also query if a vNMI is pending, e.g. to honor the "at most one NMI pending" rule and to preserve all NMIs across save and restore. Warn if KVM attempts to open an NMI window when vNMI is fully enabled, as the above logic should prevent KVM from ever getting to kvm_check_and_inject_events() with two NMIs pending _in software_, and the "at most one NMI pending" logic should prevent having an NMI pending in hardware and an NMI pending in software if NMIs are also blocked, i.e. if KVM can't immediately inject the second NMI. Signed-off-by: Santosh Shukla Co-developed-by: Maxim Levitsky Signed-off-by: Maxim Levitsky Link: https://lore.kernel.org/r/20230227084016.3368-11-santosh.shukla@amd.com [sean: rewrite shortlog and changelog, massage code comments] Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm-x86-ops.h | 2 + arch/x86/include/asm/kvm_host.h | 11 +++- arch/x86/kvm/svm/svm.c | 114 ++++++++++++++++++++++++++++++------- arch/x86/kvm/svm/svm.h | 22 +++++++ arch/x86/kvm/x86.c | 20 ++++++- 5 files changed, 146 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 8dc345cc6318..092ef2398857 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -68,6 +68,8 @@ KVM_X86_OP(get_interrupt_shadow) KVM_X86_OP(patch_hypercall) KVM_X86_OP(inject_irq) KVM_X86_OP(inject_nmi) +KVM_X86_OP_OPTIONAL_RET0(is_vnmi_pending) +KVM_X86_OP_OPTIONAL_RET0(set_vnmi_pending) KVM_X86_OP(inject_exception) KVM_X86_OP(cancel_injection) KVM_X86_OP(interrupt_allowed) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a45de1118a42..3edb8b3e3ef5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -876,7 +876,8 @@ struct kvm_vcpu_arch { u64 tsc_scaling_ratio; /* current scaling ratio */ atomic_t nmi_queued; /* unprocessed asynchronous NMIs */ - unsigned nmi_pending; /* NMI queued after currently running handler */ + /* Number of NMIs pending injection, not including hardware vNMIs. */ + unsigned int nmi_pending; bool nmi_injected; /* Trying to inject an NMI this entry */ bool smi_pending; /* SMI queued after currently running handler */ u8 handling_intr_from_guest; @@ -1621,6 +1622,13 @@ struct kvm_x86_ops { int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection); bool (*get_nmi_mask)(struct kvm_vcpu *vcpu); void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked); + /* Whether or not a virtual NMI is pending in hardware. */ + bool (*is_vnmi_pending)(struct kvm_vcpu *vcpu); + /* + * Attempt to pend a virtual NMI in harware. Returns %true on success + * to allow using static_call_ret0 as the fallback. 
+ */ + bool (*set_vnmi_pending)(struct kvm_vcpu *vcpu); void (*enable_nmi_window)(struct kvm_vcpu *vcpu); void (*enable_irq_window)(struct kvm_vcpu *vcpu); void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr); @@ -2005,6 +2013,7 @@ int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level); void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id); void kvm_inject_nmi(struct kvm_vcpu *vcpu); +int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu); void kvm_update_dr7(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 69ace08ab1e4..7a14c937a36d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -230,6 +230,8 @@ module_param(dump_invalid_vmcb, bool, 0644); bool intercept_smi = true; module_param(intercept_smi, bool, 0444); +bool vnmi = true; +module_param(vnmi, bool, 0444); static bool svm_gp_erratum_intercept = true; @@ -1311,6 +1313,9 @@ static void init_vmcb(struct kvm_vcpu *vcpu) if (kvm_vcpu_apicv_active(vcpu)) avic_init_vmcb(svm, vmcb); + if (vnmi) + svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK; + if (vgif) { svm_clr_intercept(svm, INTERCEPT_STGI); svm_clr_intercept(svm, INTERCEPT_CLGI); @@ -3525,6 +3530,39 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu) ++vcpu->stat.nmi_injections; } +static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if (!is_vnmi_enabled(svm)) + return false; + + return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK); +} + +static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if (!is_vnmi_enabled(svm)) + return false; + + if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK) + return false; + + svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK; + vmcb_mark_dirty(svm->vmcb, VMCB_INTR); + + /* + * Because the pending NMI is serviced by hardware, KVM can't know when + * the NMI is "injected", but for all intents and purposes, passing the + * NMI off to hardware counts as injection. 
+ */ + ++vcpu->stat.nmi_injections; + + return true; +} + static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3620,6 +3658,35 @@ static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) svm_set_intercept(svm, INTERCEPT_CR8_WRITE); } +static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if (is_vnmi_enabled(svm)) + return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK; + else + return svm->nmi_masked; +} + +static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if (is_vnmi_enabled(svm)) { + if (masked) + svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK; + else + svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK; + + } else { + svm->nmi_masked = masked; + if (masked) + svm_set_iret_intercept(svm); + else + svm_clr_iret_intercept(svm); + } +} + bool svm_nmi_blocked(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3631,8 +3698,10 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu) if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) return false; - return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || - svm->nmi_masked; + if (svm_get_nmi_mask(vcpu)) + return true; + + return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK; } static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) @@ -3650,24 +3719,6 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) return 1; } -static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) -{ - return to_svm(vcpu)->nmi_masked; -} - -static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) -{ - struct vcpu_svm *svm = to_svm(vcpu); - - if (masked) { - svm->nmi_masked = true; - svm_set_iret_intercept(svm); - } else { - svm->nmi_masked = false; - svm_clr_iret_intercept(svm); - } -} - bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); @@ -3748,7 +3799,16 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) { struct vcpu_svm *svm = to_svm(vcpu); - if (svm->nmi_masked && !svm->awaiting_iret_completion) + /* + * KVM should never request an NMI window when vNMI is enabled, as KVM + * allows at most one to-be-injected NMI and one pending NMI, i.e. if + * two NMIs arrive simultaneously, KVM will inject one and set + * V_NMI_PENDING for the other. WARN, but continue with the standard + * single-step approach to try and salvage the pending NMI. 
+ */ + WARN_ON_ONCE(is_vnmi_enabled(svm)); + + if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion) return; /* IRET will cause a vm exit */ if (!gif_set(svm)) { @@ -4797,6 +4857,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .patch_hypercall = svm_patch_hypercall, .inject_irq = svm_inject_irq, .inject_nmi = svm_inject_nmi, + .is_vnmi_pending = svm_is_vnmi_pending, + .set_vnmi_pending = svm_set_vnmi_pending, .inject_exception = svm_inject_exception, .cancel_injection = svm_cancel_injection, .interrupt_allowed = svm_interrupt_allowed, @@ -5090,6 +5152,16 @@ static __init int svm_hardware_setup(void) pr_info("Virtual GIF supported\n"); } + vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI); + if (vnmi) + pr_info("Virtual NMI enabled\n"); + + if (!vnmi) { + svm_x86_ops.is_vnmi_pending = NULL; + svm_x86_ops.set_vnmi_pending = NULL; + } + + if (lbrv) { if (!boot_cpu_has(X86_FEATURE_LBRV)) lbrv = false; diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 839809972da1..fb48c347bbe0 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -36,6 +36,7 @@ extern bool npt_enabled; extern int vgif; extern bool intercept_smi; extern bool x2avic_enabled; +extern bool vnmi; /* * Clean bits in VMCB. @@ -548,6 +549,27 @@ static inline bool is_x2apic_msrpm_offset(u32 offset) (msr < (APIC_BASE_MSR + 0x100)); } +static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm) +{ + if (!vnmi) + return NULL; + + if (is_guest_mode(&svm->vcpu)) + return NULL; + else + return svm->vmcb01.ptr; +} + +static inline bool is_vnmi_enabled(struct vcpu_svm *svm) +{ + struct vmcb *vmcb = get_vnmi_vmcb_l1(svm); + + if (vmcb) + return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK); + else + return false; +} + /* svm.c */ #define MSR_INVALID 0xffffffffU diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index befabb4678f9..f32c2b51c157 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5125,7 +5125,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); events->nmi.injected = vcpu->arch.nmi_injected; - events->nmi.pending = vcpu->arch.nmi_pending; + events->nmi.pending = kvm_get_nr_pending_nmis(vcpu); events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); /* events->sipi_vector is never valid when reporting to user space */ @@ -10158,13 +10158,31 @@ static void process_nmi(struct kvm_vcpu *vcpu) else limit = 2; + /* + * Adjust the limit to account for pending virtual NMIs, which aren't + * tracked in vcpu->arch.nmi_pending. 
+ */ + if (static_call(kvm_x86_is_vnmi_pending)(vcpu)) + limit--; + vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); + if (vcpu->arch.nmi_pending && + (static_call(kvm_x86_set_vnmi_pending)(vcpu))) + vcpu->arch.nmi_pending--; + if (vcpu->arch.nmi_pending) kvm_make_request(KVM_REQ_EVENT, vcpu); } +/* Return total number of NMIs pending injection to the VM */ +int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.nmi_pending + + static_call(kvm_x86_is_vnmi_pending)(vcpu); +} + void kvm_make_scan_ioapic_request_mask(struct kvm *kvm, unsigned long *vcpu_bitmap) { -- cgit v1.2.3 From 0977cfac6e76fa976618129a4585bcf53784cc87 Mon Sep 17 00:00:00 2001 From: Santosh Shukla Date: Mon, 27 Feb 2023 14:10:16 +0530 Subject: KVM: nSVM: Implement support for nested VNMI Allow L1 to use vNMI to accelerate its injection of NMI to L2 by propagating vNMI int_ctl bits from/to vmcb12 to/from vmcb02. To handle both the case where vNMI is enabled for L1 and L2, and where vNMI is enabled for L1 but _not_ L2, move pending L1 vNMIs to nmi_pending on nested VM-Entry and raise KVM_REQ_EVENT, i.e. rely on existing code to route the NMI to the correct domain. On nested VM-Exit, reverse the process and set/clear V_NMI_PENDING for L1 based one whether nmi_pending is zero or non-zero. There is no need to consider vmcb02 in this case, as V_NMI_PENDING can be set in vmcb02 if vNMI is disabled for L2, and if vNMI is enabled for L2, then L1 and L2 have different NMI contexts. Co-developed-by: Maxim Levitsky Signed-off-by: Maxim Levitsky Signed-off-by: Santosh Shukla Link: https://lore.kernel.org/r/20230227084016.3368-12-santosh.shukla@amd.com [sean: massage changelog to match the code] Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/nested.c | 33 +++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/svm.c | 5 +++++ arch/x86/kvm/svm/svm.h | 7 +++++++ 3 files changed, 45 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 8300fd31878d..96936ddf1b3c 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -281,6 +281,11 @@ static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu, if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl))) return false; + if (CC((control->int_ctl & V_NMI_ENABLE_MASK) && + !vmcb12_is_intercept(control, INTERCEPT_NMI))) { + return false; + } + return true; } @@ -436,6 +441,9 @@ void nested_sync_control_from_vmcb02(struct vcpu_svm *svm) if (nested_vgif_enabled(svm)) mask |= V_GIF_MASK; + if (nested_vnmi_enabled(svm)) + mask |= V_NMI_BLOCKING_MASK | V_NMI_PENDING_MASK; + svm->nested.ctl.int_ctl &= ~mask; svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask; } @@ -655,6 +663,17 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, else int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK); + if (vnmi) { + if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) { + svm->vcpu.arch.nmi_pending++; + kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); + } + if (nested_vnmi_enabled(svm)) + int_ctl_vmcb12_bits |= (V_NMI_PENDING_MASK | + V_NMI_ENABLE_MASK | + V_NMI_BLOCKING_MASK); + } + /* Copied from vmcb01. msrpm_base can be overwritten later. 
*/ vmcb02->control.nested_ctl = vmcb01->control.nested_ctl; vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa; @@ -1055,6 +1074,20 @@ int nested_svm_vmexit(struct vcpu_svm *svm) svm_update_lbrv(vcpu); } + if (vnmi) { + if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK) + vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK; + else + vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK; + + if (vcpu->arch.nmi_pending) { + vcpu->arch.nmi_pending--; + vmcb01->control.int_ctl |= V_NMI_PENDING_MASK; + } else { + vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK; + } + } + /* * On vmexit the GIF is set to false and * no event can be injected in L1. diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7a14c937a36d..e0ec95f1f068 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4246,6 +4246,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF); + svm->vnmi_enabled = vnmi && guest_cpuid_has(vcpu, X86_FEATURE_VNMI); + svm_recalc_instruction_intercepts(vcpu, svm); /* For sev guests, the memory encryption bit is not reserved in CR3. */ @@ -5001,6 +5003,9 @@ static __init void svm_set_cpu_caps(void) if (vgif) kvm_cpu_cap_set(X86_FEATURE_VGIF); + if (vnmi) + kvm_cpu_cap_set(X86_FEATURE_VNMI); + /* Nested VM can receive #VMEXIT instead of triggering #GP */ kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK); } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index fb48c347bbe0..f44751dd8d5d 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -266,6 +266,7 @@ struct vcpu_svm { bool pause_filter_enabled : 1; bool pause_threshold_enabled : 1; bool vgif_enabled : 1; + bool vnmi_enabled : 1; u32 ldr_reg; u32 dfr_reg; @@ -540,6 +541,12 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm) return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE; } +static inline bool nested_vnmi_enabled(struct vcpu_svm *svm) +{ + return svm->vnmi_enabled && + (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK); +} + static inline bool is_x2apic_msrpm_offset(u32 offset) { /* 4 msrs per u8, and 4 u8 in u32 */ -- cgit v1.2.3 From 3d8f61bf8bcd69bcd397276d53aa18f7ca8347f9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 24 Jan 2023 19:45:19 +0000 Subject: x86: KVM: Add common feature flag for AMD's PSFD Use a common X86_FEATURE_* flag for AMD's PSFD, and suppress it from /proc/cpuinfo via the standard method of an empty string instead of hacking in a one-off "private" #define in KVM. The request that led to KVM defining its own flag was really just that the feature not show up in /proc/cpuinfo, and additional patches+discussions in the interim have clarified that defining flags in cpufeatures.h purely so that KVM can advertise features to userspace is ok so long as the kernel already uses a word to track the associated CPUID leaf. No functional change intended. 
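
From userspace, "advertising a feature" ultimately means the bit shows up in KVM_GET_SUPPORTED_CPUID. The sketch below is a rough standalone example, not part of this series; it assumes word 13 above corresponds to CPUID Fn8000_0008 EBX, i.e. that PSFD is bit 28 of that register.

  #include <fcntl.h>
  #include <linux/kvm.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/ioctl.h>
  #include <unistd.h>

  int main(void)
  {
          unsigned int nent = 256, i;
          struct kvm_cpuid2 *cpuid;
          int kvm = open("/dev/kvm", O_RDWR);

          if (kvm < 0) {
                  perror("open(/dev/kvm)");
                  return 1;
          }

          cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
          if (!cpuid)
                  return 1;
          cpuid->nent = nent;
          if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) < 0) {
                  perror("KVM_GET_SUPPORTED_CPUID");
                  return 1;
          }

          for (i = 0; i < cpuid->nent; i++) {
                  if (cpuid->entries[i].function != 0x80000008)
                          continue;
                  /* Assumed: PSFD is CPUID Fn8000_0008 EBX[28]. */
                  printf("PSFD %sadvertised to userspace\n",
                         (cpuid->entries[i].ebx & (1u << 28)) ? "" : "not ");
          }

          free(cpuid);
          close(kvm);
          return 0;
  }
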
Link: https://lore.kernel.org/all/d1b1e0da-29f0-c443-6c86-9549bbe1c79d@redhat.como Link: https://lore.kernel.org/all/YxGZH7aOXQF7Pu5q@nazgul.tnic Link: https://lore.kernel.org/all/Y3O7UYWfOLfJkwM%2F@zn.tnic Reviewed-by: Paolo Bonzini Link: https://lore.kernel.org/r/20230124194519.2893234-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/kvm/cpuid.c | 8 +------- 2 files changed, 2 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 73c9672c123b..42abcd3db7a7 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -337,6 +337,7 @@ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ #define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ #define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */ +#define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */ #define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */ #define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */ diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 1ad3bde72526..6972e0be60fa 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -60,12 +60,6 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted) return ret; } -/* - * This one is tied to SSB in the user API, and not - * visible in /proc/cpuinfo. - */ -#define KVM_X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */ - #define F feature_bit /* Scattered Flag - For features that are scattered by cpufeatures.h. */ @@ -715,7 +709,7 @@ void kvm_set_cpu_caps(void) F(CLZERO) | F(XSAVEERPTR) | F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) | - __feature_bit(KVM_X86_FEATURE_AMD_PSFD) + F(AMD_PSFD) ); /* -- cgit v1.2.3 From 65966aaca18a5cbf42ac22234cb9cbbf60a4d33c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 16 Feb 2023 12:22:54 -0800 Subject: KVM: x86: Assert that the emulator doesn't load CS with garbage in !RM Yell loudly if KVM attempts to load CS outside of Real Mode without an accompanying control transfer type, i.e. on X86_TRANSFER_NONE. KVM uses X86_TRANSFER_NONE when emulating IRET and exceptions/interrupts for Real Mode, but IRET emulation for Protected Mode is non-existent. WARN instead of trying to pass in a less-wrong type, e.g. X86_TRANSFER_RET, as emulating IRET goes even beyond emulating FAR RET (which KVM also doesn't fully support). Reported-by: Hou Wenlong Link: https://lore.kernel.org/r/20230216202254.671772-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/emulate.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index a20bec931764..936a397a08cd 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1640,6 +1640,14 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, goto exception; break; case VCPU_SREG_CS: + /* + * KVM uses "none" when loading CS as part of emulating Real + * Mode exceptions and IRET (handled above). In all other + * cases, loading CS without a control transfer is a KVM bug. 
+ */ + if (WARN_ON_ONCE(transfer == X86_TRANSFER_NONE)) + goto exception; + if (!(seg_desc.type & 8)) goto exception; -- cgit v1.2.3 From 99b30869804ea59d9596cdbefa5cc3aabd588521 Mon Sep 17 00:00:00 2001 From: Robert Hoo Date: Wed, 8 Mar 2023 15:29:36 +0800 Subject: KVM: x86: Remove a redundant guest cpuid check in kvm_set_cr4() If !guest_cpuid_has(vcpu, X86_FEATURE_PCID), CR4.PCIDE would have been in vcpu->arch.cr4_guest_rsvd_bits and failed earlier kvm_is_valid_cr4() check. Remove this meaningless check. Reviewed-by: Xiaoyao Li Signed-off-by: Robert Hoo Fixes: 4683d758f48e ("KVM: x86: Supplement __cr4_reserved_bits() with X86_FEATURE_PCID check") Link: https://lore.kernel.org/r/20230308072936.1293101-1-robert.hu@intel.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3fab192862d4..2e0933f510ee 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1196,9 +1196,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) return 1; if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) { - if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID)) - return 1; - /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */ if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) return 1; -- cgit v1.2.3 From 12aad9164763bb4890277d3b2a4664f443a48e3f Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Fri, 17 Feb 2023 20:33:35 +0100 Subject: KVM: x86: Shrink struct kvm_pmu Move the 'version' member to the beginning of the structure to reuse an existing hole instead of introducing another one. This allows us to save 8 bytes for 64 bit builds. Signed-off-by: Mathias Krause Link: https://lore.kernel.org/r/20230217193336.15278-2-minipli@grsecurity.net Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a45de1118a42..a440ceae06fb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -515,6 +515,7 @@ struct kvm_pmc { #define MSR_ARCH_PERFMON_FIXED_CTR_MAX (MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1) #define KVM_AMD_PMC_MAX_GENERIC 6 struct kvm_pmu { + u8 version; unsigned nr_arch_gp_counters; unsigned nr_arch_fixed_counters; unsigned available_event_types; @@ -527,7 +528,6 @@ struct kvm_pmu { u64 global_ovf_ctrl_mask; u64 reserved_bits; u64 raw_event_mask; - u8 version; struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC]; struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED]; struct irq_work irq_work; -- cgit v1.2.3 From 7ffc2e89518a3ad9b81faad67254737d87073f44 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 24 Feb 2023 11:28:32 -0800 Subject: KVM: MIPS: Make kvm_mips_callbacks const MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make kvm_mips_callbacks fully const as it's now hardcoded to point at kvm_vz_callbacks, the only remaining the set of callbacks. 
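
For readers unfamiliar with the double "const" used here: the change makes both the pointer and the pointed-to callback table immutable. A minimal standalone illustration with generic names (not the MIPS code):

  struct demo_callbacks {
          int (*vcpu_run)(void *vcpu);
  };

  static const struct demo_callbacks demo_ops = { 0 };

  /* Pointer to const data: *cbs_v1 can't be written, but cbs_v1 can be repointed. */
  const struct demo_callbacks *cbs_v1 = &demo_ops;

  /* Fully const: neither the table nor the pointer itself can be changed. */
  const struct demo_callbacks * const cbs_v2 = &demo_ops;
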
Link: https://lore.kernel.org/all/beb697c2-dfad-780e-4638-76b229f28731@linaro.org Suggested-by: Philippe Mathieu-Daudé Reviewed-by: Philippe Mathieu-Daudé Link: https://lore.kernel.org/r/20230224192832.1286267-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/mips/include/asm/kvm_host.h | 2 +- arch/mips/kvm/vz.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 2803c9c21ef9..957121a495f0 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -757,7 +757,7 @@ struct kvm_mips_callbacks { int (*vcpu_run)(struct kvm_vcpu *vcpu); void (*vcpu_reenter)(struct kvm_vcpu *vcpu); }; -extern struct kvm_mips_callbacks *kvm_mips_callbacks; +extern const struct kvm_mips_callbacks * const kvm_mips_callbacks; int kvm_mips_emulation_init(void); /* Debug: dump vcpu state */ diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index dafab003ea0d..3d21cbfa7443 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -3305,7 +3305,7 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = { }; /* FIXME: Get rid of the callbacks now that trap-and-emulate is gone. */ -struct kvm_mips_callbacks *kvm_mips_callbacks = &kvm_vz_callbacks; +const struct kvm_mips_callbacks * const kvm_mips_callbacks = &kvm_vz_callbacks; int kvm_mips_emulation_init(void) { -- cgit v1.2.3 From 0acc7239c20a8401b8968c2adace8f7c9b0295ae Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Mon, 27 Mar 2023 16:47:44 +0000 Subject: KVM: arm64: Avoid vcpu->mutex v. kvm->lock inversion in CPU_ON KVM/arm64 had the lock ordering backwards on vcpu->mutex and kvm->lock from the very beginning. One such example is the way vCPU resets are handled: the kvm->lock is acquired while handling a guest CPU_ON PSCI call. Add a dedicated lock to serialize writes to kvm_vcpu_arch::{mp_state, reset_state}. Promote all accessors of mp_state to {READ,WRITE}_ONCE() as readers do not acquire the mp_state_lock. While at it, plug yet another race by taking the mp_state_lock in the KVM_SET_MP_STATE ioctl handler. As changes to MP state are now guarded with a dedicated lock, drop the kvm->lock acquisition from the PSCI CPU_ON path. Similarly, move the reader of reset_state outside of the kvm->lock and instead protect it with the mp_state_lock. Note that writes to reset_state::reset have been demoted to regular stores as both readers and writers acquire the mp_state_lock. While the kvm->lock inversion still exists in kvm_reset_vcpu(), at least now PSCI CPU_ON no longer depends on it for serializing vCPU reset. 
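
The pattern introduced here, a dedicated lock serializing writers plus READ_ONCE()/WRITE_ONCE() for lock-free readers, can be modeled in standalone C11 with a mutex and relaxed atomics. This is only a rough sketch of the idea; the type, field, and function names are invented and do not correspond to the KVM/arm64 code in the diff below.

  #include <pthread.h>
  #include <stdatomic.h>

  enum mp_state { MP_RUNNABLE, MP_STOPPED, MP_SUSPENDED };

  struct demo_vcpu {
          pthread_mutex_t mp_state_lock;  /* serializes state *changes* */
          _Atomic int mp_state;           /* readers may load without the lock */
  };

  /* Writers take the dedicated lock so concurrent transitions serialize. */
  void demo_vcpu_power_off(struct demo_vcpu *vcpu)
  {
          pthread_mutex_lock(&vcpu->mp_state_lock);
          atomic_store_explicit(&vcpu->mp_state, MP_STOPPED,
                                memory_order_relaxed);
          pthread_mutex_unlock(&vcpu->mp_state_lock);
  }

  /* Readers only need a tear-free snapshot, analogous to READ_ONCE(). */
  int demo_vcpu_stopped(struct demo_vcpu *vcpu)
  {
          return atomic_load_explicit(&vcpu->mp_state,
                                      memory_order_relaxed) == MP_STOPPED;
  }
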
Cc: stable@vger.kernel.org Tested-by: Jeremy Linton Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230327164747.2466958-2-oliver.upton@linux.dev --- arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/arm.c | 31 ++++++++++++++++++++++--------- arch/arm64/kvm/psci.c | 28 ++++++++++++++++------------ arch/arm64/kvm/reset.c | 9 +++++---- 4 files changed, 44 insertions(+), 25 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index bcd774d74f34..917586237a4d 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -522,6 +522,7 @@ struct kvm_vcpu_arch { /* vcpu power state */ struct kvm_mp_state mp_state; + spinlock_t mp_state_lock; /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3bd732eaf087..647798da8c41 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -326,6 +326,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { int err; + spin_lock_init(&vcpu->arch.mp_state_lock); + /* Force users to call KVM_ARM_VCPU_INIT */ vcpu->arch.target = -1; bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); @@ -443,34 +445,41 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) vcpu->cpu = -1; } -void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu) +static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu) { - vcpu->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED; + WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); kvm_make_request(KVM_REQ_SLEEP, vcpu); kvm_vcpu_kick(vcpu); } +void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu) +{ + spin_lock(&vcpu->arch.mp_state_lock); + __kvm_arm_vcpu_power_off(vcpu); + spin_unlock(&vcpu->arch.mp_state_lock); +} + bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu) { - return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_STOPPED; + return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED; } static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu) { - vcpu->arch.mp_state.mp_state = KVM_MP_STATE_SUSPENDED; + WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED); kvm_make_request(KVM_REQ_SUSPEND, vcpu); kvm_vcpu_kick(vcpu); } static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu) { - return vcpu->arch.mp_state.mp_state == KVM_MP_STATE_SUSPENDED; + return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { - *mp_state = vcpu->arch.mp_state; + *mp_state = READ_ONCE(vcpu->arch.mp_state); return 0; } @@ -480,12 +489,14 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, { int ret = 0; + spin_lock(&vcpu->arch.mp_state_lock); + switch (mp_state->mp_state) { case KVM_MP_STATE_RUNNABLE: - vcpu->arch.mp_state = *mp_state; + WRITE_ONCE(vcpu->arch.mp_state, *mp_state); break; case KVM_MP_STATE_STOPPED: - kvm_arm_vcpu_power_off(vcpu); + __kvm_arm_vcpu_power_off(vcpu); break; case KVM_MP_STATE_SUSPENDED: kvm_arm_vcpu_suspend(vcpu); @@ -494,6 +505,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, ret = -EINVAL; } + spin_unlock(&vcpu->arch.mp_state_lock); + return ret; } @@ -1213,7 +1226,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) kvm_arm_vcpu_power_off(vcpu); else - vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE; + WRITE_ONCE(vcpu->arch.mp_state.mp_state, 
KVM_MP_STATE_RUNNABLE); return 0; } diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 7fbc4c1b9df0..5767e6baa61a 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -62,6 +62,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) struct vcpu_reset_state *reset_state; struct kvm *kvm = source_vcpu->kvm; struct kvm_vcpu *vcpu = NULL; + int ret = PSCI_RET_SUCCESS; unsigned long cpu_id; cpu_id = smccc_get_arg1(source_vcpu); @@ -76,11 +77,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) */ if (!vcpu) return PSCI_RET_INVALID_PARAMS; + + spin_lock(&vcpu->arch.mp_state_lock); if (!kvm_arm_vcpu_stopped(vcpu)) { if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) - return PSCI_RET_ALREADY_ON; + ret = PSCI_RET_ALREADY_ON; else - return PSCI_RET_INVALID_PARAMS; + ret = PSCI_RET_INVALID_PARAMS; + + goto out_unlock; } reset_state = &vcpu->arch.reset_state; @@ -96,7 +101,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) */ reset_state->r0 = smccc_get_arg3(source_vcpu); - WRITE_ONCE(reset_state->reset, true); + reset_state->reset = true; kvm_make_request(KVM_REQ_VCPU_RESET, vcpu); /* @@ -108,7 +113,9 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE; kvm_vcpu_wake_up(vcpu); - return PSCI_RET_SUCCESS; +out_unlock: + spin_unlock(&vcpu->arch.mp_state_lock); + return ret; } static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) @@ -168,8 +175,11 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type, u64 flags) * after this call is handled and before the VCPUs have been * re-initialized. */ - kvm_for_each_vcpu(i, tmp, vcpu->kvm) - tmp->arch.mp_state.mp_state = KVM_MP_STATE_STOPPED; + kvm_for_each_vcpu(i, tmp, vcpu->kvm) { + spin_lock(&tmp->arch.mp_state_lock); + WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); + spin_unlock(&tmp->arch.mp_state_lock); + } kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP); memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); @@ -229,7 +239,6 @@ static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) { - struct kvm *kvm = vcpu->kvm; u32 psci_fn = smccc_get_function(vcpu); unsigned long val; int ret = 1; @@ -254,9 +263,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) kvm_psci_narrow_to_32bit(vcpu); fallthrough; case PSCI_0_2_FN64_CPU_ON: - mutex_lock(&kvm->lock); val = kvm_psci_vcpu_on(vcpu); - mutex_unlock(&kvm->lock); break; case PSCI_0_2_FN_AFFINITY_INFO: kvm_psci_narrow_to_32bit(vcpu); @@ -395,7 +402,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor) static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) { - struct kvm *kvm = vcpu->kvm; u32 psci_fn = smccc_get_function(vcpu); unsigned long val; @@ -405,9 +411,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) val = PSCI_RET_SUCCESS; break; case KVM_PSCI_FN_CPU_ON: - mutex_lock(&kvm->lock); val = kvm_psci_vcpu_on(vcpu); - mutex_unlock(&kvm->lock); break; default: val = PSCI_RET_NOT_SUPPORTED; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 49a3257dec46..9e023546bde0 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -264,15 +264,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) mutex_lock(&vcpu->kvm->lock); ret = kvm_set_vm_width(vcpu); - if (!ret) { - reset_state = vcpu->arch.reset_state; - WRITE_ONCE(vcpu->arch.reset_state.reset, false); - } 
mutex_unlock(&vcpu->kvm->lock); if (ret) return ret; + spin_lock(&vcpu->arch.mp_state_lock); + reset_state = vcpu->arch.reset_state; + vcpu->arch.reset_state.reset = false; + spin_unlock(&vcpu->arch.mp_state_lock); + /* Reset PMU outside of the non-preemptible section */ kvm_pmu_vcpu_reset(vcpu); -- cgit v1.2.3 From c43120afb5c66a3465c7468f5cf9806a26484cde Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Mon, 27 Mar 2023 16:47:45 +0000 Subject: KVM: arm64: Avoid lock inversion when setting the VM register width kvm->lock must be taken outside of the vcpu->mutex. Of course, the locking documentation for KVM makes this abundantly clear. Nonetheless, the locking order in KVM/arm64 has been wrong for quite a while; we acquire the kvm->lock while holding the vcpu->mutex all over the shop. All was seemingly fine until commit 42a90008f890 ("KVM: Ensure lockdep knows about kvm->lock vs. vcpu->mutex ordering rule") caught us with our pants down, leading to lockdep barfing: ====================================================== WARNING: possible circular locking dependency detected 6.2.0-rc7+ #19 Not tainted ------------------------------------------------------ qemu-system-aar/859 is trying to acquire lock: ffff5aa69269eba0 (&host_kvm->lock){+.+.}-{3:3}, at: kvm_reset_vcpu+0x34/0x274 but task is already holding lock: ffff5aa68768c0b8 (&vcpu->mutex){+.+.}-{3:3}, at: kvm_vcpu_ioctl+0x8c/0xba0 which lock already depends on the new lock. Add a dedicated lock to serialize writes to VM-scoped configuration from the context of a vCPU. Protect the register width flags with the new lock, thus avoiding the need to grab the kvm->lock while holding vcpu->mutex in kvm_reset_vcpu(). Cc: stable@vger.kernel.org Reported-by: Jeremy Linton Link: https://lore.kernel.org/kvmarm/f6452cdd-65ff-34b8-bab0-5c06416da5f6@arm.com/ Tested-by: Jeremy Linton Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230327164747.2466958-3-oliver.upton@linux.dev --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/kvm/arm.c | 18 ++++++++++++++++++ arch/arm64/kvm/reset.c | 6 +++--- 3 files changed, 24 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 917586237a4d..cd1ef8716719 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -199,6 +199,9 @@ struct kvm_arch { /* Mandated version of PSCI */ u32 psci_version; + /* Protects VM-scoped configuration data */ + struct mutex config_lock; + /* * If we encounter a data abort without valid instruction syndrome * information, report this to user space. 
User space can (and diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 647798da8c41..1620ec3d95ef 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -128,6 +128,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret; + mutex_init(&kvm->arch.config_lock); + +#ifdef CONFIG_LOCKDEP + /* Clue in lockdep that the config_lock must be taken inside kvm->lock */ + mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.config_lock); + mutex_unlock(&kvm->arch.config_lock); + mutex_unlock(&kvm->lock); +#endif + ret = kvm_share_hyp(kvm, kvm + 1); if (ret) return ret; @@ -328,6 +338,14 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) spin_lock_init(&vcpu->arch.mp_state_lock); +#ifdef CONFIG_LOCKDEP + /* Inform lockdep that the config_lock is acquired after vcpu->mutex */ + mutex_lock(&vcpu->mutex); + mutex_lock(&vcpu->kvm->arch.config_lock); + mutex_unlock(&vcpu->kvm->arch.config_lock); + mutex_unlock(&vcpu->mutex); +#endif + /* Force users to call KVM_ARM_VCPU_INIT */ vcpu->arch.target = -1; bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 9e023546bde0..b5dee8e57e77 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -205,7 +205,7 @@ static int kvm_set_vm_width(struct kvm_vcpu *vcpu) is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT); - lockdep_assert_held(&kvm->lock); + lockdep_assert_held(&kvm->arch.config_lock); if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) { /* @@ -262,9 +262,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) bool loaded; u32 pstate; - mutex_lock(&vcpu->kvm->lock); + mutex_lock(&vcpu->kvm->arch.config_lock); ret = kvm_set_vm_width(vcpu); - mutex_unlock(&vcpu->kvm->lock); + mutex_unlock(&vcpu->kvm->arch.config_lock); if (ret) return ret; -- cgit v1.2.3 From 4bba7f7def6f278266dadf845da472cfbfed784e Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Mon, 27 Mar 2023 16:47:46 +0000 Subject: KVM: arm64: Use config_lock to protect data ordered against KVM_RUN There are various bits of VM-scoped data that can only be configured before the first call to KVM_RUN, such as the hypercall bitmaps and the PMU. As these fields are protected by the kvm->lock and accessed while holding vcpu->mutex, this is yet another example of lock inversion. Change out the kvm->lock for kvm->arch.config_lock in all of these instances. Opportunistically simplify the locking mechanics of the PMU configuration by holding the config_lock for the entirety of kvm_arm_pmu_v3_set_attr(). Note that this also addresses a couple of bugs. There is an unguarded read of the PMU version in KVM_ARM_VCPU_PMU_V3_FILTER which could race with KVM_ARM_VCPU_PMU_V3_SET_PMU. Additionally, until now writes to the per-vCPU vPMU irq were not serialized VM-wide, meaning concurrent calls to KVM_ARM_VCPU_PMU_V3_IRQ could lead to a false positive in pmu_irq_is_valid(). 
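For illustration only, a minimal sketch of the lock nesting this patch establishes for vCPU-context updates to VM-scoped state (PMU, hypercall bitmaps). The example_vcpu_scoped_config() helper is hypothetical; only the lock names come from the series.

static int example_vcpu_scoped_config(struct kvm_vcpu *vcpu)
{
        /* vcpu->mutex is already held by the generic vCPU ioctl path */
        mutex_lock(&vcpu->kvm->arch.config_lock);
        /* ... update PMU / hypercall bitmap / other VM-scoped config ... */
        mutex_unlock(&vcpu->kvm->arch.config_lock);
        return 0;
}
/*
 * Taking kvm->lock here instead would nest it inside vcpu->mutex and
 * recreate the inversion that lockdep reported earlier in the series.
 */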
Cc: stable@vger.kernel.org Tested-by: Jeremy Linton Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230327164747.2466958-4-oliver.upton@linux.dev --- arch/arm64/kvm/arm.c | 4 ++-- arch/arm64/kvm/guest.c | 2 ++ arch/arm64/kvm/hypercalls.c | 4 ++-- arch/arm64/kvm/pmu-emul.c | 23 ++++++----------------- 4 files changed, 12 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 1620ec3d95ef..fd8d355aca15 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -624,9 +624,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) if (kvm_vm_is_protected(kvm)) kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu); - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.config_lock); set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags); - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.config_lock); return ret; } diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 07444fa22888..481c79cf22cd 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -957,7 +957,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, switch (attr->group) { case KVM_ARM_VCPU_PMU_V3_CTRL: + mutex_lock(&vcpu->kvm->arch.config_lock); ret = kvm_arm_pmu_v3_set_attr(vcpu, attr); + mutex_unlock(&vcpu->kvm->arch.config_lock); break; case KVM_ARM_VCPU_TIMER_CTRL: ret = kvm_arm_timer_set_attr(vcpu, attr); diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 5da884e11337..fbdbf4257f76 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -377,7 +377,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val) if (val & ~fw_reg_features) return -EINVAL; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.config_lock); if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) && val != *fw_reg_bmap) { @@ -387,7 +387,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val) WRITE_ONCE(*fw_reg_bmap, val); out: - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.config_lock); return ret; } diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 24908400e190..240168416838 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -874,7 +874,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) struct arm_pmu *arm_pmu; int ret = -ENXIO; - mutex_lock(&kvm->lock); + lockdep_assert_held(&kvm->arch.config_lock); mutex_lock(&arm_pmus_lock); list_for_each_entry(entry, &arm_pmus, entry) { @@ -894,7 +894,6 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) } mutex_unlock(&arm_pmus_lock); - mutex_unlock(&kvm->lock); return ret; } @@ -902,22 +901,20 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { struct kvm *kvm = vcpu->kvm; + lockdep_assert_held(&kvm->arch.config_lock); + if (!kvm_vcpu_has_pmu(vcpu)) return -ENODEV; if (vcpu->arch.pmu.created) return -EBUSY; - mutex_lock(&kvm->lock); if (!kvm->arch.arm_pmu) { /* No PMU set, get the default one */ kvm->arch.arm_pmu = kvm_pmu_probe_armpmu(); - if (!kvm->arch.arm_pmu) { - mutex_unlock(&kvm->lock); + if (!kvm->arch.arm_pmu) return -ENODEV; - } } - mutex_unlock(&kvm->lock); switch (attr->attr) { case KVM_ARM_VCPU_PMU_V3_IRQ: { @@ -961,19 +958,13 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) filter.action != KVM_PMU_EVENT_DENY)) return -EINVAL; - mutex_lock(&kvm->lock); - - if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, 
&kvm->arch.flags)) { - mutex_unlock(&kvm->lock); + if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) return -EBUSY; - } if (!kvm->arch.pmu_filter) { kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT); - if (!kvm->arch.pmu_filter) { - mutex_unlock(&kvm->lock); + if (!kvm->arch.pmu_filter) return -ENOMEM; - } /* * The default depends on the first applied filter. @@ -992,8 +983,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) else bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents); - mutex_unlock(&kvm->lock); - return 0; } case KVM_ARM_VCPU_PMU_V3_SET_PMU: { -- cgit v1.2.3 From f00327731131d1b5aa6a1aa9f50bcf8d620ace4c Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Mon, 27 Mar 2023 16:47:47 +0000 Subject: KVM: arm64: Use config_lock to protect vgic state Almost all of the vgic state is VM-scoped but accessed from the context of a vCPU. These accesses were serialized on the kvm->lock which cannot be nested within a vcpu->mutex critical section. Move over the vgic state to using the config_lock. Tweak the lock ordering where necessary to ensure that the config_lock is acquired after the vcpu->mutex. Acquire the config_lock in kvm_vgic_create() to avoid a race between the converted flows and GIC creation. Where necessary, continue to acquire kvm->lock to avoid a race with vCPU creation (i.e. flows that use lock_all_vcpus()). Finally, promote the locking expectations in comments to lockdep assertions and update the locking documentation for the config_lock as well as vcpu->mutex. Cc: stable@vger.kernel.org Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230327164747.2466958-5-oliver.upton@linux.dev --- arch/arm64/kvm/vgic/vgic-debug.c | 8 +++--- arch/arm64/kvm/vgic/vgic-init.c | 36 +++++++++++++++++---------- arch/arm64/kvm/vgic/vgic-its.c | 18 +++++++++----- arch/arm64/kvm/vgic/vgic-kvm-device.c | 47 +++++++++++++++++++++-------------- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 4 +-- arch/arm64/kvm/vgic/vgic-mmio.c | 12 ++++----- arch/arm64/kvm/vgic/vgic-v4.c | 11 ++++---- arch/arm64/kvm/vgic/vgic.c | 12 +++++---- 8 files changed, 88 insertions(+), 60 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c index 78cde687383c..07aa0437125a 100644 --- a/arch/arm64/kvm/vgic/vgic-debug.c +++ b/arch/arm64/kvm/vgic/vgic-debug.c @@ -85,7 +85,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos) struct kvm *kvm = s->private; struct vgic_state_iter *iter; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.config_lock); iter = kvm->arch.vgic.iter; if (iter) { iter = ERR_PTR(-EBUSY); @@ -104,7 +104,7 @@ static void *vgic_debug_start(struct seq_file *s, loff_t *pos) if (end_of_vgic(iter)) iter = NULL; out: - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.config_lock); return iter; } @@ -132,12 +132,12 @@ static void vgic_debug_stop(struct seq_file *s, void *v) if (IS_ERR(v)) return; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.config_lock); iter = kvm->arch.vgic.iter; kfree(iter->lpi_array); kfree(iter); kvm->arch.vgic.iter = NULL; - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.config_lock); } static void print_dist_state(struct seq_file *s, struct vgic_dist *dist) diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index cd134db41a57..9d42c7cb2b58 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -74,9 +74,6 @@ int kvm_vgic_create(struct 
kvm *kvm, u32 type) unsigned long i; int ret; - if (irqchip_in_kernel(kvm)) - return -EEXIST; - /* * This function is also called by the KVM_CREATE_IRQCHIP handler, * which had no chance yet to check the availability of the GICv2 @@ -87,10 +84,20 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) !kvm_vgic_global_state.can_emulate_gicv2) return -ENODEV; + /* Must be held to avoid race with vCPU creation */ + lockdep_assert_held(&kvm->lock); + ret = -EBUSY; if (!lock_all_vcpus(kvm)) return ret; + mutex_lock(&kvm->arch.config_lock); + + if (irqchip_in_kernel(kvm)) { + ret = -EEXIST; + goto out_unlock; + } + kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu_has_run_once(vcpu)) goto out_unlock; @@ -118,6 +125,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions); out_unlock: + mutex_unlock(&kvm->arch.config_lock); unlock_all_vcpus(kvm); return ret; } @@ -227,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) * KVM io device for the redistributor that belongs to this VCPU. */ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { - mutex_lock(&vcpu->kvm->lock); + mutex_lock(&vcpu->kvm->arch.config_lock); ret = vgic_register_redist_iodev(vcpu); - mutex_unlock(&vcpu->kvm->lock); + mutex_unlock(&vcpu->kvm->arch.config_lock); } return ret; } @@ -250,7 +258,6 @@ static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu) * The function is generally called when nr_spis has been explicitly set * by the guest through the KVM DEVICE API. If not nr_spis is set to 256. * vgic_initialized() returns true when this function has succeeded. - * Must be called with kvm->lock held! */ int vgic_init(struct kvm *kvm) { @@ -259,6 +266,8 @@ int vgic_init(struct kvm *kvm) int ret = 0, i; unsigned long idx; + lockdep_assert_held(&kvm->arch.config_lock); + if (vgic_initialized(kvm)) return 0; @@ -373,12 +382,13 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; } -/* To be called with kvm->lock held */ static void __kvm_vgic_destroy(struct kvm *kvm) { struct kvm_vcpu *vcpu; unsigned long i; + lockdep_assert_held(&kvm->arch.config_lock); + vgic_debug_destroy(kvm); kvm_for_each_vcpu(i, vcpu, kvm) @@ -389,9 +399,9 @@ static void __kvm_vgic_destroy(struct kvm *kvm) void kvm_vgic_destroy(struct kvm *kvm) { - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.config_lock); __kvm_vgic_destroy(kvm); - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.config_lock); } /** @@ -414,9 +424,9 @@ int vgic_lazy_init(struct kvm *kvm) if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) return -EBUSY; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.config_lock); ret = vgic_init(kvm); - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.config_lock); } return ret; @@ -441,7 +451,7 @@ int kvm_vgic_map_resources(struct kvm *kvm) if (likely(vgic_ready(kvm))) return 0; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.config_lock); if (vgic_ready(kvm)) goto out; @@ -459,7 +469,7 @@ int kvm_vgic_map_resources(struct kvm *kvm) dist->ready = true; out: - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.config_lock); return ret; } diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 2642e9ce2819..7713cd06104e 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -2045,6 +2045,13 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev, mutex_lock(&dev->kvm->lock); + if (!lock_all_vcpus(dev->kvm)) { + mutex_unlock(&dev->kvm->lock); + return -EBUSY; + } + + 
mutex_lock(&dev->kvm->arch.config_lock); + if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) { ret = -ENXIO; goto out; @@ -2058,11 +2065,6 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev, goto out; } - if (!lock_all_vcpus(dev->kvm)) { - ret = -EBUSY; - goto out; - } - addr = its->vgic_its_base + offset; len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4; @@ -2076,8 +2078,9 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev, } else { *reg = region->its_read(dev->kvm, its, addr, len); } - unlock_all_vcpus(dev->kvm); out: + mutex_unlock(&dev->kvm->arch.config_lock); + unlock_all_vcpus(dev->kvm); mutex_unlock(&dev->kvm->lock); return ret; } @@ -2757,6 +2760,8 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) return -EBUSY; } + mutex_lock(&kvm->arch.config_lock); + switch (attr) { case KVM_DEV_ARM_ITS_CTRL_RESET: vgic_its_reset(kvm, its); @@ -2769,6 +2774,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) break; } + mutex_unlock(&kvm->arch.config_lock); unlock_all_vcpus(kvm); mutex_unlock(&its->its_lock); mutex_unlock(&kvm->lock); diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index edeac2380591..07e727023deb 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -46,7 +46,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev struct vgic_dist *vgic = &kvm->arch.vgic; int r; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.config_lock); switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) { case KVM_VGIC_V2_ADDR_TYPE_DIST: r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); @@ -68,7 +68,7 @@ int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev r = -ENODEV; } - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.config_lock); return r; } @@ -102,7 +102,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri if (get_user(addr, uaddr)) return -EFAULT; - mutex_lock(&kvm->lock); + mutex_lock(&kvm->arch.config_lock); switch (attr->attr) { case KVM_VGIC_V2_ADDR_TYPE_DIST: r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); @@ -191,7 +191,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri } out: - mutex_unlock(&kvm->lock); + mutex_unlock(&kvm->arch.config_lock); if (!r && !write) r = put_user(addr, uaddr); @@ -227,7 +227,7 @@ static int vgic_set_common_attr(struct kvm_device *dev, (val & 31)) return -EINVAL; - mutex_lock(&dev->kvm->lock); + mutex_lock(&dev->kvm->arch.config_lock); if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis) ret = -EBUSY; @@ -235,16 +235,16 @@ static int vgic_set_common_attr(struct kvm_device *dev, dev->kvm->arch.vgic.nr_spis = val - VGIC_NR_PRIVATE_IRQS; - mutex_unlock(&dev->kvm->lock); + mutex_unlock(&dev->kvm->arch.config_lock); return ret; } case KVM_DEV_ARM_VGIC_GRP_CTRL: { switch (attr->attr) { case KVM_DEV_ARM_VGIC_CTRL_INIT: - mutex_lock(&dev->kvm->lock); + mutex_lock(&dev->kvm->arch.config_lock); r = vgic_init(dev->kvm); - mutex_unlock(&dev->kvm->lock); + mutex_unlock(&dev->kvm->arch.config_lock); return r; case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES: /* @@ -260,7 +260,10 @@ static int vgic_set_common_attr(struct kvm_device *dev, mutex_unlock(&dev->kvm->lock); return -EBUSY; } + + mutex_lock(&dev->kvm->arch.config_lock); r = vgic_v3_save_pending_tables(dev->kvm); + mutex_unlock(&dev->kvm->arch.config_lock); unlock_all_vcpus(dev->kvm); mutex_unlock(&dev->kvm->lock); return r; 
@@ -411,15 +414,17 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev, mutex_lock(&dev->kvm->lock); + if (!lock_all_vcpus(dev->kvm)) { + mutex_unlock(&dev->kvm->lock); + return -EBUSY; + } + + mutex_lock(&dev->kvm->arch.config_lock); + ret = vgic_init(dev->kvm); if (ret) goto out; - if (!lock_all_vcpus(dev->kvm)) { - ret = -EBUSY; - goto out; - } - switch (attr->group) { case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val); @@ -432,8 +437,9 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev, break; } - unlock_all_vcpus(dev->kvm); out: + mutex_unlock(&dev->kvm->arch.config_lock); + unlock_all_vcpus(dev->kvm); mutex_unlock(&dev->kvm->lock); if (!ret && !is_write) @@ -569,12 +575,14 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev, mutex_lock(&dev->kvm->lock); - if (unlikely(!vgic_initialized(dev->kvm))) { - ret = -EBUSY; - goto out; + if (!lock_all_vcpus(dev->kvm)) { + mutex_unlock(&dev->kvm->lock); + return -EBUSY; } - if (!lock_all_vcpus(dev->kvm)) { + mutex_lock(&dev->kvm->arch.config_lock); + + if (unlikely(!vgic_initialized(dev->kvm))) { ret = -EBUSY; goto out; } @@ -609,8 +617,9 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev, break; } - unlock_all_vcpus(dev->kvm); out: + mutex_unlock(&dev->kvm->arch.config_lock); + unlock_all_vcpus(dev->kvm); mutex_unlock(&dev->kvm->lock); if (!ret && uaccess && !is_write) { diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 91201f743033..472b18ac92a2 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -111,7 +111,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, case GICD_CTLR: { bool was_enabled, is_hwsgi; - mutex_lock(&vcpu->kvm->lock); + mutex_lock(&vcpu->kvm->arch.config_lock); was_enabled = dist->enabled; is_hwsgi = dist->nassgireq; @@ -139,7 +139,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, else if (!was_enabled && dist->enabled) vgic_kick_vcpus(vcpu->kvm); - mutex_unlock(&vcpu->kvm->lock); + mutex_unlock(&vcpu->kvm->arch.config_lock); break; } case GICD_TYPER: diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index e67b3b2c8044..1939c94e0b24 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -530,13 +530,13 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, u32 intid = VGIC_ADDR_TO_INTID(addr, 1); u32 val; - mutex_lock(&vcpu->kvm->lock); + mutex_lock(&vcpu->kvm->arch.config_lock); vgic_access_active_prepare(vcpu, intid); val = __vgic_mmio_read_active(vcpu, addr, len); vgic_access_active_finish(vcpu, intid); - mutex_unlock(&vcpu->kvm->lock); + mutex_unlock(&vcpu->kvm->arch.config_lock); return val; } @@ -625,13 +625,13 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, { u32 intid = VGIC_ADDR_TO_INTID(addr, 1); - mutex_lock(&vcpu->kvm->lock); + mutex_lock(&vcpu->kvm->arch.config_lock); vgic_access_active_prepare(vcpu, intid); __vgic_mmio_write_cactive(vcpu, addr, len, val); vgic_access_active_finish(vcpu, intid); - mutex_unlock(&vcpu->kvm->lock); + mutex_unlock(&vcpu->kvm->arch.config_lock); } int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu, @@ -662,13 +662,13 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, { u32 intid = VGIC_ADDR_TO_INTID(addr, 1); - mutex_lock(&vcpu->kvm->lock); + mutex_lock(&vcpu->kvm->arch.config_lock); vgic_access_active_prepare(vcpu, intid); __vgic_mmio_write_sactive(vcpu, addr, len, val); vgic_access_active_finish(vcpu, intid); - 
mutex_unlock(&vcpu->kvm->lock); + mutex_unlock(&vcpu->kvm->arch.config_lock); } int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu, diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index a413718be92b..3bb003478060 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -232,9 +232,8 @@ int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq) * @kvm: Pointer to the VM being initialized * * We may be called each time a vITS is created, or when the - * vgic is initialized. This relies on kvm->lock to be - * held. In both cases, the number of vcpus should now be - * fixed. + * vgic is initialized. In both cases, the number of vcpus + * should now be fixed. */ int vgic_v4_init(struct kvm *kvm) { @@ -243,6 +242,8 @@ int vgic_v4_init(struct kvm *kvm) int nr_vcpus, ret; unsigned long i; + lockdep_assert_held(&kvm->arch.config_lock); + if (!kvm_vgic_global_state.has_gicv4) return 0; /* Nothing to see here... move along. */ @@ -309,14 +310,14 @@ int vgic_v4_init(struct kvm *kvm) /** * vgic_v4_teardown - Free the GICv4 data structures * @kvm: Pointer to the VM being destroyed - * - * Relies on kvm->lock to be held. */ void vgic_v4_teardown(struct kvm *kvm) { struct its_vm *its_vm = &kvm->arch.vgic.its_vm; int i; + lockdep_assert_held(&kvm->arch.config_lock); + if (!its_vm->vpes) return; diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index d97e6080b421..0a005da83ae6 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -24,11 +24,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { /* * Locking order is always: * kvm->lock (mutex) - * its->cmd_lock (mutex) - * its->its_lock (mutex) - * vgic_cpu->ap_list_lock must be taken with IRQs disabled - * kvm->lpi_list_lock must be taken with IRQs disabled - * vgic_irq->irq_lock must be taken with IRQs disabled + * vcpu->mutex (mutex) + * kvm->arch.config_lock (mutex) + * its->cmd_lock (mutex) + * its->its_lock (mutex) + * vgic_cpu->ap_list_lock must be taken with IRQs disabled + * kvm->lpi_list_lock must be taken with IRQs disabled + * vgic_irq->irq_lock must be taken with IRQs disabled * * As the ap_list_lock might be taken from the timer interrupt handler, * we have to disable IRQs before taking this lock and everything lower -- cgit v1.2.3 From 0d0ae656b71155ccc0be9388beef77a1f7e7558e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:41 +0100 Subject: KVM: arm64: timers: Use a per-vcpu, per-timer accumulator for fractional ns Instead of accumulating the fractional ns value generated every time we compute a ns delta in a global variable, use a per-vcpu, per-timer variable. This keeps the fractional ns local to the timer instead of contributing to any odd, unrelated timer. 
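For illustration, a standalone userspace C sketch of the mult/shift conversion with a per-timer fractional accumulator. The mult, shift and cycle values are made up, and the masking is simplified relative to cyclecounter_cyc2ns(); the point is only that the sub-ns remainder stays with its own timer context.

#include <stdint.h>
#include <stdio.h>

struct timer_ctx {
        uint64_t ns_frac;       /* fractional ns carried between conversions */
};

static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift,
                       uint64_t *frac)
{
        uint64_t ns = cycles * mult + *frac;

        *frac = ns & ((1ULL << shift) - 1);     /* remainder stays local */
        return ns >> shift;
}

int main(void)
{
        struct timer_ctx vtimer = { 0 }, ptimer = { 0 };

        /* each timer now accumulates its own remainder */
        printf("vtimer: %llu ns\n",
               (unsigned long long)cyc2ns(12345, 21845, 19, &vtimer.ns_frac));
        printf("ptimer: %llu ns\n",
               (unsigned long long)cyc2ns(67890, 21845, 19, &ptimer.ns_frac));
        return 0;
}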
Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-2-maz@kernel.org --- arch/arm64/kvm/arch_timer.c | 2 +- include/kvm/arm_arch_timer.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index e1af4301b913..9515c645f03d 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -212,7 +212,7 @@ static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx, ns = cyclecounter_cyc2ns(timecounter->cc, val - now, timecounter->mask, - &timecounter->frac); + &timer_ctx->ns_frac); return ns; } diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index c52a6e6839da..70d47c4adc6a 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -44,6 +44,7 @@ struct arch_timer_context { /* Emulated Timer (may be unused) */ struct hrtimer hrtimer; + u64 ns_frac; /* Offset for this counter/timer */ struct arch_timer_offset offset; -- cgit v1.2.3 From eaacaa4f26ade43dfdfdd3707c95d734b5ffc530 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:42 +0100 Subject: arm64: Add CNTPOFF_EL2 register definition Add the definition for CNTPOFF_EL2 in the description file. Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-3-maz@kernel.org --- arch/arm64/tools/sysreg | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index dd5a9c7e310f..7063f1aacc54 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -1952,6 +1952,10 @@ Sysreg CONTEXTIDR_EL2 3 4 13 0 1 Fields CONTEXTIDR_ELx EndSysreg +Sysreg CNTPOFF_EL2 3 4 14 0 6 +Field 63:0 PhysicalOffset +EndSysreg + Sysreg CPACR_EL12 3 5 1 0 2 Fields CPACR_ELx EndSysreg -- cgit v1.2.3 From 326349943ed181890b8b2af2755bd0eac93bd66d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:43 +0100 Subject: arm64: Add HAS_ECV_CNTPOFF capability Add the probing code for the FEAT_ECV variant that implements CNTPOFF_EL2. Why it is optional is a mystery, but let's try and detect it. 
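For illustration, how a consumer is expected to gate on the new capability once probing has completed; the helper name is hypothetical (the following patches add the real users).

static bool example_has_cntpoff(void)
{
        /* only valid after cpufeature finalisation */
        return cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF);
}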
Reviewed-by: Reiji Watanabe Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-4-maz@kernel.org --- arch/arm64/kernel/cpufeature.c | 11 +++++++++++ arch/arm64/tools/cpucaps | 1 + 2 files changed, 12 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2e3e55139777..c331c49a7d19 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2223,6 +2223,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .sign = FTR_UNSIGNED, .min_field_value = 1, }, + { + .desc = "Enhanced Counter Virtualization (CNTPOFF)", + .capability = ARM64_HAS_ECV_CNTPOFF, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = has_cpuid_feature, + .sys_reg = SYS_ID_AA64MMFR0_EL1, + .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT, + .field_width = 4, + .sign = FTR_UNSIGNED, + .min_field_value = ID_AA64MMFR0_EL1_ECV_CNTPOFF, + }, #ifdef CONFIG_ARM64_PAN { .desc = "Privileged Access Never", diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 37b1340e9646..40ba95472594 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -23,6 +23,7 @@ HAS_DCPOP HAS_DIT HAS_E0PD HAS_ECV +HAS_ECV_CNTPOFF HAS_EPAN HAS_GENERIC_AUTH HAS_GENERIC_AUTH_ARCH_QARMA3 -- cgit v1.2.3 From 2b4825a8694018901e641ccc2eafd0fff58d1415 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:44 +0100 Subject: KVM: arm64: timers: Use CNTPOFF_EL2 to offset the physical timer With ECV and CNTPOFF_EL2, it is very easy to offer an offset for the physical timer. So let's do just that. Nothing can set the offset yet, so this should have no effect whatsoever (famous last words...). Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-5-maz@kernel.org --- arch/arm64/kvm/arch_timer.c | 18 +++++++++++++++++- arch/arm64/kvm/hypercalls.c | 2 +- include/clocksource/arm_arch_timer.h | 1 + include/kvm/arm_arch_timer.h | 2 ++ 4 files changed, 21 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 9515c645f03d..3118ea0a1b41 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -52,6 +52,11 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, struct arch_timer_context *timer, enum kvm_arch_timer_regs treg); +static bool has_cntpoff(void) +{ + return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF)); +} + u32 timer_get_ctl(struct arch_timer_context *ctxt) { struct kvm_vcpu *vcpu = ctxt->vcpu; @@ -84,7 +89,7 @@ u64 timer_get_cval(struct arch_timer_context *ctxt) static u64 timer_get_offset(struct arch_timer_context *ctxt) { - if (ctxt->offset.vm_offset) + if (ctxt && ctxt->offset.vm_offset) return *ctxt->offset.vm_offset; return 0; @@ -432,6 +437,12 @@ static void set_cntvoff(u64 cntvoff) kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff); } +static void set_cntpoff(u64 cntpoff) +{ + if (has_cntpoff()) + write_sysreg_s(cntpoff, SYS_CNTPOFF_EL2); +} + static void timer_save_state(struct arch_timer_context *ctx) { struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu); @@ -480,6 +491,7 @@ static void timer_save_state(struct arch_timer_context *ctx) write_sysreg_el0(0, SYS_CNTP_CTL); isb(); + set_cntpoff(0); break; case NR_KVM_TIMERS: BUG(); @@ -550,6 +562,7 @@ static void timer_restore_state(struct arch_timer_context *ctx) write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL); break; case TIMER_PTIMER: + 
set_cntpoff(timer_get_offset(ctx)); write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL); isb(); write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL); @@ -767,6 +780,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) vtimer->vcpu = vcpu; vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset; ptimer->vcpu = vcpu; + ptimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.poffset; /* Synchronize cntvoff across all vtimers of a VM. */ timer_set_offset(vtimer, kvm_phys_timer_read()); @@ -1297,6 +1311,8 @@ void kvm_timer_init_vhe(void) val = read_sysreg(cnthctl_el2); val |= (CNTHCTL_EL1PCEN << cnthctl_shift); val |= (CNTHCTL_EL1PCTEN << cnthctl_shift); + if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF)) + val |= CNTHCTL_ECV; write_sysreg(val, cnthctl_el2); } diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 5da884e11337..39a4707e081d 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -47,7 +47,7 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val) cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset; break; case KVM_PTP_PHYS_COUNTER: - cycles = systime_snapshot.cycles; + cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.poffset; break; default: return; diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index 057c8964aefb..cbbc9a6dc571 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -21,6 +21,7 @@ #define CNTHCTL_EVNTEN (1 << 2) #define CNTHCTL_EVNTDIR (1 << 3) #define CNTHCTL_EVNTI (0xF << 4) +#define CNTHCTL_ECV (1 << 12) enum arch_timer_reg { ARCH_TIMER_REG_CTRL, diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 70d47c4adc6a..2dd0fd2406fb 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -34,6 +34,8 @@ struct arch_timer_offset { struct arch_timer_vm_data { /* Offset applied to the virtual timer/counter */ u64 voffset; + /* Offset applied to the physical timer/counter */ + u64 poffset; }; struct arch_timer_context { -- cgit v1.2.3 From c605ee245097d02ed5933e63ac601a8571712457 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:45 +0100 Subject: KVM: arm64: timers: Allow physical offset without CNTPOFF_EL2 CNTPOFF_EL2 is awesome, but it is mostly vapourware, and no publicly available implementation has it. So for the common mortals, let's implement the emulated version of this thing. It means trapping accesses to the physical counter and timer, and emulate some of it as necessary. As for CNTPOFF_EL2, nobody sets the offset yet. 
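For illustration, a sketch of the arithmetic the trap handlers apply when CNTPOFF_EL2 is absent. The example_* helpers are hypothetical; the other identifiers come from arch_timer.c.

static u64 example_guest_cntpct(struct kvm_vcpu *vcpu)
{
        /* guest-visible physical counter = hardware counter - VM offset */
        return kvm_phys_timer_read() - vcpu->kvm->arch.timer_data.poffset;
}

static u64 example_hw_cval(struct arch_timer_context *ctx)
{
        /*
         * A CVAL the guest programmed against the offset counter must be
         * re-biased before it is written to the real CNTP_CVAL_EL0.
         */
        return timer_get_cval(ctx) + timer_get_offset(ctx);
}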
Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-6-maz@kernel.org --- arch/arm64/include/asm/sysreg.h | 2 + arch/arm64/kvm/arch_timer.c | 98 +++++++++++++++++++++++++++++--------- arch/arm64/kvm/hyp/nvhe/timer-sr.c | 18 ++++--- arch/arm64/kvm/sys_regs.c | 9 ++++ 4 files changed, 98 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 9e3ecba3c4e6..f8da9e1b0c11 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -388,6 +388,7 @@ #define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0) +#define SYS_CNTPCT_EL0 sys_reg(3, 3, 14, 0, 1) #define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5) #define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6) @@ -400,6 +401,7 @@ #define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0) #define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1) +#define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0) #define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0) #define __PMEV_op2(n) ((n) & 0x7) diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 3118ea0a1b41..bb64a71ae193 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -458,6 +458,8 @@ static void timer_save_state(struct arch_timer_context *ctx) goto out; switch (index) { + u64 cval; + case TIMER_VTIMER: timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL)); timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL)); @@ -485,7 +487,12 @@ static void timer_save_state(struct arch_timer_context *ctx) break; case TIMER_PTIMER: timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL)); - timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL)); + cval = read_sysreg_el0(SYS_CNTP_CVAL); + + if (!has_cntpoff()) + cval -= timer_get_offset(ctx); + + timer_set_cval(ctx, cval); /* Disable the timer */ write_sysreg_el0(0, SYS_CNTP_CTL); @@ -555,6 +562,8 @@ static void timer_restore_state(struct arch_timer_context *ctx) goto out; switch (index) { + u64 cval, offset; + case TIMER_VTIMER: set_cntvoff(timer_get_offset(ctx)); write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL); @@ -562,8 +571,12 @@ static void timer_restore_state(struct arch_timer_context *ctx) write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL); break; case TIMER_PTIMER: - set_cntpoff(timer_get_offset(ctx)); - write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL); + cval = timer_get_cval(ctx); + offset = timer_get_offset(ctx); + set_cntpoff(offset); + if (!has_cntpoff()) + cval += offset; + write_sysreg_el0(cval, SYS_CNTP_CVAL); isb(); write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL); break; @@ -634,6 +647,61 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu) enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); } +/* If _pred is true, set bit in _set, otherwise set it in _clr */ +#define assign_clear_set_bit(_pred, _bit, _clr, _set) \ + do { \ + if (_pred) \ + (_set) |= (_bit); \ + else \ + (_clr) |= (_bit); \ + } while (0) + +static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map) +{ + bool tpt, tpc; + u64 clr, set; + + /* + * No trapping gets configured here with nVHE. See + * __timer_enable_traps(), which is where the stuff happens. + */ + if (!has_vhe()) + return; + + /* + * Our default policy is not to trap anything. As we progress + * within this function, reality kicks in and we start adding + * traps based on emulation requirements. 
+ */ + tpt = tpc = false; + + /* + * We have two possibility to deal with a physical offset: + * + * - Either we have CNTPOFF (yay!) or the offset is 0: + * we let the guest freely access the HW + * + * - or neither of these condition apply: + * we trap accesses to the HW, but still use it + * after correcting the physical offset + */ + if (!has_cntpoff() && timer_get_offset(map->direct_ptimer)) + tpt = tpc = true; + + /* + * Now that we have collected our requirements, compute the + * trap and enable bits. + */ + set = 0; + clr = 0; + + assign_clear_set_bit(tpt, CNTHCTL_EL1PCEN << 10, set, clr); + assign_clear_set_bit(tpc, CNTHCTL_EL1PCTEN << 10, set, clr); + + /* This only happens on VHE, so use the CNTKCTL_EL1 accessor */ + sysreg_clear_set(cntkctl_el1, clr, set); +} + void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); @@ -657,9 +725,10 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) timer_restore_state(map.direct_vtimer); if (map.direct_ptimer) timer_restore_state(map.direct_ptimer); - if (map.emul_ptimer) timer_emulate(map.emul_ptimer); + + timer_set_traps(vcpu, &map); } bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) @@ -1292,28 +1361,11 @@ no_vgic: return 0; } -/* - * On VHE system, we only need to configure the EL2 timer trap register once, - * not for every world switch. - * The host kernel runs at EL2 with HCR_EL2.TGE == 1, - * and this makes those bits have no effect for the host kernel execution. - */ +/* If we have CNTPOFF, permanently set ECV to enable it */ void kvm_timer_init_vhe(void) { - /* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */ - u32 cnthctl_shift = 10; - u64 val; - - /* - * VHE systems allow the guest direct access to the EL1 physical - * timer/counter. - */ - val = read_sysreg(cnthctl_el2); - val |= (CNTHCTL_EL1PCEN << cnthctl_shift); - val |= (CNTHCTL_EL1PCTEN << cnthctl_shift); if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF)) - val |= CNTHCTL_ECV; - write_sysreg(val, cnthctl_el2); + sysreg_clear_set(cntkctl_el1, 0, CNTHCTL_ECV); } static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq) diff --git a/arch/arm64/kvm/hyp/nvhe/timer-sr.c b/arch/arm64/kvm/hyp/nvhe/timer-sr.c index 9072e71693ba..b185ac0dbd47 100644 --- a/arch/arm64/kvm/hyp/nvhe/timer-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/timer-sr.c @@ -9,6 +9,7 @@ #include #include +#include void __kvm_timer_set_cntvoff(u64 cntvoff) { @@ -35,14 +36,19 @@ void __timer_disable_traps(struct kvm_vcpu *vcpu) */ void __timer_enable_traps(struct kvm_vcpu *vcpu) { - u64 val; + u64 clr = 0, set = 0; /* * Disallow physical timer access for the guest - * Physical counter access is allowed + * Physical counter access is allowed if no offset is enforced + * or running protected (we don't offset anything in this case). 
*/ - val = read_sysreg(cnthctl_el2); - val &= ~CNTHCTL_EL1PCEN; - val |= CNTHCTL_EL1PCTEN; - write_sysreg(val, cnthctl_el2); + clr = CNTHCTL_EL1PCEN; + if (is_protected_kvm_enabled() || + !kern_hyp_va(vcpu->kvm)->arch.timer_data.poffset) + set |= CNTHCTL_EL1PCTEN; + else + clr |= CNTHCTL_EL1PCTEN; + + sysreg_clear_set(cnthctl_el2, clr, set); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 53749d3a0996..be7c2598e563 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1139,6 +1139,12 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, tmr = TIMER_PTIMER; treg = TIMER_REG_CVAL; break; + case SYS_CNTPCT_EL0: + case SYS_CNTPCTSS_EL0: + case SYS_AARCH32_CNTPCT: + tmr = TIMER_PTIMER; + treg = TIMER_REG_CNT; + break; default: print_sys_reg_msg(p, "%s", "Unhandled trapped timer register"); kvm_inject_undefined(vcpu); @@ -2075,6 +2081,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { AMU_AMEVTYPER1_EL0(14), AMU_AMEVTYPER1_EL0(15), + { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer }, + { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer }, @@ -2525,6 +2533,7 @@ static const struct sys_reg_desc cp15_64_regs[] = { { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 }, { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr }, { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */ + { SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer }, { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 }, { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */ { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */ -- cgit v1.2.3 From 96906a9150a86a86b0464939625279b8e19f6e88 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:46 +0100 Subject: KVM: arm64: Expose {un,}lock_all_vcpus() to the rest of KVM Being able to lock/unlock all vcpus in one go is a feature that only the vgic has enjoyed so far. Let's be brave and expose it to the world. 
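For illustration, a hypothetical caller showing the calling convention the exported helpers expect: kvm->lock held, trylock semantics, and unlock_all_vcpus() only on success.

static int example_vm_wide_update(struct kvm *kvm)
{
        int ret = 0;

        mutex_lock(&kvm->lock);
        if (!lock_all_vcpus(kvm)) {
                ret = -EBUSY;   /* a vCPU is busy in an ioctl */
                goto out;
        }

        /* ... safely touch state no vCPU may concurrently use ... */

        unlock_all_vcpus(kvm);
out:
        mutex_unlock(&kvm->lock);
        return ret;
}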
Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-7-maz@kernel.org --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/kvm/arm.c | 43 +++++++++++++++++++++++++++++++++++ arch/arm64/kvm/vgic/vgic-kvm-device.c | 38 ------------------------------- arch/arm64/kvm/vgic/vgic.h | 3 --- 4 files changed, 46 insertions(+), 41 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index bcd774d74f34..002a10cbade2 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -922,6 +922,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu); int __init kvm_sys_reg_table_init(void); +bool lock_all_vcpus(struct kvm *kvm); +void unlock_all_vcpus(struct kvm *kvm); + /* MMIO helpers */ void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3bd732eaf087..ae5110cc3bad 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1484,6 +1484,49 @@ long kvm_arch_vm_ioctl(struct file *filp, } } +/* unlocks vcpus from @vcpu_lock_idx and smaller */ +static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx) +{ + struct kvm_vcpu *tmp_vcpu; + + for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { + tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); + mutex_unlock(&tmp_vcpu->mutex); + } +} + +void unlock_all_vcpus(struct kvm *kvm) +{ + lockdep_assert_held(&kvm->lock); + + unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1); +} + +/* Returns true if all vcpus were locked, false otherwise */ +bool lock_all_vcpus(struct kvm *kvm) +{ + struct kvm_vcpu *tmp_vcpu; + unsigned long c; + + lockdep_assert_held(&kvm->lock); + + /* + * Any time a vcpu is in an ioctl (including running), the + * core KVM code tries to grab the vcpu->mutex. + * + * By grabbing the vcpu->mutex of all VCPUs we ensure that no + * other VCPUs can fiddle with the state while we access it. + */ + kvm_for_each_vcpu(c, tmp_vcpu, kvm) { + if (!mutex_trylock(&tmp_vcpu->mutex)) { + unlock_vcpus(kvm, c - 1); + return false; + } + } + + return true; +} + static unsigned long nvhe_percpu_size(void) { return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index edeac2380591..04dd68835b3f 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -342,44 +342,6 @@ int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr, return 0; } -/* unlocks vcpus from @vcpu_lock_idx and smaller */ -static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx) -{ - struct kvm_vcpu *tmp_vcpu; - - for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { - tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); - mutex_unlock(&tmp_vcpu->mutex); - } -} - -void unlock_all_vcpus(struct kvm *kvm) -{ - unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1); -} - -/* Returns true if all vcpus were locked, false otherwise */ -bool lock_all_vcpus(struct kvm *kvm) -{ - struct kvm_vcpu *tmp_vcpu; - unsigned long c; - - /* - * Any time a vcpu is run, vcpu_load is called which tries to grab the - * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure - * that no other VCPUs are run and fiddle with the vgic state while we - * access it. 
- */ - kvm_for_each_vcpu(c, tmp_vcpu, kvm) { - if (!mutex_trylock(&tmp_vcpu->mutex)) { - unlock_vcpus(kvm, c - 1); - return false; - } - } - - return true; -} - /** * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state * diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index 7f7f3c5ed85a..f9923beedd27 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -273,9 +273,6 @@ int vgic_init(struct kvm *kvm); void vgic_debug_init(struct kvm *kvm); void vgic_debug_destroy(struct kvm *kvm); -bool lock_all_vcpus(struct kvm *kvm); -void unlock_all_vcpus(struct kvm *kvm); - static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu) { struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu; -- cgit v1.2.3 From 30ec7997d175cd689fc61bfc4059f4d35b11858c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:47 +0100 Subject: KVM: arm64: timers: Allow userspace to set the global counter offset And this is the moment you have all been waiting for: setting the counter offset from userspace. We expose a brand new capability that reports the ability to set the offset for both the virtual and physical sides. In keeping with the architecture, the offset is expressed as a delta that is substracted from the physical counter value. Once this new API is used, there is no going back, and the counters cannot be written to to set the offsets implicitly (the writes are instead ignored). Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-8-maz@kernel.org --- arch/arm64/include/asm/kvm_host.h | 4 +++ arch/arm64/include/uapi/asm/kvm.h | 9 +++++++ arch/arm64/kvm/arch_timer.c | 54 +++++++++++++++++++++++++++++++++++---- arch/arm64/kvm/arm.c | 8 ++++++ include/uapi/linux/kvm.h | 3 +++ 5 files changed, 73 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 002a10cbade2..116233a390e9 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -221,6 +221,8 @@ struct kvm_arch { #define KVM_ARCH_FLAG_EL1_32BIT 4 /* PSCI SYSTEM_SUSPEND enabled for the guest */ #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 5 + /* VM counter offset */ +#define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 6 unsigned long flags; @@ -1010,6 +1012,8 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, struct kvm_arm_copy_mte_tags *copy_tags); +int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm, + struct kvm_arm_counter_offset *offset); /* Guest/host FPSIMD coordination helpers */ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index f8129c624b07..12fb0d8a760a 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -198,6 +198,15 @@ struct kvm_arm_copy_mte_tags { __u64 reserved[2]; }; +/* + * Counter/Timer offset structure. Describe the virtual/physical offset. + * To be used with KVM_ARM_SET_COUNTER_OFFSET. 
+ */ +struct kvm_arm_counter_offset { + __u64 counter_offset; + __u64 reserved; +}; + #define KVM_ARM_TAGS_TO_GUEST 0 #define KVM_ARM_TAGS_FROM_GUEST 1 diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index bb64a71ae193..771504c79711 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -851,9 +851,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) ptimer->vcpu = vcpu; ptimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.poffset; - /* Synchronize cntvoff across all vtimers of a VM. */ - timer_set_offset(vtimer, kvm_phys_timer_read()); - timer_set_offset(ptimer, 0); + /* Synchronize offsets across timers of a VM if not already provided */ + if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) { + timer_set_offset(vtimer, kvm_phys_timer_read()); + timer_set_offset(ptimer, 0); + } hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); timer->bg_timer.function = kvm_bg_timer_expire; @@ -897,8 +899,11 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); break; case KVM_REG_ARM_TIMER_CNT: - timer = vcpu_vtimer(vcpu); - timer_set_offset(timer, kvm_phys_timer_read() - value); + if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, + &vcpu->kvm->arch.flags)) { + timer = vcpu_vtimer(vcpu); + timer_set_offset(timer, kvm_phys_timer_read() - value); + } break; case KVM_REG_ARM_TIMER_CVAL: timer = vcpu_vtimer(vcpu); @@ -908,6 +913,13 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) timer = vcpu_ptimer(vcpu); kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); break; + case KVM_REG_ARM_PTIMER_CNT: + if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, + &vcpu->kvm->arch.flags)) { + timer = vcpu_ptimer(vcpu); + timer_set_offset(timer, kvm_phys_timer_read() - value); + } + break; case KVM_REG_ARM_PTIMER_CVAL: timer = vcpu_ptimer(vcpu); kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); @@ -1443,3 +1455,35 @@ int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return -ENXIO; } + +int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm, + struct kvm_arm_counter_offset *offset) +{ + int ret = 0; + + if (offset->reserved) + return -EINVAL; + + mutex_lock(&kvm->lock); + + if (lock_all_vcpus(kvm)) { + set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags); + + /* + * If userspace decides to set the offset using this + * API rather than merely restoring the counter + * values, the offset applies to both the virtual and + * physical views. 
+ */ + kvm->arch.timer_data.voffset = offset->counter_offset; + kvm->arch.timer_data.poffset = offset->counter_offset; + + unlock_all_vcpus(kvm); + } else { + ret = -EBUSY; + } + + mutex_unlock(&kvm->lock); + + return ret; +} diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index ae5110cc3bad..1c8a4bbae684 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -220,6 +220,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_VCPU_ATTRIBUTES: case KVM_CAP_PTP_KVM: case KVM_CAP_ARM_SYSTEM_SUSPEND: + case KVM_CAP_COUNTER_OFFSET: r = 1; break; case KVM_CAP_SET_GUEST_DEBUG2: @@ -1479,6 +1480,13 @@ long kvm_arch_vm_ioctl(struct file *filp, return -EFAULT; return kvm_vm_ioctl_mte_copy_tags(kvm, ©_tags); } + case KVM_ARM_SET_COUNTER_OFFSET: { + struct kvm_arm_counter_offset offset; + + if (copy_from_user(&offset, argp, sizeof(offset))) + return -EFAULT; + return kvm_vm_ioctl_set_counter_offset(kvm, &offset); + } default: return -EINVAL; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index d77aef872a0a..6a7e1a0ecf04 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1184,6 +1184,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224 #define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225 #define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226 +#define KVM_CAP_COUNTER_OFFSET 227 #ifdef KVM_CAP_IRQ_ROUTING @@ -1543,6 +1544,8 @@ struct kvm_s390_ucas_mapping { #define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter) #define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3) #define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags) +/* Available with KVM_CAP_COUNTER_OFFSET */ +#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) -- cgit v1.2.3 From 680232a94c1289aad25ffae02f2785823763b456 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:48 +0100 Subject: KVM: arm64: timers: Allow save/restoring of the physical timer Nothing like being 10 year late to a party! Now that userspace can set counter offsets, we can save/restore the physical timer as well! Nobody really cared so far, but you're welcome anyway. 
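For illustration, a userspace-side sketch of saving the physical counter through the newly enumerated register. KVM_REG_ARM_PTIMER_CNT comes from the arm64 uapi headers; the fd handling and error checking around it are assumed.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int save_ptimer_count(int vcpu_fd, uint64_t *cnt)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM_PTIMER_CNT,
                .addr = (uint64_t)(unsigned long)cnt,
        };

        /* the same id with KVM_SET_ONE_REG restores the value */
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}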
Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-9-maz@kernel.org --- arch/arm64/kvm/guest.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 07444fa22888..46e910819de6 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -590,11 +590,16 @@ static unsigned long num_core_regs(const struct kvm_vcpu *vcpu) return copy_core_reg_indices(vcpu, NULL); } -/** - * ARM64 versions of the TIMER registers, always available on arm64 - */ +static const u64 timer_reg_list[] = { + KVM_REG_ARM_TIMER_CTL, + KVM_REG_ARM_TIMER_CNT, + KVM_REG_ARM_TIMER_CVAL, + KVM_REG_ARM_PTIMER_CTL, + KVM_REG_ARM_PTIMER_CNT, + KVM_REG_ARM_PTIMER_CVAL, +}; -#define NUM_TIMER_REGS 3 +#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list) static bool is_timer_reg(u64 index) { @@ -602,6 +607,9 @@ static bool is_timer_reg(u64 index) case KVM_REG_ARM_TIMER_CTL: case KVM_REG_ARM_TIMER_CNT: case KVM_REG_ARM_TIMER_CVAL: + case KVM_REG_ARM_PTIMER_CTL: + case KVM_REG_ARM_PTIMER_CNT: + case KVM_REG_ARM_PTIMER_CVAL: return true; } return false; @@ -609,14 +617,11 @@ static bool is_timer_reg(u64 index) static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) { - if (put_user(KVM_REG_ARM_TIMER_CTL, uindices)) - return -EFAULT; - uindices++; - if (put_user(KVM_REG_ARM_TIMER_CNT, uindices)) - return -EFAULT; - uindices++; - if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices)) - return -EFAULT; + for (int i = 0; i < NUM_TIMER_REGS; i++) { + if (put_user(timer_reg_list[i], uindices)) + return -EFAULT; + uindices++; + } return 0; } -- cgit v1.2.3 From 5591805d2c21b70838b723b71b8ff613de51cfff Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:49 +0100 Subject: KVM: arm64: timers: Rationalise per-vcpu timer init The way we initialise our timer contexts may be satisfactory for two timers, but will be getting pretty annoying with four. Cleanup the whole thing by removing the code duplication and getting rid of unused IRQ configuration elements. Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-10-maz@kernel.org --- arch/arm64/kvm/arch_timer.c | 73 +++++++++++++++++++++++--------------------- include/kvm/arm_arch_timer.h | 1 - 2 files changed, 39 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 771504c79711..e46f04ed8f86 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -30,14 +30,9 @@ static u32 host_ptimer_irq_flags; static DEFINE_STATIC_KEY_FALSE(has_gic_active_state); -static const struct kvm_irq_level default_ptimer_irq = { - .irq = 30, - .level = 1, -}; - -static const struct kvm_irq_level default_vtimer_irq = { - .irq = 27, - .level = 1, +static const u8 default_ppi[] = { + [TIMER_PTIMER] = 30, + [TIMER_VTIMER] = 27, }; static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx); @@ -820,12 +815,14 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) * resets the timer to be disabled and unmasked and is compliant with * the ARMv7 architecture. 
*/ - timer_set_ctl(vcpu_vtimer(vcpu), 0); - timer_set_ctl(vcpu_ptimer(vcpu), 0); + for (int i = 0; i < NR_KVM_TIMERS; i++) + timer_set_ctl(vcpu_get_timer(vcpu, i), 0); + if (timer->enabled) { - kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu)); - kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu)); + for (int i = 0; i < NR_KVM_TIMERS; i++) + kvm_timer_update_irq(vcpu, false, + vcpu_get_timer(vcpu, i)); if (irqchip_in_kernel(vcpu->kvm)) { kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq); @@ -840,39 +837,47 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) return 0; } +static void timer_context_init(struct kvm_vcpu *vcpu, int timerid) +{ + struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid); + struct kvm *kvm = vcpu->kvm; + + ctxt->vcpu = vcpu; + + if (timerid == TIMER_VTIMER) + ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset; + else + ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset; + + hrtimer_init(&ctxt->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); + ctxt->hrtimer.function = kvm_hrtimer_expire; + ctxt->irq.irq = default_ppi[timerid]; + + switch (timerid) { + case TIMER_PTIMER: + ctxt->host_timer_irq = host_ptimer_irq; + break; + case TIMER_VTIMER: + ctxt->host_timer_irq = host_vtimer_irq; + break; + } +} + void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); - struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); - struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); - vtimer->vcpu = vcpu; - vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset; - ptimer->vcpu = vcpu; - ptimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.poffset; + for (int i = 0; i < NR_KVM_TIMERS; i++) + timer_context_init(vcpu, i); /* Synchronize offsets across timers of a VM if not already provided */ if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) { - timer_set_offset(vtimer, kvm_phys_timer_read()); - timer_set_offset(ptimer, 0); + timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read()); + timer_set_offset(vcpu_ptimer(vcpu), 0); } hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); timer->bg_timer.function = kvm_bg_timer_expire; - - hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); - hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); - vtimer->hrtimer.function = kvm_hrtimer_expire; - ptimer->hrtimer.function = kvm_hrtimer_expire; - - vtimer->irq.irq = default_vtimer_irq.irq; - ptimer->irq.irq = default_ptimer_irq.irq; - - vtimer->host_timer_irq = host_vtimer_irq; - ptimer->host_timer_irq = host_ptimer_irq; - - vtimer->host_timer_irq_flags = host_vtimer_irq_flags; - ptimer->host_timer_irq_flags = host_ptimer_irq_flags; } void kvm_timer_cpu_up(void) diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 2dd0fd2406fb..c746ef64220b 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -59,7 +59,6 @@ struct arch_timer_context { /* Duplicated state from arch_timer.c for convenience */ u32 host_timer_irq; - u32 host_timer_irq_flags; }; struct timer_map { -- cgit v1.2.3 From 33c549460ef9119eb115484e81f54521122341db Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:50 +0100 Subject: KVM: arm64: timers: Abstract per-timer IRQ access As we are about to move the location of the per-timer IRQ into the VM structure, abstract the location of the IRQ behind an accessor. This will make the repainting sligntly less painful. 
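[ For illustration, the two forms the accessor takes over the course of this series, shown side by side: the trivial per-context definition added here, and the per-VM definition the following patch switches it to without touching any caller. ]

	/* This patch: a thin wrapper over the per-context field. */
	#define timer_irq(ctx)		((ctx)->irq.irq)

	/* Next patch: repointed at the per-VM PPI array. */
	#define timer_vm_data(ctx)	(&(ctx)->vcpu->kvm->arch.timer_data)
	#define timer_irq(ctx)		(timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])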
Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-11-maz@kernel.org --- arch/arm64/kvm/arch_timer.c | 38 +++++++++++++++++++------------------- include/kvm/arm_arch_timer.h | 2 ++ 2 files changed, 21 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index e46f04ed8f86..d08d8c2fc30d 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -392,12 +392,12 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, int ret; timer_ctx->irq.level = new_level; - trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, + trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx), timer_ctx->irq.level); if (!userspace_irqchip(vcpu->kvm)) { ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, - timer_ctx->irq.irq, + timer_irq(timer_ctx), timer_ctx->irq.level, timer_ctx); WARN_ON(ret); @@ -607,7 +607,7 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx) kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx); if (irqchip_in_kernel(vcpu->kvm)) - phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq); + phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx)); phys_active |= ctx->irq.level; @@ -825,9 +825,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) vcpu_get_timer(vcpu, i)); if (irqchip_in_kernel(vcpu->kvm)) { - kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq); + kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_vtimer)); if (map.direct_ptimer) - kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq); + kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_ptimer)); } } @@ -851,7 +851,7 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid) hrtimer_init(&ctxt->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); ctxt->hrtimer.function = kvm_hrtimer_expire; - ctxt->irq.irq = default_ppi[timerid]; + timer_irq(ctxt) = default_ppi[timerid]; switch (timerid) { case TIMER_PTIMER: @@ -1295,19 +1295,19 @@ static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu) int vtimer_irq, ptimer_irq, ret; unsigned long i; - vtimer_irq = vcpu_vtimer(vcpu)->irq.irq; + vtimer_irq = timer_irq(vcpu_vtimer(vcpu)); ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu)); if (ret) return false; - ptimer_irq = vcpu_ptimer(vcpu)->irq.irq; + ptimer_irq = timer_irq(vcpu_ptimer(vcpu)); ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu)); if (ret) return false; kvm_for_each_vcpu(i, vcpu, vcpu->kvm) { - if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq || - vcpu_ptimer(vcpu)->irq.irq != ptimer_irq) + if (timer_irq(vcpu_vtimer(vcpu)) != vtimer_irq || + timer_irq(vcpu_ptimer(vcpu)) != ptimer_irq) return false; } @@ -1322,9 +1322,9 @@ bool kvm_arch_timer_get_input_level(int vintid) if (WARN(!vcpu, "No vcpu context!\n")) return false; - if (vintid == vcpu_vtimer(vcpu)->irq.irq) + if (vintid == timer_irq(vcpu_vtimer(vcpu))) timer = vcpu_vtimer(vcpu); - else if (vintid == vcpu_ptimer(vcpu)->irq.irq) + else if (vintid == timer_irq(vcpu_ptimer(vcpu))) timer = vcpu_ptimer(vcpu); else BUG(); @@ -1358,7 +1358,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) ret = kvm_vgic_map_phys_irq(vcpu, map.direct_vtimer->host_timer_irq, - map.direct_vtimer->irq.irq, + timer_irq(map.direct_vtimer), &arch_timer_irq_ops); if (ret) return ret; @@ -1366,7 +1366,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) if (map.direct_ptimer) { ret = kvm_vgic_map_phys_irq(vcpu, map.direct_ptimer->host_timer_irq, - 
map.direct_ptimer->irq.irq, + timer_irq(map.direct_ptimer), &arch_timer_irq_ops); } @@ -1391,8 +1391,8 @@ static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq) unsigned long i; kvm_for_each_vcpu(i, vcpu, kvm) { - vcpu_vtimer(vcpu)->irq.irq = vtimer_irq; - vcpu_ptimer(vcpu)->irq.irq = ptimer_irq; + timer_irq(vcpu_vtimer(vcpu)) = vtimer_irq; + timer_irq(vcpu_ptimer(vcpu)) = ptimer_irq; } } @@ -1417,10 +1417,10 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) switch (attr->attr) { case KVM_ARM_VCPU_TIMER_IRQ_VTIMER: - set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq); + set_timer_irqs(vcpu->kvm, irq, timer_irq(ptimer)); break; case KVM_ARM_VCPU_TIMER_IRQ_PTIMER: - set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq); + set_timer_irqs(vcpu->kvm, timer_irq(vtimer), irq); break; default: return -ENXIO; @@ -1446,7 +1446,7 @@ int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return -ENXIO; } - irq = timer->irq.irq; + irq = timer_irq(timer); return put_user(irq, uaddr); } diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index c746ef64220b..27cada09f588 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -109,6 +109,8 @@ bool kvm_arch_timer_get_input_level(int vintid); #define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers) +#define timer_irq(ctx) ((ctx)->irq.irq) + u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu, enum kvm_arch_timers tmr, enum kvm_arch_timer_regs treg); -- cgit v1.2.3 From 8a5eb2d210807e7dbe9ece7075533014cf4b9c27 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:51 +0100 Subject: KVM: arm64: timers: Move the timer IRQs into arch_timer_vm_data Having the timer IRQs duplicated into each vcpu isn't great, and becomes absolutely awful with NV. So let's move these into the per-VM arch_timer_vm_data structure. This simplifies a lot of code, but requires us to introduce a mutex so that we can reason about userspace trying to change an interrupt number while another vcpu is running, something that wasn't really well handled so far. 
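[ Illustrative userspace sketch (vcpu_fd and the chosen PPI are assumptions, and an in-kernel GIC is assumed): the attribute is still set through a vCPU fd, but the value now lands in the per-VM arch_timer_vm_data and is rejected with -EBUSY once the PPIs have been made immutable at first vCPU run. ]

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Route the guest's virtual timer to PPI 29, before the first KVM_RUN. */
	static int set_vtimer_ppi(int vcpu_fd)
	{
		int irq = 29;	/* example PPI, must be in the 16..31 range */
		struct kvm_device_attr attr = {
			.group = KVM_ARM_VCPU_TIMER_CTRL,
			.attr  = KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
			.addr  = (__u64)(unsigned long)&irq,
		};

		return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
	}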
Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-12-maz@kernel.org --- arch/arm64/include/asm/kvm_host.h | 2 + arch/arm64/kvm/arch_timer.c | 108 ++++++++++++++++++++++---------------- arch/arm64/kvm/arm.c | 2 + include/kvm/arm_arch_timer.h | 18 +++++-- 4 files changed, 82 insertions(+), 48 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 116233a390e9..1280154c9ef3 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -223,6 +223,8 @@ struct kvm_arch { #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 5 /* VM counter offset */ #define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 6 + /* Timer PPIs made immutable */ +#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 7 unsigned long flags; diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index d08d8c2fc30d..1d811735e05f 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -851,7 +851,6 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid) hrtimer_init(&ctxt->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); ctxt->hrtimer.function = kvm_hrtimer_expire; - timer_irq(ctxt) = default_ppi[timerid]; switch (timerid) { case TIMER_PTIMER: @@ -880,6 +879,13 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) timer->bg_timer.function = kvm_bg_timer_expire; } +void kvm_timer_init_vm(struct kvm *kvm) +{ + mutex_init(&kvm->arch.timer_data.lock); + for (int i = 0; i < NR_KVM_TIMERS; i++) + kvm->arch.timer_data.ppi[i] = default_ppi[i]; +} + void kvm_timer_cpu_up(void) { enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags); @@ -1292,44 +1298,56 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu) { - int vtimer_irq, ptimer_irq, ret; - unsigned long i; + u32 ppis = 0; + bool valid; - vtimer_irq = timer_irq(vcpu_vtimer(vcpu)); - ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu)); - if (ret) - return false; + mutex_lock(&vcpu->kvm->arch.timer_data.lock); - ptimer_irq = timer_irq(vcpu_ptimer(vcpu)); - ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu)); - if (ret) - return false; + for (int i = 0; i < NR_KVM_TIMERS; i++) { + struct arch_timer_context *ctx; + int irq; - kvm_for_each_vcpu(i, vcpu, vcpu->kvm) { - if (timer_irq(vcpu_vtimer(vcpu)) != vtimer_irq || - timer_irq(vcpu_ptimer(vcpu)) != ptimer_irq) - return false; + ctx = vcpu_get_timer(vcpu, i); + irq = timer_irq(ctx); + if (kvm_vgic_set_owner(vcpu, irq, ctx)) + break; + + /* + * We know by construction that we only have PPIs, so + * all values are less than 32. 
+ */ + ppis |= BIT(irq); } - return true; + valid = hweight32(ppis) == NR_KVM_TIMERS; + + if (valid) + set_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, &vcpu->kvm->arch.flags); + + mutex_unlock(&vcpu->kvm->arch.timer_data.lock); + + return valid; } bool kvm_arch_timer_get_input_level(int vintid) { struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); - struct arch_timer_context *timer; if (WARN(!vcpu, "No vcpu context!\n")) return false; - if (vintid == timer_irq(vcpu_vtimer(vcpu))) - timer = vcpu_vtimer(vcpu); - else if (vintid == timer_irq(vcpu_ptimer(vcpu))) - timer = vcpu_ptimer(vcpu); - else - BUG(); + for (int i = 0; i < NR_KVM_TIMERS; i++) { + struct arch_timer_context *ctx; + + ctx = vcpu_get_timer(vcpu, i); + if (timer_irq(ctx) == vintid) + return kvm_timer_should_fire(ctx); + } - return kvm_timer_should_fire(timer); + /* A timer IRQ has fired, but no matching timer was found? */ + WARN_RATELIMIT(1, "timer INTID%d unknown\n", vintid); + + return false; } int kvm_timer_enable(struct kvm_vcpu *vcpu) @@ -1385,23 +1403,10 @@ void kvm_timer_init_vhe(void) sysreg_clear_set(cntkctl_el1, 0, CNTHCTL_ECV); } -static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq) -{ - struct kvm_vcpu *vcpu; - unsigned long i; - - kvm_for_each_vcpu(i, vcpu, kvm) { - timer_irq(vcpu_vtimer(vcpu)) = vtimer_irq; - timer_irq(vcpu_ptimer(vcpu)) = ptimer_irq; - } -} - int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { int __user *uaddr = (int __user *)(long)attr->addr; - struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); - struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); - int irq; + int irq, idx, ret = 0; if (!irqchip_in_kernel(vcpu->kvm)) return -EINVAL; @@ -1412,21 +1417,36 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) if (!(irq_is_ppi(irq))) return -EINVAL; - if (vcpu->arch.timer_cpu.enabled) - return -EBUSY; + mutex_lock(&vcpu->kvm->arch.timer_data.lock); + + if (test_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, + &vcpu->kvm->arch.flags)) { + ret = -EBUSY; + goto out; + } switch (attr->attr) { case KVM_ARM_VCPU_TIMER_IRQ_VTIMER: - set_timer_irqs(vcpu->kvm, irq, timer_irq(ptimer)); + idx = TIMER_VTIMER; break; case KVM_ARM_VCPU_TIMER_IRQ_PTIMER: - set_timer_irqs(vcpu->kvm, timer_irq(vtimer), irq); + idx = TIMER_PTIMER; break; default: - return -ENXIO; + ret = -ENXIO; + goto out; } - return 0; + /* + * We cannot validate the IRQ unicity before we run, so take it at + * face value. The verdict will be given on first vcpu run, for each + * vcpu. Yes this is late. Blame it on the stupid API. 
+ */ + vcpu->kvm->arch.timer_data.ppi[idx] = irq; + +out: + mutex_unlock(&vcpu->kvm->arch.timer_data.lock); + return ret; } int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 1c8a4bbae684..4c5e9dfbf83a 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -148,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_vgic_early_init(kvm); + kvm_timer_init_vm(kvm); + /* The maximum number of VCPUs is limited by the host's GIC model */ kvm->max_vcpus = kvm_arm_default_max_vcpus(); diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 27cada09f588..f093ea9f540d 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -36,14 +36,16 @@ struct arch_timer_vm_data { u64 voffset; /* Offset applied to the physical timer/counter */ u64 poffset; + + struct mutex lock; + + /* The PPI for each timer, global to the VM */ + u8 ppi[NR_KVM_TIMERS]; }; struct arch_timer_context { struct kvm_vcpu *vcpu; - /* Timer IRQ */ - struct kvm_irq_level irq; - /* Emulated Timer (may be unused) */ struct hrtimer hrtimer; u64 ns_frac; @@ -57,6 +59,11 @@ struct arch_timer_context { */ bool loaded; + /* Output level of the timer IRQ */ + struct { + bool level; + } irq; + /* Duplicated state from arch_timer.c for convenience */ u32 host_timer_irq; }; @@ -86,6 +93,8 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); void kvm_timer_update_run(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); +void kvm_timer_init_vm(struct kvm *kvm); + u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); @@ -109,7 +118,8 @@ bool kvm_arch_timer_get_input_level(int vintid); #define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers) -#define timer_irq(ctx) ((ctx)->irq.irq) +#define timer_vm_data(ctx) (&(ctx)->vcpu->kvm->arch.timer_data) +#define timer_irq(ctx) (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)]) u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu, enum kvm_arch_timers tmr, -- cgit v1.2.3 From 1a6511eb8430533920559c5f01f487f4901081cd Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:52 +0100 Subject: KVM: arm64: Elide kern_hyp_va() in VHE-specific parts of the hypervisor For VHE-specific hypervisor code, kern_hyp_va() is a NOP. Actually, it is a whole range of NOPs. It'd be much better if this code simply didn't exist. Let's just do that. Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-13-maz@kernel.org --- arch/arm64/include/asm/kvm_mmu.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 083cc47dca08..27e63c111f78 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -63,6 +63,7 @@ * specific registers encoded in the instructions). 
*/ .macro kern_hyp_va reg +#ifndef __KVM_VHE_HYPERVISOR__ alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask and \reg, \reg, #1 /* mask with va_mask */ ror \reg, \reg, #1 /* rotate to the first tag bit */ @@ -70,6 +71,7 @@ alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask add \reg, \reg, #0, lsl 12 /* insert the top 12 bits of the tag */ ror \reg, \reg, #63 /* rotate back */ alternative_cb_end +#endif .endm /* @@ -127,6 +129,7 @@ void kvm_apply_hyp_relocations(void); static __always_inline unsigned long __kern_hyp_va(unsigned long v) { +#ifndef __KVM_VHE_HYPERVISOR__ asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n" "ror %0, %0, #1\n" "add %0, %0, #0\n" @@ -135,6 +138,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) ARM64_ALWAYS_SYSTEM, kvm_update_va_mask) : "+r" (v)); +#endif return v; } -- cgit v1.2.3 From e9adde432bf7371f1c83f67d9f8d75b95810f124 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:53 +0100 Subject: KVM: arm64: timers: Fast-track CNTPCT_EL0 trap handling Now that it is likely that CNTPCT_EL0 accesses will trap, fast-track the emulation of the counter read which doesn't need more that a simple offsetting. One day, we'll have CNTPOFF everywhere. One day. Suggested-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-14-maz@kernel.org --- arch/arm64/kvm/hyp/include/hyp/switch.h | 36 +++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 07d37ff88a3f..9954368f639d 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -326,6 +327,38 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code) return true; } +static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu) +{ + struct arch_timer_context *ctxt; + u32 sysreg; + u64 val; + + /* + * We only get here for 64bit guests, 32bit guests will hit + * the long and winding road all the way to the standard + * handling. Yes, it sucks to be irrelevant. + */ + sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu)); + + switch (sysreg) { + case SYS_CNTPCT_EL0: + case SYS_CNTPCTSS_EL0: + ctxt = vcpu_ptimer(vcpu); + break; + default: + return false; + } + + val = arch_timer_read_cntpct_el0(); + + if (ctxt->offset.vm_offset) + val -= *kern_hyp_va(ctxt->offset.vm_offset); + + vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val); + __kvm_skip_instr(vcpu); + return true; +} + static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) { if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && @@ -339,6 +372,9 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu))) return kvm_hyp_handle_ptrauth(vcpu, exit_code); + if (kvm_hyp_handle_cntpct(vcpu)) + return true; + return false; } -- cgit v1.2.3 From 476fcd4b7bb54ac959b683f30d0cf305c3e11f3c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:54 +0100 Subject: KVM: arm64: timers: Abstract the number of valid timers per vcpu We so far have a pretty fixed number of timers to take care of. This is about to change as NV brings another two into the picture, and we must be careful not to try and emulate non-valid timers in a given VM. 
For this, abstract the number of timers for a given vcpu behind an accessor, which helpfully returns a constant for now. Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-15-maz@kernel.org --- arch/arm64/kvm/arch_timer.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 1d811735e05f..d3a7902269c1 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -52,6 +52,11 @@ static bool has_cntpoff(void) return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF)); } +static int nr_timers(struct kvm_vcpu *vcpu) +{ + return NR_KVM_TIMERS; +} + u32 timer_get_ctl(struct arch_timer_context *ctxt) { struct kvm_vcpu *vcpu = ctxt->vcpu; @@ -255,7 +260,7 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu) u64 min_delta = ULLONG_MAX; int i; - for (i = 0; i < NR_KVM_TIMERS; i++) { + for (i = 0; i < nr_timers(vcpu); i++) { struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i]; WARN(ctx->loaded, "timer %d loaded\n", i); @@ -815,12 +820,12 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) * resets the timer to be disabled and unmasked and is compliant with * the ARMv7 architecture. */ - for (int i = 0; i < NR_KVM_TIMERS; i++) + for (int i = 0; i < nr_timers(vcpu); i++) timer_set_ctl(vcpu_get_timer(vcpu, i), 0); if (timer->enabled) { - for (int i = 0; i < NR_KVM_TIMERS; i++) + for (int i = 0; i < nr_timers(vcpu); i++) kvm_timer_update_irq(vcpu, false, vcpu_get_timer(vcpu, i)); @@ -1303,7 +1308,7 @@ static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu) mutex_lock(&vcpu->kvm->arch.timer_data.lock); - for (int i = 0; i < NR_KVM_TIMERS; i++) { + for (int i = 0; i < nr_timers(vcpu); i++) { struct arch_timer_context *ctx; int irq; @@ -1319,7 +1324,7 @@ static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu) ppis |= BIT(irq); } - valid = hweight32(ppis) == NR_KVM_TIMERS; + valid = hweight32(ppis) == nr_timers(vcpu); if (valid) set_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE, &vcpu->kvm->arch.flags); @@ -1336,7 +1341,7 @@ bool kvm_arch_timer_get_input_level(int vintid) if (WARN(!vcpu, "No vcpu context!\n")) return false; - for (int i = 0; i < NR_KVM_TIMERS; i++) { + for (int i = 0; i < nr_timers(vcpu); i++) { struct arch_timer_context *ctx; ctx = vcpu_get_timer(vcpu, i); -- cgit v1.2.3 From 1e0eec09d43a55125ff80e40b2d6e2f369a338b9 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:56 +0100 Subject: KVM: arm64: nv: timers: Add a per-timer, per-vcpu offset Being able to set a global offset isn't enough. With NV, we also need to a per-vcpu, per-timer offset (for example, CNTVCT_EL0 being offset by CNTVOFF_EL2). Use a similar method as the VM-wide offset to have a timer point to the shadow register that contains the offset value. 
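[ A worked view of what the two pointers add up to once the NV wiring later in the series is in place (vm_offset pointing at the VM's poffset, vcpu_offset at the shadow CNTVOFF_EL2), with either term taken as zero when its pointer is NULL: ]

	guest CNTVCT = host CNTPCT - timer_data.poffset - CNTVOFF_EL2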
Reviewed-by: Colton Lewis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-17-maz@kernel.org --- arch/arm64/kvm/arch_timer.c | 13 ++++++++++--- arch/arm64/kvm/hyp/include/hyp/switch.h | 2 ++ include/kvm/arm_arch_timer.h | 5 +++++ 3 files changed, 17 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index d3a7902269c1..b87bf182af33 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -89,10 +89,17 @@ u64 timer_get_cval(struct arch_timer_context *ctxt) static u64 timer_get_offset(struct arch_timer_context *ctxt) { - if (ctxt && ctxt->offset.vm_offset) - return *ctxt->offset.vm_offset; + u64 offset = 0; - return 0; + if (!ctxt) + return 0; + + if (ctxt->offset.vm_offset) + offset += *ctxt->offset.vm_offset; + if (ctxt->offset.vcpu_offset) + offset += *ctxt->offset.vcpu_offset; + + return offset; } static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 9954368f639d..d07cbc313889 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -353,6 +353,8 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu) if (ctxt->offset.vm_offset) val -= *kern_hyp_va(ctxt->offset.vm_offset); + if (ctxt->offset.vcpu_offset) + val -= *kern_hyp_va(ctxt->offset.vcpu_offset); vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val); __kvm_skip_instr(vcpu); diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index f093ea9f540d..209da0c2ac9f 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -29,6 +29,11 @@ struct arch_timer_offset { * structure. If NULL, assume a zero offset. */ u64 *vm_offset; + /* + * If set, pointer to one of the offsets in the vcpu's sysreg + * array. If NULL, assume a zero offset. + */ + u64 *vcpu_offset; }; struct arch_timer_vm_data { -- cgit v1.2.3 From 81dc9504a7006b484cfcf074796094ee526b0c45 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 30 Mar 2023 18:47:57 +0100 Subject: KVM: arm64: nv: timers: Support hyp timer emulation Emulating EL2 also means emulating the EL2 timers. To do so, we expand our timer framework to deal with at most 4 timers. At any given time, two timers are using the HW timers, and the two others are purely emulated. The role of deciding which is which at any given time is left to a mapping function which is called every time we need to make such a decision. 
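[ For reference, the mapping implemented by get_timer_map() below boils down to the following; "direct" timers are backed by the hardware timers, "emulated" ones by a hrtimer: ]

	vcpu context              direct (hardware)        emulated (hrtimer)
	NV guest at vEL2          HVTIMER, HPTIMER         VTIMER, PTIMER
	NV guest at vEL1/vEL0     VTIMER, PTIMER           HVTIMER, HPTIMER
	VHE, no NV                VTIMER, PTIMER           none
	nVHE, no NV               VTIMER                   PTIMER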
Reviewed-by: Colton Lewis Co-developed-by: Christoffer Dall Signed-off-by: Christoffer Dall Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230330174800.2677007-18-maz@kernel.org --- arch/arm64/include/asm/kvm_host.h | 4 + arch/arm64/include/uapi/asm/kvm.h | 2 + arch/arm64/kvm/arch_timer.c | 180 ++++++++++++++++++++++++++++++-- arch/arm64/kvm/hyp/include/hyp/switch.h | 15 +++ arch/arm64/kvm/trace_arm.h | 6 +- arch/arm64/kvm/vgic/vgic.c | 15 +++ include/kvm/arm_arch_timer.h | 9 +- include/kvm/arm_vgic.h | 1 + 8 files changed, 220 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 1280154c9ef3..633a7c0750bb 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -369,6 +369,10 @@ enum vcpu_sysreg { TPIDR_EL2, /* EL2 Software Thread ID Register */ CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */ SP_EL2, /* EL2 Stack Pointer */ + CNTHP_CTL_EL2, + CNTHP_CVAL_EL2, + CNTHV_CTL_EL2, + CNTHV_CVAL_EL2, NR_SYS_REGS /* Nothing after this line! */ }; diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 12fb0d8a760a..0921f366c49f 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -420,6 +420,8 @@ enum { #define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 +#define KVM_ARM_VCPU_TIMER_IRQ_HVTIMER 2 +#define KVM_ARM_VCPU_TIMER_IRQ_HPTIMER 3 #define KVM_ARM_VCPU_PVTIME_CTRL 2 #define KVM_ARM_VCPU_PVTIME_IPA 0 diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index b87bf182af33..c5c8cc3c25ae 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -33,6 +34,8 @@ static DEFINE_STATIC_KEY_FALSE(has_gic_active_state); static const u8 default_ppi[] = { [TIMER_PTIMER] = 30, [TIMER_VTIMER] = 27, + [TIMER_HPTIMER] = 26, + [TIMER_HVTIMER] = 28, }; static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx); @@ -46,6 +49,11 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu, static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, struct arch_timer_context *timer, enum kvm_arch_timer_regs treg); +static bool kvm_arch_timer_get_input_level(int vintid); + +static struct irq_ops arch_timer_irq_ops = { + .get_input_level = kvm_arch_timer_get_input_level, +}; static bool has_cntpoff(void) { @@ -54,6 +62,9 @@ static bool has_cntpoff(void) static int nr_timers(struct kvm_vcpu *vcpu) { + if (!vcpu_has_nv(vcpu)) + return NR_KVM_EL0_TIMERS; + return NR_KVM_TIMERS; } @@ -66,6 +77,10 @@ u32 timer_get_ctl(struct arch_timer_context *ctxt) return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0); case TIMER_PTIMER: return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0); + case TIMER_HVTIMER: + return __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2); + case TIMER_HPTIMER: + return __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2); default: WARN_ON(1); return 0; @@ -81,6 +96,10 @@ u64 timer_get_cval(struct arch_timer_context *ctxt) return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0); case TIMER_PTIMER: return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0); + case TIMER_HVTIMER: + return __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2); + case TIMER_HPTIMER: + return __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2); default: WARN_ON(1); return 0; @@ -113,6 +132,12 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl) case TIMER_PTIMER: __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl; break; + case TIMER_HVTIMER: + 
__vcpu_sys_reg(vcpu, CNTHV_CTL_EL2) = ctl; + break; + case TIMER_HPTIMER: + __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2) = ctl; + break; default: WARN_ON(1); } @@ -129,6 +154,12 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval) case TIMER_PTIMER: __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval; break; + case TIMER_HVTIMER: + __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2) = cval; + break; + case TIMER_HPTIMER: + __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = cval; + break; default: WARN_ON(1); } @@ -151,13 +182,27 @@ u64 kvm_phys_timer_read(void) static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map) { - if (has_vhe()) { + if (vcpu_has_nv(vcpu)) { + if (is_hyp_ctxt(vcpu)) { + map->direct_vtimer = vcpu_hvtimer(vcpu); + map->direct_ptimer = vcpu_hptimer(vcpu); + map->emul_vtimer = vcpu_vtimer(vcpu); + map->emul_ptimer = vcpu_ptimer(vcpu); + } else { + map->direct_vtimer = vcpu_vtimer(vcpu); + map->direct_ptimer = vcpu_ptimer(vcpu); + map->emul_vtimer = vcpu_hvtimer(vcpu); + map->emul_ptimer = vcpu_hptimer(vcpu); + } + } else if (has_vhe()) { map->direct_vtimer = vcpu_vtimer(vcpu); map->direct_ptimer = vcpu_ptimer(vcpu); + map->emul_vtimer = NULL; map->emul_ptimer = NULL; } else { map->direct_vtimer = vcpu_vtimer(vcpu); map->direct_ptimer = NULL; + map->emul_vtimer = NULL; map->emul_ptimer = vcpu_ptimer(vcpu); } @@ -252,8 +297,11 @@ static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu) static u64 wfit_delay_ns(struct kvm_vcpu *vcpu) { - struct arch_timer_context *ctx = vcpu_vtimer(vcpu); u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu)); + struct arch_timer_context *ctx; + + ctx = (vcpu_has_nv(vcpu) && is_hyp_ctxt(vcpu)) ? vcpu_hvtimer(vcpu) + : vcpu_vtimer(vcpu); return kvm_counter_compute_delta(ctx, val); } @@ -350,9 +398,11 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) switch (index) { case TIMER_VTIMER: + case TIMER_HVTIMER: cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL); break; case TIMER_PTIMER: + case TIMER_HPTIMER: cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL); break; case NR_KVM_TIMERS: @@ -468,6 +518,7 @@ static void timer_save_state(struct arch_timer_context *ctx) u64 cval; case TIMER_VTIMER: + case TIMER_HVTIMER: timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL)); timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL)); @@ -493,6 +544,7 @@ static void timer_save_state(struct arch_timer_context *ctx) set_cntvoff(0); break; case TIMER_PTIMER: + case TIMER_HPTIMER: timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL)); cval = read_sysreg_el0(SYS_CNTP_CVAL); @@ -536,6 +588,7 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu) */ if (!kvm_timer_irq_can_fire(map.direct_vtimer) && !kvm_timer_irq_can_fire(map.direct_ptimer) && + !kvm_timer_irq_can_fire(map.emul_vtimer) && !kvm_timer_irq_can_fire(map.emul_ptimer) && !vcpu_has_wfit_active(vcpu)) return; @@ -572,12 +625,14 @@ static void timer_restore_state(struct arch_timer_context *ctx) u64 cval, offset; case TIMER_VTIMER: + case TIMER_HVTIMER: set_cntvoff(timer_get_offset(ctx)); write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL); isb(); write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL); break; case TIMER_PTIMER: + case TIMER_HPTIMER: cval = timer_get_cval(ctx); offset = timer_get_offset(ctx); set_cntpoff(offset); @@ -663,6 +718,57 @@ static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu) (_clr) |= (_bit); \ } while (0) +static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu, + struct timer_map *map) +{ + int hw, ret; + + if (!irqchip_in_kernel(vcpu->kvm)) + return; + + /* + * We only ever 
unmap the vtimer irq on a VHE system that runs nested + * virtualization, in which case we have both a valid emul_vtimer, + * emul_ptimer, direct_vtimer, and direct_ptimer. + * + * Since this is called from kvm_timer_vcpu_load(), a change between + * vEL2 and vEL1/0 will have just happened, and the timer_map will + * represent this, and therefore we switch the emul/direct mappings + * below. + */ + hw = kvm_vgic_get_map(vcpu, timer_irq(map->direct_vtimer)); + if (hw < 0) { + kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_vtimer)); + kvm_vgic_unmap_phys_irq(vcpu, timer_irq(map->emul_ptimer)); + + ret = kvm_vgic_map_phys_irq(vcpu, + map->direct_vtimer->host_timer_irq, + timer_irq(map->direct_vtimer), + &arch_timer_irq_ops); + WARN_ON_ONCE(ret); + ret = kvm_vgic_map_phys_irq(vcpu, + map->direct_ptimer->host_timer_irq, + timer_irq(map->direct_ptimer), + &arch_timer_irq_ops); + WARN_ON_ONCE(ret); + + /* + * The virtual offset behaviour is "interresting", as it + * always applies when HCR_EL2.E2H==0, but only when + * accessed from EL1 when HCR_EL2.E2H==1. So make sure we + * track E2H when putting the HV timer in "direct" mode. + */ + if (map->direct_vtimer == vcpu_hvtimer(vcpu)) { + struct arch_timer_offset *offs = &map->direct_vtimer->offset; + + if (vcpu_el2_e2h_is_set(vcpu)) + offs->vcpu_offset = NULL; + else + offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2); + } + } +} + static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map) { bool tpt, tpc; @@ -695,6 +801,22 @@ static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map) if (!has_cntpoff() && timer_get_offset(map->direct_ptimer)) tpt = tpc = true; + /* + * Apply the enable bits that the guest hypervisor has requested for + * its own guest. We can only add traps that wouldn't have been set + * above. + */ + if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) { + u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2); + + /* Use the VHE format for mental sanity */ + if (!vcpu_el2_e2h_is_set(vcpu)) + val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10; + + tpt |= !(val & (CNTHCTL_EL1PCEN << 10)); + tpc |= !(val & (CNTHCTL_EL1PCTEN << 10)); + } + /* * Now that we have collected our requirements, compute the * trap and enable bits. @@ -720,6 +842,9 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) get_timer_map(vcpu, &map); if (static_branch_likely(&has_gic_active_state)) { + if (vcpu_has_nv(vcpu)) + kvm_timer_vcpu_load_nested_switch(vcpu, &map); + kvm_timer_vcpu_load_gic(map.direct_vtimer); if (map.direct_ptimer) kvm_timer_vcpu_load_gic(map.direct_ptimer); @@ -732,6 +857,8 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) timer_restore_state(map.direct_vtimer); if (map.direct_ptimer) timer_restore_state(map.direct_ptimer); + if (map.emul_vtimer) + timer_emulate(map.emul_vtimer); if (map.emul_ptimer) timer_emulate(map.emul_ptimer); @@ -778,6 +905,8 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) * In any case, we re-schedule the hrtimer for the physical timer when * coming back to the VCPU thread in kvm_timer_vcpu_load(). */ + if (map.emul_vtimer) + soft_timer_cancel(&map.emul_vtimer->hrtimer); if (map.emul_ptimer) soft_timer_cancel(&map.emul_ptimer->hrtimer); @@ -830,6 +959,17 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) for (int i = 0; i < nr_timers(vcpu); i++) timer_set_ctl(vcpu_get_timer(vcpu, i), 0); + /* + * A vcpu running at EL2 is in charge of the offset applied to + * the virtual timer, so use the physical VM offset, and point + * the vcpu offset to CNTVOFF_EL2. 
+ */ + if (vcpu_has_nv(vcpu)) { + struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset; + + offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2); + offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset; + } if (timer->enabled) { for (int i = 0; i < nr_timers(vcpu); i++) @@ -843,6 +983,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) } } + if (map.emul_vtimer) + soft_timer_cancel(&map.emul_vtimer->hrtimer); if (map.emul_ptimer) soft_timer_cancel(&map.emul_ptimer->hrtimer); @@ -866,9 +1008,11 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid) switch (timerid) { case TIMER_PTIMER: + case TIMER_HPTIMER: ctxt->host_timer_irq = host_ptimer_irq; break; case TIMER_VTIMER: + case TIMER_HVTIMER: ctxt->host_timer_irq = host_vtimer_irq; break; } @@ -1020,6 +1164,10 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, val = kvm_phys_timer_read() - timer_get_offset(timer); break; + case TIMER_REG_VOFF: + val = *timer->offset.vcpu_offset; + break; + default: BUG(); } @@ -1038,7 +1186,7 @@ u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu, get_timer_map(vcpu, &map); timer = vcpu_get_timer(vcpu, tmr); - if (timer == map.emul_ptimer) + if (timer == map.emul_vtimer || timer == map.emul_ptimer) return kvm_arm_timer_read(vcpu, timer, treg); preempt_disable(); @@ -1070,6 +1218,10 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu, timer_set_cval(timer, val); break; + case TIMER_REG_VOFF: + *timer->offset.vcpu_offset = val; + break; + default: BUG(); } @@ -1085,7 +1237,7 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu, get_timer_map(vcpu, &map); timer = vcpu_get_timer(vcpu, tmr); - if (timer == map.emul_ptimer) { + if (timer == map.emul_vtimer || timer == map.emul_ptimer) { soft_timer_cancel(&timer->hrtimer); kvm_arm_timer_write(vcpu, timer, treg, val); timer_emulate(timer); @@ -1165,10 +1317,6 @@ static const struct irq_domain_ops timer_domain_ops = { .free = timer_irq_domain_free, }; -static struct irq_ops arch_timer_irq_ops = { - .get_input_level = kvm_arch_timer_get_input_level, -}; - static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags) { *flags = irq_get_trigger_type(virq); @@ -1341,7 +1489,7 @@ static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu) return valid; } -bool kvm_arch_timer_get_input_level(int vintid) +static bool kvm_arch_timer_get_input_level(int vintid) { struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); @@ -1444,6 +1592,12 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) case KVM_ARM_VCPU_TIMER_IRQ_PTIMER: idx = TIMER_PTIMER; break; + case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER: + idx = TIMER_HVTIMER; + break; + case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER: + idx = TIMER_HPTIMER; + break; default: ret = -ENXIO; goto out; @@ -1474,6 +1628,12 @@ int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) case KVM_ARM_VCPU_TIMER_IRQ_PTIMER: timer = vcpu_ptimer(vcpu); break; + case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER: + timer = vcpu_hvtimer(vcpu); + break; + case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER: + timer = vcpu_hptimer(vcpu); + break; default: return -ENXIO; } @@ -1487,6 +1647,8 @@ int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) switch (attr->attr) { case KVM_ARM_VCPU_TIMER_IRQ_VTIMER: case KVM_ARM_VCPU_TIMER_IRQ_PTIMER: + case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER: + case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER: return 0; } diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index d07cbc313889..c41166f1a1dd 100644 --- 
a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -343,6 +343,21 @@ static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu) switch (sysreg) { case SYS_CNTPCT_EL0: case SYS_CNTPCTSS_EL0: + if (vcpu_has_nv(vcpu)) { + if (is_hyp_ctxt(vcpu)) { + ctxt = vcpu_hptimer(vcpu); + break; + } + + /* Check for guest hypervisor trapping */ + val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2); + if (!vcpu_el2_e2h_is_set(vcpu)) + val = (val & CNTHCTL_EL1PCTEN) << 10; + + if (!(val & (CNTHCTL_EL1PCTEN << 10))) + return false; + } + ctxt = vcpu_ptimer(vcpu); break; default: diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h index f3e46a976125..6ce5c025218d 100644 --- a/arch/arm64/kvm/trace_arm.h +++ b/arch/arm64/kvm/trace_arm.h @@ -206,6 +206,7 @@ TRACE_EVENT(kvm_get_timer_map, __field( unsigned long, vcpu_id ) __field( int, direct_vtimer ) __field( int, direct_ptimer ) + __field( int, emul_vtimer ) __field( int, emul_ptimer ) ), @@ -214,14 +215,17 @@ TRACE_EVENT(kvm_get_timer_map, __entry->direct_vtimer = arch_timer_ctx_index(map->direct_vtimer); __entry->direct_ptimer = (map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1; + __entry->emul_vtimer = + (map->emul_vtimer) ? arch_timer_ctx_index(map->emul_vtimer) : -1; __entry->emul_ptimer = (map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1; ), - TP_printk("VCPU: %ld, dv: %d, dp: %d, ep: %d", + TP_printk("VCPU: %ld, dv: %d, dp: %d, ev: %d, ep: %d", __entry->vcpu_id, __entry->direct_vtimer, __entry->direct_ptimer, + __entry->emul_vtimer, __entry->emul_ptimer) ); diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index d97e6080b421..ae491ef97188 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -573,6 +573,21 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid) return 0; } +int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid) +{ + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); + unsigned long flags; + int ret = -1; + + raw_spin_lock_irqsave(&irq->irq_lock, flags); + if (irq->hw) + ret = irq->hwintid; + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); + + vgic_put_irq(vcpu->kvm, irq); + return ret; +} + /** * kvm_vgic_set_owner - Set the owner of an interrupt for a VM * diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 209da0c2ac9f..52008f5cff06 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -13,6 +13,9 @@ enum kvm_arch_timers { TIMER_PTIMER, TIMER_VTIMER, + NR_KVM_EL0_TIMERS, + TIMER_HVTIMER = NR_KVM_EL0_TIMERS, + TIMER_HPTIMER, NR_KVM_TIMERS }; @@ -21,6 +24,7 @@ enum kvm_arch_timer_regs { TIMER_REG_CVAL, TIMER_REG_TVAL, TIMER_REG_CTL, + TIMER_REG_VOFF, }; struct arch_timer_offset { @@ -76,6 +80,7 @@ struct arch_timer_context { struct timer_map { struct arch_timer_context *direct_vtimer; struct arch_timer_context *direct_ptimer; + struct arch_timer_context *emul_vtimer; struct arch_timer_context *emul_ptimer; }; @@ -114,12 +119,12 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); void kvm_timer_init_vhe(void); -bool kvm_arch_timer_get_input_level(int vintid); - #define vcpu_timer(v) (&(v)->arch.timer_cpu) #define vcpu_get_timer(v,t) (&vcpu_timer(v)->timers[(t)]) #define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER]) #define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER]) +#define vcpu_hvtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HVTIMER]) +#define vcpu_hptimer(v) 
(&(v)->arch.timer_cpu.timers[TIMER_HPTIMER]) #define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers) diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index d3ad51fde9db..402b545959af 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -380,6 +380,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, u32 vintid, struct irq_ops *ops); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid); +int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid); bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid); int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From c0d0ce9b5a851895f34fd401c9dddc70616711a4 Mon Sep 17 00:00:00 2001 From: Xinghui Li Date: Mon, 3 Apr 2023 17:52:00 +0800 Subject: KVM: SVM: Remove a duplicate definition of VMCB_AVIC_APIC_BAR_MASK VMCB_AVIC_APIC_BAR_MASK is defined twice with the same value in svm.h, which is meaningless. Delete the duplicate one. Fixes: 391503528257 ("KVM: x86: SVM: move avic definitions from AMD's spec to svm.h") Signed-off-by: Xinghui Li Reviewed-by: Like Xu Link: https://lore.kernel.org/r/20230403095200.1391782-1-korantwork@gmail.com [sean: tweak shortlog] Signed-off-by: Sean Christopherson --- arch/x86/include/asm/svm.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 25306a420b84..e7c7379d6ac7 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -287,7 +287,6 @@ static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID); #define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF) -#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL struct vmcb_seg { -- cgit v1.2.3 From 41e07665f1a683064f4c69b0d652dff1baf5d689 Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:09 -0700 Subject: KVM: x86/mmu: Add a helper function to check if an SPTE needs atomic write Move conditions in kvm_tdp_mmu_write_spte() to check if an SPTE should be written atomically or not to a separate function. This new function, kvm_tdp_mmu_spte_need_atomic_write(), will be used in future commits to optimize clearing bits in SPTEs. Signed-off-by: Vipin Sharma Reviewed-by: David Matlack Reviewed-by: Ben Gardon Link: https://lore.kernel.org/r/20230321220021.2119033-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_iter.h | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index f0af385c56e0..c11c5d00b2c1 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -29,23 +29,29 @@ static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte) WRITE_ONCE(*rcu_dereference(sptep), new_spte); } +/* + * SPTEs must be modified atomically if they are shadow-present, leaf + * SPTEs, and have volatile bits, i.e. has bits that can be set outside + * of mmu_lock. The Writable bit can be set by KVM's fast page fault + * handler, and Accessed and Dirty bits can be set by the CPU. + * + * Note, non-leaf SPTEs do have Accessed bits and those bits are + * technically volatile, but KVM doesn't consume the Accessed bit of + * non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit. 
This + * logic needs to be reassessed if KVM were to use non-leaf Accessed + * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs. + */ +static inline bool kvm_tdp_mmu_spte_need_atomic_write(u64 old_spte, int level) +{ + return is_shadow_present_pte(old_spte) && + is_last_spte(old_spte, level) && + spte_has_volatile_bits(old_spte); +} + static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte, u64 new_spte, int level) { - /* - * Atomically write the SPTE if it is a shadow-present, leaf SPTE with - * volatile bits, i.e. has bits that can be set outside of mmu_lock. - * The Writable bit can be set by KVM's fast page fault handler, and - * Accessed and Dirty bits can be set by the CPU. - * - * Note, non-leaf SPTEs do have Accessed bits and those bits are - * technically volatile, but KVM doesn't consume the Accessed bit of - * non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit. This - * logic needs to be reassessed if KVM were to use non-leaf Accessed - * bits, e.g. to skip stepping down into child SPTEs when aging SPTEs. - */ - if (is_shadow_present_pte(old_spte) && is_last_spte(old_spte, level) && - spte_has_volatile_bits(old_spte)) + if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte); __kvm_tdp_mmu_write_spte(sptep, new_spte); -- cgit v1.2.3 From 5982a5392663b30f57ee90b0372c19a7e9cb655a Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:10 -0700 Subject: KVM: x86/mmu: Use kvm_ad_enabled() to determine if TDP MMU SPTEs need wrprot Use the constant-after-module-load kvm_ad_enabled() to check if SPTEs in the TDP MMU need to be write-protected when clearing accessed/dirty status instead of manually checking every SPTE. The per-SPTE A/D enabling is specific to nested EPT MMUs, i.e. when KVM is using EPT A/D bits but L1 is not, and so cannot happen in the TDP MMU (which is non-nested only). Keep the original code as sanity checks buried under MMU_WARN_ON(). MMU_WARN_ON() is more or less useless at the moment, but there are plans to change that. 
Link: https://lore.kernel.org/all/Yz4Qi7cn7TWTWQjj@google.com Signed-off-by: Vipin Sharma [sean: split to separate patch, apply to dirty path, write changelog] Link: https://lore.kernel.org/r/20230321220021.2119033-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 7c25dbf32ecc..5a5642650c3e 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1621,7 +1621,10 @@ retry: if (!is_shadow_present_pte(iter.old_spte)) continue; - if (spte_ad_need_write_protect(iter.old_spte)) { + MMU_WARN_ON(kvm_ad_enabled() && + spte_ad_need_write_protect(iter.old_spte)); + + if (!kvm_ad_enabled()) { if (is_writable_pte(iter.old_spte)) new_spte = iter.old_spte & ~PT_WRITABLE_MASK; else @@ -1685,13 +1688,16 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, if (!mask) break; + MMU_WARN_ON(kvm_ad_enabled() && + spte_ad_need_write_protect(iter.old_spte)); + if (iter.level > PG_LEVEL_4K || !(mask & (1UL << (iter.gfn - gfn)))) continue; mask &= ~(1UL << (iter.gfn - gfn)); - if (wrprot || spte_ad_need_write_protect(iter.old_spte)) { + if (wrprot || !kvm_ad_enabled()) { if (is_writable_pte(iter.old_spte)) new_spte = iter.old_spte & ~PT_WRITABLE_MASK; else -- cgit v1.2.3 From 697c89bed94effde145d84e618142dd89b2b54af Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:11 -0700 Subject: KVM: x86/mmu: Consolidate Dirty vs. Writable clearing logic in TDP MMU Deduplicate the guts of the TDP MMU's clearing of dirty status by snapshotting whether to check+clear the Dirty bit vs. the Writable bit, which is the only difference between the two flavors of dirty tracking. Note, kvm_ad_enabled() is just a wrapper for shadow_accessed_mask, i.e. is constant after kvm-{intel,amd}.ko is loaded. Link: https://lore.kernel.org/all/Yz4Qi7cn7TWTWQjj@google.com Signed-off-by: Vipin Sharma [sean: split to separate patch, apply to dirty log, write changelog] Link: https://lore.kernel.org/r/20230321220021.2119033-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 35 +++++++++-------------------------- 1 file changed, 9 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 5a5642650c3e..b32c9ba05c89 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1607,8 +1607,8 @@ void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end) { + u64 dbit = kvm_ad_enabled() ? 
shadow_dirty_mask : PT_WRITABLE_MASK; struct tdp_iter iter; - u64 new_spte; bool spte_set = false; rcu_read_lock(); @@ -1624,19 +1624,10 @@ retry: MMU_WARN_ON(kvm_ad_enabled() && spte_ad_need_write_protect(iter.old_spte)); - if (!kvm_ad_enabled()) { - if (is_writable_pte(iter.old_spte)) - new_spte = iter.old_spte & ~PT_WRITABLE_MASK; - else - continue; - } else { - if (iter.old_spte & shadow_dirty_mask) - new_spte = iter.old_spte & ~shadow_dirty_mask; - else - continue; - } + if (!(iter.old_spte & dbit)) + continue; - if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte)) + if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit)) goto retry; spte_set = true; @@ -1678,8 +1669,9 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn, unsigned long mask, bool wrprot) { + u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK : + shadow_dirty_mask; struct tdp_iter iter; - u64 new_spte; rcu_read_lock(); @@ -1697,19 +1689,10 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, mask &= ~(1UL << (iter.gfn - gfn)); - if (wrprot || !kvm_ad_enabled()) { - if (is_writable_pte(iter.old_spte)) - new_spte = iter.old_spte & ~PT_WRITABLE_MASK; - else - continue; - } else { - if (iter.old_spte & shadow_dirty_mask) - new_spte = iter.old_spte & ~shadow_dirty_mask; - else - continue; - } + if (!(iter.old_spte & dbit)) + continue; - tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte); + tdp_mmu_set_spte_no_dirty_log(kvm, &iter, iter.old_spte & ~dbit); } rcu_read_unlock(); -- cgit v1.2.3 From 89c313f20c1ed30e04cedae735994c902ee93ddb Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:12 -0700 Subject: KVM: x86/mmu: Atomically clear SPTE dirty state in the clear-dirty-log flow Optimize the clearing of dirty state in TDP MMU SPTEs by doing an atomic-AND (on SPTEs that have volatile bits) instead of the full XCHG that currently ends up being invoked (see kvm_tdp_mmu_write_spte()). Clearing _only_ the bit in question will allow KVM to skip the many irrelevant checks in __handle_changed_spte() by avoiding any collateral damage due to the XCHG writing all SPTE bits, e.g. the XCHG could race with fast_page_fault() setting the W-bit and the CPU setting the D-bit, and thus incorrectly drop the CPU's D-bit update. 
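[ A stand-alone illustration of the race described above, not KVM code: plain C11 atomics with an assumed Dirty-bit position, showing why a full exchange can lose a concurrent update to other SPTE bits while an atomic AND cannot. ]

	#include <stdatomic.h>
	#include <stdint.h>

	#define DIRTY_BIT	(1ull << 9)	/* illustrative position only */

	/*
	 * Lossy: the value written back is a snapshot taken before the
	 * exchange, so a Writable/Dirty bit set by another agent (e.g. the
	 * fast page fault path or the CPU) in the window between the load
	 * and the xchg is silently overwritten.
	 */
	static uint64_t clear_dirty_xchg(_Atomic uint64_t *spte)
	{
		uint64_t old = atomic_load(spte);

		return atomic_exchange(spte, old & ~DIRTY_BIT);
	}

	/*
	 * Safe: the read-modify-write clears only the Dirty bit, so
	 * concurrent updates to every other bit survive.
	 */
	static uint64_t clear_dirty_and(_Atomic uint64_t *spte)
	{
		return atomic_fetch_and(spte, ~DIRTY_BIT);
	}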
Link: https://lore.kernel.org/all/Y9hXmz%2FnDOr1hQal@google.com Signed-off-by: Vipin Sharma Reviewed-by: David Matlack [sean: split the switch to atomic-AND to a separate patch] Link: https://lore.kernel.org/r/20230321220021.2119033-5-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_iter.h | 14 ++++++++++++++ arch/x86/kvm/mmu/tdp_mmu.c | 16 ++++++++-------- 2 files changed, 22 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index c11c5d00b2c1..fae559559a80 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -58,6 +58,20 @@ static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte, return old_spte; } +static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte, + u64 mask, int level) +{ + atomic64_t *sptep_atomic; + + if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) { + sptep_atomic = (atomic64_t *)rcu_dereference(sptep); + return (u64)atomic64_fetch_and(~mask, sptep_atomic); + } + + __kvm_tdp_mmu_write_spte(sptep, old_spte & ~mask); + return old_spte; +} + /* * A TDP iterator performs a pre-order walk over a TDP paging structure. */ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index b32c9ba05c89..a70cc1dae18a 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -770,13 +770,6 @@ static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm, _tdp_mmu_set_spte(kvm, iter, new_spte, false, true); } -static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm, - struct tdp_iter *iter, - u64 new_spte) -{ - _tdp_mmu_set_spte(kvm, iter, new_spte, true, false); -} - #define tdp_root_for_each_pte(_iter, _root, _start, _end) \ for_each_tdp_pte(_iter, _root, _start, _end) @@ -1692,7 +1685,14 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, if (!(iter.old_spte & dbit)) continue; - tdp_mmu_set_spte_no_dirty_log(kvm, &iter, iter.old_spte & ~dbit); + iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep, + iter.old_spte, dbit, + iter.level); + + __handle_changed_spte(kvm, iter.as_id, iter.gfn, iter.old_spte, + iter.old_spte & ~dbit, iter.level, false); + handle_changed_spte_acc_track(iter.old_spte, iter.old_spte & ~dbit, + iter.level); } rcu_read_unlock(); -- cgit v1.2.3 From cf05e8c7325e341d7e1b1920fa580a130ec2c350 Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:13 -0700 Subject: KVM: x86/mmu: Drop access tracking checks when clearing TDP MMU dirty bits Drop the unnecessary call to handle access-tracking changes when clearing the dirty status of TDP MMU SPTEs. Neither the Dirty bit nor the Writable bit has any impact on the accessed state of a page, i.e. clearing only the aforementioned bits doesn't make an accessed SPTE suddently not accessed. 
Signed-off-by: Vipin Sharma [sean: split to separate patch, write changelog] Link: https://lore.kernel.org/r/20230321220021.2119033-6-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index a70cc1dae18a..950c5d23ecee 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1691,8 +1691,6 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, __handle_changed_spte(kvm, iter.as_id, iter.gfn, iter.old_spte, iter.old_spte & ~dbit, iter.level, false); - handle_changed_spte_acc_track(iter.old_spte, iter.old_spte & ~dbit, - iter.level); } rcu_read_unlock(); -- cgit v1.2.3 From 1e0f42985ffad5c60e7a0a627405e1ff00208289 Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:14 -0700 Subject: KVM: x86/mmu: Bypass __handle_changed_spte() when clearing TDP MMU dirty bits Drop everything except marking the PFN dirty and the relevant tracepoint parts of __handle_changed_spte() when clearing the dirty status of gfns in the TDP MMU. Clearing only the Dirty (or Writable) bit doesn't affect the SPTE's shadow-present status, whether or not the SPTE is a leaf, or change the SPTE's PFN. I.e. other than marking the PFN dirty, none of the functional updates handled by __handle_changed_spte() are relevant. Losing __handle_changed_spte()'s sanity checks does mean that a bug could theoretically go unnoticed, but that scenario is extremely unlikely, e.g. would effectively require a misconfigured MMU or a locking bug elsewhere. Opportunistically remove a comment blurb from __handle_changed_spte() about all modifications to TDP MMU SPTEs needing to invoke said function; that "rule" hasn't been true since fast page fault support was added for the TDP MMU (and perhaps even before). Tested on a VM (160 vCPUs, 160 GB memory) and found that performance of clear dirty log stage improved by ~40% in dirty_log_perf_test (with the full optimization applied). Before optimization: -------------------- Iteration 1 clear dirty log time: 3.638543593s Iteration 2 clear dirty log time: 3.145032742s Iteration 3 clear dirty log time: 3.142340358s Clear dirty log over 3 iterations took 9.925916693s. (Avg 3.308638897s/iteration) After optimization: ------------------- Iteration 1 clear dirty log time: 2.318988110s Iteration 2 clear dirty log time: 1.794470164s Iteration 3 clear dirty log time: 1.791668628s Clear dirty log over 3 iterations took 5.905126902s. (Avg 1.968375634s/iteration) Link: https://lore.kernel.org/all/Y9hXmz%2FnDOr1hQal@google.com Signed-off-by: Vipin Sharma Reviewed-by: David Matlack [sean: split the switch to atomic-AND to a separate patch] Link: https://lore.kernel.org/r/20230321220021.2119033-7-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 950c5d23ecee..467931c43968 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -517,7 +517,6 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared) * threads that might be modifying SPTEs. * * Handle bookkeeping that might result from the modification of a SPTE. - * This function must be called for all TDP SPTE modifications.
*/ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, u64 old_spte, u64 new_spte, int level, @@ -1689,8 +1688,10 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, iter.old_spte, dbit, iter.level); - __handle_changed_spte(kvm, iter.as_id, iter.gfn, iter.old_spte, - iter.old_spte & ~dbit, iter.level, false); + trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level, + iter.old_spte, + iter.old_spte & ~dbit); + kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte)); } rcu_read_unlock(); -- cgit v1.2.3 From e73008705d0c88ac69e3fe262bb03d86af23c521 Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:15 -0700 Subject: KVM: x86/mmu: Remove "record_dirty_log" in __tdp_mmu_set_spte() Remove bool parameter "record_dirty_log" from __tdp_mmu_set_spte() and refactor the code as this variable is always set to true by its caller. Signed-off-by: Vipin Sharma Reviewed-by: David Matlack Link: https://lore.kernel.org/r/20230321220021.2119033-8-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 467931c43968..3cc81fa22b7f 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -708,18 +708,13 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, * notifier for access tracking. Leaving record_acc_track * unset in that case prevents page accesses from being * double counted. - * @record_dirty_log: Record the page as dirty in the dirty bitmap if - * appropriate for the change being made. Should be set - * unless performing certain dirty logging operations. - * Leaving record_dirty_log unset in that case prevents page - * writes from being double counted. * * Returns the old SPTE value, which _may_ be different than @old_spte if the * SPTE had voldatile bits. 
*/ static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, u64 old_spte, u64 new_spte, gfn_t gfn, int level, - bool record_acc_track, bool record_dirty_log) + bool record_acc_track) { lockdep_assert_held_write(&kvm->mmu_lock); @@ -738,35 +733,34 @@ static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, if (record_acc_track) handle_changed_spte_acc_track(old_spte, new_spte, level); - if (record_dirty_log) - handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, - new_spte, level); + + handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, new_spte, + level); return old_spte; } static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, - u64 new_spte, bool record_acc_track, - bool record_dirty_log) + u64 new_spte, bool record_acc_track) { WARN_ON_ONCE(iter->yielded); iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, iter->old_spte, new_spte, iter->gfn, iter->level, - record_acc_track, record_dirty_log); + record_acc_track); } static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte) { - _tdp_mmu_set_spte(kvm, iter, new_spte, true, true); + _tdp_mmu_set_spte(kvm, iter, new_spte, true); } static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm, struct tdp_iter *iter, u64 new_spte) { - _tdp_mmu_set_spte(kvm, iter, new_spte, false, true); + _tdp_mmu_set_spte(kvm, iter, new_spte, false); } #define tdp_root_for_each_pte(_iter, _root, _start, _end) \ @@ -916,7 +910,7 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) return false; __tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0, - sp->gfn, sp->role.level + 1, true, true); + sp->gfn, sp->role.level + 1, true); return true; } -- cgit v1.2.3 From 7ee131e3a3c38dc7939ce35861a71f4ee55d9b02 Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:16 -0700 Subject: KVM: x86/mmu: Clear only A-bit (if enabled) when aging TDP MMU SPTEs Use tdp_mmu_clear_spte_bits() when clearing the Accessed bit in TDP MMU SPTEs so as to use an atomic-AND instead of XCHG to clear the A-bit. Similar to the D-bit story, this will allow KVM to bypass __handle_changed_spte() by ensuring only the A-bit is modified. Link: https://lore.kernel.org/all/Y9HcHRBShQgjxsQb@google.com Signed-off-by: Vipin Sharma Reviewed-by: David Matlack [sean: massage changelog] Link: https://lore.kernel.org/r/20230321220021.2119033-9-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 3cc81fa22b7f..adbdfed287cc 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -756,13 +756,6 @@ static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, _tdp_mmu_set_spte(kvm, iter, new_spte, true); } -static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm, - struct tdp_iter *iter, - u64 new_spte) -{ - _tdp_mmu_set_spte(kvm, iter, new_spte, false); -} - #define tdp_root_for_each_pte(_iter, _root, _start, _end) \ for_each_tdp_pte(_iter, _root, _start, _end) @@ -1248,33 +1241,44 @@ static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, /* * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero * if any of the GFNs in the range have been accessed. 
+ * + * No need to mark the corresponding PFN as accessed as this call is coming + * from the clear_young() or clear_flush_young() notifier, which uses the + * return value to determine if the page has been accessed. */ static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, struct kvm_gfn_range *range) { - u64 new_spte = 0; + u64 new_spte; /* If we have a non-accessed entry we don't need to change the pte. */ if (!is_accessed_spte(iter->old_spte)) return false; - new_spte = iter->old_spte; - - if (spte_ad_enabled(new_spte)) { - new_spte &= ~shadow_accessed_mask; + if (spte_ad_enabled(iter->old_spte)) { + iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep, + iter->old_spte, + shadow_accessed_mask, + iter->level); + new_spte = iter->old_spte & ~shadow_accessed_mask; } else { /* * Capture the dirty status of the page, so that it doesn't get * lost when the SPTE is marked for access tracking. */ - if (is_writable_pte(new_spte)) - kvm_set_pfn_dirty(spte_to_pfn(new_spte)); + if (is_writable_pte(iter->old_spte)) + kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte)); - new_spte = mark_spte_for_access_track(new_spte); + new_spte = mark_spte_for_access_track(iter->old_spte); + iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep, + iter->old_spte, new_spte, + iter->level); } - tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte); - + __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, + new_spte, iter->level, false); + handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn, + iter->old_spte, new_spte, iter->level); return true; } -- cgit v1.2.3 From 6141df067d04599706994b616e1a4b4c19ae5d07 Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:17 -0700 Subject: KVM: x86/mmu: Drop unnecessary dirty log checks when aging TDP MMU SPTEs Drop the unnecessary call to handle dirty log updates when aging TDP MMU SPTEs, as neither clearing the Accessed bit nor marking a SPTE for access tracking can _set_ the Writable bit, i.e. can't trigger marking a gfn dirty in its memslot. The access tracking path can _clear_ the Writable bit, e.g. if the XCHG races with fast_page_fault() and writes the stale value without the Writable bit set, but clearing the Writable bit outside of mmu_lock is not allowed, i.e. access tracking can't spuriously set the Writable bit. 
Signed-off-by: Vipin Sharma [sean: split to separate patch, apply to dirty path, write changelog] Link: https://lore.kernel.org/r/20230321220021.2119033-10-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index adbdfed287cc..29bb97ff266e 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1277,8 +1277,6 @@ static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, new_spte, iter->level, false); - handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn, - iter->old_spte, new_spte, iter->level); return true; } -- cgit v1.2.3 From 891f115960682b84440bdfb1ea72ab2824206db7 Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:18 -0700 Subject: KVM: x86/mmu: Bypass __handle_changed_spte() when aging TDP MMU SPTEs Drop everything except the "tdp_mmu_spte_changed" tracepoint part of __handle_changed_spte() when aging SPTEs in the TDP MMU, as clearing the accessed status doesn't affect the SPTE's shadow-present status, whether or not the SPTE is a leaf, or change the PFN. I.e. none of the functional updates handled by __handle_changed_spte() are relevant. Losing __handle_changed_spte()'s sanity checks does mean that a bug could theoretically go unnoticed, but that scenario is extremely unlikely, e.g. would effectively require a misconfigured MMU or a locking bug elsewhere. Link: https://lore.kernel.org/all/Y9HcHRBShQgjxsQb@google.com Signed-off-by: Vipin Sharma Reviewed-by: David Matlack [sean: massage changelog] Link: https://lore.kernel.org/r/20230321220021.2119033-11-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 29bb97ff266e..cdfb67ef5800 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1275,8 +1275,8 @@ static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, iter->level); } - __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, - new_spte, iter->level, false); + trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level, + iter->old_spte, new_spte); return true; } -- cgit v1.2.3 From 0b7cc2547d5329b996b6687e04a82ed4eef1adda Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:19 -0700 Subject: KVM: x86/mmu: Remove "record_acc_track" in __tdp_mmu_set_spte() Remove bool parameter "record_acc_track" from __tdp_mmu_set_spte() and refactor the code. This variable is always set to true by its caller. Remove single and double underscore prefix from tdp_mmu_set_spte() related APIs: 1. Change __tdp_mmu_set_spte() to tdp_mmu_set_spte() 2.
Change _tdp_mmu_set_spte() to tdp_mmu_iter_set_spte() Signed-off-by: Vipin Sharma Reviewed-by: David Matlack Link: https://lore.kernel.org/r/20230321220021.2119033-12-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 51 ++++++++++++++++------------------------------ 1 file changed, 17 insertions(+), 34 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index cdfb67ef5800..9649e0fe4302 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -695,7 +695,7 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, /* - * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping + * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping * @kvm: KVM instance * @as_id: Address space ID, i.e. regular vs. SMM * @sptep: Pointer to the SPTE @@ -703,18 +703,12 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm, * @new_spte: The new value that will be set for the SPTE * @gfn: The base GFN that was (or will be) mapped by the SPTE * @level: The level _containing_ the SPTE (its parent PT's level) - * @record_acc_track: Notify the MM subsystem of changes to the accessed state - * of the page. Should be set unless handling an MMU - * notifier for access tracking. Leaving record_acc_track - * unset in that case prevents page accesses from being - * double counted. * * Returns the old SPTE value, which _may_ be different than @old_spte if the * SPTE had voldatile bits. */ -static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, - u64 old_spte, u64 new_spte, gfn_t gfn, int level, - bool record_acc_track) +static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, + u64 old_spte, u64 new_spte, gfn_t gfn, int level) { lockdep_assert_held_write(&kvm->mmu_lock); @@ -730,30 +724,19 @@ static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level); __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false); - - if (record_acc_track) - handle_changed_spte_acc_track(old_spte, new_spte, level); - + handle_changed_spte_acc_track(old_spte, new_spte, level); handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, new_spte, level); return old_spte; } -static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, - u64 new_spte, bool record_acc_track) +static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter, + u64 new_spte) { WARN_ON_ONCE(iter->yielded); - - iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, - iter->old_spte, new_spte, - iter->gfn, iter->level, - record_acc_track); -} - -static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter, - u64 new_spte) -{ - _tdp_mmu_set_spte(kvm, iter, new_spte, true); + iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, + iter->old_spte, new_spte, + iter->gfn, iter->level); } #define tdp_root_for_each_pte(_iter, _root, _start, _end) \ @@ -845,7 +828,7 @@ retry: continue; if (!shared) - tdp_mmu_set_spte(kvm, &iter, 0); + tdp_mmu_iter_set_spte(kvm, &iter, 0); else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0)) goto retry; } @@ -902,8 +885,8 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte))) return false; - __tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0, - sp->gfn, sp->role.level + 1, true); + tdp_mmu_set_spte(kvm, 
kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0, + sp->gfn, sp->role.level + 1); return true; } @@ -937,7 +920,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, !is_last_spte(iter.old_spte, iter.level)) continue; - tdp_mmu_set_spte(kvm, &iter, 0); + tdp_mmu_iter_set_spte(kvm, &iter, 0); flush = true; } @@ -1107,7 +1090,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, if (ret) return ret; } else { - tdp_mmu_set_spte(kvm, iter, spte); + tdp_mmu_iter_set_spte(kvm, iter, spte); } tdp_account_mmu_page(kvm, sp); @@ -1314,13 +1297,13 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, * invariant that the PFN of a present * leaf SPTE can never change. * See __handle_changed_spte(). */ - tdp_mmu_set_spte(kvm, iter, 0); + tdp_mmu_iter_set_spte(kvm, iter, 0); if (!pte_write(range->pte)) { new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte, pte_pfn(range->pte)); - tdp_mmu_set_spte(kvm, iter, new_spte); + tdp_mmu_iter_set_spte(kvm, iter, new_spte); } return true; @@ -1805,7 +1788,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, if (new_spte == iter.old_spte) break; - tdp_mmu_set_spte(kvm, &iter, new_spte); + tdp_mmu_iter_set_spte(kvm, &iter, new_spte); spte_set = true; } -- cgit v1.2.3 From 1f9973456e802199e863030ed09493419a02f6c7 Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:20 -0700 Subject: KVM: x86/mmu: Remove handle_changed_spte_dirty_log() Remove handle_changed_spte_dirty_log() as there is no code flow which sets 4KiB SPTE writable and hit this path. This function marks the page dirty in a memslot only if new SPTE is 4KiB in size and writable. Current users of handle_changed_spte_dirty_log() are: 1. set_spte_gfn() - Create only non writable SPTEs. 2. write_protect_gfn() - Change an SPTE to non writable. 3. zap leaf and roots APIs - Everything is 0. 4. handle_removed_pt() - Sets SPTEs to REMOVED_SPTE 5. tdp_mmu_link_sp() - Makes non leaf SPTEs. There is also no path which creates a writable 4KiB without going through make_spte() and this functions takes care of marking SPTE dirty in the memslot if it is PT_WRITABLE. 
Signed-off-by: Vipin Sharma Reviewed-by: David Matlack [sean: add blurb to __handle_changed_spte()'s comment] Link: https://lore.kernel.org/r/20230321220021.2119033-13-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 26 +++----------------------- 1 file changed, 3 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 9649e0fe4302..e8ee49b6da5b 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -345,24 +345,6 @@ static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level) kvm_set_pfn_accessed(spte_to_pfn(old_spte)); } -static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, - u64 old_spte, u64 new_spte, int level) -{ - bool pfn_changed; - struct kvm_memory_slot *slot; - - if (level > PG_LEVEL_4K) - return; - - pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte); - - if ((!is_writable_pte(old_spte) || pfn_changed) && - is_writable_pte(new_spte)) { - slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn); - mark_page_dirty_in_slot(kvm, slot, gfn); - } -} - static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) { kvm_account_pgtable_pages((void *)sp->spt, +1); @@ -516,7 +498,9 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared) * the MMU lock and the operation must synchronize with other * threads that might be modifying SPTEs. * - * Handle bookkeeping that might result from the modification of a SPTE. + * Handle bookkeeping that might result from the modification of a SPTE. Note, + * dirty logging updates are handled in common code, not here (see make_spte() + * and fast_pf_fix_direct_spte()). */ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, u64 old_spte, u64 new_spte, int level, @@ -613,8 +597,6 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, shared); handle_changed_spte_acc_track(old_spte, new_spte, level); - handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, - new_spte, level); } /* @@ -725,8 +707,6 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false); handle_changed_spte_acc_track(old_spte, new_spte, level); - handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte, new_spte, - level); return old_spte; } -- cgit v1.2.3 From 40fa907e5a69edbae503a4937f8f487ed1ca92ea Mon Sep 17 00:00:00 2001 From: Vipin Sharma Date: Tue, 21 Mar 2023 15:00:21 -0700 Subject: KVM: x86/mmu: Merge all handle_changed_pte*() functions Merge __handle_changed_pte() and handle_changed_spte_acc_track() into a single function, handle_changed_pte(), as the two are always used together. Remove the existing handle_changed_pte(), as it's just a wrapper that calls __handle_changed_pte() and handle_changed_spte_acc_track(). 
Signed-off-by: Vipin Sharma Reviewed-by: Ben Gardon Reviewed-by: David Matlack [sean: massage changelog] Link: https://lore.kernel.org/r/20230321220021.2119033-14-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu/tdp_mmu.c | 42 ++++++++++++------------------------------ 1 file changed, 12 insertions(+), 30 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index e8ee49b6da5b..b2fca11b91ff 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -334,17 +334,6 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, u64 old_spte, u64 new_spte, int level, bool shared); -static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level) -{ - if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level)) - return; - - if (is_accessed_spte(old_spte) && - (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) || - spte_to_pfn(old_spte) != spte_to_pfn(new_spte))) - kvm_set_pfn_accessed(spte_to_pfn(old_spte)); -} - static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) { kvm_account_pgtable_pages((void *)sp->spt, +1); @@ -487,7 +476,7 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared) } /** - * __handle_changed_spte - handle bookkeeping associated with an SPTE change + * handle_changed_spte - handle bookkeeping associated with an SPTE change * @kvm: kvm instance * @as_id: the address space of the paging structure the SPTE was a part of * @gfn: the base GFN that was mapped by the SPTE @@ -502,9 +491,9 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared) * dirty logging updates are handled in common code, not here (see make_spte() * and fast_pf_fix_direct_spte()). 
*/ -static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, - u64 old_spte, u64 new_spte, int level, - bool shared) +static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, + u64 old_spte, u64 new_spte, int level, + bool shared) { bool was_present = is_shadow_present_pte(old_spte); bool is_present = is_shadow_present_pte(new_spte); @@ -588,15 +577,10 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, if (was_present && !was_leaf && (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed))) handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared); -} -static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, - u64 old_spte, u64 new_spte, int level, - bool shared) -{ - __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, - shared); - handle_changed_spte_acc_track(old_spte, new_spte, level); + if (was_leaf && is_accessed_spte(old_spte) && + (!is_present || !is_accessed_spte(new_spte) || pfn_changed)) + kvm_set_pfn_accessed(spte_to_pfn(old_spte)); } /* @@ -639,9 +623,8 @@ static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm, if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte)) return -EBUSY; - __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, - new_spte, iter->level, true); - handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level); + handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte, + new_spte, iter->level, true); return 0; } @@ -705,8 +688,7 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level); - __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false); - handle_changed_spte_acc_track(old_spte, new_spte, level); + handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false); return old_spte; } @@ -1275,7 +1257,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, * Note, when changing a read-only SPTE, it's not strictly necessary to * zero the SPTE before setting the new PFN, but doing so preserves the * invariant that the PFN of a present * leaf SPTE can never change. - * See __handle_changed_spte(). + * See handle_changed_spte(). */ tdp_mmu_iter_set_spte(kvm, iter, 0); @@ -1300,7 +1282,7 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) /* * No need to handle the remote TLB flush under RCU protection, the * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a - * shadow page. See the WARN on pfn_changed in __handle_changed_spte(). + * shadow page. See the WARN on pfn_changed in handle_changed_spte(). */ return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); } -- cgit v1.2.3 From e65733b5c59a1ea20324a03494364958bef3fc68 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:38 +0000 Subject: KVM: x86: Redefine 'longmode' as a flag for KVM_EXIT_HYPERCALL The 'longmode' field is a bit annoying as it blows an entire __u32 to represent a boolean value. Since other architectures are looking to add support for KVM_EXIT_HYPERCALL, now is probably a good time to clean it up. Redefine the field (and the remaining padding) as a set of flags. Preserve the existing ABI by using bit 0 to indicate if the guest was in long mode and requiring that the remaining 31 bits must be zero. 
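Illustration (not part of the patch): a minimal userspace-side sketch of how a VMM might consume the new flag while staying compatible with headers that only expose 'longmode'. The helper name is invented; the field and macro names come from the diff below.

#include <stdbool.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: report whether the guest issued the hypercall from
 * long mode. With updated headers, bit 0 of 'flags' carries the information;
 * the union keeps the legacy 'longmode' field at the same offset, so
 * binaries built against older headers keep working unchanged.
 */
static bool hypercall_from_long_mode(const struct kvm_run *run)
{
#ifdef KVM_EXIT_HYPERCALL_LONG_MODE	/* headers with the new flag */
	return run->hypercall.flags & KVM_EXIT_HYPERCALL_LONG_MODE;
#else					/* pre-flag headers */
	return run->hypercall.longmode;
#endif
}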
Cc: Paolo Bonzini Acked-by: Sean Christopherson Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-2-oliver.upton@linux.dev --- Documentation/virt/kvm/api.rst | 3 +-- arch/x86/include/asm/kvm_host.h | 7 +++++++ arch/x86/include/uapi/asm/kvm.h | 3 +++ arch/x86/kvm/x86.c | 6 +++++- include/uapi/linux/kvm.h | 9 +++++++-- 5 files changed, 23 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 62de0768d6aa..9b01e3d0e757 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6218,8 +6218,7 @@ to the byte array. __u64 nr; __u64 args[6]; __u64 ret; - __u32 longmode; - __u32 pad; + __u64 flags; } hypercall; Unused. This was once used for 'hypercall to userspace'. To implement diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 808c292ad3f4..15bda40517ff 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2204,4 +2204,11 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages); KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \ KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) +/* + * KVM previously used a u32 field in kvm_run to indicate the hypercall was + * initiated from long mode. KVM now sets bit 0 to indicate long mode, but the + * remaining 31 lower bits must be 0 to preserve ABI. + */ +#define KVM_EXIT_HYPERCALL_MBZ GENMASK_ULL(31, 1) + #endif /* _ASM_X86_KVM_HOST_H */ diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 7f467fe05d42..1a6a1f987949 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -559,4 +559,7 @@ struct kvm_pmu_event_filter { #define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */ #define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */ +/* x86-specific KVM_EXIT_HYPERCALL flags. */ +#define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0) + #endif /* _ASM_X86_KVM_H */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7713420abab0..27a1d5c1a018 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9803,7 +9803,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) vcpu->run->hypercall.args[0] = gpa; vcpu->run->hypercall.args[1] = npages; vcpu->run->hypercall.args[2] = attrs; - vcpu->run->hypercall.longmode = op_64_bit; + vcpu->run->hypercall.flags = 0; + if (op_64_bit) + vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE; + + WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ); vcpu->arch.complete_userspace_io = complete_hypercall_exit; return 0; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index d77aef872a0a..dd42d7dfb86c 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -341,8 +341,13 @@ struct kvm_run { __u64 nr; __u64 args[6]; __u64 ret; - __u32 longmode; - __u32 pad; + + union { +#ifndef __KERNEL__ + __u32 longmode; +#endif + __u64 flags; + }; } hypercall; /* KVM_EXIT_TPR_ACCESS */ struct { -- cgit v1.2.3 From de40bb8abb764f6866d82c4e2a43acdb22892cf4 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:39 +0000 Subject: KVM: arm64: Add a helper to check if a VM has ran once The test_bit(...) pattern is quite a lot of keystrokes. Replace existing callsites with a helper. No functional change intended. 
Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-3-oliver.upton@linux.dev --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/kvm/hypercalls.c | 3 +-- arch/arm64/kvm/pmu-emul.c | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index bcd774d74f34..d091d1c9890b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1061,6 +1061,9 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); (system_supports_32bit_el0() && \ !static_branch_unlikely(&arm64_mismatched_32bit_el0)) +#define kvm_vm_has_ran_once(kvm) \ + (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags)) + int kvm_trng_call(struct kvm_vcpu *vcpu); #ifdef CONFIG_KVM extern phys_addr_t hyp_mem_base; diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 5da884e11337..a09a526a7d7c 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -379,8 +379,7 @@ static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val) mutex_lock(&kvm->lock); - if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) && - val != *fw_reg_bmap) { + if (kvm_vm_has_ran_once(kvm) && val != *fw_reg_bmap) { ret = -EBUSY; goto out; } diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 24908400e190..a0fc569fdbca 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -880,7 +880,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) list_for_each_entry(entry, &arm_pmus, entry) { arm_pmu = entry->arm_pmu; if (arm_pmu->pmu.type == pmu_id) { - if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) || + if (kvm_vm_has_ran_once(kvm) || (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) { ret = -EBUSY; break; @@ -963,7 +963,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) mutex_lock(&kvm->lock); - if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) { + if (kvm_vm_has_ran_once(kvm)) { mutex_unlock(&kvm->lock); return -EBUSY; } -- cgit v1.2.3 From e0fc6b21616dd917899ee4a2d4126b4a963c0871 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:40 +0000 Subject: KVM: arm64: Add vm fd device attribute accessors A subsequent change will allow userspace to convey a filter for hypercalls through a vm device attribute. Add the requisite boilerplate for vm attribute accessors. 
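Illustration (not part of the patch): with this plumbing in place, userspace can probe vm-scoped attribute groups the same way it already probes device and vCPU fds. A hedged sketch (helper name invented, error handling reduced to the raw return value):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: ask KVM whether a vm-fd device attribute is supported.
 * Returns 0 on success, or -1 with errno set (typically ENXIO) if the
 * group/attribute pair is unknown.
 */
static int vm_has_device_attr(int vm_fd, uint32_t group, uint64_t attr_id)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = group;
	attr.attr = attr_id;

	return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr);
}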
Reviewed-by: Suzuki K Poulose Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-4-oliver.upton@linux.dev --- arch/arm64/kvm/arm.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3bd732eaf087..b6e26c0e65e5 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1439,11 +1439,28 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, } } +static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ + switch (attr->group) { + default: + return -ENXIO; + } +} + +static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ + switch (attr->group) { + default: + return -ENXIO; + } +} + long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; + struct kvm_device_attr attr; switch (ioctl) { case KVM_CREATE_IRQCHIP: { @@ -1479,6 +1496,18 @@ long kvm_arch_vm_ioctl(struct file *filp, return -EFAULT; return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags); } + case KVM_HAS_DEVICE_ATTR: { + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + + return kvm_vm_has_attr(kvm, &attr); + } + case KVM_SET_DEVICE_ATTR: { + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + + return kvm_vm_set_attr(kvm, &attr); + } default: return -EINVAL; } -- cgit v1.2.3 From aac94968126beb9846c12a940f1302ece7849b4f Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:41 +0000 Subject: KVM: arm64: Rename SMC/HVC call handler to reflect reality KVM handles SMCCC calls from virtual EL2 that use the SMC instruction since commit bd36b1a9eb5a ("KVM: arm64: nv: Handle SMCs taken from virtual EL2"). Thus, the function name of the handler no longer reflects reality. Normalize the name on SMCCC, since that's the only hypercall interface KVM supports in the first place. No functional change intended. Reviewed-by: Suzuki K Poulose Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-5-oliver.upton@linux.dev --- arch/arm64/kvm/handle_exit.c | 4 ++-- arch/arm64/kvm/hypercalls.c | 2 +- include/kvm/arm_hypercalls.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index a798c0b4d717..5e4f9737cbd5 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -52,7 +52,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu) return 1; } - ret = kvm_hvc_call_handler(vcpu); + ret = kvm_smccc_call_handler(vcpu); if (ret < 0) { vcpu_set_reg(vcpu, 0, ~0UL); return 1; @@ -89,7 +89,7 @@ static int handle_smc(struct kvm_vcpu *vcpu) * at Non-secure EL1 is trapped to EL2 if HCR_EL2.TSC==1, rather than * being treated as UNDEFINED.
*/ - ret = kvm_hvc_call_handler(vcpu); + ret = kvm_smccc_call_handler(vcpu); if (ret < 0) vcpu_set_reg(vcpu, 0, ~0UL); diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index a09a526a7d7c..5ead6c6afff0 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -121,7 +121,7 @@ static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id) } } -int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) +int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) { struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat; u32 func_id = smccc_get_function(vcpu); diff --git a/include/kvm/arm_hypercalls.h b/include/kvm/arm_hypercalls.h index 1188f116cf4e..8f4e33bc43e8 100644 --- a/include/kvm/arm_hypercalls.h +++ b/include/kvm/arm_hypercalls.h @@ -6,7 +6,7 @@ #include -int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); +int kvm_smccc_call_handler(struct kvm_vcpu *vcpu); static inline u32 smccc_get_function(struct kvm_vcpu *vcpu) { -- cgit v1.2.3 From c2d2e9b3d8ce9db825a5630d9d52d542f5138ae0 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:42 +0000 Subject: KVM: arm64: Start handling SMCs from EL1 Whelp, the architecture gods have spoken and confirmed that the function ID space is common between SMCs and HVCs. Not only that, the expectation is that hypervisors handle calls to both SMC and HVC conduits. KVM recently picked up support for SMCCCs in commit bd36b1a9eb5a ("KVM: arm64: nv: Handle SMCs taken from virtual EL2") but scoped it only to a nested hypervisor. Let's just open the floodgates and let EL1 access our SMCCC implementation with the SMC instruction as well. Reviewed-by: Suzuki K Poulose Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-6-oliver.upton@linux.dev --- arch/arm64/kvm/handle_exit.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 5e4f9737cbd5..68f95dcd41a1 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -72,13 +72,15 @@ static int handle_smc(struct kvm_vcpu *vcpu) * * We need to advance the PC after the trap, as it would * otherwise return to the same address... - * - * Only handle SMCs from the virtual EL2 with an immediate of zero and - * skip it otherwise. */ - if (!vcpu_is_el2(vcpu) || kvm_vcpu_hvc_get_imm(vcpu)) { + kvm_incr_pc(vcpu); + + /* + * SMCs with a nonzero immediate are reserved according to DEN0028E 2.9 + * "SMC and HVC immediate value". + */ + if (kvm_vcpu_hvc_get_imm(vcpu)) { vcpu_set_reg(vcpu, 0, ~0UL); - kvm_incr_pc(vcpu); return 1; } @@ -93,8 +95,6 @@ static int handle_smc(struct kvm_vcpu *vcpu) if (ret < 0) vcpu_set_reg(vcpu, 0, ~0UL); - kvm_incr_pc(vcpu); - return ret; } -- cgit v1.2.3 From a8308b3fc9494953c453480fb277e24f82f7d2b9 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:43 +0000 Subject: KVM: arm64: Refactor hvc filtering to support different actions KVM presently allows userspace to filter guest hypercalls with bitmaps expressed via pseudo-firmware registers. These bitmaps have a narrow scope and, of course, can only allow/deny a particular call. A subsequent change to KVM will introduce a generalized UAPI for filtering hypercalls, allowing functions to be forwarded to userspace. Refactor the existing hypercall filtering logic to make room for more than two actions. 
While at it, generalize the function names around SMCCC as it is the basis for the upcoming UAPI. No functional change intended. Reviewed-by: Suzuki K Poulose Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-7-oliver.upton@linux.dev --- arch/arm64/include/uapi/asm/kvm.h | 9 +++++++++ arch/arm64/kvm/hypercalls.c | 26 ++++++++++++++++++++++---- 2 files changed, 31 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index f8129c624b07..f9672ef1159a 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -469,6 +469,15 @@ enum { /* run->fail_entry.hardware_entry_failure_reason codes. */ #define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0) +enum kvm_smccc_filter_action { + KVM_SMCCC_FILTER_HANDLE = 0, + KVM_SMCCC_FILTER_DENY, + +#ifdef __KERNEL__ + NR_SMCCC_FILTER_ACTIONS +#endif +}; + #endif #endif /* __ARM_KVM_H__ */ diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 5ead6c6afff0..0be974e2f1fc 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -65,7 +65,7 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val) val[3] = lower_32_bits(cycles); } -static bool kvm_hvc_call_default_allowed(u32 func_id) +static bool kvm_smccc_default_allowed(u32 func_id) { switch (func_id) { /* @@ -93,7 +93,7 @@ static bool kvm_hvc_call_default_allowed(u32 func_id) } } -static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id) +static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id) { struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat; @@ -117,20 +117,38 @@ static bool kvm_hvc_call_allowed(struct kvm_vcpu *vcpu, u32 func_id) return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP, &smccc_feat->vendor_hyp_bmap); default: - return kvm_hvc_call_default_allowed(func_id); + return false; } } +static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id) +{ + if (kvm_smccc_test_fw_bmap(vcpu, func_id) || + kvm_smccc_default_allowed(func_id)) + return KVM_SMCCC_FILTER_HANDLE; + + return KVM_SMCCC_FILTER_DENY; +} + int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) { struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat; u32 func_id = smccc_get_function(vcpu); u64 val[4] = {SMCCC_RET_NOT_SUPPORTED}; u32 feature; + u8 action; gpa_t gpa; - if (!kvm_hvc_call_allowed(vcpu, func_id)) + action = kvm_smccc_get_action(vcpu, func_id); + switch (action) { + case KVM_SMCCC_FILTER_HANDLE: + break; + case KVM_SMCCC_FILTER_DENY: + goto out; + default: + WARN_RATELIMIT(1, "Unhandled SMCCC filter action: %d\n", action); goto out; + } switch (func_id) { case ARM_SMCCC_VERSION_FUNC_ID: -- cgit v1.2.3 From fb88707dd39bd1d5ec4a058776de9ee99bcc7b72 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:44 +0000 Subject: KVM: arm64: Use a maple tree to represent the SMCCC filter Maple tree is an efficient B-tree implementation that is intended for storing non-overlapping intervals. Such a data structure is a good fit for the SMCCC filter as it is desirable to sparsely allocate the 32 bit function ID space. To that end, add a maple tree to kvm_arch and correctly init/teardown along with the VM. Wire in a test against the hypercall filter for HVCs which does nothing until the controls are exposed to userspace. 
Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-8-oliver.upton@linux.dev --- arch/arm64/include/asm/kvm_host.h | 5 +++- arch/arm64/kvm/arm.c | 2 ++ arch/arm64/kvm/hypercalls.c | 57 +++++++++++++++++++++++++++++++++++++++ include/kvm/arm_hypercalls.h | 1 + 4 files changed, 64 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index d091d1c9890b..2682b3fd0881 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -221,7 +222,8 @@ struct kvm_arch { #define KVM_ARCH_FLAG_EL1_32BIT 4 /* PSCI SYSTEM_SUSPEND enabled for the guest */ #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 5 - + /* SMCCC filter initialized for the VM */ +#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED 6 unsigned long flags; /* @@ -242,6 +244,7 @@ struct kvm_arch { /* Hypercall features firmware registers' descriptor */ struct kvm_smccc_features smccc_feat; + struct maple_tree smccc_filter; /* * For an untrusted host VM, 'pkvm.handle' is used to lookup diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index b6e26c0e65e5..1202ac03bee0 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -192,6 +192,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_destroy_vcpus(kvm); kvm_unshare_hyp(kvm, kvm + 1); + + kvm_arm_teardown_hypercalls(kvm); } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 0be974e2f1fc..ba7cd84c6668 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -121,8 +121,58 @@ static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id) } } +#define SMCCC_ARCH_RANGE_BEGIN ARM_SMCCC_VERSION_FUNC_ID +#define SMCCC_ARCH_RANGE_END \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, ARM_SMCCC_FUNC_MASK) + +static void init_smccc_filter(struct kvm *kvm) +{ + int r; + + mt_init(&kvm->arch.smccc_filter); + + /* + * Prevent userspace from handling any SMCCC calls in the architecture + * range, avoiding the risk of misrepresenting Spectre mitigation status + * to the guest. + */ + r = mtree_insert_range(&kvm->arch.smccc_filter, + SMCCC_ARCH_RANGE_BEGIN, SMCCC_ARCH_RANGE_END, + xa_mk_value(KVM_SMCCC_FILTER_HANDLE), + GFP_KERNEL_ACCOUNT); + WARN_ON_ONCE(r); +} + +static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id) +{ + unsigned long idx = func_id; + void *val; + + if (!test_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags)) + return KVM_SMCCC_FILTER_HANDLE; + + /* + * But where's the error handling, you say? + * + * mt_find() returns NULL if no entry was found, which just so happens + * to match KVM_SMCCC_FILTER_HANDLE. + */ + val = mt_find(&kvm->arch.smccc_filter, &idx, idx); + return xa_to_value(val); +} + static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id) { + /* + * Intervening actions in the SMCCC filter take precedence over the + * pseudo-firmware register bitmaps. 
+ */ + u8 action = kvm_smccc_filter_get_action(vcpu->kvm, func_id); + if (action != KVM_SMCCC_FILTER_HANDLE) + return action; + if (kvm_smccc_test_fw_bmap(vcpu, func_id) || kvm_smccc_default_allowed(func_id)) return KVM_SMCCC_FILTER_HANDLE; @@ -263,6 +313,13 @@ void kvm_arm_init_hypercalls(struct kvm *kvm) smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES; smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES; smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES; + + init_smccc_filter(kvm); +} + +void kvm_arm_teardown_hypercalls(struct kvm *kvm) +{ + mtree_destroy(&kvm->arch.smccc_filter); } int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu) diff --git a/include/kvm/arm_hypercalls.h b/include/kvm/arm_hypercalls.h index 8f4e33bc43e8..fe6c31575b05 100644 --- a/include/kvm/arm_hypercalls.h +++ b/include/kvm/arm_hypercalls.h @@ -43,6 +43,7 @@ static inline void smccc_set_retval(struct kvm_vcpu *vcpu, struct kvm_one_reg; void kvm_arm_init_hypercalls(struct kvm *kvm); +void kvm_arm_teardown_hypercalls(struct kvm *kvm); int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); -- cgit v1.2.3 From d824dff1919bbd523d4d5c860437d043c0ad121d Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:45 +0000 Subject: KVM: arm64: Add support for KVM_EXIT_HYPERCALL In anticipation of user hypercall filters, add the necessary plumbing to get SMCCC calls out to userspace. Even though the exit structure has space for KVM to pass register arguments, let's just avoid it altogether and let userspace poke at the registers via KVM_GET_ONE_REG. This deliberately stretches the definition of a 'hypercall' to cover SMCs from EL1 in addition to the HVCs we know and love. KVM doesn't support EL1 calls into secure services, but now we can paint that as a userspace problem and be done with it. Finally, we need a flag to let userspace know what conduit instruction was used (i.e. SMC vs. HVC). Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-9-oliver.upton@linux.dev --- Documentation/virt/kvm/api.rst | 18 ++++++++++++++++-- arch/arm64/include/uapi/asm/kvm.h | 4 ++++ arch/arm64/kvm/handle_exit.c | 4 +++- arch/arm64/kvm/hypercalls.c | 16 ++++++++++++++++ 4 files changed, 39 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 9b01e3d0e757..9497792c4ee5 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6221,11 +6221,25 @@ to the byte array. __u64 flags; } hypercall; -Unused. This was once used for 'hypercall to userspace'. To implement -such functionality, use KVM_EXIT_IO (x86) or KVM_EXIT_MMIO (all except s390). + +It is strongly recommended that userspace use ``KVM_EXIT_IO`` (x86) or +``KVM_EXIT_MMIO`` (all except s390) to implement functionality that +requires a guest to interact with host userpace. .. note:: KVM_EXIT_IO is significantly faster than KVM_EXIT_MMIO. +For arm64: +---------- + +``nr`` contains the function ID of the guest's SMCCC call. Userspace is +expected to use the ``KVM_GET_ONE_REG`` ioctl to retrieve the call +parameters from the vCPU's GPRs. + +Definition of ``flags``: + - ``KVM_HYPERCALL_EXIT_SMC``: Indicates that the guest used the SMC + conduit to initiate the SMCCC call. If this bit is 0 then the guest + used the HVC conduit for the SMCCC call. 
+ :: /* KVM_EXIT_TPR_ACCESS */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index f9672ef1159a..f86446c5a7e3 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -472,12 +472,16 @@ enum { enum kvm_smccc_filter_action { KVM_SMCCC_FILTER_HANDLE = 0, KVM_SMCCC_FILTER_DENY, + KVM_SMCCC_FILTER_FWD_TO_USER, #ifdef __KERNEL__ NR_SMCCC_FILTER_ACTIONS #endif }; +/* arm64-specific KVM_EXIT_HYPERCALL flags */ +#define KVM_HYPERCALL_EXIT_SMC (1U << 0) + #endif #endif /* __ARM_KVM_H__ */ diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 68f95dcd41a1..3f43e20c48b6 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -71,7 +71,9 @@ static int handle_smc(struct kvm_vcpu *vcpu) * Trap exception, not a Secure Monitor Call exception [...]" * * We need to advance the PC after the trap, as it would - * otherwise return to the same address... + * otherwise return to the same address. Furthermore, pre-incrementing + * the PC before potentially exiting to userspace maintains the same + * abstraction for both SMCs and HVCs. */ kvm_incr_pc(vcpu); diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index ba7cd84c6668..2db53709bec1 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -180,6 +180,19 @@ static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id) return KVM_SMCCC_FILTER_DENY; } +static void kvm_prepare_hypercall_exit(struct kvm_vcpu *vcpu, u32 func_id) +{ + u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu)); + struct kvm_run *run = vcpu->run; + + run->exit_reason = KVM_EXIT_HYPERCALL; + run->hypercall.nr = func_id; + run->hypercall.flags = 0; + + if (ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64) + run->hypercall.flags |= KVM_HYPERCALL_EXIT_SMC; +} + int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) { struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat; @@ -195,6 +208,9 @@ int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) break; case KVM_SMCCC_FILTER_DENY: goto out; + case KVM_SMCCC_FILTER_FWD_TO_USER: + kvm_prepare_hypercall_exit(vcpu, func_id); + return 0; default: WARN_RATELIMIT(1, "Unhandled SMCCC filter action: %d\n", action); goto out; -- cgit v1.2.3 From 821d935c87bc95253f82deec3cbb457ccf3de003 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:46 +0000 Subject: KVM: arm64: Introduce support for userspace SMCCC filtering As the SMCCC (and related specifications) march towards an 'everything and the kitchen sink' interface for interacting with a system it becomes less likely that KVM will support every related feature. We could do better by letting userspace have a crack at it instead. Allow userspace to define an 'SMCCC filter' that applies to both HVCs and SMCs initiated by the guest. Supporting both conduits with this interface is important for a couple of reasons. Guest SMC usage is table stakes for a nested guest, as HVCs are always taken to the virtual EL2. Additionally, guests may want to interact with a service on the secure side which can now be proxied by userspace. 
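Illustration (not part of the patch): a userspace sketch of the filter UAPI defined in the diff that follows, assuming headers with this series applied. The function-ID range passed in is a made-up example; callers must avoid the reserved Arm Architecture ranges and install the filter before any vCPU has run (KVM returns -EBUSY otherwise).

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Forward an example range of SMCCC function IDs to userspace. */
static int forward_smccc_range(int vm_fd, uint32_t base, uint32_t nr_functions)
{
	struct kvm_smccc_filter filter;
	struct kvm_device_attr attr;

	memset(&filter, 0, sizeof(filter));	/* 'pad' must be zero */
	filter.base = base;
	filter.nr_functions = nr_functions;
	filter.action = KVM_SMCCC_FILTER_FWD_TO_USER;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_ARM_VM_SMCCC_CTRL;
	attr.attr = KVM_ARM_VM_SMCCC_FILTER;
	attr.addr = (uint64_t)(uintptr_t)&filter;

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}

/*
 * In the vCPU run loop, a filtered call surfaces as KVM_EXIT_HYPERCALL.
 * Returns true if the guest used the SMC conduit, false for HVC; the SMCCC
 * function ID is in run->hypercall.nr and the GPR arguments are fetched
 * separately with KVM_GET_ONE_REG.
 */
static bool forwarded_call_used_smc(const struct kvm_run *run)
{
	return run->hypercall.flags & KVM_HYPERCALL_EXIT_SMC;
}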
Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-10-oliver.upton@linux.dev --- Documentation/virt/kvm/api.rst | 4 ++ Documentation/virt/kvm/devices/vm.rst | 79 +++++++++++++++++++++++++++++++++++ arch/arm64/include/uapi/asm/kvm.h | 11 +++++ arch/arm64/kvm/arm.c | 4 ++ arch/arm64/kvm/hypercalls.c | 60 ++++++++++++++++++++++++++ include/kvm/arm_hypercalls.h | 3 ++ 6 files changed, 161 insertions(+) (limited to 'arch') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 9497792c4ee5..c8ab2f730945 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6231,6 +6231,10 @@ requires a guest to interact with host userpace. For arm64: ---------- +SMCCC exits can be enabled depending on the configuration of the SMCCC +filter. See the Documentation/virt/kvm/devices/vm.rst +``KVM_ARM_SMCCC_FILTER`` for more details. + ``nr`` contains the function ID of the guest's SMCCC call. Userspace is expected to use the ``KVM_GET_ONE_REG`` ioctl to retrieve the call parameters from the vCPU's GPRs. diff --git a/Documentation/virt/kvm/devices/vm.rst b/Documentation/virt/kvm/devices/vm.rst index 147efec626e5..9d726e60ec47 100644 --- a/Documentation/virt/kvm/devices/vm.rst +++ b/Documentation/virt/kvm/devices/vm.rst @@ -321,3 +321,82 @@ Allows userspace to query the status of migration mode. if it is enabled :Returns: -EFAULT if the given address is not accessible from kernel space; 0 in case of success. + +6. GROUP: KVM_ARM_VM_SMCCC_CTRL +=============================== + +:Architectures: arm64 + +6.1. ATTRIBUTE: KVM_ARM_VM_SMCCC_FILTER (w/o) +--------------------------------------------- + +:Parameters: Pointer to a ``struct kvm_smccc_filter`` + +:Returns: + + ====== =========================================== + EEXIST Range intersects with a previously inserted + or reserved range + EBUSY A vCPU in the VM has already run + EINVAL Invalid filter configuration + ENOMEM Failed to allocate memory for the in-kernel + representation of the SMCCC filter + ====== =========================================== + +Requests the installation of an SMCCC call filter described as follows:: + + enum kvm_smccc_filter_action { + KVM_SMCCC_FILTER_HANDLE = 0, + KVM_SMCCC_FILTER_DENY, + KVM_SMCCC_FILTER_FWD_TO_USER, + }; + + struct kvm_smccc_filter { + __u32 base; + __u32 nr_functions; + __u8 action; + __u8 pad[15]; + }; + +The filter is defined as a set of non-overlapping ranges. Each +range defines an action to be applied to SMCCC calls within the range. +Userspace can insert multiple ranges into the filter by using +successive calls to this attribute. + +The default configuration of KVM is such that all implemented SMCCC +calls are allowed. Thus, the SMCCC filter can be defined sparsely +by userspace, only describing ranges that modify the default behavior. + +The range expressed by ``struct kvm_smccc_filter`` is +[``base``, ``base + nr_functions``). The range is not allowed to wrap, +i.e. userspace cannot rely on ``base + nr_functions`` overflowing. + +The SMCCC filter applies to both SMC and HVC calls initiated by the +guest. The SMCCC filter gates the in-kernel emulation of SMCCC calls +and as such takes effect before other interfaces that interact with +SMCCC calls (e.g. hypercall bitmap registers). + +Actions: + + - ``KVM_SMCCC_FILTER_HANDLE``: Allows the guest SMCCC call to be + handled in-kernel. It is strongly recommended that userspace *not* + explicitly describe the allowed SMCCC call ranges. 
+ + - ``KVM_SMCCC_FILTER_DENY``: Rejects the guest SMCCC call in-kernel + and returns to the guest. + + - ``KVM_SMCCC_FILTER_FWD_TO_USER``: The guest SMCCC call is forwarded + to userspace with an exit reason of ``KVM_EXIT_HYPERCALL``. + +The ``pad`` field is reserved for future use and must be zero. KVM may +return ``-EINVAL`` if the field is nonzero. + +KVM reserves the 'Arm Architecture Calls' range of function IDs and +will reject attempts to define a filter for any portion of these ranges: + + =========== =============== + Start End (inclusive) + =========== =============== + 0x8000_0000 0x8000_FFFF + 0xC000_0000 0xC000_FFFF + =========== =============== diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index f86446c5a7e3..3dcfa4bfdf83 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -372,6 +372,10 @@ enum { #endif }; +/* Device Control API on vm fd */ +#define KVM_ARM_VM_SMCCC_CTRL 0 +#define KVM_ARM_VM_SMCCC_FILTER 0 + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 @@ -479,6 +483,13 @@ enum kvm_smccc_filter_action { #endif }; +struct kvm_smccc_filter { + __u32 base; + __u32 nr_functions; + __u8 action; + __u8 pad[15]; +}; + /* arm64-specific KVM_EXIT_HYPERCALL flags */ #define KVM_HYPERCALL_EXIT_SMC (1U << 0) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 1202ac03bee0..efee032c9560 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1444,6 +1444,8 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) { switch (attr->group) { + case KVM_ARM_VM_SMCCC_CTRL: + return kvm_vm_smccc_has_attr(kvm, attr); default: return -ENXIO; } @@ -1452,6 +1454,8 @@ static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) { switch (attr->group) { + case KVM_ARM_VM_SMCCC_CTRL: + return kvm_vm_smccc_set_attr(kvm, attr); default: return -ENXIO; } diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 2db53709bec1..9a35d6d18193 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -145,6 +145,44 @@ static void init_smccc_filter(struct kvm *kvm) WARN_ON_ONCE(r); } +static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr) +{ + const void *zero_page = page_to_virt(ZERO_PAGE(0)); + struct kvm_smccc_filter filter; + u32 start, end; + int r; + + if (copy_from_user(&filter, uaddr, sizeof(filter))) + return -EFAULT; + + if (memcmp(filter.pad, zero_page, sizeof(filter.pad))) + return -EINVAL; + + start = filter.base; + end = start + filter.nr_functions - 1; + + if (end < start || filter.action >= NR_SMCCC_FILTER_ACTIONS) + return -EINVAL; + + mutex_lock(&kvm->lock); + + if (kvm_vm_has_ran_once(kvm)) { + r = -EBUSY; + goto out_unlock; + } + + r = mtree_insert_range(&kvm->arch.smccc_filter, start, end, + xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT); + if (r) + goto out_unlock; + + set_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags); + +out_unlock: + mutex_unlock(&kvm->lock); + return r; +} + static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id) { unsigned long idx = func_id; @@ -569,3 +607,25 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) return -EINVAL; } + +int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ + switch 
(attr->attr) { + case KVM_ARM_VM_SMCCC_FILTER: + return 0; + default: + return -ENXIO; + } +} + +int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ + void __user *uaddr = (void __user *)attr->addr; + + switch (attr->attr) { + case KVM_ARM_VM_SMCCC_FILTER: + return kvm_smccc_set_filter(kvm, uaddr); + default: + return -ENXIO; + } +} diff --git a/include/kvm/arm_hypercalls.h b/include/kvm/arm_hypercalls.h index fe6c31575b05..2df152207ccd 100644 --- a/include/kvm/arm_hypercalls.h +++ b/include/kvm/arm_hypercalls.h @@ -49,4 +49,7 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr); +int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr); + #endif -- cgit v1.2.3 From 7e484d2785e2a2e526a6b2679d3e4c1402ffe0ec Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:47 +0000 Subject: KVM: arm64: Return NOT_SUPPORTED to guest for unknown PSCI version A subsequent change to KVM will allow negative returns from SMCCC handlers to exit to userspace. Make way for this change by explicitly returning SMCCC_RET_NOT_SUPPORTED to the guest if the VM is configured to use an unknown PSCI version. Add a WARN since this is undoubtedly a KVM bug. Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-11-oliver.upton@linux.dev --- arch/arm64/kvm/psci.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 7fbc4c1b9df0..aff54b106c30 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -435,6 +435,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) int kvm_psci_call(struct kvm_vcpu *vcpu) { u32 psci_fn = smccc_get_function(vcpu); + int version = kvm_psci_version(vcpu); unsigned long val; val = kvm_psci_check_allowed_function(vcpu, psci_fn); @@ -443,7 +444,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu) return 1; } - switch (kvm_psci_version(vcpu)) { + switch (version) { case KVM_ARM_PSCI_1_1: return kvm_psci_1_x_call(vcpu, 1); case KVM_ARM_PSCI_1_0: @@ -453,6 +454,8 @@ int kvm_psci_call(struct kvm_vcpu *vcpu) case KVM_ARM_PSCI_0_1: return kvm_psci_0_1_call(vcpu); default: - return -EINVAL; + WARN_ONCE(1, "Unknown PSCI version %d", version); + smccc_set_retval(vcpu, SMCCC_RET_NOT_SUPPORTED, 0, 0, 0); + return 1; } } -- cgit v1.2.3 From 37c8e494794786aa8e4acba1f0f5b45f37b11699 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 4 Apr 2023 15:40:48 +0000 Subject: KVM: arm64: Let errors from SMCCC emulation to reach userspace Typically a negative return from an exit handler is used to request a return to userspace with the specified error. KVM's handling of SMCCC emulation (i.e. both HVCs and SMCs) deviates from the trend and resumes the guest instead. Stop handling negative returns this way and instead let the error percolate to userspace. 
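For context, a minimal (assumed) VMM run-loop helper now sees such an error as a
failing KVM_RUN ioctl rather than a silently resumed guest:

    #include <err.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Assumed VMM helper: one KVM_RUN iteration.  With this change, a negative
     * return from the in-kernel SMCCC handler surfaces here as an ioctl error
     * instead of the guest resuming with ~0UL stuffed into x0. */
    static void vcpu_run_once(int vcpu_fd, struct kvm_run *run)
    {
            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                    err(1, "KVM_RUN");      /* e.g. a propagated -EFAULT/-ENOMEM */

            if (run->exit_reason == KVM_EXIT_HYPERCALL)
                    return;                 /* dispatch to the VMM's SMCCC handling */
    }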
Suggested-by: Suzuki K Poulose Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230404154050.2270077-12-oliver.upton@linux.dev --- arch/arm64/kvm/handle_exit.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 3f43e20c48b6..6dcd6604b6bc 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -36,8 +36,6 @@ static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr) static int handle_hvc(struct kvm_vcpu *vcpu) { - int ret; - trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0), kvm_vcpu_hvc_get_imm(vcpu)); vcpu->stat.hvc_exit_stat++; @@ -52,19 +50,11 @@ static int handle_hvc(struct kvm_vcpu *vcpu) return 1; } - ret = kvm_smccc_call_handler(vcpu); - if (ret < 0) { - vcpu_set_reg(vcpu, 0, ~0UL); - return 1; - } - - return ret; + return kvm_smccc_call_handler(vcpu); } static int handle_smc(struct kvm_vcpu *vcpu) { - int ret; - /* * "If an SMC instruction executed at Non-secure EL1 is * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a @@ -93,11 +83,7 @@ static int handle_smc(struct kvm_vcpu *vcpu) * at Non-secure EL1 is trapped to EL2 if HCR_EL2.TSC==1, rather than * being treated as UNDEFINED. */ - ret = kvm_smccc_call_handler(vcpu); - if (ret < 0) - vcpu_set_reg(vcpu, 0, ~0UL); - - return ret; + return kvm_smccc_call_handler(vcpu); } /* -- cgit v1.2.3 From 0e5c9a9d6548e9b178d4696c696ae4a21c39ae58 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 5 Apr 2023 12:48:58 +0100 Subject: KVM: arm64: Expose SMC/HVC width to userspace When returning to userspace to handle a SMCCC call, we consistently set PC to point to the instruction immediately after the HVC/SMC. However, should userspace need to know the exact address of the trapping instruction, it needs to know about the *size* of that instruction. For AArch64, this is pretty easy. For AArch32, this is a bit more funky, as Thumb has 16bit encodings for both HVC and SMC. Expose this to userspace with a new flag that directly derives from ESR_EL2.IL. Also update the documentation to reflect the PC state at the point of exit. Finally, this fixes a small buglet where the hypercall.{args,ret} fields would not be cleared on exit, and could contain some random junk. Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/86pm8iv8tj.wl-maz@kernel.org --- Documentation/virt/kvm/api.rst | 8 ++++++++ arch/arm64/include/uapi/asm/kvm.h | 3 ++- arch/arm64/kvm/hypercalls.c | 16 +++++++++++----- 3 files changed, 21 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index c8ab2f730945..103f945959ed 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6244,6 +6244,14 @@ Definition of ``flags``: conduit to initiate the SMCCC call. If this bit is 0 then the guest used the HVC conduit for the SMCCC call. + - ``KVM_HYPERCALL_EXIT_16BIT``: Indicates that the guest used a 16bit + instruction to initiate the SMCCC call. If this bit is 0 then the + guest used a 32bit instruction. An AArch64 guest always has this + bit set to 0. + +At the point of exit, PC points to the instruction immediately following +the trapping instruction. 
+ :: /* KVM_EXIT_TPR_ACCESS */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 3dcfa4bfdf83..b1c1edf85480 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -491,7 +491,8 @@ struct kvm_smccc_filter { }; /* arm64-specific KVM_EXIT_HYPERCALL flags */ -#define KVM_HYPERCALL_EXIT_SMC (1U << 0) +#define KVM_HYPERCALL_EXIT_SMC (1U << 0) +#define KVM_HYPERCALL_EXIT_16BIT (1U << 1) #endif diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 9a35d6d18193..3b6523f25afc 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -222,13 +222,19 @@ static void kvm_prepare_hypercall_exit(struct kvm_vcpu *vcpu, u32 func_id) { u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu)); struct kvm_run *run = vcpu->run; - - run->exit_reason = KVM_EXIT_HYPERCALL; - run->hypercall.nr = func_id; - run->hypercall.flags = 0; + u64 flags = 0; if (ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64) - run->hypercall.flags |= KVM_HYPERCALL_EXIT_SMC; + flags |= KVM_HYPERCALL_EXIT_SMC; + + if (!kvm_vcpu_trap_il_is32bit(vcpu)) + flags |= KVM_HYPERCALL_EXIT_16BIT; + + run->exit_reason = KVM_EXIT_HYPERCALL; + run->hypercall = (typeof(run->hypercall)) { + .nr = func_id, + .flags = flags, + }; } int kvm_smccc_call_handler(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 548bd27428b924cb725ab7c1418c0203e61f9ed7 Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Tue, 4 Apr 2023 11:25:02 +0800 Subject: KVM: VMX: Use is_64_bit_mode() to check 64-bit mode in SGX handler sgx_get_encls_gva() uses is_long_mode() to check 64-bit mode, however, SGX system leaf instructions are valid in compatibility mode, should use is_64_bit_mode() instead. Fixes: 70210c044b4e ("KVM: VMX: Add SGX ENCLS[ECREATE] handler to enforce CPUID restrictions") Signed-off-by: Binbin Wu Reviewed-by: Kai Huang Link: https://lore.kernel.org/r/20230404032502.27798-1-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/sgx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index aa53c98034bf..0574030b071f 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -29,14 +29,14 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, /* Skip vmcs.GUEST_DS retrieval for 64-bit mode to avoid VMREADs. */ *gva = offset; - if (!is_long_mode(vcpu)) { + if (!is_64_bit_mode(vcpu)) { vmx_get_segment(vcpu, &s, VCPU_SREG_DS); *gva += s.base; } if (!IS_ALIGNED(*gva, alignment)) { fault = true; - } else if (likely(is_long_mode(vcpu))) { + } else if (likely(is_64_bit_mode(vcpu))) { fault = is_noncanonical_address(*gva, vcpu); } else { *gva &= 0xffffffff; -- cgit v1.2.3 From 7e768ce8278bafe43e2a4771a82b61856190a3fc Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 4 Apr 2023 15:17:59 +0800 Subject: KVM: x86/pmu: Zero out pmu->all_valid_pmc_idx each time it's refreshed The kvm_pmu_refresh() may be called repeatedly (e.g. configure guest CPUID repeatedly or update MSR_IA32_PERF_CAPABILITIES) and each call will use the last pmu->all_valid_pmc_idx value, with the residual bits introducing additional overhead later in the vPMU emulation. 
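A worked example of the residual-bit problem (the counter counts are purely
illustrative):

    /*
     * Illustrative sequence if the bitmap were not cleared first:
     *
     *   refresh #1: guest CPUID exposes 8 GP counters -> bits 0-7 set
     *   refresh #2: guest CPUID exposes 4 GP counters -> bits 4-7 left stale
     *
     * Stale bits make later for_each_set_bit() walks over all_valid_pmc_idx
     * visit counters the guest no longer has, hence the unconditional clear:
     */
    bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);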
Fixes: b35e5548b411 ("KVM: x86/vPMU: Add lazy mechanism to release perf_event per vPMC") Suggested-by: Sean Christopherson Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230404071759.75376-1-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 612e6c70ce2e..29492c2a0c82 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -589,6 +589,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) */ void kvm_pmu_refresh(struct kvm_vcpu *vcpu) { + bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX); static_call(kvm_x86_pmu_refresh)(vcpu); } -- cgit v1.2.3 From 944a8dad8b4eb0b565cc36600ee1339c21278d76 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 3 Apr 2023 21:01:41 -0400 Subject: KVM: x86: set "mitigate_smt_rsb" storage-class-specifier to static smatch reports arch/x86/kvm/x86.c:199:20: warning: symbol 'mitigate_smt_rsb' was not declared. Should it be static? This variable is only used in one file so it should be static. Signed-off-by: Tom Rix Link: https://lore.kernel.org/r/20230404010141.1913667-1-trix@redhat.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2e0933f510ee..1b74da8682a0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -194,7 +194,7 @@ bool __read_mostly eager_page_split = true; module_param(eager_page_split, bool, 0644); /* Enable/disable SMT_RSB bug mitigation */ -bool __read_mostly mitigate_smt_rsb; +static bool __read_mostly mitigate_smt_rsb; module_param(mitigate_smt_rsb, bool, 0444); /* -- cgit v1.2.3 From 098f4c061ea10b777033b71c10bd9fd706820ee9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Sat, 28 Jan 2023 00:14:27 +0000 Subject: KVM: x86/pmu: Disallow legacy LBRs if architectural LBRs are available Disallow enabling LBR support if the CPU supports architectural LBRs. Traditional LBR support is absent on CPU models that have architectural LBRs, and KVM doesn't yet support arch LBRs, i.e. KVM will pass through non-existent MSRs if userspace enables LBRs for the guest. 
Cc: stable@vger.kernel.org Cc: Yang Weijiang Cc: Like Xu Reported-by: Paolo Bonzini Fixes: be635e34c284 ("KVM: vmx/pmu: Expose LBR_FMT in the MSR_IA32_PERF_CAPABILITIES") Tested-by: Like Xu Link: https://lore.kernel.org/r/20230128001427.2548858-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d7bf14abdba1..c18f74899f01 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7793,9 +7793,11 @@ static u64 vmx_get_perf_capabilities(void) if (boot_cpu_has(X86_FEATURE_PDCM)) rdmsrl(MSR_IA32_PERF_CAPABILITIES, host_perf_cap); - x86_perf_get_lbr(&lbr); - if (lbr.nr) - perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT; + if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR)) { + x86_perf_get_lbr(&lbr); + if (lbr.nr) + perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT; + } if (vmx_pebs_supported()) { perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK; -- cgit v1.2.3 From 52887af5650e35ea78b37602722313ff7b3c0c30 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 Mar 2023 18:14:35 -0700 Subject: KVM: x86: Revert MSR_IA32_FLUSH_CMD.FLUSH_L1D enabling Revert the recently added virtualizing of MSR_IA32_FLUSH_CMD, as both the VMX and SVM are fatally buggy to guests that use MSR_IA32_FLUSH_CMD or MSR_IA32_PRED_CMD, and because the entire foundation of the logic is flawed. The most immediate problem is an inverted check on @cmd that results in rejecting legal values. SVM doubles down on bugs and drops the error, i.e. silently breaks all guest mitigations based on the command MSRs. The next issue is that neither VMX nor SVM was updated to mark MSR_IA32_FLUSH_CMD as being a possible passthrough MSR, which isn't hugely problematic, but does break MSR filtering and triggers a WARN on VMX designed to catch this exact bug. The foundational issues stem from the MSR_IA32_FLUSH_CMD code reusing logic from MSR_IA32_PRED_CMD, which in turn was likely copied from KVM's support for MSR_IA32_SPEC_CTRL. The copy+paste from MSR_IA32_SPEC_CTRL was misguided as MSR_IA32_PRED_CMD (and MSR_IA32_FLUSH_CMD) is a write-only MSR, i.e. doesn't need the same "deferred passthrough" shenanigans as MSR_IA32_SPEC_CTRL. Revert all MSR_IA32_FLUSH_CMD enabling in one fell swoop so that there is no point where KVM advertises, but does not support, L1D_FLUSH. This reverts commits 45cf86f26148e549c5ba4a8ab32a390e4bde216e, 723d5fb0ffe4c02bd4edf47ea02c02e454719f28, and a807b78ad04b2eaa348f52f5cc7702385b6de1ee. 
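To make the inverted check concrete, here is the buggy test from the removed
helper next to the restored open-coded form (both appear verbatim in the diff
below), using PRED_CMD as the example:

    /* Reverted helper: the check is inverted, so legal values (data == cmd,
     * data == 0) are rejected while reserved bits slip through. */
    if (!(msr->data & ~cmd))
            return 1;

    /* Restored logic: reject only unsupported bits. */
    if (data & ~PRED_CMD_IBPB)
            return 1;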
Reported-by: Nathan Chancellor Link: https://lkml.kernel.org/r/20230317190432.GA863767%40dev-arch.thelio-3990X Cc: Emanuele Giuseppe Esposito Cc: Pawan Gupta Cc: Jim Mattson Signed-off-by: Sean Christopherson Tested-by: Mathias Krause Message-Id: <20230322011440.2195485-2-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/svm/svm.c | 43 +++++++++--------------------- arch/x86/kvm/vmx/nested.c | 3 --- arch/x86/kvm/vmx/vmx.c | 68 +++++++++++++++++------------------------------ 4 files changed, 39 insertions(+), 77 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 9583a110cf5f..599aebec2d52 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -653,7 +653,7 @@ void kvm_set_cpu_caps(void) F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) | F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) | - F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D) + F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) ); /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */ diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 70183d2271b5..252e7f37e4e2 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2869,28 +2869,6 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) return 0; } -static int svm_set_msr_ia32_cmd(struct kvm_vcpu *vcpu, struct msr_data *msr, - bool guest_has_feat, u64 cmd, - int x86_feature_bit) -{ - struct vcpu_svm *svm = to_svm(vcpu); - - if (!msr->host_initiated && !guest_has_feat) - return 1; - - if (!(msr->data & ~cmd)) - return 1; - if (!boot_cpu_has(x86_feature_bit)) - return 1; - if (!msr->data) - return 0; - - wrmsrl(msr->index, cmd); - set_msr_interception(vcpu, svm->msrpm, msr->index, 0, 1); - - return 0; -} - static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct vcpu_svm *svm = to_svm(vcpu); @@ -2965,14 +2943,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); break; case MSR_IA32_PRED_CMD: - r = svm_set_msr_ia32_cmd(vcpu, msr, - guest_has_pred_cmd_msr(vcpu), - PRED_CMD_IBPB, X86_FEATURE_IBPB); - break; - case MSR_IA32_FLUSH_CMD: - r = svm_set_msr_ia32_cmd(vcpu, msr, - guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D), - L1D_FLUSH, X86_FEATURE_FLUSH_L1D); + if (!msr->host_initiated && + !guest_has_pred_cmd_msr(vcpu)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; + if (!boot_cpu_has(X86_FEATURE_IBPB)) + return 1; + if (!data) + break; + + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); break; case MSR_AMD64_VIRT_SPEC_CTRL: if (!msr->host_initiated && diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index f63b28f46a71..1bc2b80273c9 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -654,9 +654,6 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, MSR_IA32_PRED_CMD, MSR_TYPE_W); - nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, - MSR_IA32_FLUSH_CMD, MSR_TYPE_W); - kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false); vmx->nested.force_msr_bitmap_recalc = false; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d7bf14abdba1..f777509ecf17 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2133,39 +2133,6 @@ static u64 
vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated return debugctl; } -static int vmx_set_msr_ia32_cmd(struct kvm_vcpu *vcpu, - struct msr_data *msr_info, - bool guest_has_feat, u64 cmd, - int x86_feature_bit) -{ - if (!msr_info->host_initiated && !guest_has_feat) - return 1; - - if (!(msr_info->data & ~cmd)) - return 1; - if (!boot_cpu_has(x86_feature_bit)) - return 1; - if (!msr_info->data) - return 0; - - wrmsrl(msr_info->index, cmd); - - /* - * For non-nested: - * When it's written (to non-zero) for the first time, pass - * it through. - * - * For nested: - * The handling of the MSR bitmap for L2 guests is done in - * nested_vmx_prepare_msr_bitmap. We should not touch the - * vmcs02.msr_bitmap here since it gets completely overwritten - * in the merging. - */ - vmx_disable_intercept_for_msr(vcpu, msr_info->index, MSR_TYPE_W); - - return 0; -} - /* * Writes msr value into the appropriate "register". * Returns 0 on success, non-0 otherwise. @@ -2319,16 +2286,31 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; goto find_uret_msr; case MSR_IA32_PRED_CMD: - ret = vmx_set_msr_ia32_cmd(vcpu, msr_info, - guest_has_pred_cmd_msr(vcpu), - PRED_CMD_IBPB, - X86_FEATURE_IBPB); - break; - case MSR_IA32_FLUSH_CMD: - ret = vmx_set_msr_ia32_cmd(vcpu, msr_info, - guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D), - L1D_FLUSH, - X86_FEATURE_FLUSH_L1D); + if (!msr_info->host_initiated && + !guest_has_pred_cmd_msr(vcpu)) + return 1; + + if (data & ~PRED_CMD_IBPB) + return 1; + if (!boot_cpu_has(X86_FEATURE_IBPB)) + return 1; + if (!data) + break; + + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); + + /* + * For non-nested: + * When it's written (to non-zero) for the first time, pass + * it through. + * + * For nested: + * The handling of the MSR bitmap for L2 guests is done in + * nested_vmx_prepare_msr_bitmap. We should not touch the + * vmcs02.msr_bitmap here since it gets completely overwritten + * in the merging. + */ + vmx_disable_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W); break; case MSR_IA32_CR_PAT: if (!kvm_pat_valid(data)) -- cgit v1.2.3 From 9a4c4850137e929d49958fb62d4275bd2f5b8c7a Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 Mar 2023 18:14:36 -0700 Subject: KVM: VMX: Passthrough MSR_IA32_PRED_CMD based purely on host+guest CPUID Passthrough MSR_IA32_PRED_CMD based purely on whether or not the MSR is supported and enabled, i.e. don't wait until the first write. There's no benefit to deferred passthrough, and the extra logic only adds complexity. Signed-off-by: Sean Christopherson Reviewed-by: Xiaoyao Li Message-Id: <20230322011440.2195485-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index f777509ecf17..5c01c76c0d45 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2298,19 +2298,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) break; wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); - - /* - * For non-nested: - * When it's written (to non-zero) for the first time, pass - * it through. - * - * For nested: - * The handling of the MSR bitmap for L2 guests is done in - * nested_vmx_prepare_msr_bitmap. We should not touch the - * vmcs02.msr_bitmap here since it gets completely overwritten - * in the merging. 
- */ - vmx_disable_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W); break; case MSR_IA32_CR_PAT: if (!kvm_pat_valid(data)) @@ -7743,6 +7730,9 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R, !guest_cpuid_has(vcpu, X86_FEATURE_XFD)); + if (boot_cpu_has(X86_FEATURE_IBPB)) + vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W, + !guest_has_pred_cmd_msr(vcpu)); set_cr4_guest_host_mask(vmx); -- cgit v1.2.3 From bff903e8cd743cddb42ef1690edd33130f12c40a Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 Mar 2023 18:14:37 -0700 Subject: KVM: SVM: Passthrough MSR_IA32_PRED_CMD based purely on host+guest CPUID Passthrough MSR_IA32_PRED_CMD based purely on whether or not the MSR is supported and enabled, i.e. don't wait until the first write. There's no benefit to deferred passthrough, and the extra logic only adds complexity. Signed-off-by: Sean Christopherson Message-Id: <20230322011440.2195485-4-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 252e7f37e4e2..f757b436ffae 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2955,7 +2955,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) break; wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); - set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); break; case MSR_AMD64_VIRT_SPEC_CTRL: if (!msr->host_initiated && @@ -4151,6 +4150,10 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) svm_recalc_instruction_intercepts(vcpu, svm); + if (boot_cpu_has(X86_FEATURE_IBPB)) + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, + !!guest_has_pred_cmd_msr(vcpu)); + /* For sev guests, the memory encryption bit is not reserved in CR3. */ if (sev_guest(vcpu->kvm)) { best = kvm_find_cpuid_entry(vcpu, 0x8000001F); -- cgit v1.2.3 From 903358c7eddb9a8a83013f967909805e6af050d4 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 Mar 2023 18:14:38 -0700 Subject: KVM: x86: Move MSR_IA32_PRED_CMD WRMSR emulation to common code Dedup the handling of MSR_IA32_PRED_CMD across VMX and SVM by moving the logic to kvm_set_msr_common(). Now that the MSR interception toggling is handled as part of setting guest CPUID, the VMX and SVM paths are identical. Opportunistically massage the code to make it a wee bit denser. 
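For reference, the guest-visible operation being centralized is a single command
write (a guest-side sketch, not KVM code):

    /* Guest-side sketch: issue an indirect branch prediction barrier.  With the
     * MSR passed through, this hits hardware directly; only emulated or
     * host-initiated writes reach the new common handler. */
    wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);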
Signed-off-by: Sean Christopherson Reviewed-by: Xiaoyao Li Message-Id: <20230322011440.2195485-5-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 14 -------------- arch/x86/kvm/vmx/vmx.c | 14 -------------- arch/x86/kvm/x86.c | 11 +++++++++++ 3 files changed, 11 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index f757b436ffae..85bb535fc321 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2942,20 +2942,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) */ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); break; - case MSR_IA32_PRED_CMD: - if (!msr->host_initiated && - !guest_has_pred_cmd_msr(vcpu)) - return 1; - - if (data & ~PRED_CMD_IBPB) - return 1; - if (!boot_cpu_has(X86_FEATURE_IBPB)) - return 1; - if (!data) - break; - - wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); - break; case MSR_AMD64_VIRT_SPEC_CTRL: if (!msr->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 5c01c76c0d45..29807be219b9 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2285,20 +2285,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR)) return 1; goto find_uret_msr; - case MSR_IA32_PRED_CMD: - if (!msr_info->host_initiated && - !guest_has_pred_cmd_msr(vcpu)) - return 1; - - if (data & ~PRED_CMD_IBPB) - return 1; - if (!boot_cpu_has(X86_FEATURE_IBPB)) - return 1; - if (!data) - break; - - wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); - break; case MSR_IA32_CR_PAT: if (!kvm_pat_valid(data)) return 1; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 237c483b1230..c83ec88da043 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3617,6 +3617,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vcpu->arch.perf_capabilities = data; kvm_pmu_refresh(vcpu); return 0; + case MSR_IA32_PRED_CMD: + if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu)) + return 1; + + if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB)) + return 1; + if (!data) + break; + + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); + break; case MSR_EFER: return set_efer(vcpu, msr_info); case MSR_K7_HWCR: -- cgit v1.2.3 From da3db168fb671f15e393b227f5c312c698ecb6ea Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 Mar 2023 18:14:39 -0700 Subject: KVM: x86: Virtualize FLUSH_L1D and passthrough MSR_IA32_FLUSH_CMD Virtualize FLUSH_L1D so that the guest can use the performant L1D flush if one of the many mitigations might require a flush in the guest, e.g. Linux provides an option to flush the L1D when switching mms. Passthrough MSR_IA32_FLUSH_CMD for write when it's supported in hardware and exposed to the guest, i.e. always let the guest write it directly if FLUSH_L1D is fully supported. Forward writes to hardware in host context on the off chance that KVM ends up emulating a WRMSR, or in the really unlikely scenario where userspace wants to force a flush. Restrict these forwarded WRMSRs to the known command out of an abundance of caution. Passing through the MSR means the guest can throw any and all values at hardware, but doing so in host context is arguably a bit more dangerous. 
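A sketch of the guest-side usage this enables (assuming FLUSH_L1D is enumerated
to the guest):

    /* Guest-side sketch: flush the L1 data cache, e.g. when switching mms.
     * With the MSR passed through this goes straight to hardware; an
     * intercepted write is forwarded by the common WRMSR handler instead. */
    if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
            wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);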
Link: https://lkml.kernel.org/r/CALMp9eTt3xzAEoQ038bJQ9LN0ZOXrSWsN7xnNUD%2B0SS%3DWwF7Pg%40mail.gmail.com Link: https://lore.kernel.org/all/20230201132905.549148-2-eesposit@redhat.com Signed-off-by: Sean Christopherson Message-Id: <20230322011440.2195485-6-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/svm/svm.c | 5 +++++ arch/x86/kvm/vmx/nested.c | 3 +++ arch/x86/kvm/vmx/vmx.c | 5 +++++ arch/x86/kvm/vmx/vmx.h | 2 +- arch/x86/kvm/x86.c | 12 ++++++++++++ 6 files changed, 27 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 599aebec2d52..9583a110cf5f 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -653,7 +653,7 @@ void kvm_set_cpu_caps(void) F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) | F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) | - F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) + F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D) ); /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */ diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 85bb535fc321..b32edaf5a74b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -95,6 +95,7 @@ static const struct svm_direct_access_msrs { #endif { .index = MSR_IA32_SPEC_CTRL, .always = false }, { .index = MSR_IA32_PRED_CMD, .always = false }, + { .index = MSR_IA32_FLUSH_CMD, .always = false }, { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false }, { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, { .index = MSR_IA32_LASTINTFROMIP, .always = false }, @@ -4140,6 +4141,10 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, !!guest_has_pred_cmd_msr(vcpu)); + if (boot_cpu_has(X86_FEATURE_FLUSH_L1D)) + set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0, + !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D)); + /* For sev guests, the memory encryption bit is not reserved in CR3. 
*/ if (sev_guest(vcpu->kvm)) { best = kvm_find_cpuid_entry(vcpu, 0x8000001F); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 1bc2b80273c9..f63b28f46a71 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -654,6 +654,9 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, MSR_IA32_PRED_CMD, MSR_TYPE_W); + nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, + MSR_IA32_FLUSH_CMD, MSR_TYPE_W); + kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false); vmx->nested.force_msr_bitmap_recalc = false; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 29807be219b9..56e0c7ae961d 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -164,6 +164,7 @@ module_param(allow_smaller_maxphyaddr, bool, S_IRUGO); static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = { MSR_IA32_SPEC_CTRL, MSR_IA32_PRED_CMD, + MSR_IA32_FLUSH_CMD, MSR_IA32_TSC, #ifdef CONFIG_X86_64 MSR_FS_BASE, @@ -7720,6 +7721,10 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vmx_set_intercept_for_msr(vcpu, MSR_IA32_PRED_CMD, MSR_TYPE_W, !guest_has_pred_cmd_msr(vcpu)); + if (boot_cpu_has(X86_FEATURE_FLUSH_L1D)) + vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W, + !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D)); + set_cr4_guest_host_mask(vmx); vmx_write_encls_bitmap(vcpu, NULL); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 2acdc54bc34b..cb766f65a3eb 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -369,7 +369,7 @@ struct vcpu_vmx { struct lbr_desc lbr_desc; /* Save desired MSR intercept (read: pass-through) state */ -#define MAX_POSSIBLE_PASSTHROUGH_MSRS 15 +#define MAX_POSSIBLE_PASSTHROUGH_MSRS 16 struct { DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS); DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c83ec88da043..3c58dbae7b4c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3628,6 +3628,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); break; + case MSR_IA32_FLUSH_CMD: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D)) + return 1; + + if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH)) + return 1; + if (!data) + break; + + wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH); + break; case MSR_EFER: return set_efer(vcpu, msr_info); case MSR_K7_HWCR: -- cgit v1.2.3 From 400d2132288edbd6d500f45eab5d85526ca94e46 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 Mar 2023 18:14:40 -0700 Subject: KVM: SVM: Return the local "r" variable from svm_set_msr() Rename "r" to "ret" and actually return it from svm_set_msr() to reduce the probability of repeating the mistake of commit 723d5fb0ffe4 ("kvm: svm: Add IA32_FLUSH_CMD guest support"), which set "r" thinking that it would be propagated to the caller. Alternatively, the declaration of "r" could be moved into the handling of MSR_TSC_AUX, but that risks variable shadowing in the future. A wrapper for kvm_set_user_return_msr() would allow eliding a local variable, but that feels like delaying the inevitable. No functional change intended. 
Signed-off-by: Sean Christopherson Message-Id: <20230322011440.2195485-7-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index b32edaf5a74b..57f241c5a371 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2873,7 +2873,7 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct vcpu_svm *svm = to_svm(vcpu); - int r; + int ret = 0; u32 ecx = msr->index; u64 data = msr->data; @@ -2995,10 +2995,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) * guest via direct_access_msrs, and switch it via user return. */ preempt_disable(); - r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull); + ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull); preempt_enable(); - if (r) - return 1; + if (ret) + break; svm->tsc_aux = data; break; @@ -3056,7 +3056,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) default: return kvm_set_msr_common(vcpu, msr); } - return 0; + return ret; } static int msr_interception(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From b1932c5c19ddcbe9140b0583a0931b620c21ca02 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:45:58 -0800 Subject: KVM: x86: Rename kvm_init_msr_list() to clarify it inits multiple lists Rename kvm_init_msr_list() to kvm_init_msr_lists() to clarify that it initializes multiple lists: MSRs to save, emulated MSRs, and feature MSRs. No functional change intended. Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230311004618.920745-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 237c483b1230..087497aebded 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7084,7 +7084,7 @@ static void kvm_probe_msr_to_save(u32 msr_index) msrs_to_save[num_msrs_to_save++] = msr_index; } -static void kvm_init_msr_list(void) +static void kvm_init_msr_lists(void) { unsigned i; @@ -9452,7 +9452,7 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) kvm_caps.max_guest_tsc_khz = max; } kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits; - kvm_init_msr_list(); + kvm_init_msr_lists(); return 0; out_unwind_ops: -- cgit v1.2.3 From fb3146b4dc3bc6d0c0402a75f21d628eccf9bf8c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:45:59 -0800 Subject: KVM: x86: Add a helper to query whether or not a vCPU has ever run Add a helper to query if a vCPU has run so that KVM doesn't have to open code the check on last_vmentry_cpu being set to a magic value. No functional change intended. Suggested-by: Xiaoyao Li Cc: Like Xu Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230311004618.920745-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/x86.h | 5 +++++ 3 files changed, 7 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 9583a110cf5f..b736ddb42088 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -420,7 +420,7 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, * KVM_SET_CPUID{,2} again. 
To support this legacy behavior, check * whether the supplied CPUID data is equal to what's already set. */ - if (vcpu->arch.last_vmentry_cpu != -1) { + if (kvm_vcpu_has_run(vcpu)) { r = kvm_cpuid_check_equal(vcpu, e2, nent); if (r) return r; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 144c5a01cd77..8ced48797d59 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5393,7 +5393,7 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu) * Changing guest CPUID after KVM_RUN is forbidden, see the comment in * kvm_arch_vcpu_ioctl(). */ - KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm); + KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm); } void kvm_mmu_reset_context(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index a8167b47b8c8..754190af1791 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -83,6 +83,11 @@ static inline unsigned int __shrink_ple_window(unsigned int val, void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu); int kvm_check_nested_events(struct kvm_vcpu *vcpu); +static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.last_vmentry_cpu != -1; +} + static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu) { return vcpu->arch.exception.pending || -- cgit v1.2.3 From 5757f5b9562244e4b63f65576c16ca9eb21f81d2 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:00 -0800 Subject: KVM: x86: Add macros to track first...last VMX feature MSRs Add macros to track the range of VMX feature MSRs that are emulated by KVM to reduce the maintenance cost of extending the set of emulated MSRs. Note, KVM doesn't necessarily emulate all known/consumed VMX MSRs, e.g. PROCBASED_CTLS3 is consumed by KVM to enable IPI virtualization, but is not emulated as KVM doesn't emulate/virtualize IPI virtualization for nested guests. No functional change intended. Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230311004618.920745-4-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/svm/svm.c | 2 +- arch/x86/kvm/vmx/vmx.c | 8 ++++---- arch/x86/kvm/x86.h | 8 ++++++++ 3 files changed, 13 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 70183d2271b5..7584eb85410b 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4124,7 +4124,7 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) { switch (index) { case MSR_IA32_MCG_EXT_CTL: - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: return false; case MSR_IA32_SMBASE: if (!IS_ENABLED(CONFIG_KVM_SMM)) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c18f74899f01..e89340dfa322 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1945,7 +1945,7 @@ static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx, static int vmx_get_msr_feature(struct kvm_msr_entry *msr) { switch (msr->index) { - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: if (!nested) return 1; return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); @@ -2030,7 +2030,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0]; break; - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + case KVM_FIRST_EMULATED_VMX_MSR ... 
KVM_LAST_EMULATED_VMX_MSR: if (!nested_vmx_allowed(vcpu)) return 1; if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, @@ -2384,7 +2384,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmx->msr_ia32_sgxlepubkeyhash [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data; break; - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: if (!msr_info->host_initiated) return 1; /* they are read-only */ if (!nested_vmx_allowed(vcpu)) @@ -6974,7 +6974,7 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index) * real mode. */ return enable_unrestricted_guest || emulate_invalid_guest_state; - case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: + case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR: return nested; case MSR_AMD64_VIRT_SPEC_CTRL: case MSR_AMD64_TSC_RATIO: diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 754190af1791..4bc483d082ee 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -40,6 +40,14 @@ void kvm_spurious_fault(void); failed; \ }) +/* + * The first...last VMX feature MSRs that are emulated by KVM. This may or may + * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an + * associated feature that KVM supports for nested virtualization. + */ +#define KVM_FIRST_EMULATED_VMX_MSR MSR_IA32_VMX_BASIC +#define KVM_LAST_EMULATED_VMX_MSR MSR_IA32_VMX_VMFUNC + #define KVM_DEFAULT_PLE_GAP 128 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096 #define KVM_DEFAULT_PLE_WINDOW_GROW 2 -- cgit v1.2.3 From 9eb6ba31db27253a11441368d2801c1eedc48b4f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:01 -0800 Subject: KVM: x86: Generate set of VMX feature MSRs using first/last definitions Add VMX MSRs to the runtime list of feature MSRs by iterating over the range of emulated MSRs instead of manually defining each MSR in the "all" list. Using the range definition reduces the cost of emulating a new VMX MSR, e.g. prevents forgetting to add an MSR to the list. Extracting the VMX MSRs from the "all" list, which is a compile-time constant, also shrinks the list to the point where the compiler can heavily optimize code that iterates over the list. No functional change intended. Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230311004618.920745-5-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 53 ++++++++++++++++++++++------------------------------- 1 file changed, 22 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 087497aebded..6b667c651951 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1543,36 +1543,19 @@ static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; static unsigned num_emulated_msrs; /* - * List of msr numbers which are used to expose MSR-based features that - * can be used by a hypervisor to validate requested CPU features. + * List of MSRs that control the existence of MSR-based features, i.e. MSRs + * that are effectively CPUID leafs. VMX MSRs are also included in the set of + * feature MSRs, but are handled separately to allow expedited lookups. 
*/ -static const u32 msr_based_features_all[] = { - MSR_IA32_VMX_BASIC, - MSR_IA32_VMX_TRUE_PINBASED_CTLS, - MSR_IA32_VMX_PINBASED_CTLS, - MSR_IA32_VMX_TRUE_PROCBASED_CTLS, - MSR_IA32_VMX_PROCBASED_CTLS, - MSR_IA32_VMX_TRUE_EXIT_CTLS, - MSR_IA32_VMX_EXIT_CTLS, - MSR_IA32_VMX_TRUE_ENTRY_CTLS, - MSR_IA32_VMX_ENTRY_CTLS, - MSR_IA32_VMX_MISC, - MSR_IA32_VMX_CR0_FIXED0, - MSR_IA32_VMX_CR0_FIXED1, - MSR_IA32_VMX_CR4_FIXED0, - MSR_IA32_VMX_CR4_FIXED1, - MSR_IA32_VMX_VMCS_ENUM, - MSR_IA32_VMX_PROCBASED_CTLS2, - MSR_IA32_VMX_EPT_VPID_CAP, - MSR_IA32_VMX_VMFUNC, - +static const u32 msr_based_features_all_except_vmx[] = { MSR_AMD64_DE_CFG, MSR_IA32_UCODE_REV, MSR_IA32_ARCH_CAPABILITIES, MSR_IA32_PERF_CAPABILITIES, }; -static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)]; +static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) + + (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)]; static unsigned int num_msr_based_features; /* @@ -7009,6 +6992,18 @@ out: return r; } +static void kvm_probe_feature_msr(u32 msr_index) +{ + struct kvm_msr_entry msr = { + .index = msr_index, + }; + + if (kvm_get_msr_feature(&msr)) + return; + + msr_based_features[num_msr_based_features++] = msr_index; +} + static void kvm_probe_msr_to_save(u32 msr_index) { u32 dummy[2]; @@ -7110,15 +7105,11 @@ static void kvm_init_msr_lists(void) emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i]; } - for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) { - struct kvm_msr_entry msr; + for (i = KVM_FIRST_EMULATED_VMX_MSR; i <= KVM_LAST_EMULATED_VMX_MSR; i++) + kvm_probe_feature_msr(i); - msr.index = msr_based_features_all[i]; - if (kvm_get_msr_feature(&msr)) - continue; - - msr_based_features[num_msr_based_features++] = msr_based_features_all[i]; - } + for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) + kvm_probe_feature_msr(msr_based_features_all_except_vmx[i]); } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, -- cgit v1.2.3 From 0094f62c7eaaaf53a011a4e46f9f32e5f3295e8c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:03 -0800 Subject: KVM: x86: Disallow writes to immutable feature MSRs after KVM_RUN Disallow writes to feature MSRs after KVM_RUN to prevent userspace from changing the vCPU model after running the vCPU. Similar to guest CPUID, KVM uses feature MSRs to configure intercepts, determine what operations are/aren't allowed, etc. Changing the capabilities while the vCPU is active will at best yield unpredictable guest behavior, and at worst could be dangerous to KVM. Allow writing the current value, e.g. so that userspace can blindly set all MSRs when emulating RESET, and unconditionally allow writes to MSR_IA32_UCODE_REV so that userspace can emulate patch loads. Special case the VMX MSRs to keep the generic list small, i.e. so that KVM can do a linear walk of the generic list without incurring meaningful overhead. 
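From the userspace side, the rule is that only identical writes succeed once the
vCPU has run; a hypothetical RESET-emulation helper might look like:

    #include <err.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Hypothetical VMM RESET path: re-writing the previously saved value is
     * still accepted after KVM_RUN; writing a different value is now rejected. */
    static void restore_feature_msr(int vcpu_fd, __u32 index, __u64 saved)
    {
            struct {
                    struct kvm_msrs hdr;
                    struct kvm_msr_entry entry;
            } msrs = {
                    .hdr.nmsrs = 1,
                    .entry = { .index = index, .data = saved },
            };

            /* KVM_SET_MSRS returns the number of MSRs actually written. */
            if (ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) != 1)
                    errx(1, "KVM_SET_MSRS rejected MSR 0x%x", index);
    }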
Cc: Like Xu Cc: Yu Zhang Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20230311004618.920745-7-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6b667c651951..cf33b7d910a6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1558,6 +1558,25 @@ static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) + (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)]; static unsigned int num_msr_based_features; +/* + * All feature MSRs except uCode revID, which tracks the currently loaded uCode + * patch, are immutable once the vCPU model is defined. + */ +static bool kvm_is_immutable_feature_msr(u32 msr) +{ + int i; + + if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR) + return true; + + for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) { + if (msr == msr_based_features_all_except_vmx[i]) + return msr != MSR_IA32_UCODE_REV; + } + + return false; +} + /* * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM * does not yet virtualize. These include: @@ -2175,6 +2194,22 @@ static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { + u64 val; + + /* + * Disallow writes to immutable feature MSRs after KVM_RUN. KVM does + * not support modifying the guest vCPU model on the fly, e.g. changing + * the nVMX capabilities while L2 is running is nonsensical. Ignore + * writes of the same value, e.g. to allow userspace to blindly stuff + * all MSRs when emulating RESET. + */ + if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index)) { + if (do_get_msr(vcpu, index, &val) || *data != val) + return -EINVAL; + + return 0; + } + return kvm_set_msr_ignored_check(vcpu, index, *data, true); } -- cgit v1.2.3 From 3a6de51a437fb4d2433f8a99fb59f43866cdbb98 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:04 -0800 Subject: KVM: x86/pmu: WARN and bug the VM if PMU is refreshed after vCPU has run Now that KVM disallows changing feature MSRs, i.e. PERF_CAPABILITIES, after running a vCPU, WARN and bug the VM if the PMU is refreshed after the vCPU has run. Note, KVM has disallowed CPUID updates after running a vCPU since commit feb627e8d6f6 ("KVM: x86: Forbid KVM_SET_CPUID{,2} after KVM_RUN"), i.e. PERF_CAPABILITIES was the only remaining way to trigger a PMU refresh after KVM_RUN. 
Cc: Like Xu Link: https://lore.kernel.org/r/20230311004618.920745-8-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 3 +++ arch/x86/kvm/x86.c | 10 +++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 29492c2a0c82..136184fe9e92 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -589,6 +589,9 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) */ void kvm_pmu_refresh(struct kvm_vcpu *vcpu) { + if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm)) + return; + bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX); static_call(kvm_x86_pmu_refresh)(vcpu); } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index cf33b7d910a6..a86ad45a53b8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3632,9 +3632,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data & ~kvm_caps.supported_perf_cap) return 1; + /* + * Note, this is not just a performance optimization! KVM + * disallows changing feature MSRs after the vCPU has run; PMU + * refresh will bug the VM if called after the vCPU has run. + */ + if (vcpu->arch.perf_capabilities == data) + break; + vcpu->arch.perf_capabilities = data; kvm_pmu_refresh(vcpu); - return 0; + break; case MSR_EFER: return set_efer(vcpu, msr_info); case MSR_K7_HWCR: -- cgit v1.2.3 From 957d0f70e97bb2257d8bf746ff2f524b793751b3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 10 Mar 2023 16:46:05 -0800 Subject: KVM: x86/pmu: Zero out LBR capabilities during PMU refresh Zero out the LBR capabilities during PMU refresh to avoid exposing LBRs to the guest against userspace's wishes. If userspace modifies the guest's CPUID model or invokes KVM_CAP_PMU_CAPABILITY to disable vPMU after an initial KVM_SET_CPUID2, but before the first KVM_RUN, KVM will retain the previous LBR info due to bailing before refreshing the LBR descriptor. Note, this is a very theoretical bug, there is no known use case where a VMM would deliberately enable the vPMU via KVM_SET_CPUID2, and then later disable the vPMU. Link: https://lore.kernel.org/r/20230311004618.920745-9-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index e8a3be0b9df9..d889bb2a1de5 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -531,6 +531,16 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) pmu->pebs_enable_mask = ~0ull; pmu->pebs_data_cfg_mask = ~0ull; + memset(&lbr_desc->records, 0, sizeof(lbr_desc->records)); + + /* + * Setting passthrough of LBR MSRs is done only in the VM-Entry loop, + * and PMU refresh is disallowed after the vCPU has run, i.e. this code + * should never be reached while KVM is passing through MSRs. + */ + if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm)) + return; + entry = kvm_find_cpuid_entry(vcpu, 0xa); if (!entry || !vcpu->kvm->arch.enable_pmu) return; -- cgit v1.2.3 From cdd2fbf6360e84c02ac4cea11733083c2ca8dace Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 14 Feb 2023 13:07:46 +0800 Subject: KVM: x86/pmu: Rename pmc_is_enabled() to pmc_is_globally_enabled() The name of function pmc_is_enabled() is a bit misleading. A PMC can be disabled either by PERF_CLOBAL_CTRL or by its corresponding EVTSEL. Append global semantics to its name. 
Suggested-by: Jim Mattson Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230214050757.9623-2-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 136184fe9e92..35d80151b853 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -93,7 +93,7 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops) #undef __KVM_X86_PMU_OP } -static inline bool pmc_is_enabled(struct kvm_pmc *pmc) +static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc) { return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc); } @@ -409,7 +409,7 @@ static void reprogram_counter(struct kvm_pmc *pmc) pmc_pause_counter(pmc); - if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc)) + if (!pmc_speculative_in_use(pmc) || !pmc_is_globally_enabled(pmc)) goto reprogram_complete; if (!check_pmu_event_filter(pmc)) @@ -688,7 +688,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id) for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) { pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i); - if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc)) + if (!pmc || !pmc_is_globally_enabled(pmc) || !pmc_speculative_in_use(pmc)) continue; /* Ignore checks for edge detect, pin control, invert and CMASK bits */ -- cgit v1.2.3 From 8bca8c5ce40b03862d782b9991da146909199327 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 26 Jan 2023 17:08:03 -0800 Subject: KVM: VMX: Refactor intel_pmu_{g,}set_msr() to align with other helpers Invert the flows in intel_pmu_{g,s}et_msr()'s case statements so that they follow the kernel's preferred style of: if () return return which is also the style used by every other {g,s}et_msr() helper (except AMD's PMU variant, which doesn't use a switch statement). Modify the "set" paths with costly side effects, i.e. that reprogram counters, to skip only the side effects, i.e. to perform reserved bits checks even if the value is unchanged. None of the reserved bits checks are expensive, so there's no strong justification for skipping them, and guarding only the side effect makes it slightly more obvious what is being skipped and why. No functional change intended (assuming no reserved bit bugs). 
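The "if () / return ... return" shorthand above refers to this shape; the
FIXED_CTR_CTRL case from the diff below is the concrete instance:

    case MSR_CORE_PERF_FIXED_CTR_CTRL:
            if (data & pmu->fixed_ctr_ctrl_mask)    /* reserved bits: always rejected */
                    return 1;

            if (pmu->fixed_ctr_ctrl != data)        /* only the costly reprogram is skipped */
                    reprogram_fixed_counters(pmu, data);
            break;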
Link: https://lkml.kernel.org/r/Y%2B6cfen%2FCpO3%2FdLO%40google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 109 ++++++++++++++++++++++--------------------- 1 file changed, 57 insertions(+), 52 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index d889bb2a1de5..c45bd10f80a1 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -351,45 +351,47 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr) { case MSR_CORE_PERF_FIXED_CTR_CTRL: msr_info->data = pmu->fixed_ctr_ctrl; - return 0; + break; case MSR_CORE_PERF_GLOBAL_STATUS: msr_info->data = pmu->global_status; - return 0; + break; case MSR_CORE_PERF_GLOBAL_CTRL: msr_info->data = pmu->global_ctrl; - return 0; + break; case MSR_CORE_PERF_GLOBAL_OVF_CTRL: msr_info->data = 0; - return 0; + break; case MSR_IA32_PEBS_ENABLE: msr_info->data = pmu->pebs_enable; - return 0; + break; case MSR_IA32_DS_AREA: msr_info->data = pmu->ds_area; - return 0; + break; case MSR_PEBS_DATA_CFG: msr_info->data = pmu->pebs_data_cfg; - return 0; + break; default: if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) { u64 val = pmc_read_counter(pmc); msr_info->data = val & pmu->counter_bitmask[KVM_PMC_GP]; - return 0; + break; } else if ((pmc = get_fixed_pmc(pmu, msr))) { u64 val = pmc_read_counter(pmc); msr_info->data = val & pmu->counter_bitmask[KVM_PMC_FIXED]; - return 0; + break; } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { msr_info->data = pmc->eventsel; - return 0; - } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) - return 0; + break; + } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) { + break; + } + return 1; } - return 1; + return 0; } static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) @@ -402,44 +404,43 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr) { case MSR_CORE_PERF_FIXED_CTR_CTRL: - if (pmu->fixed_ctr_ctrl == data) - return 0; - if (!(data & pmu->fixed_ctr_ctrl_mask)) { + if (data & pmu->fixed_ctr_ctrl_mask) + return 1; + + if (pmu->fixed_ctr_ctrl != data) reprogram_fixed_counters(pmu, data); - return 0; - } break; case MSR_CORE_PERF_GLOBAL_STATUS: - if (msr_info->host_initiated) { - pmu->global_status = data; - return 0; - } - break; /* RO MSR */ + if (!msr_info->host_initiated) + return 1; /* RO MSR */ + + pmu->global_status = data; + break; case MSR_CORE_PERF_GLOBAL_CTRL: - if (pmu->global_ctrl == data) - return 0; - if (kvm_valid_perf_global_ctrl(pmu, data)) { + if (!kvm_valid_perf_global_ctrl(pmu, data)) + return 1; + + if (pmu->global_ctrl != data) { diff = pmu->global_ctrl ^ data; pmu->global_ctrl = data; reprogram_counters(pmu, diff); - return 0; } break; case MSR_CORE_PERF_GLOBAL_OVF_CTRL: - if (!(data & pmu->global_ovf_ctrl_mask)) { - if (!msr_info->host_initiated) - pmu->global_status &= ~data; - return 0; - } + if (data & pmu->global_ovf_ctrl_mask) + return 1; + + if (!msr_info->host_initiated) + pmu->global_status &= ~data; break; case MSR_IA32_PEBS_ENABLE: - if (pmu->pebs_enable == data) - return 0; - if (!(data & pmu->pebs_enable_mask)) { + if (data & pmu->pebs_enable_mask) + return 1; + + if (pmu->pebs_enable != data) { diff = pmu->pebs_enable ^ data; pmu->pebs_enable = data; reprogram_counters(pmu, diff); - return 0; } break; case MSR_IA32_DS_AREA: @@ -447,15 +448,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu 
*vcpu, struct msr_data *msr_info) return 1; if (is_noncanonical_address(data, vcpu)) return 1; + pmu->ds_area = data; - return 0; + break; case MSR_PEBS_DATA_CFG: - if (pmu->pebs_data_cfg == data) - return 0; - if (!(data & pmu->pebs_data_cfg_mask)) { - pmu->pebs_data_cfg = data; - return 0; - } + if (data & pmu->pebs_data_cfg_mask) + return 1; + + pmu->pebs_data_cfg = data; break; default: if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) || @@ -463,33 +463,38 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if ((msr & MSR_PMC_FULL_WIDTH_BIT) && (data & ~pmu->counter_bitmask[KVM_PMC_GP])) return 1; + if (!msr_info->host_initiated && !(msr & MSR_PMC_FULL_WIDTH_BIT)) data = (s64)(s32)data; pmc->counter += data - pmc_read_counter(pmc); pmc_update_sample_period(pmc); - return 0; + break; } else if ((pmc = get_fixed_pmc(pmu, msr))) { pmc->counter += data - pmc_read_counter(pmc); pmc_update_sample_period(pmc); - return 0; + break; } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) { - if (data == pmc->eventsel) - return 0; reserved_bits = pmu->reserved_bits; if ((pmc->idx == 2) && (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED)) reserved_bits ^= HSW_IN_TX_CHECKPOINTED; - if (!(data & reserved_bits)) { + if (data & reserved_bits) + return 1; + + if (data != pmc->eventsel) { pmc->eventsel = data; kvm_pmu_request_counter_reprogam(pmc); - return 0; } - } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) - return 0; + break; + } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) { + break; + } + /* Not a known PMU MSR. */ + return 1; } - return 1; + return 0; } static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu) -- cgit v1.2.3 From 649bccd7fac98225525c79cf4b1cecc4bafdfc54 Mon Sep 17 00:00:00 2001 From: Like Xu Date: Tue, 14 Feb 2023 13:07:48 +0800 Subject: KVM: x86/pmu: Rewrite reprogram_counters() to improve performance A valid pmc is always tested before using pmu->reprogram_pmi. Eliminate this part of the redundancy by setting the counter's bitmask directly, and in addition, trigger KVM_REQ_PMU only once to save more cpu cycles. Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230214050757.9623-4-likexu@tencent.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/pmu_intel.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index c45bd10f80a1..eb291dfbe4aa 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -76,13 +76,13 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) static void reprogram_counters(struct kvm_pmu *pmu, u64 diff) { int bit; - struct kvm_pmc *pmc; - for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) { - pmc = intel_pmc_idx_to_pmc(pmu, bit); - if (pmc) - kvm_pmu_request_counter_reprogam(pmc); - } + if (!diff) + return; + + for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) + set_bit(bit, pmu->reprogram_pmi); + kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu)); } static bool intel_hw_event_available(struct kvm_pmc *pmc) -- cgit v1.2.3 From 4fa5843d81fdce9ba7e03f8a666e2f8e592aec2b Mon Sep 17 00:00:00 2001 From: Like Xu Date: Fri, 10 Mar 2023 19:33:49 +0800 Subject: KVM: x86/pmu: Fix a typo in kvm_pmu_request_counter_reprogam() Fix a "reprogam" => "reprogram" typo in kvm_pmu_request_counter_reprogam(). 
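For illustration, a standalone userspace sketch of the reprogram_counters() rewrite above (toy types; the real code sets bits in pmu->reprogram_pmi and raises a single KVM_REQ_PMU): each changed bit goes straight into the pending bitmap, and one request covers the whole batch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X86_PMC_IDX_MAX 64

struct toy_pmu {
	uint64_t reprogram_pmi;	/* pending-reprogram bitmap */
	bool req_pmu;		/* stand-in for KVM_REQ_PMU */
};

static void reprogram_counters(struct toy_pmu *pmu, uint64_t diff)
{
	int bit;

	if (!diff)
		return;

	for (bit = 0; bit < X86_PMC_IDX_MAX; bit++)
		if (diff & (1ull << bit))
			pmu->reprogram_pmi |= 1ull << bit;

	pmu->req_pmu = true;	/* one request for the whole batch */
}

int main(void)
{
	struct toy_pmu pmu = { 0 };

	reprogram_counters(&pmu, 0x5);	/* counters 0 and 2 changed */
	printf("pending=%#llx req=%d\n",
	       (unsigned long long)pmu.reprogram_pmi, pmu.req_pmu);
	return 0;
}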
Fixes: 68fb4757e867 ("KVM: x86/pmu: Defer reprogram_counter() to kvm_pmu_handle_event()") Signed-off-by: Like Xu Link: https://lore.kernel.org/r/20230310113349.31799-1-likexu@tencent.com [sean: trim the changelog] Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 2 +- arch/x86/kvm/pmu.h | 2 +- arch/x86/kvm/svm/pmu.c | 2 +- arch/x86/kvm/vmx/pmu_intel.c | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 35d80151b853..2226db9601e2 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -650,7 +650,7 @@ static void kvm_pmu_incr_counter(struct kvm_pmc *pmc) { pmc->prev_counter = pmc->counter; pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc); - kvm_pmu_request_counter_reprogam(pmc); + kvm_pmu_request_counter_reprogram(pmc); } static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc, diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index be62c16f2265..5c7bbf03b599 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -195,7 +195,7 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops) KVM_PMC_MAX_FIXED); } -static inline void kvm_pmu_request_counter_reprogam(struct kvm_pmc *pmc) +static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc) { set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi); kvm_make_request(KVM_REQ_PMU, pmc->vcpu); diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index cc77a0681800..5fa939e411d8 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -161,7 +161,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) data &= ~pmu->reserved_bits; if (data != pmc->eventsel) { pmc->eventsel = data; - kvm_pmu_request_counter_reprogam(pmc); + kvm_pmu_request_counter_reprogram(pmc); } return 0; } diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index eb291dfbe4aa..741efe2c497b 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -57,7 +57,7 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i); __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use); - kvm_pmu_request_counter_reprogam(pmc); + kvm_pmu_request_counter_reprogram(pmc); } } @@ -484,7 +484,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data != pmc->eventsel) { pmc->eventsel = data; - kvm_pmu_request_counter_reprogam(pmc); + kvm_pmu_request_counter_reprogram(pmc); } break; } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) { -- cgit v1.2.3 From dfdeda67ea2dac57d2d7506d65cfe5a0878ad285 Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Tue, 7 Mar 2023 14:13:56 +0000 Subject: KVM: x86/pmu: Prevent the PMU from counting disallowed events When counting "Instructions Retired" (0xc0) in a guest, KVM will occasionally increment the PMU counter regardless of if that event is being filtered. This is because some PMU events are incremented via kvm_pmu_trigger_event(), which doesn't know about the event filter. Add the event filter to kvm_pmu_trigger_event(), so events that are disallowed do not increment their counters. 
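A hedged, standalone sketch of the combined predicate described above, with toy stand-ins for the three checks (globally enabled, speculatively in use, passes the event filter); the real helpers live in arch/x86/kvm/pmu.c.

#include <stdbool.h>
#include <stdio.h>

struct toy_pmc {
	bool globally_enabled;
	bool speculative_in_use;
	bool passes_event_filter;
};

static bool pmc_event_is_allowed(const struct toy_pmc *pmc)
{
	return pmc->globally_enabled && pmc->speculative_in_use &&
	       pmc->passes_event_filter;
}

int main(void)
{
	/* Enabled and in use, but the event is filtered out: must not count. */
	struct toy_pmc pmc = { true, true, false };

	printf("allowed=%d\n", pmc_event_is_allowed(&pmc));
	return 0;
}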
Fixes: 9cd803d496e7 ("KVM: x86: Update vPMCs when retiring instructions") Signed-off-by: Aaron Lewis Reviewed-by: Like Xu Link: https://lore.kernel.org/r/20230307141400.1486314-2-aaronlewis@google.com [sean: prepend "pmc" to the new function] Signed-off-by: Sean Christopherson --- arch/x86/kvm/pmu.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 2226db9601e2..597a8f8f90b9 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -400,6 +400,12 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc) return is_fixed_event_allowed(filter, pmc->idx); } +static bool pmc_event_is_allowed(struct kvm_pmc *pmc) +{ + return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) && + check_pmu_event_filter(pmc); +} + static void reprogram_counter(struct kvm_pmc *pmc) { struct kvm_pmu *pmu = pmc_to_pmu(pmc); @@ -409,10 +415,7 @@ static void reprogram_counter(struct kvm_pmc *pmc) pmc_pause_counter(pmc); - if (!pmc_speculative_in_use(pmc) || !pmc_is_globally_enabled(pmc)) - goto reprogram_complete; - - if (!check_pmu_event_filter(pmc)) + if (!pmc_event_is_allowed(pmc)) goto reprogram_complete; if (pmc->counter < pmc->prev_counter) @@ -688,7 +691,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id) for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) { pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i); - if (!pmc || !pmc_is_globally_enabled(pmc) || !pmc_speculative_in_use(pmc)) + if (!pmc || !pmc_event_is_allowed(pmc)) continue; /* Ignore checks for edge detect, pin control, invert and CMASK bits */ -- cgit v1.2.3 From 5a23ad6510c82049f5ab3795841c30e8f3ca324d Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Sat, 8 Apr 2023 12:17:31 +0000 Subject: KVM: arm64: Prevent userspace from handling SMC64 arch range Though presently unused, there is an SMC64 view of the Arm architecture calls defined by the SMCCC. The documentation of the SMCCC filter states that the SMC64 range is reserved, but nothing actually prevents userspace from applying a filter to the range. Insert a range with the HANDLE action for the SMC64 arch range, thereby preventing userspace from imposing filtering/forwarding on it. 
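For illustration, a standalone C sketch of how the reserved ranges are derived from the SMCCC function-ID encoding (hand-rolled constants mirroring the spec, not the kernel headers): call type in bit 31, 32/64-bit convention in bit 30, owner in bits 29:24, function number in bits 15:0, with the Arm architecture owner being 0.

#include <stdint.h>
#include <stdio.h>

#define SMCCC_FAST_CALL   1u
#define SMCCC_SMC_32      0u
#define SMCCC_SMC_64      1u
#define SMCCC_FUNC_MASK   0xffffu

static uint32_t smccc_call_val(uint32_t type, uint32_t cc, uint32_t owner,
			       uint32_t func)
{
	return (type << 31) | (cc << 30) | (owner << 24) | func;
}

int main(void)
{
	printf("SMC32 arch range: %#x - %#x\n",
	       smccc_call_val(SMCCC_FAST_CALL, SMCCC_SMC_32, 0, 0),
	       smccc_call_val(SMCCC_FAST_CALL, SMCCC_SMC_32, 0, SMCCC_FUNC_MASK));
	printf("SMC64 arch range: %#x - %#x\n",
	       smccc_call_val(SMCCC_FAST_CALL, SMCCC_SMC_64, 0, 0),
	       smccc_call_val(SMCCC_FAST_CALL, SMCCC_SMC_64, 0, SMCCC_FUNC_MASK));
	return 0;
}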
Fixes: fb88707dd39b ("KVM: arm64: Use a maple tree to represent the SMCCC filter") Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230408121732.3411329-2-oliver.upton@linux.dev --- arch/arm64/kvm/hypercalls.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 3b6523f25afc..47254a361295 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -121,11 +121,17 @@ static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id) } } -#define SMCCC_ARCH_RANGE_BEGIN ARM_SMCCC_VERSION_FUNC_ID -#define SMCCC_ARCH_RANGE_END \ - ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ - ARM_SMCCC_SMC_32, \ - 0, ARM_SMCCC_FUNC_MASK) +#define SMC32_ARCH_RANGE_BEGIN ARM_SMCCC_VERSION_FUNC_ID +#define SMC32_ARCH_RANGE_END ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, ARM_SMCCC_FUNC_MASK) + +#define SMC64_ARCH_RANGE_BEGIN ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + 0, 0) +#define SMC64_ARCH_RANGE_END ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + 0, ARM_SMCCC_FUNC_MASK) static void init_smccc_filter(struct kvm *kvm) { @@ -139,10 +145,17 @@ static void init_smccc_filter(struct kvm *kvm) * to the guest. */ r = mtree_insert_range(&kvm->arch.smccc_filter, - SMCCC_ARCH_RANGE_BEGIN, SMCCC_ARCH_RANGE_END, + SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END, xa_mk_value(KVM_SMCCC_FILTER_HANDLE), GFP_KERNEL_ACCOUNT); WARN_ON_ONCE(r); + + r = mtree_insert_range(&kvm->arch.smccc_filter, + SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END, + xa_mk_value(KVM_SMCCC_FILTER_HANDLE), + GFP_KERNEL_ACCOUNT); + WARN_ON_ONCE(r); + } static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr) -- cgit v1.2.3 From 8a1300ff95185b23baff9c226a001c269108f9ea Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 4 Apr 2023 17:31:32 -0700 Subject: KVM: x86: Rename Hyper-V remote TLB hooks to match established scheme Rename the Hyper-V hooks for TLB flushing to match the naming scheme used by all the other TLB flushing hooks, e.g. in kvm_x86_ops, vendor code, arch hooks from common code, etc. 
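Purely as an illustration (invented toy_ops, not kvm_x86_ops), a standalone sketch of how these optional hooks are consumed: try the range-capable hook when present and fall back to a full remote flush when it is missing or fails.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_ops {
	int (*flush_remote_tlbs_range)(uint64_t start_gfn, uint64_t nr_pages);
};

static void flush_remote_tlbs_all(void)
{
	puts("full remote TLB flush");
}

static void toy_flush_range(const struct toy_ops *ops, uint64_t gfn, uint64_t pages)
{
	int ret = -1;	/* pessimistic default, like -EOPNOTSUPP */

	if (ops->flush_remote_tlbs_range)
		ret = ops->flush_remote_tlbs_range(gfn, pages);
	if (ret)
		flush_remote_tlbs_all();
}

int main(void)
{
	struct toy_ops no_hook = { .flush_remote_tlbs_range = NULL };

	toy_flush_range(&no_hook, 0x1000, 16);	/* no hook: falls back */
	return 0;
}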
Reviewed-by: David Matlack Reviewed-by: Vitaly Kuznetsov Link: https://lore.kernel.org/r/20230405003133.419177-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm-x86-ops.h | 4 ++-- arch/x86/include/asm/kvm_host.h | 10 +++++----- arch/x86/kvm/kvm_onhyperv.c | 13 ++++++------- arch/x86/kvm/kvm_onhyperv.h | 5 ++--- arch/x86/kvm/mmu/mmu.c | 12 ++++++------ arch/x86/kvm/svm/svm_onhyperv.h | 5 ++--- arch/x86/kvm/vmx/vmx.c | 5 ++--- 7 files changed, 25 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 8dc345cc6318..430ca22170e0 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -54,8 +54,8 @@ KVM_X86_OP(set_rflags) KVM_X86_OP(get_if_flag) KVM_X86_OP(flush_tlb_all) KVM_X86_OP(flush_tlb_current) -KVM_X86_OP_OPTIONAL(tlb_remote_flush) -KVM_X86_OP_OPTIONAL(tlb_remote_flush_with_range) +KVM_X86_OP_OPTIONAL(flush_remote_tlbs) +KVM_X86_OP_OPTIONAL(flush_remote_tlbs_range) KVM_X86_OP(flush_tlb_gva) KVM_X86_OP(flush_tlb_guest) KVM_X86_OP(vcpu_pre_run) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 991296a2f807..ec22101410ee 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1588,9 +1588,9 @@ struct kvm_x86_ops { void (*flush_tlb_all)(struct kvm_vcpu *vcpu); void (*flush_tlb_current)(struct kvm_vcpu *vcpu); - int (*tlb_remote_flush)(struct kvm *kvm); - int (*tlb_remote_flush_with_range)(struct kvm *kvm, - struct kvm_tlb_range *range); + int (*flush_remote_tlbs)(struct kvm *kvm); + int (*flush_remote_tlbs_range)(struct kvm *kvm, + struct kvm_tlb_range *range); /* * Flush any TLB entries associated with the given GVA. @@ -1794,8 +1794,8 @@ void kvm_arch_free_vm(struct kvm *kvm); #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) { - if (kvm_x86_ops.tlb_remote_flush && - !static_call(kvm_x86_tlb_remote_flush)(kvm)) + if (kvm_x86_ops.flush_remote_tlbs && + !static_call(kvm_x86_flush_remote_tlbs)(kvm)) return 0; else return -ENOTSUPP; diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c index 482d6639ef88..2e2d08da8a3f 100644 --- a/arch/x86/kvm/kvm_onhyperv.c +++ b/arch/x86/kvm/kvm_onhyperv.c @@ -29,8 +29,7 @@ static inline int hv_remote_flush_root_tdp(hpa_t root_tdp, return hyperv_flush_guest_mapping(root_tdp); } -int hv_remote_flush_tlb_with_range(struct kvm *kvm, - struct kvm_tlb_range *range) +int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range) { struct kvm_arch *kvm_arch = &kvm->arch; struct kvm_vcpu *vcpu; @@ -86,19 +85,19 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm, spin_unlock(&kvm_arch->hv_root_tdp_lock); return ret; } -EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range); +EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range); -int hv_remote_flush_tlb(struct kvm *kvm) +int hv_flush_remote_tlbs(struct kvm *kvm) { - return hv_remote_flush_tlb_with_range(kvm, NULL); + return hv_flush_remote_tlbs_range(kvm, NULL); } -EXPORT_SYMBOL_GPL(hv_remote_flush_tlb); +EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs); void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp) { struct kvm_arch *kvm_arch = &vcpu->kvm->arch; - if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) { + if (kvm_x86_ops.flush_remote_tlbs == hv_flush_remote_tlbs) { spin_lock(&kvm_arch->hv_root_tdp_lock); vcpu->arch.hv_root_tdp = root_tdp; if (root_tdp != kvm_arch->hv_root_tdp) diff --git a/arch/x86/kvm/kvm_onhyperv.h 
b/arch/x86/kvm/kvm_onhyperv.h index 287e98ef9df3..55d7fcb84cc1 100644 --- a/arch/x86/kvm/kvm_onhyperv.h +++ b/arch/x86/kvm/kvm_onhyperv.h @@ -7,9 +7,8 @@ #define __ARCH_X86_KVM_KVM_ONHYPERV_H__ #if IS_ENABLED(CONFIG_HYPERV) -int hv_remote_flush_tlb_with_range(struct kvm *kvm, - struct kvm_tlb_range *range); -int hv_remote_flush_tlb(struct kvm *kvm); +int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range); +int hv_flush_remote_tlbs(struct kvm *kvm); void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp); #else /* !CONFIG_HYPERV */ static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 4c874d4ec68f..7654be48ff69 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -270,9 +270,9 @@ static inline unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu, return mmu->get_guest_pgd(vcpu); } -static inline bool kvm_available_flush_tlb_with_range(void) +static inline bool kvm_available_flush_remote_tlbs_range(void) { - return kvm_x86_ops.tlb_remote_flush_with_range; + return kvm_x86_ops.flush_remote_tlbs_range; } void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, @@ -284,8 +284,8 @@ void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, range.start_gfn = start_gfn; range.pages = nr_pages; - if (kvm_x86_ops.tlb_remote_flush_with_range) - ret = static_call(kvm_x86_tlb_remote_flush_with_range)(kvm, &range); + if (kvm_x86_ops.flush_remote_tlbs_range) + ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, &range); if (ret) kvm_flush_remote_tlbs(kvm); @@ -1498,7 +1498,7 @@ restart: } } - if (need_flush && kvm_available_flush_tlb_with_range()) { + if (need_flush && kvm_available_flush_remote_tlbs_range()) { kvm_flush_remote_tlbs_gfn(kvm, gfn, level); return false; } @@ -6623,7 +6623,7 @@ restart: PG_LEVEL_NUM)) { kvm_zap_one_rmap_spte(kvm, rmap_head, sptep); - if (kvm_available_flush_tlb_with_range()) + if (kvm_available_flush_remote_tlbs_range()) kvm_flush_remote_tlbs_sptep(kvm, sptep); else need_tlb_flush = 1; diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h index cff838f15db5..823001033539 100644 --- a/arch/x86/kvm/svm/svm_onhyperv.h +++ b/arch/x86/kvm/svm/svm_onhyperv.h @@ -35,9 +35,8 @@ static inline __init void svm_hv_hardware_setup(void) if (npt_enabled && ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) { pr_info(KBUILD_MODNAME ": Hyper-V enlightened NPT TLB flush enabled\n"); - svm_x86_ops.tlb_remote_flush = hv_remote_flush_tlb; - svm_x86_ops.tlb_remote_flush_with_range = - hv_remote_flush_tlb_with_range; + svm_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs; + svm_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range; } if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d7bf14abdba1..8031bded75cc 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -8432,9 +8432,8 @@ static __init int hardware_setup(void) #if IS_ENABLED(CONFIG_HYPERV) if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH && enable_ept) { - vmx_x86_ops.tlb_remote_flush = hv_remote_flush_tlb; - vmx_x86_ops.tlb_remote_flush_with_range = - hv_remote_flush_tlb_with_range; + vmx_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs; + vmx_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range; } #endif -- cgit v1.2.3 From 9ed3bf411226f446a9795f2b49a15b9df98d7cf5 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 
4 Apr 2023 17:31:33 -0700 Subject: KVM: x86/mmu: Move filling of Hyper-V's TLB range struct into Hyper-V code Refactor Hyper-V's range-based TLB flushing API to take a gfn+nr_pages pair instead of a struct, and bury said struct in Hyper-V specific code. Passing along two params generates much better code for the common case where KVM is _not_ running on Hyper-V, as forwarding the flush on to Hyper-V's hv_flush_remote_tlbs_range() from kvm_flush_remote_tlbs_range() becomes a tail call. Cc: David Matlack Reviewed-by: David Matlack Reviewed-by: Vitaly Kuznetsov Link: https://lore.kernel.org/r/20230405003133.419177-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/include/asm/kvm_host.h | 9 ++------- arch/x86/kvm/kvm_onhyperv.c | 24 ++++++++++++++++++++---- arch/x86/kvm/kvm_onhyperv.h | 2 +- arch/x86/kvm/mmu/mmu.c | 8 ++------ 4 files changed, 25 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ec22101410ee..09eb37853cb1 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -482,11 +482,6 @@ struct kvm_mmu { u64 pdptrs[4]; /* pae */ }; -struct kvm_tlb_range { - u64 start_gfn; - u64 pages; -}; - enum pmc_type { KVM_PMC_GP = 0, KVM_PMC_FIXED, @@ -1589,8 +1584,8 @@ struct kvm_x86_ops { void (*flush_tlb_all)(struct kvm_vcpu *vcpu); void (*flush_tlb_current)(struct kvm_vcpu *vcpu); int (*flush_remote_tlbs)(struct kvm *kvm); - int (*flush_remote_tlbs_range)(struct kvm *kvm, - struct kvm_tlb_range *range); + int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn, + gfn_t nr_pages); /* * Flush any TLB entries associated with the given GVA. diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c index 2e2d08da8a3f..ded0bd688c65 100644 --- a/arch/x86/kvm/kvm_onhyperv.c +++ b/arch/x86/kvm/kvm_onhyperv.c @@ -10,17 +10,22 @@ #include "hyperv.h" #include "kvm_onhyperv.h" +struct kvm_hv_tlb_range { + u64 start_gfn; + u64 pages; +}; + static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush, void *data) { - struct kvm_tlb_range *range = data; + struct kvm_hv_tlb_range *range = data; return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn, range->pages); } static inline int hv_remote_flush_root_tdp(hpa_t root_tdp, - struct kvm_tlb_range *range) + struct kvm_hv_tlb_range *range) { if (range) return hyperv_flush_guest_mapping_range(root_tdp, @@ -29,7 +34,8 @@ static inline int hv_remote_flush_root_tdp(hpa_t root_tdp, return hyperv_flush_guest_mapping(root_tdp); } -int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range) +static int __hv_flush_remote_tlbs_range(struct kvm *kvm, + struct kvm_hv_tlb_range *range) { struct kvm_arch *kvm_arch = &kvm->arch; struct kvm_vcpu *vcpu; @@ -85,11 +91,21 @@ int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range) spin_unlock(&kvm_arch->hv_root_tdp_lock); return ret; } + +int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages) +{ + struct kvm_hv_tlb_range range = { + .start_gfn = start_gfn, + .pages = nr_pages, + }; + + return __hv_flush_remote_tlbs_range(kvm, &range); +} EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range); int hv_flush_remote_tlbs(struct kvm *kvm) { - return hv_flush_remote_tlbs_range(kvm, NULL); + return __hv_flush_remote_tlbs_range(kvm, NULL); } EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs); diff --git a/arch/x86/kvm/kvm_onhyperv.h b/arch/x86/kvm/kvm_onhyperv.h index 55d7fcb84cc1..ff127d313242 100644 --- 
a/arch/x86/kvm/kvm_onhyperv.h +++ b/arch/x86/kvm/kvm_onhyperv.h @@ -7,7 +7,7 @@ #define __ARCH_X86_KVM_KVM_ONHYPERV_H__ #if IS_ENABLED(CONFIG_HYPERV) -int hv_flush_remote_tlbs_range(struct kvm *kvm, struct kvm_tlb_range *range); +int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages); int hv_flush_remote_tlbs(struct kvm *kvm); void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp); #else /* !CONFIG_HYPERV */ diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 7654be48ff69..a7adbac0855c 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -278,15 +278,11 @@ static inline bool kvm_available_flush_remote_tlbs_range(void) void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages) { - struct kvm_tlb_range range; int ret = -EOPNOTSUPP; - range.start_gfn = start_gfn; - range.pages = nr_pages; - if (kvm_x86_ops.flush_remote_tlbs_range) - ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, &range); - + ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, start_gfn, + nr_pages); if (ret) kvm_flush_remote_tlbs(kvm); } -- cgit v1.2.3 From cf9f4c0eb1699d306e348b1fd0225af7b2c282d3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 4 Apr 2023 17:26:08 -0700 Subject: KVM: x86/mmu: Refresh CR0.WP prior to checking for emulated permission faults Refresh the MMU's snapshot of the vCPU's CR0.WP prior to checking for permission faults when emulating a guest memory access and CR0.WP may be guest owned. If the guest toggles only CR0.WP and triggers emulation of a supervisor write, e.g. when KVM is emulating UMIP, KVM may consume a stale CR0.WP, i.e. use stale protection bits metadata. Note, KVM passes through CR0.WP if and only if EPT is enabled as CR0.WP is part of the MMU role for legacy shadow paging, and SVM (NPT) doesn't support per-bit interception controls for CR0. Don't bother checking for EPT vs. NPT as the "old == new" check will always be true under NPT, i.e. the only cost is the read of vcpu->arch.cr4 (SVM unconditionally grabs CR0 from the VMCB on VM-Exit). Reported-by: Mathias Krause Link: https://lkml.kernel.org/r/677169b4-051f-fcae-756b-9a3e1bb9f8fe%40grsecurity.net Fixes: fb509f76acc8 ("KVM: VMX: Make CR0.WP a guest owned bit") Tested-by: Mathias Krause Link: https://lore.kernel.org/r/20230405002608.418442-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/mmu.h | 26 +++++++++++++++++++++++++- arch/x86/kvm/mmu/mmu.c | 15 +++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 89f532516a45..92d5a1924fc1 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -113,6 +113,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len); +void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, + struct kvm_mmu *mmu); int kvm_mmu_load(struct kvm_vcpu *vcpu); void kvm_mmu_unload(struct kvm_vcpu *vcpu); @@ -153,6 +155,24 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) vcpu->arch.mmu->root_role.level); } +static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, + struct kvm_mmu *mmu) +{ + /* + * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e. + * @mmu's snapshot of CR0.WP and thus all related paging metadata may + * be stale. 
Refresh CR0.WP and the metadata on-demand when checking + * for permission faults. Exempt nested MMUs, i.e. MMUs for shadowing + * nEPT and nNPT, as CR0.WP is ignored in both cases. Note, KVM does + * need to refresh nested_mmu, a.k.a. the walker used to translate L2 + * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP. + */ + if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu) + return; + + __kvm_mmu_refresh_passthrough_bits(vcpu, mmu); +} + /* * Check if a given access (described through the I/D, W/R and U/S bits of a * page fault error code pfec) causes a permission fault with the given PTE @@ -184,8 +204,12 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 implicit_access = access & PFERR_IMPLICIT_ACCESS; bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC; int index = (pfec + (not_smap << PFERR_RSVD_BIT)) >> 1; - bool fault = (mmu->permissions[index] >> pte_access) & 1; u32 errcode = PFERR_PRESENT_MASK; + bool fault; + + kvm_mmu_refresh_passthrough_bits(vcpu, mmu); + + fault = (mmu->permissions[index] >> pte_access) & 1; WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK)); if (unlikely(mmu->pkru_mask)) { diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 144c5a01cd77..4544605272e2 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5112,6 +5112,21 @@ kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs) return role; } +void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, + struct kvm_mmu *mmu) +{ + const bool cr0_wp = kvm_is_cr0_bit_set(vcpu, X86_CR0_WP); + + BUILD_BUG_ON((KVM_MMU_CR0_ROLE_BITS & KVM_POSSIBLE_CR0_GUEST_BITS) != X86_CR0_WP); + BUILD_BUG_ON((KVM_MMU_CR4_ROLE_BITS & KVM_POSSIBLE_CR4_GUEST_BITS)); + + if (is_cr0_wp(mmu) == cr0_wp) + return; + + mmu->cpu_role.base.cr0_wp = cr0_wp; + reset_guest_paging_metadata(vcpu, mmu); +} + static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu) { /* tdp_root_level is architecture forced level, use it if nonzero */ -- cgit v1.2.3 From 4984563823f0034d3533854c1b50e729f5191089 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 4 Apr 2023 17:23:59 -0700 Subject: KVM: nVMX: Emulate NOPs in L2, and PAUSE if it's not intercepted Extend VMX's nested intercept logic for emulated instructions to handle "pause" interception, in quotes because KVM's emulator doesn't filter out NOPs when checking for nested intercepts. Failure to allow emulation of NOPs results in KVM injecting a #UD into L2 on any NOP that collides with the emulator's definition of PAUSE, i.e. on all single-byte NOPs. For PAUSE itself, honor L1's PAUSE-exiting control, but ignore PLE to avoid unnecessarily injecting a #UD into L2. Per the SDM, the first execution of PAUSE after VM-Entry is treated as the beginning of a new loop, i.e. will never trigger a PLE VM-Exit, and so L1 can't expect any given execution of PAUSE to deterministically exit. ... the processor considers this execution to be the first execution of PAUSE in a loop. (It also does so for the first execution of PAUSE at CPL 0 after VM entry.) All that said, the PLE side of things is currently a moot point, as KVM doesn't expose PLE to L1. Note, vmx_check_intercept() is still wildly broken when L1 wants to intercept an instruction, as KVM injects a #UD instead of synthesizing a nested VM-Exit. That issue extends far beyond NOP/PAUSE and needs far more effort to fix, i.e. is a problem for the future. 
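A toy decision model (standalone, not the emulator) of the intercept check this adds: only a genuine PAUSE, i.e. a REPE-prefixed NOP, with PAUSE-exiting enabled in vmcs12 is treated as intercepted; plain single-byte NOPs keep being emulated for L2.

#include <stdbool.h>
#include <stdio.h>

enum { PREFIX_NONE, PREFIX_REPE };

static bool pause_is_intercepted(int rep_prefix, bool l1_pause_exiting)
{
	return rep_prefix == PREFIX_REPE && l1_pause_exiting;
}

int main(void)
{
	printf("single-byte NOP:        %d\n", pause_is_intercepted(PREFIX_NONE, true));
	printf("PAUSE, no PAUSE-exiting: %d\n", pause_is_intercepted(PREFIX_REPE, false));
	printf("PAUSE, PAUSE-exiting:    %d\n", pause_is_intercepted(PREFIX_REPE, true));
	return 0;
}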
Fixes: 07721feee46b ("KVM: nVMX: Don't emulate instructions in guest mode") Cc: Mathias Krause Cc: stable@vger.kernel.org Reviewed-by: Paolo Bonzini Link: https://lore.kernel.org/r/20230405002359.418138-1-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/vmx/vmx.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d7bf14abdba1..e06fcd6144b0 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7935,6 +7935,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu, /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */ break; + case x86_intercept_pause: + /* + * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides + * with vanilla NOPs in the emulator. Apply the interception + * check only to actual PAUSE instructions. Don't check + * PAUSE-loop-exiting, software can't expect a given PAUSE to + * exit, i.e. KVM is within its rights to allow L2 to execute + * the PAUSE. + */ + if ((info->rep_prefix != REPE_PREFIX) || + !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING)) + return X86EMUL_CONTINUE; + + break; + /* TODO: check more intercepts... */ default: break; -- cgit v1.2.3 From 6be3ae45f567cdebb5b9938137bf3445ceacf9fb Mon Sep 17 00:00:00 2001 From: Aaron Lewis Date: Tue, 4 Apr 2023 17:45:15 -0700 Subject: KVM: x86: Add a helper to handle filtering of unpermitted XCR0 features Add a helper, kvm_get_filtered_xcr0(), to dedup code that needs to account for XCR0 features that require explicit opt-in on a per-process basis. In addition to documenting when KVM should/shouldn't consult xstate_get_guest_group_perm(), the helper will also allow sanitizing the filtered XCR0 to avoid enumerating architecturally illegal XCR0 values, e.g. XTILE_CFG without XTILE_DATA. No functional changes intended. 
Signed-off-by: Aaron Lewis Reviewed-by: Mingwei Zhang [sean: rename helper, move to x86.h, massage changelog] Reviewed-by: Aaron Lewis Tested-by: Aaron Lewis Link: https://lore.kernel.org/r/20230405004520.421768-2-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/cpuid.c | 2 +- arch/x86/kvm/x86.c | 4 +--- arch/x86/kvm/x86.h | 13 +++++++++++++ 3 files changed, 15 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 9583a110cf5f..1daf0df6a9b8 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -1002,7 +1002,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->eax = entry->ebx = entry->ecx = 0; break; case 0xd: { - u64 permitted_xcr0 = kvm_caps.supported_xcr0 & xstate_get_guest_group_perm(); + u64 permitted_xcr0 = kvm_get_filtered_xcr0(); u64 permitted_xss = kvm_caps.supported_xss; entry->eax &= permitted_xcr0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 237c483b1230..181f155933cf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4531,9 +4531,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = 0; break; case KVM_CAP_XSAVE2: { - u64 guest_perm = xstate_get_guest_group_perm(); - - r = xstate_required_size(kvm_caps.supported_xcr0 & guest_perm, false); + r = xstate_required_size(kvm_get_filtered_xcr0(), false); if (r < sizeof(struct kvm_xsave)) r = sizeof(struct kvm_xsave); break; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index a8167b47b8c8..9589c86812eb 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -315,6 +315,19 @@ extern struct kvm_caps kvm_caps; extern bool enable_pmu; +/* + * Get a filtered version of KVM's supported XCR0 that strips out dynamic + * features for which the current process doesn't (yet) have permission to use. + * This is intended to be used only when enumerating support to userspace, + * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2, it does NOT need to be + * used to check/restrict guest behavior as KVM rejects KVM_SET_CPUID{2} if + * userspace attempts to enable unpermitted features. + */ +static inline u64 kvm_get_filtered_xcr0(void) +{ + return kvm_caps.supported_xcr0 & xstate_get_guest_group_perm(); +} + static inline bool kvm_mpx_supported(void) { return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)) -- cgit v1.2.3 From 55cd57b596e86140503214eafc8fb62c9c544e8f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 4 Apr 2023 17:45:16 -0700 Subject: KVM: x86: Filter out XTILE_CFG if XTILE_DATA isn't permitted Filter out XTILE_CFG from the supported XCR0 reported to userspace if the current process doesn't have access to XTILE_DATA. Attempting to set XTILE_CFG in XCR0 will #GP if XTILE_DATA is also not set, and so keeping XTILE_CFG as supported results in explosions if userspace feeds KVM_GET_SUPPORTED_CPUID back into KVM and the guest doesn't sanity check CPUID. 
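A hedged userspace sketch of the filtering described in the last two commits (constants mirror the XFEATURE bit numbers; process_perm stands in for xstate_get_guest_group_perm(), which always contains the non-dynamic features): dynamic features require the per-process opt-in, and XTILE_CFG is dropped whenever XTILE_DATA is not permitted.

#include <stdint.h>
#include <stdio.h>

#define XFEATURE_MASK_XTILE_CFG  (1ull << 17)
#define XFEATURE_MASK_XTILE_DATA (1ull << 18)

static uint64_t filtered_xcr0(uint64_t supported, uint64_t process_perm)
{
	uint64_t xcr0 = supported;

	if (xcr0 & XFEATURE_MASK_XTILE_DATA) {
		xcr0 &= process_perm;	/* dynamic features need explicit opt-in */
		/* XTILE_CFG without XTILE_DATA is an illegal XCR0: drop it. */
		if (!(xcr0 & XFEATURE_MASK_XTILE_DATA))
			xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
	}
	return xcr0;
}

int main(void)
{
	uint64_t supported = 0xffull | XFEATURE_MASK_XTILE_CFG |
			     XFEATURE_MASK_XTILE_DATA;

	/* No opt-in for the dynamic AMX state: both XTILE bits disappear. */
	printf("%#llx\n", (unsigned long long)filtered_xcr0(supported, 0xffull));
	return 0;
}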
Fixes: 445ecdf79be0 ("kvm: x86: Exclude unpermitted xfeatures at KVM_GET_SUPPORTED_CPUID") Reported-by: Aaron Lewis Reviewed-by: Aaron Lewis Tested-by: Aaron Lewis Link: https://lore.kernel.org/r/20230405004520.421768-3-seanjc@google.com Signed-off-by: Sean Christopherson --- arch/x86/kvm/x86.h | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 9589c86812eb..bca706406189 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -3,6 +3,7 @@ #define ARCH_X86_KVM_X86_H #include +#include #include #include #include "kvm_cache_regs.h" @@ -325,7 +326,22 @@ extern bool enable_pmu; */ static inline u64 kvm_get_filtered_xcr0(void) { - return kvm_caps.supported_xcr0 & xstate_get_guest_group_perm(); + u64 permitted_xcr0 = kvm_caps.supported_xcr0; + + BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA); + + if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) { + permitted_xcr0 &= xstate_get_guest_group_perm(); + + /* + * Treat XTILE_CFG as unsupported if the current process isn't + * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in + * XCR0 without setting XTILE_DATA is architecturally illegal. + */ + if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA)) + permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG; + } + return permitted_xcr0; } static inline bool kvm_mpx_supported(void) -- cgit v1.2.3 From 49e5d16b6fc003407a33a9961b4bcbb970bd1c76 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 12 Apr 2023 06:27:33 +0000 Subject: KVM: arm64: vgic: Don't acquire its_lock before config_lock commit f00327731131 ("KVM: arm64: Use config_lock to protect vgic state") was meant to rectify a longstanding lock ordering issue in KVM where the kvm->lock is taken while holding vcpu->mutex. As it so happens, the aforementioned commit introduced yet another locking issue by acquiring the its_lock before acquiring the config lock. This is obviously wrong, especially considering that the lock ordering is well documented in vgic.c. Reshuffle the locks once more to take the config_lock before the its_lock. While at it, sprinkle in the lockdep hinting that has become popular as of late to keep lockdep apprised of our ordering. Cc: stable@vger.kernel.org Fixes: f00327731131 ("KVM: arm64: Use config_lock to protect vgic state") Signed-off-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230412062733.988229-1-oliver.upton@linux.dev --- arch/arm64/kvm/vgic/vgic-its.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 7713cd06104e..750e51e3779a 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -1958,6 +1958,16 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) mutex_init(&its->its_lock); mutex_init(&its->cmd_lock); + /* Yep, even more trickery for lock ordering... 
*/ +#ifdef CONFIG_LOCKDEP + mutex_lock(&dev->kvm->arch.config_lock); + mutex_lock(&its->cmd_lock); + mutex_lock(&its->its_lock); + mutex_unlock(&its->its_lock); + mutex_unlock(&its->cmd_lock); + mutex_unlock(&dev->kvm->arch.config_lock); +#endif + its->vgic_its_base = VGIC_ADDR_UNDEF; INIT_LIST_HEAD(&its->device_list); @@ -2752,15 +2762,14 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) return 0; mutex_lock(&kvm->lock); - mutex_lock(&its->its_lock); if (!lock_all_vcpus(kvm)) { - mutex_unlock(&its->its_lock); mutex_unlock(&kvm->lock); return -EBUSY; } mutex_lock(&kvm->arch.config_lock); + mutex_lock(&its->its_lock); switch (attr) { case KVM_DEV_ARM_ITS_CTRL_RESET: @@ -2774,9 +2783,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr) break; } + mutex_unlock(&its->its_lock); mutex_unlock(&kvm->arch.config_lock); unlock_all_vcpus(kvm); - mutex_unlock(&its->its_lock); mutex_unlock(&kvm->lock); return ret; } -- cgit v1.2.3 From 55b5bac15939dec3cbcbee1f6271bc3a4afd4534 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 8 Apr 2023 17:04:23 +0100 Subject: KVM: arm64: nvhe: Synchronise with page table walker on vcpu run When taking an exception between the EL1&0 translation regime and the EL2 translation regime, the page table walker is allowed to complete the walks started from EL0 or EL1 while running at EL2. It means that altering the system registers that define the EL1&0 translation regime is fraught with danger *unless* we wait for the completion of such walk with a DSB (R_LFHQG and subsequent statements in the ARM ARM). We already did the right thing for other external agents (SPE, TRBE), but not the PTW. Rework the existing SPE/TRBE synchronisation to include the PTW, and add the missing DSB on guest exit. Signed-off-by: Marc Zyngier Reviewed-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/debug-sr.c | 2 -- arch/arm64/kvm/hyp/nvhe/switch.c | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c index 2673bde62fad..d756b939f296 100644 --- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c @@ -37,7 +37,6 @@ static void __debug_save_spe(u64 *pmscr_el1) /* Now drain all buffered data to memory */ psb_csync(); - dsb(nsh); } static void __debug_restore_spe(u64 pmscr_el1) @@ -69,7 +68,6 @@ static void __debug_save_trace(u64 *trfcr_el1) isb(); /* Drain the trace buffer to memory */ tsb_csync(); - dsb(nsh); } static void __debug_restore_trace(u64 trfcr_el1) diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c2cb46ca4fb6..71fa16a0dc77 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -272,6 +272,17 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) */ __debug_save_host_buffers_nvhe(vcpu); + /* + * We're about to restore some new MMU state. Make sure + * ongoing page-table walks that have started before we + * trapped to EL2 have completed. This also synchronises the + * above disabling of SPE and TRBE. + * + * See DDI0487I.a D8.1.5 "Out-of-context translation regimes", + * rule R_LFHQG and subsequent information statements. 
+ */ + dsb(nsh); + __kvm_adjust_pc(vcpu); /* @@ -306,6 +317,13 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __timer_disable_traps(vcpu); __hyp_vgic_save_state(vcpu); + /* + * Same thing as before the guest run: we're about to switch + * the MMU context, so let's make sure we don't have any + * ongoing EL1&0 translations. + */ + dsb(nsh); + __deactivate_traps(vcpu); __load_host_stage2(); -- cgit v1.2.3 From a6610435ac17de1ac727c90ad62c723d86c7ea36 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 13 Apr 2023 14:23:42 +0100 Subject: KVM: arm64: Handle 32bit CNTPCTSS traps When CNTPOFF isn't implemented and that we have a non-zero counter offset, CNTPCT and CNTPCTSS are trapped. We properly handle the former, but not the latter, as it is not present in the sysreg table (despite being actually handled in the code). Bummer. Just populate the cp15_64 table with the missing register. Reported-by: Reiji Watanabe Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/sysreg.h | 1 + arch/arm64/kvm/sys_regs.c | 1 + 2 files changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index f8da9e1b0c11..a43f21559c3e 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -403,6 +403,7 @@ #define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1) #define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0) #define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0) +#define SYS_AARCH32_CNTPCTSS sys_reg(0, 8, 0, 14, 0) #define __PMEV_op2(n) ((n) & 0x7) #define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3)) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index be7c2598e563..feca77083a5c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2538,6 +2538,7 @@ static const struct sys_reg_desc cp15_64_regs[] = { { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */ { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */ { SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer }, + { SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer }, }; static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n, -- cgit v1.2.3 From 7e1b2329c205d0a08ebaf3f619318a8a18f36644 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 8 Apr 2023 17:04:24 +0100 Subject: KVM: arm64: nvhe: Synchronise with page table walker on TLBI A TLBI from EL2 impacting EL1 involves messing with the EL1&0 translation regime, and the page table walker may still be performing speculative walks. Piggyback on the existing DSBs to always have a DSB ISH that will synchronise all load/store operations that the PTW may still have. Reviewed-by: Oliver Upton Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/nvhe/tlb.c | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c index d296d617f589..978179133f4b 100644 --- a/arch/arm64/kvm/hyp/nvhe/tlb.c +++ b/arch/arm64/kvm/hyp/nvhe/tlb.c @@ -15,8 +15,31 @@ struct tlb_inv_context { }; static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, - struct tlb_inv_context *cxt) + struct tlb_inv_context *cxt, + bool nsh) { + /* + * We have two requirements: + * + * - ensure that the page table updates are visible to all + * CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN + * being either ish or nsh, depending on the invalidation + * type. 
+ * + * - complete any speculative page table walk started before + * we trapped to EL2 so that we can mess with the MM + * registers out of context, for which dsb(nsh) is enough + * + * The composition of these two barriers is a dsb(DOMAIN), and + * the 'nsh' parameter tracks the distinction between + * Inner-Shareable and Non-Shareable, as specified by the + * callers. + */ + if (nsh) + dsb(nsh); + else + dsb(ish); + if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { u64 val; @@ -60,10 +83,8 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, { struct tlb_inv_context cxt; - dsb(ishst); - /* Switch to requested VMID */ - __tlb_switch_to_guest(mmu, &cxt); + __tlb_switch_to_guest(mmu, &cxt, false); /* * We could do so much better if we had the VA as well. @@ -113,10 +134,8 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) { struct tlb_inv_context cxt; - dsb(ishst); - /* Switch to requested VMID */ - __tlb_switch_to_guest(mmu, &cxt); + __tlb_switch_to_guest(mmu, &cxt, false); __tlbi(vmalls12e1is); dsb(ish); @@ -130,7 +149,7 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu) struct tlb_inv_context cxt; /* Switch to requested VMID */ - __tlb_switch_to_guest(mmu, &cxt); + __tlb_switch_to_guest(mmu, &cxt, false); __tlbi(vmalle1); asm volatile("ic iallu"); @@ -142,7 +161,8 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu) void __kvm_flush_vm_context(void) { - dsb(ishst); + /* Same remark as in __tlb_switch_to_guest() */ + dsb(ish); __tlbi(alle1is); /* -- cgit v1.2.3 From 8442d65373c6316876208c1ad27729e9682fa3cf Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 8 Apr 2023 17:04:25 +0100 Subject: KVM: arm64: pkvm: Document the side effects of kvm_flush_dcache_to_poc() We rely on the presence of a DSB at the end of kvm_flush_dcache_to_poc() that, on top of ensuring completion of the cache clean, also covers the speculative page table walk started from EL1. Document this dependency. Signed-off-by: Marc Zyngier Reviewed-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 552653fa18be..2e9ec4a2a4a3 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -297,6 +297,13 @@ int __pkvm_prot_finalize(void) params->vttbr = kvm_get_vttbr(mmu); params->vtcr = host_mmu.arch.vtcr; params->hcr_el2 |= HCR_VM; + + /* + * The CMO below not only cleans the updated params to the + * PoC, but also provides the DSB that ensures ongoing + * page-table walks that have started before we trapped to EL2 + * have completed. + */ kvm_flush_dcache_to_poc(params, sizeof(*params)); write_sysreg(params->hcr_el2, hcr_el2); -- cgit v1.2.3 From 1ff2755d6800d60bee96fb303cbbf30f9bb483a2 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 8 Apr 2023 17:04:26 +0100 Subject: KVM: arm64: vhe: Synchronise with page table walker on MMU update Contrary to nVHE, VHE is a lot easier when it comes to dealing with speculative page table walks started at EL1. As we only change EL1&0 translation regime when context-switching, we already benefit from the effect of the DSB that sits in the context switch code. We only need to take care of it in the NV case, where we can flip between between two EL1 contexts (one of them being the virtual EL2) without a context switch. 
Signed-off-by: Marc Zyngier Reviewed-by: Oliver Upton --- arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c index 7b44f6b3b547..b35a178e7e0d 100644 --- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c @@ -13,6 +13,7 @@ #include #include #include +#include /* * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and @@ -69,6 +70,17 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu) host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; __sysreg_save_user_state(host_ctxt); + /* + * When running a normal EL1 guest, we only load a new vcpu + * after a context switch, which imvolves a DSB, so all + * speculative EL1&0 walks will have already completed. + * If running NV, the vcpu may transition between vEL1 and + * vEL2 without a context switch, so make sure we complete + * those walks before loading a new context. + */ + if (vcpu_has_nv(vcpu)) + dsb(nsh); + /* * Load guest EL1 and user state * -- cgit v1.2.3 From bcf3e7da3ad3bfea38ac6ba9f56b99b2877af51f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 8 Apr 2023 17:04:27 +0100 Subject: KVM: arm64: vhe: Drop extra isb() on guest exit __kvm_vcpu_run_vhe() end on VHE with an isb(). However, this function is only reachable via kvm_call_hyp_ret(), which already contains an isb() in order to mimick the behaviour of nVHE and provide a context synchronisation event. We thus have two isb()s back to back, which is one too many. Drop the first one and solely rely on the one in the helper. Signed-off-by: Marc Zyngier Reviewed-by: Oliver Upton --- arch/arm64/kvm/hyp/vhe/switch.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index cd3f3117bf16..3d868e84c7a0 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -227,11 +227,10 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) /* * When we exit from the guest we change a number of CPU configuration - * parameters, such as traps. Make sure these changes take effect - * before running the host or additional guests. + * parameters, such as traps. We rely on the isb() in kvm_call_hyp*() + * to make sure these changes take effect before running the host or + * additional guests. */ - isb(); - return ret; } -- cgit v1.2.3 From 4ff910be01c0ca28c2ea8b354dd47a3a17524489 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Tue, 18 Apr 2023 19:18:51 -0700 Subject: KVM: arm64: Acquire mp_state_lock in kvm_arch_vcpu_ioctl_vcpu_init() kvm_arch_vcpu_ioctl_vcpu_init() doesn't acquire mp_state_lock when setting the mp_state to KVM_MP_STATE_RUNNABLE. Fix the code to acquire the lock. Signed-off-by: Reiji Watanabe [maz: minor refactor] Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230419021852.2981107-2-reijiw@google.com --- arch/arm64/kvm/arm.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index fd8d355aca15..ad3655a7d122 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1241,11 +1241,15 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, /* * Handle the "start in power-off" case. 
*/ + spin_lock(&vcpu->arch.mp_state_lock); + if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) - kvm_arm_vcpu_power_off(vcpu); + __kvm_arm_vcpu_power_off(vcpu); else WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); + spin_unlock(&vcpu->arch.mp_state_lock); + return 0; } -- cgit v1.2.3 From a189884bdc9238aeba941c50f02e25eb584fafed Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Tue, 18 Apr 2023 19:18:52 -0700 Subject: KVM: arm64: Have kvm_psci_vcpu_on() use WRITE_ONCE() to update mp_state All accessors of kvm_vcpu_arch::mp_state should be {READ,WRITE}_ONCE(), since readers of the mp_state don't acquire the mp_state_lock. Nonetheless, kvm_psci_vcpu_on() updates the mp_state without using WRITE_ONCE(). So, fix the code to update the mp_state using WRITE_ONCE. Signed-off-by: Reiji Watanabe Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230419021852.2981107-3-reijiw@google.com --- arch/arm64/kvm/psci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index 5767e6baa61a..d046e82e3723 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -110,7 +110,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) */ smp_wmb(); - vcpu->arch.mp_state.mp_state = KVM_MP_STATE_RUNNABLE; + WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); kvm_vcpu_wake_up(vcpu); out_unlock: -- cgit v1.2.3 From 2f2c0911b950794dbf394ee2d784138fbf714dbd Mon Sep 17 00:00:00 2001 From: Nico Boehr Date: Thu, 23 Feb 2023 17:22:36 +0100 Subject: KVM: s390: interrupt: fix virtual-physical confusion for next alert GISA We sometimes put a virtual address in next_alert, which should always be a physical address, since it is shared with hardware. This currently works, because virtual and physical addresses are the same. Add phys_to_virt() to resolve the virtual-physical confusion. Signed-off-by: Nico Boehr Reviewed-by: Janosch Frank Reviewed-by: Michael Mueller Signed-off-by: Janosch Frank Link: https://lore.kernel.org/r/20230223162236.51569-1-nrb@linux.ibm.com Message-Id: <20230223162236.51569-1-nrb@linux.ibm.com> --- arch/s390/kvm/interrupt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 9250fde1f97d..da6dac36e959 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -305,7 +305,7 @@ static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi) static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa) { - return READ_ONCE(gisa->next_alert) != (u32)(u64)gisa; + return READ_ONCE(gisa->next_alert) != (u32)virt_to_phys(gisa); } static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc) @@ -3168,7 +3168,7 @@ void kvm_s390_gisa_init(struct kvm *kvm) hrtimer_init(&gi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); gi->timer.function = gisa_vcpu_kicker; memset(gi->origin, 0, sizeof(struct kvm_s390_gisa)); - gi->origin->next_alert = (u32)(u64)gi->origin; + gi->origin->next_alert = (u32)virt_to_phys(gi->origin); VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin); } -- cgit v1.2.3 From 7be3e3392335131b2234fa3e6feda3e989cb837c Mon Sep 17 00:00:00 2001 From: Pierre Morel Date: Tue, 14 Feb 2023 13:28:41 +0100 Subject: KVM: s390: vsie: clarifications on setting the APCB The APCB is part of the CRYCB. The calculation of the APCB origin can be done by adding the APCB offset to the CRYCB origin. 
Current code makes confusing transformations, converting the CRYCB
origin to a pointer to calculate the APCB origin. Let's make things
simpler and keep the CRYCB origin to make these calculations.

Signed-off-by: Pierre Morel
Reviewed-by: Claudio Imbrenda
Acked-by: David Hildenbrand
Acked-by: Janosch Frank
Signed-off-by: Janosch Frank
Link: https://lore.kernel.org/r/20230214122841.13066-2-pmorel@linux.ibm.com
Message-Id: <20230214122841.13066-2-pmorel@linux.ibm.com>
---
 arch/s390/kvm/vsie.c | 50 +++++++++++++++++++++++++++++---------------------
 1 file changed, 29 insertions(+), 21 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index b6a0219e470a..8d6b765abf29 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -138,11 +138,15 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 }

 /* Copy to APCB FORMAT1 from APCB FORMAT0 */
 static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
-                        unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
+                        unsigned long crycb_gpa, struct kvm_s390_apcb1 *apcb_h)
 {
         struct kvm_s390_apcb0 tmp;
+        unsigned long apcb_gpa;

-        if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
+        apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
+
+        if (read_guest_real(vcpu, apcb_gpa, &tmp,
+                            sizeof(struct kvm_s390_apcb0)))
                 return -EFAULT;

         apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
@@ -157,15 +161,19 @@ static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
  * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
  * @vcpu: pointer to the virtual CPU
  * @apcb_s: pointer to start of apcb in the shadow crycb
- * @apcb_o: pointer to start of original apcb in the guest2
+ * @crycb_gpa: guest physical address to start of original guest crycb
  * @apcb_h: pointer to start of apcb in the guest1
  *
  * Returns 0 and -EFAULT on error reading guest apcb
  */
 static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
-                        unsigned long apcb_o, unsigned long *apcb_h)
+                        unsigned long crycb_gpa, unsigned long *apcb_h)
 {
-        if (read_guest_real(vcpu, apcb_o, apcb_s,
+        unsigned long apcb_gpa;
+
+        apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb0);
+
+        if (read_guest_real(vcpu, apcb_gpa, apcb_s,
                             sizeof(struct kvm_s390_apcb0)))
                 return -EFAULT;

@@ -178,16 +186,20 @@ static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
  * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
  * @vcpu: pointer to the virtual CPU
  * @apcb_s: pointer to start of apcb in the shadow crycb
- * @apcb_o: pointer to start of original guest apcb
+ * @crycb_gpa: guest physical address to start of original guest crycb
  * @apcb_h: pointer to start of apcb in the host
  *
  * Returns 0 and -EFAULT on error reading guest apcb
  */
 static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
-                        unsigned long apcb_o,
+                        unsigned long crycb_gpa,
                         unsigned long *apcb_h)
 {
-        if (read_guest_real(vcpu, apcb_o, apcb_s,
+        unsigned long apcb_gpa;
+
+        apcb_gpa = crycb_gpa + offsetof(struct kvm_s390_crypto_cb, apcb1);
+
+        if (read_guest_real(vcpu, apcb_gpa, apcb_s,
                             sizeof(struct kvm_s390_apcb1)))
                 return -EFAULT;

@@ -200,7 +212,7 @@ static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
  * setup_apcb - Create a shadow copy of the apcb.
  * @vcpu: pointer to the virtual CPU
  * @crycb_s: pointer to shadow crycb
- * @crycb_o: pointer to original guest crycb
+ * @crycb_gpa: guest physical address of original guest crycb
  * @crycb_h: pointer to the host crycb
  * @fmt_o: format of the original guest crycb.
  * @fmt_h: format of the host crycb.
@@ -211,50 +223,46 @@ static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
  * Return 0 or an error number if the guest and host crycb are incompatible.
  */
 static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
-               const u32 crycb_o,
+               const u32 crycb_gpa,
                struct kvm_s390_crypto_cb *crycb_h, int fmt_o, int fmt_h)
 {
-        struct kvm_s390_crypto_cb *crycb;
-
-        crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;
-
         switch (fmt_o) {
         case CRYCB_FORMAT2:
-                if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
+                if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 256) & PAGE_MASK))
                         return -EACCES;
                 if (fmt_h != CRYCB_FORMAT2)
                         return -EINVAL;
                 return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
-                                    (unsigned long) &crycb->apcb1,
+                                    crycb_gpa,
                                     (unsigned long *)&crycb_h->apcb1);
         case CRYCB_FORMAT1:
                 switch (fmt_h) {
                 case CRYCB_FORMAT2:
                         return setup_apcb10(vcpu, &crycb_s->apcb1,
-                                            (unsigned long) &crycb->apcb0,
+                                            crycb_gpa,
                                             &crycb_h->apcb1);
                 case CRYCB_FORMAT1:
                         return setup_apcb00(vcpu,
                                             (unsigned long *) &crycb_s->apcb0,
-                                            (unsigned long) &crycb->apcb0,
+                                            crycb_gpa,
                                             (unsigned long *) &crycb_h->apcb0);
                 }
                 break;
         case CRYCB_FORMAT0:
-                if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
+                if ((crycb_gpa & PAGE_MASK) != ((crycb_gpa + 32) & PAGE_MASK))
                         return -EACCES;
                 switch (fmt_h) {
                 case CRYCB_FORMAT2:
                         return setup_apcb10(vcpu, &crycb_s->apcb1,
-                                            (unsigned long) &crycb->apcb0,
+                                            crycb_gpa,
                                             &crycb_h->apcb1);
                 case CRYCB_FORMAT1:
                 case CRYCB_FORMAT0:
                         return setup_apcb00(vcpu,
                                             (unsigned long *) &crycb_s->apcb0,
-                                            (unsigned long) &crycb->apcb0,
+                                            crycb_gpa,
                                             (unsigned long *) &crycb_h->apcb0);
                 }
         }
-- cgit v1.2.3

From 8a46df7cd135fe576c18efa418cd1549e51f2732 Mon Sep 17 00:00:00 2001
From: Nico Boehr
Date: Wed, 22 Feb 2023 16:55:02 +0100
Subject: KVM: s390: pci: fix virtual-physical confusion on module unload/load

When the kvm module is unloaded, zpci_setup_aipb() persists some data
in the zpci_aipb structure in s390 pci code. Note that this struct is
also passed to firmware in the zpci_set_irq_ctrl() call, and thus the
GAIT must be a physical address.

On module re-insertion, the GAIT is restored from this structure in
zpci_reset_aipb(). But it is a physical address, hence this may cause
issues when the kvm module is unloaded and loaded again.

Fix the virtual vs physical address confusion (currently the two are
the same) by adding the necessary physical-to-virtual conversion in
zpci_reset_aipb().

Signed-off-by: Nico Boehr
Reviewed-by: Matthew Rosato
Signed-off-by: Janosch Frank
Link: https://lore.kernel.org/r/20230222155503.43399-1-nrb@linux.ibm.com
Message-Id: <20230222155503.43399-1-nrb@linux.ibm.com>
---
 arch/s390/kvm/pci.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/s390/kvm/pci.c b/arch/s390/kvm/pci.c
index b124d586db55..7dab00f1e833 100644
--- a/arch/s390/kvm/pci.c
+++ b/arch/s390/kvm/pci.c
@@ -112,7 +112,7 @@ static int zpci_reset_aipb(u8 nisc)
                 return -EINVAL;

         aift->sbv = zpci_aif_sbv;
-        aift->gait = (struct zpci_gaite *)zpci_aipb->aipb.gait;
+        aift->gait = phys_to_virt(zpci_aipb->aipb.gait);

         return 0;
 }
-- cgit v1.2.3
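
Editor's note: the two s390 virtual-physical fixes above follow one common pattern: an address stored in a structure that firmware or hardware consumes must be converted with virt_to_phys() on the way in, and converted back with phys_to_virt() before the kernel dereferences it again. The sketch below illustrates that round trip in isolation under assumed names; the structure and helper names (fw_shared_block, fw_publish_gait, fw_restore_gait) are hypothetical and are not taken from the patches above, only virt_to_phys()/phys_to_virt() are real kernel APIs.

/* Illustrative sketch only -- hypothetical names, not from arch/s390/kvm. */
#include <linux/io.h>      /* virt_to_phys(), phys_to_virt() */
#include <linux/types.h>

struct fw_shared_block {
        u64 gait_addr;     /* firmware expects a physical address here */
};

static struct fw_shared_block *fw_block;

/* Publish a kernel buffer to firmware: store its physical address. */
static void fw_publish_gait(void *gait)
{
        fw_block->gait_addr = virt_to_phys(gait);
}

/* On module reload: convert back before using it as a kernel pointer. */
static void *fw_restore_gait(void)
{
        return phys_to_virt(fw_block->gait_addr);
}

The bugs fixed above arise when one of the two conversions is skipped; that happens to work only as long as virtual and physical addresses are identical, which is why both commit messages stress that the confusion is currently harmless but still wrong.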