author     Paolo Bonzini <pbonzini@redhat.com>  2024-06-21 15:03:55 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>  2024-06-21 15:03:55 +0300
commit     dee67a94d4c6cbd05b8f6e1181498e94caa33334 (patch)
tree       352f6df0c8bd645b0107e20fa2b7c3eeeb3d67ac /virt/kvm
parent     cf6d9d2d243f242f51ee0666ca88e61d9408752f (diff)
parent     c3f3edf73a8f854f8766a69d2734198a58762e33 (diff)
Merge tag 'kvm-x86-fixes-6.10-rcN' of https://github.com/kvm-x86/linux into HEAD
KVM fixes for 6.10:

 - Fix a "shift too big" goof in the KVM_SEV_INIT2 selftest.

 - Compute the max mappable gfn for KVM selftests on x86 using GuestMaxPhyAddr
   from KVM's supported CPUID (if it's available).

 - Fix a race in kvm_vcpu_on_spin() by ensuring loads and stores are atomic.

 - Fix a technically benign bug in __kvm_handle_hva_range() where KVM consumes
   the return from a void-returning function as if it were a boolean.
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/kvm_main.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
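
The kvm_vcpu_on_spin() race noted above is handled by snapshotting
kvm->last_boosted_vcpu once with READ_ONCE() and publishing updates with
WRITE_ONCE(). As a rough, self-contained illustration of that pattern in plain
GNU C outside the kernel (the simplified READ_ONCE/WRITE_ONCE macros and the
vcpu-selection loop below are stand-ins for illustration, not the kernel
implementation):

	#include <stdio.h>

	/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() macros. */
	#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
	#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

	static int last_boosted_vcpu;	/* shared; other CPUs update it concurrently */

	static void spin_loop(int online_vcpus)
	{
		/*
		 * Snapshot the shared index exactly once.  READ_ONCE() keeps the
		 * compiler from tearing or re-issuing the plain load and marks the
		 * intentional lockless access.
		 */
		int start = READ_ONCE(last_boosted_vcpu);

		for (int i = 0; i < online_vcpus; i++) {
			int idx = (start + i + 1) % online_vcpus;

			printf("trying vcpu %d\n", idx);
			if (idx == 2) {			/* pretend the yield succeeded */
				WRITE_ONCE(last_boosted_vcpu, idx);
				break;
			}
		}
	}

	int main(void)
	{
		spin_loop(4);
		return 0;
	}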
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8e422c2c9450..1192942aef91 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -651,7 +651,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 					range->on_lock(kvm);
 
 				if (IS_KVM_NULL_FN(range->handler))
-					break;
+					goto mmu_unlock;
 			}
 			r.ret |= range->handler(kvm, &gfn_range);
 		}
@@ -660,6 +660,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 	if (range->flush_on_ret && r.ret)
 		kvm_flush_remote_tlbs(kvm);
 
+mmu_unlock:
 	if (r.found_memslot)
 		KVM_MMU_UNLOCK(kvm);
 
@@ -4025,12 +4026,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
 	struct kvm *kvm = me->kvm;
 	struct kvm_vcpu *vcpu;
-	int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+	int last_boosted_vcpu;
 	unsigned long i;
 	int yielded = 0;
 	int try = 3;
 	int pass;
 
+	last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
 	kvm_vcpu_set_in_spin_loop(me, true);
 	/*
 	 * We boost the priority of a VCPU that is runnable but not
@@ -4068,7 +4070,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 
 			yielded = kvm_vcpu_yield_to(vcpu);
 			if (yielded > 0) {
-				kvm->last_boosted_vcpu = i;
+				WRITE_ONCE(kvm->last_boosted_vcpu, i);
 				break;
 			} else if (yielded < 0) {
 				try--;
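
The first two hunks replace a bare break with a jump to a common unlock label,
the usual kernel idiom for bailing out of nested loops while still releasing a
lock taken earlier. A minimal userspace sketch of that shape, using a pthread
mutex as a stand-in for KVM_MMU_LOCK/KVM_MMU_UNLOCK (the names and structure
here are illustrative only, not the kernel code):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Walk a set of items, taking the lock lazily on the first match. */
	static int process_items(const int *items, int n, bool have_handler)
	{
		bool locked = false;
		int ret = 0;

		for (int i = 0; i < n; i++) {
			if (!locked) {
				locked = true;
				pthread_mutex_lock(&lock);

				/* Nothing to do per item: skip straight to the unlock. */
				if (!have_handler)
					goto unlock;
			}
			ret |= items[i] & 1;	/* stand-in for range->handler() */
		}

		if (ret)
			printf("would flush here\n");	/* stand-in for the TLB flush */

	unlock:
		if (locked)
			pthread_mutex_unlock(&lock);
		return ret;
	}

	int main(void)
	{
		int items[] = { 2, 3, 4 };

		printf("ret=%d\n", process_items(items, 3, true));
		printf("ret=%d\n", process_items(items, 3, false));
		return 0;
	}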