author    Sean Christopherson <seanjc@google.com>  2023-02-27 11:40:12 +0300
committer Sean Christopherson <seanjc@google.com>  2023-03-22 22:34:34 +0300
commit    ab2ee212a57bf136b896a47ce37da38a1fdbdc36 (patch)
tree      6ad64b7860b6d8cc8b81047ebbc19dc0773a39d3 /arch/x86/kvm/x86.c
parent    400fee8c9b2df61721de8103a4054247bea79fd0 (diff)
download  linux-ab2ee212a57bf136b896a47ce37da38a1fdbdc36.tar.xz
KVM: x86: Save/restore all NMIs when multiple NMIs are pending
Save all pending NMIs in KVM_GET_VCPU_EVENTS, and queue KVM_REQ_NMI if one
or more NMIs are pending after KVM_SET_VCPU_EVENTS in order to re-evaluate
pending NMIs with respect to NMI blocking.

KVM allows multiple NMIs to be pending in order to faithfully emulate bare
metal handling of simultaneous NMIs (on bare metal, truly simultaneous NMIs
are impossible, i.e. one will always arrive first and be consumed).  Support
for simultaneous NMIs botched the save/restore though.  KVM only saves one
pending NMI, but allows userspace to restore 255 pending NMIs as
kvm_vcpu_events.nmi.pending is a u8, and KVM's internal state is stored in
an unsigned int.

Fixes: 7460fb4a3400 ("KVM: Fix simultaneous NMIs")
Signed-off-by: Santosh Shukla <Santosh.Shukla@amd.com>
Link: https://lore.kernel.org/r/20230227084016.3368-8-santosh.shukla@amd.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
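For illustration, a minimal userspace sketch of the save/restore pair the
commit message describes.  The src_vcpu_fd/dst_vcpu_fd descriptors are
hypothetical (e.g. from a VMM's live-migration path) and are not part of
this patch; the ioctls, flag, and struct fields are the documented KVM API.

    /*
     * Sketch: snapshot vCPU event state on the source and restore it on the
     * destination.  events.nmi.pending is a __u8 in the uapi struct, while
     * KVM tracks nmi_pending internally as an unsigned int.
     */
    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <err.h>

    static void migrate_nmi_state(int src_vcpu_fd, int dst_vcpu_fd)
    {
            struct kvm_vcpu_events events;

            /* Source side: read out pending/injected event state. */
            if (ioctl(src_vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
                    err(1, "KVM_GET_VCPU_EVENTS");

            /*
             * Destination side: mark nmi.pending as valid so KVM restores it
             * instead of ignoring it.
             */
            events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
            if (ioctl(dst_vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
                    err(1, "KVM_SET_VCPU_EVENTS");
    }

With this fix, the destination KVM also queues KVM_REQ_NMI when the restored
nmi.pending is non-zero, so the pending NMIs are re-evaluated against NMI
blocking before the vCPU next runs.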
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e317559df489..f18f740f75d8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5125,7 +5125,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
 
 	events->nmi.injected = vcpu->arch.nmi_injected;
-	events->nmi.pending = vcpu->arch.nmi_pending != 0;
+	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu);
 
 	/* events->sipi_vector is never valid when reporting to user space */
@@ -5212,8 +5212,11 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					     events->interrupt.shadow);
 
 	vcpu->arch.nmi_injected = events->nmi.injected;
-	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
+	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
 		vcpu->arch.nmi_pending = events->nmi.pending;
+		if (vcpu->arch.nmi_pending)
+			kvm_make_request(KVM_REQ_NMI, vcpu);
+	}
 	static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
 
 	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
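For context on how the queued request is consumed, here is a simplified,
from-memory sketch of KVM's internal NMI processing (process_nmi() in
arch/x86/kvm/x86.c).  It is not part of this diff and omits details such as
lowering the limit when an NMI is masked or already being injected; the point
is that any oversized nmi_pending value restored by userspace is clamped to
what bare metal could actually have outstanding.

    /*
     * Simplified sketch (not the verbatim kernel code) of servicing a queued
     * KVM_REQ_NMI before entering the guest.
     */
    static void process_nmi(struct kvm_vcpu *vcpu)
    {
            /* Bare metal latches at most one NMI behind the one in service. */
            unsigned int limit = 2;

            /* Fold in NMIs queued since the last pass, then clamp. */
            vcpu->arch.nmi_pending += atomic_xchg(&vcpu->nmi_queued, 0);
            vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);

            /* Have the event injection path re-check deliverable events. */
            kvm_make_request(KVM_REQ_EVENT, vcpu);
    }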