author     Radim Krčmář <rkrcmar@redhat.com>    2016-03-03 00:56:38 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>  2016-03-04 11:29:31 +0300
commit     7dd0fdff145c5be7146d0ac06732ae3613412ac1 (patch)
tree       5be510cbbfb23980e426fcf1a824906f1fef9b66 /arch/x86/kvm/i8254.c
parent     13d268ca2c4c29d6da2ba79419f9a655e602afed (diff)
download   linux-7dd0fdff145c5be7146d0ac06732ae3613412ac1.tar.xz
KVM: i8254: change PIT discard tick policy
The discard policy uses ack_notifiers to prevent injection of PIT interrupts before EOI from the last one.

This patch changes the policy to always try to deliver the interrupt, which makes a difference when its vector is in ISR. The old implementation would drop the interrupt, but the proposed one injects it into IRR, as real hardware would.

The old policy breaks legacy NMI watchdogs, where the PIT is used through virtual wire (LVT0): the PIT never sends an interrupt before receiving EOI, so a guest deadlock with disabled interrupts will stop NMIs. Note that NMI doesn't do EOI, so the PIT also had to send a normal interrupt through the IOAPIC. (KVM's PIT is deeply rotten and luckily not used much in modern systems.)

Even though there is a chance of regressions, I think we can fix the LVT0 NMI bug without introducing a new tick policy.

Cc: <stable@vger.kernel.org>
Reported-by: Yuki Shibuya <shibuya.yk@ncos.nec.co.jp>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
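To make the policy difference concrete, the following is a minimal user-space sketch, not taken from the kernel sources: struct pit_state, tick_discard() and tick_deliver() are illustrative names only. It contrasts the old discard behaviour, which drops a tick while the previous one is still unacked, with the new behaviour of always delivering the tick so that it ends up in IRR.

/*
 * Illustrative sketch of the two tick policies described above.
 * None of these identifiers exist in arch/x86/kvm/i8254.c.
 */
#include <stdbool.h>
#include <stdio.h>

struct pit_state {
        bool irq_ack;   /* previous tick has been EOI'd by the guest */
        int delivered;  /* ticks actually handed to the interrupt controller */
};

/* Old discard policy: drop the tick while the previous one is unacked. */
static void tick_discard(struct pit_state *s)
{
        if (s->irq_ack) {
                s->irq_ack = false;
                s->delivered++;
        }
        /* else: tick silently lost; an LVT0-based NMI watchdog starves */
}

/* New behaviour: always deliver; the interrupt sits in IRR as on real hardware. */
static void tick_deliver(struct pit_state *s)
{
        s->delivered++;
}

int main(void)
{
        struct pit_state drop = { .irq_ack = true };
        struct pit_state keep = { .irq_ack = true };
        int i;

        /* The guest runs with interrupts disabled and never sends EOI. */
        for (i = 0; i < 3; i++) {
                tick_discard(&drop);
                tick_deliver(&keep);
        }
        printf("discard policy delivered %d ticks, new policy delivered %d\n",
               drop.delivered, keep.delivered);
        return 0;
}

Run against three timer ticks with no EOI, the discard policy delivers only the first tick and then goes silent, which is exactly the deadlock scenario the commit message describes; the new policy keeps queuing ticks.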
Diffstat (limited to 'arch/x86/kvm/i8254.c')
-rw-r--r--    arch/x86/kvm/i8254.c    12
1 file changed, 7 insertions, 5 deletions
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index b0ea42b78ccd..ab5318727579 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -245,7 +245,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
 		 * PIC is being reset. Handle it gracefully here
 		 */
 		atomic_inc(&ps->pending);
-	else if (value > 0)
+	else if (value > 0 && ps->reinject)
 		/* in this case, we had multiple outstanding pit interrupts
 		 * that we needed to inject. Reinject
 		 */
@@ -288,7 +288,9 @@ static void pit_do_work(struct kthread_work *work)
 	 * last one has been acked.
 	 */
 	spin_lock(&ps->inject_lock);
-	if (ps->irq_ack) {
+	if (!ps->reinject)
+		inject = 1;
+	else if (ps->irq_ack) {
 		ps->irq_ack = 0;
 		inject = 1;
 	}
@@ -317,10 +319,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
 	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
 	struct kvm_pit *pt = ps->kvm->arch.vpit;
 
-	if (ps->reinject || !atomic_read(&ps->pending)) {
+	if (ps->reinject)
 		atomic_inc(&ps->pending);
-		queue_kthread_work(&pt->worker, &pt->expired);
-	}
+
+	queue_kthread_work(&pt->worker, &pt->expired);
 
 	if (ps->is_periodic) {
 		hrtimer_add_expires_ns(&ps->timer, ps->period);