path: root/drivers/lguest/interrupts_and_traps.c
author	Rusty Russell <rusty@rustcorp.com.au>	2009-06-13 08:27:02 +0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2009-06-12 16:57:03 +0400
commit	a32a8813d0173163ba44d8f9556e0d89fdc4fb46 (patch)
tree	fddb6742338047d0219e8c2536cd39b04e643b16 /drivers/lguest/interrupts_and_traps.c
parent	abd41f037e1a64543000ed73b42f616d04d92700 (diff)
download	linux-a32a8813d0173163ba44d8f9556e0d89fdc4fb46.tar.xz
lguest: improve interrupt handling, speed up stream networking
lguest never checked for pending interrupts when enabling interrupts, and things still worked. However, it makes a significant difference to TCP performance, so it's time we fixed it by introducing a pending_irq flag and checking it on irq_restore and irq_enable.

These two routines are now too big to patch into the 8/10 bytes patch space, so we drop that code.

Note: The high latency on interrupt delivery had a very curious effect: once everything else was optimized, networking without GSO was faster than networking with GSO, since more interrupts were sent and hence a greater chance of one getting through to the Guest!

Note2: (Almost) Closing the same loophole for iret doesn't have any measurable effect, so I'm leaving that patch for the moment.

Before:
	1GB tcpblast Guest->Host:		30.7 seconds
	1GB tcpblast Guest->Host (no GSO):	76.0 seconds

After:
	1GB tcpblast Guest->Host:		6.8 seconds
	1GB tcpblast Guest->Host (no GSO):	27.8 seconds

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
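The Guest-side half of this scheme (checking the new flag on irq_enable and irq_restore) lives outside this file, in the Guest's paravirt hooks. The sketch below is only an illustration of the idea, not code from this diff; the hcall() helper and the LHCALL_SEND_INTERRUPTS name are assumptions.

/* Sketch only: a Guest paravirt irq_enable that honours the new flag.
 * Assumes lguest_data carries the irq_pending word the Host writes in this
 * patch, and that a hypercall exists to ask the Host to deliver interrupts
 * right away (helper and hypercall name assumed, not taken from this diff). */
static void lguest_irq_enable(void)
{
	/* Tell the Host that interrupts are allowed again. */
	lguest_data.irq_enabled = X86_EFLAGS_IF;
	mb();
	/* The Host sets irq_pending when it had to hold an interrupt back
	 * because we had interrupts disabled; if so, ask it to deliver now. */
	if (lguest_data.irq_pending)
		hcall(LHCALL_SEND_INTERRUPTS, 0, 0, 0);
}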
Diffstat (limited to 'drivers/lguest/interrupts_and_traps.c')
-rw-r--r--	drivers/lguest/interrupts_and_traps.c	16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index a8c966fee1e4..5a10754b4790 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -131,7 +131,7 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
* interrupt_pending() returns the first pending interrupt which isn't blocked
* by the Guest. It is called before every entry to the Guest, and just before
* we go to sleep when the Guest has halted itself. */
-unsigned int interrupt_pending(struct lg_cpu *cpu)
+unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
{
unsigned int irq;
DECLARE_BITMAP(blk, LGUEST_IRQS);
@@ -149,13 +149,14 @@ unsigned int interrupt_pending(struct lg_cpu *cpu)
/* Find the first interrupt. */
irq = find_first_bit(blk, LGUEST_IRQS);
+ *more = find_next_bit(blk, LGUEST_IRQS, irq+1);
return irq;
}
/* This actually diverts the Guest to running an interrupt handler, once an
* interrupt has been identified by interrupt_pending(). */
-void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq)
+void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
{
struct desc_struct *idt;
@@ -178,8 +179,12 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq)
u32 irq_enabled;
if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
irq_enabled = 0;
- if (!irq_enabled)
+ if (!irq_enabled) {
+ /* Make sure they know an IRQ is pending. */
+ put_user(X86_EFLAGS_IF,
+ &cpu->lg->lguest_data->irq_pending);
return;
+ }
}
/* Look at the IDT entry the Guest gave us for this interrupt. The
@@ -202,6 +207,11 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq)
* here is a compromise which means at least it gets updated every
* timer interrupt. */
write_timestamp(cpu);
+
+ /* If there are no other interrupts we want to deliver, clear
+ * the pending flag. */
+ if (!more)
+ put_user(0, &cpu->lg->lguest_data->irq_pending);
}
/*:*/
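For context, the caller of these two routines lives in the lguest core rather than in this file. A hedged sketch of how the updated signatures fit together on each entry to the Guest (the surrounding run loop and its variable names are assumed):

/* Sketch only: using the new 'more' out-parameter from interrupt_pending().
 * interrupt_pending() returns LGUEST_IRQS when nothing is deliverable, and
 * sets 'more' if another unblocked interrupt is queued behind this one, so
 * try_deliver_interrupt() knows whether to clear the Guest's pending flag. */
bool more;
unsigned int irq;

irq = interrupt_pending(cpu, &more);
if (irq < LGUEST_IRQS)
	try_deliver_interrupt(cpu, irq, more);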