author    Thomas Gleixner <tglx@linutronix.de>  2017-06-20 02:37:50 +0300
committer Thomas Gleixner <tglx@linutronix.de>  2017-06-22 19:21:24 +0300
commit    761ea388e8c4e3ac883a94e16bcc8c51fa419d4f (patch)
tree      387123863dd4a3db22eba0f3bde29df1d886eb82 /include/linux/irq.h
parent    4cde9c6b826834b861a2b58653ab33150f562064 (diff)
genirq: Handle managed irqs gracefully in irq_startup()
Affinity managed interrupts should keep their assigned affinity across CPU hotplug. To avoid magic hackery in device drivers, the core code shall manage them transparently and set these interrupts into a managed shutdown state when the last CPU of the assigned affinity mask goes offline. The interrupt will be restarted when one of the CPUs in the assigned affinity mask comes back online.

Add the necessary logic to irq_startup(). If an interrupt is requested and started up, the code checks whether it is affinity managed and, if so, it checks whether a CPU in the interrupt's affinity mask is online. If not, it puts the interrupt into managed shutdown state.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235447.189851170@linutronix.de
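The kernel/irq/chip.c side of this change is not part of the diff shown below, which only covers include/linux/irq.h. As a rough sketch of the startup check described above (illustrative only, not the code from this commit; the function name is made up, while the irqd_* and cpumask helpers are existing kernel APIs), the logic amounts to:

/*
 * Sketch only: models the described behaviour. A managed interrupt whose
 * affinity mask contains no online CPU is parked in managed shutdown state
 * instead of being started; otherwise the shutdown flag is cleared and the
 * normal startup path proceeds.
 */
static bool sketch_irq_startup_managed(struct irq_data *d)
{
	const struct cpumask *aff = irq_data_get_affinity_mask(d);

	if (!irqd_affinity_is_managed(d))
		return true;			/* not managed: start normally */

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/* No online CPU in the mask: defer until one comes back. */
		irqd_set_managed_shutdown(d);
		return false;			/* abort the startup */
	}

	irqd_clr_managed_shutdown(d);
	return true;
}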
Diffstat (limited to 'include/linux/irq.h')
-rw-r--r--  include/linux/irq.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 0e37276c5315..807042b46af1 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -346,7 +346,7 @@ static inline bool irqd_is_started(struct irq_data *d)
return __irqd_to_state(d) & IRQD_IRQ_STARTED;
}
-static inline bool irqd_is_managed_shutdown(struct irq_data *d)
+static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
}
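The rename makes the predicate read as it behaves: the IRQD_MANAGED_SHUTDOWN flag is only set on affinity managed interrupts that have been parked, so the helper answers "managed and shut down". A hedged usage sketch (the surrounding hotplug handler and the restart helper below are hypothetical, not part of this commit):

/* Hypothetical CPU-online handler, for illustration only. */
static void example_cpu_online_restart(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	/* Only interrupts parked by the managed shutdown logic qualify. */
	if (irqd_is_managed_and_shutdown(d))
		example_restart_irq(desc);	/* hypothetical restart helper */
}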