Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 132
1 file changed, 77 insertions(+), 55 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a9a9dbe49fea..b0ccd1ac2d6a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -759,6 +759,13 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
return ret;
}
+static void wake_threads_waitq(struct irq_desc *desc)
+{
+ if (atomic_dec_and_test(&desc->threads_active) &&
+ waitqueue_active(&desc->wait_for_threads))
+ wake_up(&desc->wait_for_threads);
+}
+
/*
* Interrupt handler thread
*/
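
The helper added above centralizes the dec-and-test/wakeup step that used to be open-coded in irq_thread(): every wakeup of a handler thread bumps desc->threads_active, and whoever drops the count back to zero wakes desc->wait_for_threads, on which synchronize_irq() sleeps until all threaded handlers have drained. Below is a minimal userspace sketch of the same counting pattern, with a pthread condvar standing in for the kernel waitqueue; all names here are illustrative, not kernel API:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int active;	/* models desc->threads_active */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;

	static void *handler(void *arg)	/* models one threaded handler */
	{
		/* ... handle the interrupt ... */

		/* mirrors wake_threads_waitq(): last one out wakes waiters */
		if (atomic_fetch_sub(&active, 1) == 1) {
			pthread_mutex_lock(&lock);
			pthread_cond_broadcast(&drained);
			pthread_mutex_unlock(&lock);
		}
		return NULL;
	}

	static void wait_for_threads(void)	/* models synchronize_irq() */
	{
		pthread_mutex_lock(&lock);
		while (atomic_load(&active) != 0)
			pthread_cond_wait(&drained, &lock);
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		pthread_t t[4];

		atomic_store(&active, 4);	/* models irq_wake_thread() incs */
		for (int i = 0; i < 4; i++)
			pthread_create(&t[i], NULL, handler, NULL);
		wait_for_threads();
		for (int i = 0; i < 4; i++)
			pthread_join(t[i], NULL);
		puts("all handler threads drained");
		return 0;
	}

The kernel version gets the equivalent ordering guarantees from the waitqueue's internal lock; the condvar sketch takes the mutex around the broadcast for the same reason (avoiding a lost wakeup).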
@@ -771,57 +778,41 @@ static int irq_thread(void *data)
struct irq_desc *desc = irq_to_desc(action->irq);
irqreturn_t (*handler_fn)(struct irq_desc *desc,
struct irqaction *action);
- int wake;
- if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
+ if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
&action->thread_flags))
handler_fn = irq_forced_thread_fn;
else
handler_fn = irq_thread_fn;
sched_setscheduler(current, SCHED_FIFO, &param);
- current->irqaction = action;
+ current->irq_thread = 1;
while (!irq_wait_for_interrupt(action)) {
+ irqreturn_t action_ret;
irq_thread_check_affinity(desc, action);
- atomic_inc(&desc->threads_active);
-
- raw_spin_lock_irq(&desc->lock);
- if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
- /*
- * CHECKME: We might need a dedicated
- * IRQ_THREAD_PENDING flag here, which
- * retriggers the thread in check_irq_resend()
- * but AFAICT IRQS_PENDING should be fine as it
- * retriggers the interrupt itself --- tglx
- */
- desc->istate |= IRQS_PENDING;
- raw_spin_unlock_irq(&desc->lock);
- } else {
- irqreturn_t action_ret;
-
- raw_spin_unlock_irq(&desc->lock);
- action_ret = handler_fn(desc, action);
- if (!noirqdebug)
- note_interrupt(action->irq, desc, action_ret);
- }
+ action_ret = handler_fn(desc, action);
+ if (!noirqdebug)
+ note_interrupt(action->irq, desc, action_ret);
- wake = atomic_dec_and_test(&desc->threads_active);
-
- if (wake && waitqueue_active(&desc->wait_for_threads))
- wake_up(&desc->wait_for_threads);
+ wake_threads_waitq(desc);
}
- /* Prevent a stale desc->threads_oneshot */
- irq_finalize_oneshot(desc, action, true);
-
/*
- * Clear irqaction. Otherwise exit_irq_thread() would make
+ * This is the regular exit path. __free_irq() is stopping the
+ * thread via kthread_stop() after calling
+ * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
+ * oneshot mask bit can be set. We cannot verify that as we
+ * cannot touch the oneshot mask at this point anymore as
+ * __setup_irq() might have given out current's thread_mask
+ * again.
+ *
+ * Clear irq_thread. Otherwise exit_irq_thread() would make
* fuzz about an active irq thread going into nirvana.
*/
- current->irqaction = NULL;
+ current->irq_thread = 0;
return 0;
}
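
The new exit-path comment leans on the __free_irq() ordering visible at the bottom of this diff: synchronize_irq() first waits for desc->threads_active to drain, and only then is kthread_stop() called, so the thread can leave its wait loop solely on the stop request. For reference, the loop condition comes from irq_wait_for_interrupt(), which at this point in the tree reads roughly as follows (quoted from memory, not part of this diff):

	static int irq_wait_for_interrupt(struct irqaction *action)
	{
		set_current_state(TASK_INTERRUPTIBLE);

		while (!kthread_should_stop()) {
			/* A wakeup with IRQTF_RUNTHREAD set means "run once" */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		__set_current_state(TASK_RUNNING);
		return -1;
	}

kthread_stop() therefore makes irq_wait_for_interrupt() return non-zero, irq_thread() falls out of its while loop, clears current->irq_thread and returns.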
@@ -832,27 +823,28 @@ void exit_irq_thread(void)
{
struct task_struct *tsk = current;
struct irq_desc *desc;
+ struct irqaction *action;
- if (!tsk->irqaction)
+ if (!tsk->irq_thread)
return;
+ action = kthread_data(tsk);
+
printk(KERN_ERR
"exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
- tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+ tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
- desc = irq_to_desc(tsk->irqaction->irq);
+ desc = irq_to_desc(action->irq);
/*
- * Prevent a stale desc->threads_oneshot. Must be called
- * before setting the IRQTF_DIED flag.
+ * If IRQTF_RUNTHREAD is set, we need to decrement
+ * desc->threads_active and wake possible waiters.
*/
- irq_finalize_oneshot(desc, tsk->irqaction, true);
+ if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+ wake_threads_waitq(desc);
- /*
- * Set the THREAD DIED flag to prevent further wakeups of the
- * soon to be gone threaded handler.
- */
- set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+ /* Prevent a stale desc->threads_oneshot */
+ irq_finalize_oneshot(desc, action, true);
}
static void irq_setup_forced_threading(struct irqaction *new)
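
The exit_irq_thread() rework above works because the action pointer never needed a dedicated task_struct field: __setup_irq() creates the handler thread with kthread_create(irq_thread, new, ...), and kthread_data() hands back exactly that cookie for any kthread. A minimal sketch of the pairing, with illustrative "demo" names (the kthread calls themselves are the stock API):

	#include <linux/bug.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	struct demo_ctx {
		int id;
	};

	static int demo_thread(void *data)
	{
		struct demo_ctx *ctx = data;

		/* The creation cookie is recoverable anywhere on this thread: */
		WARN_ON(kthread_data(current) != ctx);

		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	static struct task_struct *demo_start(struct demo_ctx *ctx)
	{
		/* After this, kthread_data(t) == ctx for the new thread */
		return kthread_run(demo_thread, ctx, "demo/%d", ctx->id);
	}

This is also why exit_irq_thread() now only needs the one-bit current->irq_thread flag: the bit says "I am an irq thread", and kthread_data() supplies the rest.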
@@ -985,6 +977,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
/* add new interrupt at end of irq queue */
do {
+ /*
+ * Or all existing action->thread_mask bits,
+ * so we can find the next zero bit for this
+ * new action.
+ */
thread_mask |= old->thread_mask;
old_ptr = &old->next;
old = *old_ptr;
@@ -993,14 +990,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
}
/*
- * Setup the thread mask for this irqaction. Unlikely to have
- * 32 resp 64 irqs sharing one line, but who knows.
+ * Setup the thread mask for this irqaction for ONESHOT. For
+ * !ONESHOT irqs the thread mask is 0 so we can avoid a
+ * conditional in irq_wake_thread().
*/
- if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
- ret = -EBUSY;
- goto out_mask;
+ if (new->flags & IRQF_ONESHOT) {
+ /*
+ * Unlikely to have 32 resp 64 irqs sharing one line,
+ * but who knows.
+ */
+ if (thread_mask == ~0UL) {
+ ret = -EBUSY;
+ goto out_mask;
+ }
+ /*
+ * The thread_mask for the action is or'ed to
+ * desc->threads_oneshot to indicate that the
+ * IRQF_ONESHOT thread handler has been woken, but not
+ * yet finished. The bit is cleared when a thread
+ * completes. When all threads of a shared interrupt
+ * line have completed, desc->threads_oneshot becomes
+ * zero and the interrupt line is unmasked. See
+ * handle.c::irq_wake_thread() for further information.
+ *
+ * If no thread is woken by primary (hard irq context)
+ * interrupt handlers, then desc->threads_oneshot is
+ * also checked for zero to unmask the irq line in the
+ * affected hard irq flow handlers
+ * (handle_[fasteoi|level]_irq).
+ *
+ * The new action gets the first zero bit of
+ * thread_mask assigned. See the loop above which or's
+ * all existing action->thread_mask bits.
+ */
+ new->thread_mask = 1UL << ffz(thread_mask);
}
- new->thread_mask = 1 << ffz(thread_mask);
if (!shared) {
init_waitqueue_head(&desc->wait_for_threads);
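
The allocation scheme in the hunk above gives every shared oneshot action on a line its own bit: the loop earlier in __setup_irq() OR's together the thread_mask of every existing action, and ffz() then picks the lowest bit not yet handed out (hence the -EBUSY once BITS_PER_LONG actions share one line). A userspace illustration of the same bit arithmetic, using __builtin_ctzl() on the complement in place of the kernel's ffz():

	#include <stdio.h>

	int main(void)
	{
		/* Masks already handed out to three sharing actions */
		unsigned long used[] = { 1UL << 0, 1UL << 1, 1UL << 3 };
		unsigned long thread_mask = 0;

		/* The __setup_irq() loop: OR all existing thread_mask bits */
		for (unsigned i = 0; i < 3; i++)
			thread_mask |= used[i];

		if (thread_mask == ~0UL)
			return 1;	/* the -EBUSY case: line is full */

		/* ffz(thread_mask): first zero bit -- bit 2 here, so 0x4 */
		printf("new->thread_mask = %#lx\n",
		       1UL << __builtin_ctzl(~thread_mask));
		return 0;
	}

At runtime the bit then travels the path the new comment describes: irq_wake_thread() OR's it into desc->threads_oneshot when the thread is woken, and irq_finalize_oneshot() clears it again, unmasking the line once the field reaches zero.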
@@ -1027,7 +1051,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
desc->istate |= IRQS_ONESHOT;
if (irq_settings_can_autoenable(desc))
- irq_startup(desc);
+ irq_startup(desc, true);
else
/* Undo nested disables: */
desc->depth = 1;
@@ -1103,8 +1127,7 @@ out_thread:
struct task_struct *t = new->thread;
new->thread = NULL;
- if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
- kthread_stop(t);
+ kthread_stop(t);
put_task_struct(t);
}
out_mput:
@@ -1214,8 +1237,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
#endif
if (action->thread) {
- if (!test_bit(IRQTF_DIED, &action->thread_flags))
- kthread_stop(action->thread);
+ kthread_stop(action->thread);
put_task_struct(action->thread);
}