author	Thomas Gleixner <tglx@linutronix.de>	2021-03-09 11:42:10 +0300
committer	Thomas Gleixner <tglx@linutronix.de>	2021-03-17 18:33:57 +0300
commit	eb2dafbba8b824ee77f166629babd470dd0b1c0a (patch)
tree	c3a84b3625d79c8b129e05c252be3f7d3ab376f8
parent	697d8c63c4a2991a22a896a5e6adcdbb28fefe56 (diff)
download	linux-eb2dafbba8b824ee77f166629babd470dd0b1c0a.tar.xz
tasklets: Prevent tasklet_unlock_spin_wait() deadlock on RT
tasklet_unlock_spin_wait() spin waits for the TASKLET_STATE_RUN bit in
the tasklet state to be cleared. This works on !RT nicely because the
corresponding execution can only happen on a different CPU.

On RT softirq processing is preemptible, therefore a task preempting the
softirq processing thread can spin forever.

Prevent this by invoking local_bh_disable()/enable() inside the loop. If
the softirq processing thread was preempted by the current task, current
will block on the local lock, which yields the CPU to the preempted
softirq processing thread. If the tasklet is processed on a different
CPU then the local_bh_disable()/enable() pair is just a waste of
processor cycles.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210309084241.988908275@linutronix.de
-rw-r--r--	include/linux/interrupt.h	12
-rw-r--r--	kernel/softirq.c	28
2 files changed, 29 insertions(+), 11 deletions(-)
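For illustration, a minimal sketch of the kind of atomic-context caller
this helper targets, in the spirit of the tasklet_disable_in_atomic()
helper added in the same patch series; my_dev and my_irq_handler are
invented names, not part of this patch:

/* Hypothetical caller sketch; my_dev and my_irq_handler are made up. */
#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct tasklet;
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	/*
	 * Atomic context: tasklet_disable(), which may sleep on RT, is not
	 * usable here. Mark the tasklet disabled, then busy wait until a
	 * concurrently running callback has finished. On PREEMPT_RT the
	 * local_bh_disable()/enable() pair inside tasklet_unlock_spin_wait()
	 * blocks on the softirq local lock and thereby yields the CPU to a
	 * preempted softirq thread instead of spinning against it forever.
	 */
	tasklet_disable_nosync(&dev->tasklet);
	tasklet_unlock_spin_wait(&dev->tasklet);

	/* ... state shared with the tasklet callback is stable here ... */

	tasklet_enable(&dev->tasklet);
	return IRQ_HANDLED;
}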
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index b50be4fbbc98..352db93c2eed 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -658,7 +658,7 @@ enum
 	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
 };
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
@@ -666,16 +666,8 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
 
 void tasklet_unlock(struct tasklet_struct *t);
 void tasklet_unlock_wait(struct tasklet_struct *t);
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
 
-/*
- * Do not use in new code. Waiting for tasklets from atomic contexts is
- * error prone and should be avoided.
- */
-static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
-{
-	while (test_bit(TASKLET_STATE_RUN, &t->state))
-		cpu_relax();
-}
 #else
 static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
 static inline void tasklet_unlock(struct tasklet_struct *t) { }
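Why the guard widens: on a PREEMPT_RT kernel the tasklet callback can be
preempted even on a uniprocessor build, so the real TASKLET_STATE_RUN
locking is needed there as well; only !SMP && !RT keeps the empty stubs.
As a rough sketch of how the softirq core uses the bit as a per-tasklet
lock (condensed from tasklet_action_common() in kernel/softirq.c, with
the requeue and error paths elided):

	if (tasklet_trylock(t)) {
		/* The callback runs with the RUN bit held as a lock. */
		if (!atomic_read(&t->count) &&
		    test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
			t->func(t->data);
		tasklet_unlock(t);
	}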
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ba89ca77698a..f1eb83dc3d50 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -620,6 +620,32 @@ void tasklet_init(struct tasklet_struct *t,
 }
 EXPORT_SYMBOL(tasklet_init);
 
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+/*
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+{
+	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+			/*
+			 * Prevent a live lock when current preempted soft
+			 * interrupt processing or prevents ksoftirqd from
+			 * running. If the tasklet runs on a different CPU
+			 * then this has no effect other than doing the BH
+			 * disable/enable dance for nothing.
+			 */
+			local_bh_disable();
+			local_bh_enable();
+		} else {
+			cpu_relax();
+		}
+	}
+}
+EXPORT_SYMBOL(tasklet_unlock_spin_wait);
+#endif
+
 void tasklet_kill(struct tasklet_struct *t)
 {
 	if (in_interrupt())
@@ -633,7 +659,7 @@ void tasklet_kill(struct tasklet_struct *t)
 }
 EXPORT_SYMBOL(tasklet_kill);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 void tasklet_unlock(struct tasklet_struct *t)
 {
 	smp_mb__before_atomic();
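The trailing context stops inside tasklet_unlock(). For orientation, the
rest of that function and its sleeping counterpart, as reworked by the
earlier patches in this series that replaced the spin waits with
wait_var_event(), look roughly like this (a sketch of surrounding code,
not part of this diff):

void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	/* Wake anyone sleeping in tasklet_unlock_wait() / tasklet_kill(). */
	wake_up_var(&t->state);
}

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	/* Sleeping wait; only usable from preemptible context. */
	wait_var_event(t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}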