author    Thomas Gleixner <tglx@linutronix.de>  2020-11-13 17:02:18 +0300
committer Thomas Gleixner <tglx@linutronix.de>  2020-11-23 12:31:06 +0300
commit    ae9ef58996a4447dd44aa638759f913c883ba816 (patch)
tree      500d36ed134f933007bc7fe5f96ba9fd00a8a7a3 /kernel/softirq.c
parent    15115830c88751ba83068aa37da996602ddc6a61 (diff)
download  linux-ae9ef58996a4447dd44aa638759f913c883ba816.tar.xz
softirq: Move related code into one section
To prepare for adding an RT-aware variant of softirq serialization and
processing, move the related code into one section so the necessary
#ifdeffery is reduced to one.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20201113141733.974214480@linutronix.de
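As an illustration of what the consolidation enables (a sketch, not part of
the patch: CONFIG_PREEMPT_RT is the real config symbol an RT variant would
key on, while serialize_softirq() and process_softirq() are hypothetical
placeholder names), keeping the related functions in one contiguous section
lets a later RT variant be selected with a single conditional:

/*
 * Hypothetical sketch: with the section consolidated, a single
 * #ifdef/#else/#endif pair covers every affected function.
 */
#ifdef CONFIG_PREEMPT_RT
static void serialize_softirq(void) { /* RT-aware variant */ }
static void process_softirq(void)   { /* RT-aware variant */ }
#else
static void serialize_softirq(void) { /* existing !RT behaviour */ }
static void process_softirq(void)   { /* existing !RT behaviour */ }
#endif

With the functions scattered across the file, each one would need its own
conditional block, which is exactly the #ifdeffery the move avoids.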
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--  kernel/softirq.c | 107
1 file changed, 54 insertions(+), 53 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 09229ad82209..617009ccd82c 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -92,6 +92,13 @@ static bool ksoftirqd_running(unsigned long pending)
!__kthread_should_park(tsk);
}
+#ifdef CONFIG_TRACE_IRQFLAGS
+DEFINE_PER_CPU(int, hardirqs_enabled);
+DEFINE_PER_CPU(int, hardirq_context);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
+#endif
+
/*
* preempt_count and SOFTIRQ_OFFSET usage:
* - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
@@ -102,17 +109,11 @@ static bool ksoftirqd_running(unsigned long pending)
* softirq and whether we just have bh disabled.
*/
+#ifdef CONFIG_TRACE_IRQFLAGS
/*
- * This one is for softirq.c-internal use,
- * where hardirqs are disabled legitimately:
+ * This is for softirq.c-internal use, where hardirqs are disabled
+ * legitimately:
*/
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-DEFINE_PER_CPU(int, hardirqs_enabled);
-DEFINE_PER_CPU(int, hardirq_context);
-EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
-EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
-
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
@@ -203,6 +204,50 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
}
EXPORT_SYMBOL(__local_bh_enable_ip);
+static inline void invoke_softirq(void)
+{
+ if (ksoftirqd_running(local_softirq_pending()))
+ return;
+
+ if (!force_irqthreads) {
+#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
+ /*
+ * We can safely execute softirq on the current stack if
+ * it is the irq stack, because it should be near empty
+ * at this stage.
+ */
+ __do_softirq();
+#else
+ /*
+ * Otherwise, irq_exit() is called on the task stack that can
+ * be potentially deep already. So call softirq in its own stack
+ * to prevent from any overrun.
+ */
+ do_softirq_own_stack();
+#endif
+ } else {
+ wakeup_softirqd();
+ }
+}
+
+asmlinkage __visible void do_softirq(void)
+{
+ __u32 pending;
+ unsigned long flags;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ pending = local_softirq_pending();
+
+ if (pending && !ksoftirqd_running(pending))
+ do_softirq_own_stack();
+
+ local_irq_restore(flags);
+}
+
/*
* We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
* but break the loop if need_resched() is set or after 2 ms.
@@ -327,24 +372,6 @@ restart:
current_restore_flags(old_flags, PF_MEMALLOC);
}
-asmlinkage __visible void do_softirq(void)
-{
- __u32 pending;
- unsigned long flags;
-
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
-
- pending = local_softirq_pending();
-
- if (pending && !ksoftirqd_running(pending))
- do_softirq_own_stack();
-
- local_irq_restore(flags);
-}
-
/**
* irq_enter_rcu - Enter an interrupt context with RCU watching
*/
@@ -371,32 +398,6 @@ void irq_enter(void)
irq_enter_rcu();
}
-static inline void invoke_softirq(void)
-{
- if (ksoftirqd_running(local_softirq_pending()))
- return;
-
- if (!force_irqthreads) {
-#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
- /*
- * We can safely execute softirq on the current stack if
- * it is the irq stack, because it should be near empty
- * at this stage.
- */
- __do_softirq();
-#else
- /*
- * Otherwise, irq_exit() is called on the task stack that can
- * be potentially deep already. So call softirq in its own stack
- * to prevent from any overrun.
- */
- do_softirq_own_stack();
-#endif
- } else {
- wakeup_softirqd();
- }
-}
-
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON