path: root/arch/x86/include/asm/irq_stack.h
author    Thomas Gleixner <tglx@linutronix.de>  2021-02-10 02:40:50 +0300
committer Thomas Gleixner <tglx@linutronix.de>  2021-02-11 01:34:15 +0300
commit    52d743f3b71265e14560a38f4c835d07b9c6fc4c (patch)
tree      7516baf26ff08db3b91c4f89c6ce7bc5e5459250 /arch/x86/include/asm/irq_stack.h
parent    359f01d1816fc1ea0161e6c30722bef1ed6b8abb (diff)
download  linux-52d743f3b71265e14560a38f4c835d07b9c6fc4c.tar.xz
x86/softirq: Remove indirection in do_softirq_own_stack()
Use the new inline stack switching and remove the old ASM indirect call
implementation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20210210002512.972714001@linutronix.de
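The pattern the patch moves to can be illustrated outside the kernel. Below is
a minimal user-space sketch of inline stack switching; it is NOT the kernel's
call_on_irqstack(). CALL_ON_STACK, work(), the 64K replacement stack and the
build flags are assumptions for illustration only. The point it demonstrates:
the target symbol is baked into the instruction stream as a direct call,
whereas the removed asm_call_on_stack() helper received a function pointer and
had to call through it indirectly.

/*
 * Minimal user-space sketch of inline stack switching (x86-64 SysV ABI,
 * GNU C). All names here are illustrative, not kernel API.
 * Build e.g.: gcc -O2 -fno-pie -no-pie sketch.c
 */
#include <stdio.h>
#include <stdlib.h>

static void work(void)
{
	int probe;

	printf("work() running with stack near %p\n", (void *)&probe);
}

/*
 * Save the old %rsp at the top of the new stack, switch %rsp over, emit
 * a DIRECT call (the symbol is an immediate operand, so there is no
 * indirect branch), then pop the saved %rsp back after the callee
 * returns. Caller-saved registers are clobbered by the callee, hence
 * the clobber list.
 */
#define CALL_ON_STACK(new_tos, asm_call)				\
{									\
	register void *tos asm("r11") = (new_tos);			\
									\
	asm volatile("movq %%rsp, (%[tos])\n\t"				\
		     "movq %[tos], %%rsp\n\t"				\
		     asm_call						\
		     "popq %%rsp\n\t"					\
		     : [tos] "+r" (tos)					\
		     : [__func] "i" (work)				\
		     : "cc", "rax", "rcx", "rdx", "rsi", "rdi",		\
		       "r8", "r9", "r10", "memory");			\
}

int main(void)
{
	int probe;
	void *stack = malloc(64 * 1024);
	/* 16-byte aligned top slot; (tos) holds the saved %rsp */
	void *tos = (char *)stack + 64 * 1024 - 16;

	if (!stack)
		return 1;
	printf("main() running with stack near %p\n", (void *)&probe);
	CALL_ON_STACK(tos, "call %P[__func]\n\t");
	free(stack);
	return 0;
}

Besides dropping the out-of-line helper, a direct call avoids the
indirect-branch sequence (e.g. a retpoline) that a mitigated build would emit
for a call through a function pointer.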
Diffstat (limited to 'arch/x86/include/asm/irq_stack.h')
-rw-r--r--  arch/x86/include/asm/irq_stack.h  |  52
1 file changed, 16 insertions(+), 36 deletions(-)
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index dabc0cf60df5..fa444c27772a 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -185,20 +185,23 @@
			      IRQ_CONSTRAINTS, regs, vector); \
}
-static __always_inline bool irqstack_active(void)
-{
-	return __this_cpu_read(hardirq_stack_inuse);
-}
-
-void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
+#define ASM_CALL_SOFTIRQ \
+ "call %P[__func] \n"
-static __always_inline void __run_on_irqstack(void (*func)(void))
-{
-	void *tos = __this_cpu_read(hardirq_stack_ptr);
-
-	__this_cpu_write(hardirq_stack_inuse, true);
-	asm_call_on_stack(tos, func, NULL);
-	__this_cpu_write(hardirq_stack_inuse, false);
+/*
+ * Macro to invoke __do_softirq on the irq stack. Contrary to the above
+ * the only check which is necessary is whether the interrupt stack is
+ * in use already.
+ */
+#define run_softirq_on_irqstack_cond() \
+{ \
+ if (__this_cpu_read(hardirq_stack_inuse)) { \
+ __do_softirq(); \
+ } else { \
+ __this_cpu_write(hardirq_stack_inuse, true); \
+ call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ); \
+ __this_cpu_write(hardirq_stack_inuse, false); \
+ } \
}
#else /* CONFIG_X86_64 */
@@ -219,29 +222,6 @@ static __always_inline void __run_on_irqstack(void (*func)(void))
	irq_exit_rcu(); \
}
-static inline bool irqstack_active(void) { return false; }
-static inline void __run_on_irqstack(void (*func)(void)) { }
#endif /* !CONFIG_X86_64 */
-static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs)
-{
-	if (IS_ENABLED(CONFIG_X86_32))
-		return false;
-	if (!regs)
-		return !irqstack_active();
-	return !user_mode(regs) && !irqstack_active();
-}
-
-
-static __always_inline void run_on_irqstack_cond(void (*func)(void),
-						 struct pt_regs *regs)
-{
-	lockdep_assert_irqs_disabled();
-
-	if (irq_needs_irq_stack(regs))
-		__run_on_irqstack(func);
-	else
-		func();
-}
-
#endif
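For context on the consumer side, which is not part of this header: with the
inuse check and the stack switch folded into run_softirq_on_irqstack_cond(),
the arch hook named in the commit title can collapse to a single macro
invocation. A hedged sketch follows; the exact call site (e.g. in
arch/x86/kernel/irq_64.c) is an assumption here, not shown by this diff.

/* Sketch of the assumed caller; not part of this diff. */
void do_softirq_own_stack(void)
{
	run_softirq_on_irqstack_cond();
}

The inuse check is what keeps this safe when softirqs are processed while the
hardirq stack is already occupied: in that case __do_softirq() simply runs on
the current stack instead of switching a second time.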