author	Mark Rutland <mark.rutland@arm.com>	2021-06-07 12:46:11 +0300
committer	Will Deacon <will@kernel.org>	2021-06-07 13:35:55 +0300
commit	064dbfb4169141943ec7d9dbfd02974dd008f2ce (patch)
tree	2e67ec533160b8bc74d99ca82c0a11cd551be86b /arch/arm64/kernel/entry-common.c
parent	f8049488e7d37b0a0e438ee449e83b3e46958743 (diff)
download	linux-064dbfb4169141943ec7d9dbfd02974dd008f2ce.tar.xz
arm64: entry: convert IRQ+FIQ handlers to C
For various reasons we'd like to convert the bulk of arm64's exception triage logic to C. As a step towards that, this patch converts the EL1 and EL0 IRQ+FIQ triage logic to C.

Separate C functions are added for the native and compat cases so that in subsequent patches we can handle native/compat differences in C.

Since the triage functions can now call arm64_apply_bp_hardening() directly, the do_el0_irq_bp_hardening() wrapper function is removed.

Since the user_exit_irqoff macro is now unused, it is removed. The user_enter_irqoff macro is still used by the ret_to_user code, and cannot be removed at this time.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210607094624.34689-8-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
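A condensed, annotated view of the new C triage path added by this patch (the code below is lifted from the hunks further down and is not standalone buildable; the comments are explanatory annotations and not part of the patch):

static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	/* Run the root handler on the IRQ stack if still on the task stack. */
	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);
}

static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	/* Unmask Debug and SError; IRQ and FIQ stay masked. */
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	enter_el1_irq_or_nmi(regs);		/* pseudo-NMI vs. plain IRQ accounting */
	do_interrupt_handler(regs, handler);

	/* Preemption point previously handled in the el1_irq assembly. */
	if (IS_ENABLED(CONFIG_PREEMPTION) &&
	    READ_ONCE(current_thread_info()->preempt_count) == 0)
		arm64_preempt_schedule_irq();

	exit_el1_irq_or_nmi(regs);
}

static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode();

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	/*
	 * Replaces the do_el0_irq_bp_hardening() wrapper: apply branch
	 * predictor hardening when EL0 was interrupted at a PC with bit 55
	 * set, i.e. an address in the TTBR1 (kernel) half of the VA space.
	 */
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	do_interrupt_handler(regs, handler);
}

The asmlinkage el1_irq_handler(), el1_fiq_handler(), el0_irq_handler(), el0_fiq_handler() and their compat variants in the diff are thin wrappers that pass one of the root handler pointers, handle_arch_irq or handle_arch_fiq, into these common functions.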
Diffstat (limited to 'arch/arm64/kernel/entry-common.c')
-rw-r--r--	arch/arm64/kernel/entry-common.c	93
1 file changed, 90 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 08d17eb0ce13..ae1b6d7c00e1 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -19,6 +19,8 @@
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/stacktrace.h>
#include <asm/sysreg.h>
/*
@@ -101,7 +103,7 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs)
	__nmi_exit();
}
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_enter_nmi(regs);
@@ -109,7 +111,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
		enter_from_kernel_mode(regs);
}
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_exit_nmi(regs);
@@ -117,7 +119,7 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
		exit_to_kernel_mode(regs);
}
-asmlinkage void __sched arm64_preempt_schedule_irq(void)
+static void __sched arm64_preempt_schedule_irq(void)
{
	lockdep_assert_irqs_disabled();
@@ -142,6 +144,18 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void)
	preempt_schedule_irq();
}
+static void do_interrupt_handler(struct pt_regs *regs,
+				 void (*handler)(struct pt_regs *))
+{
+	if (on_thread_stack())
+		call_on_irq_stack(regs, handler);
+	else
+		handler(regs);
+}
+
+extern void (*handle_arch_irq)(struct pt_regs *);
+extern void (*handle_arch_fiq)(struct pt_regs *);
+
#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -308,6 +322,36 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
	}
}
+static void noinstr el1_interrupt(struct pt_regs *regs,
+				  void (*handler)(struct pt_regs *))
+{
+	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+	enter_el1_irq_or_nmi(regs);
+	do_interrupt_handler(regs, handler);
+
+	/*
+	 * Note: thread_info::preempt_count includes both thread_info::count
+	 * and thread_info::need_resched, and is not equivalent to
+	 * preempt_count().
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPTION) &&
+	    READ_ONCE(current_thread_info()->preempt_count) == 0)
+		arm64_preempt_schedule_irq();
+
+	exit_el1_irq_or_nmi(regs);
+}
+
+asmlinkage void noinstr el1_irq_handler(struct pt_regs *regs)
+{
+	el1_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el1_fiq_handler(struct pt_regs *regs)
+{
+	el1_interrupt(regs, handle_arch_fiq);
+}
+
asmlinkage void noinstr el1_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
@@ -507,6 +551,39 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
	}
}
+static void noinstr el0_interrupt(struct pt_regs *regs,
+				  void (*handler)(struct pt_regs *))
+{
+	enter_from_user_mode();
+
+	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+	if (regs->pc & BIT(55))
+		arm64_apply_bp_hardening();
+
+	do_interrupt_handler(regs, handler);
+}
+
+static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
+{
+	el0_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el0_irq_handler(struct pt_regs *regs)
+{
+	__el0_irq_handler_common(regs);
+}
+
+static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
+{
+	el0_interrupt(regs, handle_arch_fiq);
+}
+
+asmlinkage void noinstr el0_fiq_handler(struct pt_regs *regs)
+{
+	__el0_fiq_handler_common(regs);
+}
+
static void __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
@@ -583,6 +660,16 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
	}
}
+asmlinkage void noinstr el0_irq_compat_handler(struct pt_regs *regs)
+{
+	__el0_irq_handler_common(regs);
+}
+
+asmlinkage void noinstr el0_fiq_compat_handler(struct pt_regs *regs)
+{
+	__el0_fiq_handler_common(regs);
+}
+
asmlinkage void noinstr el0_error_compat_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
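Where the handle_arch_irq and handle_arch_fiq root handlers consumed above come from is outside the scope of this patch: they are installed at boot by the interrupt controller driver through set_handle_irq() and set_handle_fiq(). A minimal, hypothetical registration sketch (my_root_irq_handler and my_irqchip_init are illustrative names, not real kernel symbols):

static void __exception_irq_entry my_root_irq_handler(struct pt_regs *regs)
{
	/* Acknowledge the pending interrupt at the controller and dispatch it. */
}

static int __init my_irqchip_init(struct device_node *node,
				  struct device_node *parent)
{
	/* ... map controller registers, create the irqdomain ... */
	set_handle_irq(my_root_irq_handler);	/* becomes handle_arch_irq */
	return 0;
}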