author    Nicholas Piggin <npiggin@gmail.com>     2021-01-30 16:08:41 +0300
committer Michael Ellerman <mpe@ellerman.id.au>  2021-02-08 16:02:12 +0300
commit    a008f8f9fd67ffb13d906ef4ea6235a3d62dfdb6 (patch)
tree      b46363cdf1768c3aad470434f58a5ed9f29b7983 /arch/powerpc/mm/fault.c
parent    2a06bf3e95cd93e3640d431960181b8e47415f33 (diff)
download  linux-a008f8f9fd67ffb13d906ef4ea6235a3d62dfdb6.tar.xz
powerpc/64s/hash: improve context tracking of hash faults
This moves the 64s/hash context tracking from hash_page_mm() to __do_hash_fault(), so it's no longer called by the OCXL / SPU accelerators. That was certainly the wrong thing to be doing, because those callers are not low-level interrupt handlers and so should have entered a kernel context tracking environment already.

Then remain in kernel context for the duration of the fault, rather than enter/exit for the hash fault then enter/exit for the page fault, which is pointless.

Even still, calling exception_enter/exit in __do_hash_fault seems questionable, because that touches per-cpu variables, tracing, etc., which might have been interrupted by this hash fault or might themselves cause hash faults. But maybe I miss something, because hash_page_mm very deliberately calls trace_hash_fault too, for example. So for now go with it; it's no worse than before in this regard.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210130130852.2952424-32-npiggin@gmail.com
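For orientation, here is a rough sketch of the flow the patch aims for on 64s/hash, not code from the patch itself: the hash fault handler enters context tracking once and, if no HPTE can be inserted, falls through to the generic page fault path while still inside that single enter/exit pair. handle_hash_miss() below is a hypothetical stand-in for the real hash-miss handling; exception_enter/exit and hash__do_page_fault() are the real names used by this patch.

/*
 * Illustrative sketch only, NOT the patch's actual code.
 * handle_hash_miss() is a hypothetical stand-in for the real
 * hash-miss handling; the other symbols are real.
 */
static long do_hash_fault_sketch(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	long err;

	err = handle_hash_miss(regs);	/* try to insert an HPTE */
	if (err)
		/* No HPTE could be inserted: run the generic page
		 * fault path without a second enter/exit pair. */
		err = hash__do_page_fault(regs);

	exception_exit(prev_state);
	return err;
}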
Diffstat (limited to 'arch/powerpc/mm/fault.c')
-rw-r--r--  arch/powerpc/mm/fault.c | 39 ++++++++++++++++++++++++++++-----------
1 file changed, 28 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 8552ab6c008b..9c4220efc20f 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -387,7 +387,7 @@ static void sanity_check_fault(bool is_write, bool is_user,
  * The return value is 0 if the fault was handled, or the signal
  * number if this is a kernel fault that can't be handled here.
  */
-static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 			   unsigned long error_code)
 {
 	struct vm_area_struct * vma;
@@ -537,36 +537,53 @@ retry:
 
 	return 0;
 }
-NOKPROBE_SYMBOL(__do_page_fault);
+NOKPROBE_SYMBOL(___do_page_fault);
 
-DEFINE_INTERRUPT_HANDLER_RET(do_page_fault)
+static long __do_page_fault(struct pt_regs *regs)
 {
 	const struct exception_table_entry *entry;
-	enum ctx_state prev_state;
 	long err;
 
-	prev_state = exception_enter();
-
-	err = __do_page_fault(regs, regs->dar, regs->dsisr);
+	err = ___do_page_fault(regs, regs->dar, regs->dsisr);
 	if (likely(!err))
-		goto out;
+		return err;
 
 	entry = search_exception_tables(regs->nip);
 	if (likely(entry)) {
 		instruction_pointer_set(regs, extable_fixup(entry));
-		err = 0;
+		return 0;
 	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
-		/* 32 and 64e handle this in asm */
 		__bad_page_fault(regs, err);
-		err = 0;
+		return 0;
+	} else {
+		/* 32 and 64e handle the bad page fault in asm */
+		return err;
 	}
+}
+NOKPROBE_SYMBOL(__do_page_fault);
+
+DEFINE_INTERRUPT_HANDLER_RET(do_page_fault)
+{
+	enum ctx_state prev_state = exception_enter();
+	long err;
+
+	err = __do_page_fault(regs);
 
-out:
 	exception_exit(prev_state);
+
 	return err;
 }
 NOKPROBE_SYMBOL(do_page_fault);
 
+#ifdef CONFIG_PPC_BOOK3S_64
+/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
+long hash__do_page_fault(struct pt_regs *regs)
+{
+	return __do_page_fault(regs);
+}
+NOKPROBE_SYMBOL(hash__do_page_fault);
+#endif
+
 /*
  * bad_page_fault is called when we have a bad access from the kernel.
  * It is called from the DSI and ISI handlers in head.S and from some
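The shape of the refactoring above is worth noting: the handler body is split out as __do_page_fault() so that a caller which has already performed interrupt entry (do_hash_fault, via hash__do_page_fault) can reuse it without repeating exception_enter/exit. A minimal, self-contained user-space analogue of that split, with all names hypothetical:

#include <stdio.h>

/*
 * User-space analogue of the split, all names hypothetical: the
 * handler body carries no enter/exit bookkeeping of its own, so it
 * can be called both from a wrapper that does the bookkeeping and
 * from a caller that has already done it.
 */
static int tracking_depth;

static void track_enter(void) { tracking_depth++; }
static void track_exit(void)  { tracking_depth--; }

/* Reusable body: assumes the caller handled enter/exit. */
static long handle_fault_body(unsigned long addr)
{
	printf("handling fault at 0x%lx (depth %d)\n", addr, tracking_depth);
	return 0;
}

/* Normal entry point, like do_page_fault(): owns its enter/exit pair. */
static long handle_fault(unsigned long addr)
{
	long err;

	track_enter();
	err = handle_fault_body(addr);
	track_exit();
	return err;
}

/* Entry point for pre-tracked callers, like hash__do_page_fault(). */
static long handle_fault_pretracked(unsigned long addr)
{
	return handle_fault_body(addr);
}

int main(void)
{
	handle_fault(0x1000);		/* wrapper enters and exits */

	track_enter();			/* caller already "entered" */
	handle_fault_pretracked(0x2000);
	track_exit();

	return 0;
}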