author      Nicholas Piggin <npiggin@gmail.com>       2021-01-30 16:08:24 +0300
committer   Michael Ellerman <mpe@ellerman.id.au>     2021-02-08 16:02:10 +0300
commit      f4c03b0e520c5f56e569a8da3fce5ddbd0696742 (patch)
tree        36bfa25dfd766e2c6b8f0555d2453ffef8b85ded /arch
parent      4cb8428465148bcca0b6b8593d51f805818a70e0 (diff)
download    linux-f4c03b0e520c5f56e569a8da3fce5ddbd0696742.tar.xz
powerpc/64s: move bad_page_fault handling to C
This simplifies the code, and it is also useful when introducing interrupt handler wrappers, because the wrapper functionality does not cope with asm entry code calling into more than one handler function. 32-bit and 64e still have some such cases, which limits some of the ways they can use interrupt wrappers.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210130130852.2952424-15-npiggin@gmail.com
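For context, here is a minimal C sketch of the error path do_page_fault ends up with after this change, pieced together from the arch/powerpc/mm/fault.c hunk below. The inner fault handler name (___do_page_fault) and the surrounding boilerplate are simplified assumptions for illustration, not the exact upstream code:

	/*
	 * Sketch only: reconstructed from the fault.c hunk below.
	 * ___do_page_fault() stands in for the real inner fault handler;
	 * its name and signature here are assumptions.
	 */
	#include <linux/extable.h>
	#include <linux/ptrace.h>

	long do_page_fault(struct pt_regs *regs)
	{
		const struct exception_table_entry *entry;
		long err;

		err = ___do_page_fault(regs, regs->dar, regs->dsisr);
		if (likely(!err))
			goto out;

		/* Kernel-mode fault: try an exception table fixup first. */
		entry = search_exception_tables(instruction_pointer(regs));
		if (likely(entry)) {
			instruction_pointer_set(regs, extable_fixup(entry));
			err = 0;
		} else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
			/*
			 * No fixup found: report the bad kernel page fault
			 * here in C. 32-bit and 64e still do this in their
			 * asm entry code.
			 */
			__bad_page_fault(regs, err);
			err = 0;
		}
	out:
		return err;
	}

With the bad-fault reporting handled in C, the 64s asm hunks below shrink to a plain bl do_page_fault followed by b interrupt_return, since the entry code no longer needs to inspect the return value.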
Diffstat (limited to 'arch')
-rw-r--r--   arch/powerpc/kernel/exceptions-64s.S   12
-rw-r--r--   arch/powerpc/mm/fault.c                 4
2 files changed, 4 insertions, 12 deletions
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index fc793fa3fdf8..6e245e06848e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1447,12 +1447,6 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
- cmpdi r3,0
- beq+ interrupt_return
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl __bad_page_fault
b interrupt_return
1: bl do_break
@@ -1557,12 +1551,6 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
- cmpdi r3,0
- beq+ interrupt_return
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl __bad_page_fault
b interrupt_return
GEN_KVM instruction_access
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 970ac317e018..fc2d9a27c649 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -557,6 +557,10 @@ long do_page_fault(struct pt_regs *regs)
if (likely(entry)) {
instruction_pointer_set(regs, extable_fixup(entry));
err = 0;
+ } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+ /* 32 and 64e handle this in asm */
+ __bad_page_fault(regs, err);
+ err = 0;
}
out: