Diffstat (limited to 'arch/sh')
 -rw-r--r--  arch/sh/Kconfig                    |  16
 -rw-r--r--  arch/sh/drivers/platform_early.c   |   2
 -rw-r--r--  arch/sh/include/asm/checksum_32.h  |  36
 -rw-r--r--  arch/sh/include/asm/smp.h          |   1
 -rw-r--r--  arch/sh/kernel/disassemble.c       |   4
 -rw-r--r--  arch/sh/kernel/entry-common.S      |   1
 -rw-r--r--  arch/sh/kernel/kgdb.c              |   2
 -rw-r--r--  arch/sh/kernel/kprobes.c           |  58
 -rw-r--r--  arch/sh/kernel/ptrace_32.c         |  15
 -rw-r--r--  arch/sh/kernel/signal_32.c         |   2
 -rw-r--r--  arch/sh/kernel/vmlinux.lds.S       |   1
 -rw-r--r--  arch/sh/lib/checksum.S             | 119
 -rw-r--r--  arch/sh/mm/init.c                  |   9
 13 files changed, 64 insertions(+), 202 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index d20927128fce..18278152c91c 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -600,22 +600,6 @@ config PHYSICAL_START
where the fail safe kernel needs to run at a different address
than the panic-ed kernel.
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- depends on PROC_FS
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via prctl, it cannot be disabled and the task is only
- allowed to execute a few safe syscalls defined by each seccomp
- mode.
-
- If unsure, say N.
-
config SMP
bool "Symmetric multi-processing support"
depends on SYS_SUPPORTS_SMP
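The arch-specific SECCOMP entry disappears here; the option is now provided by the common arch-level Kconfig, so the feature itself is unchanged. For readers unfamiliar with the strict mode the removed help text describes, a minimal userspace sketch (a hypothetical program, not part of this patch): after the prctl call, only read(), write(), _exit() and sigreturn() remain allowed, and the mode cannot be turned off again.

#include <unistd.h>
#include <sys/prctl.h>
#include <linux/seccomp.h>

int main(void)
{
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0))
		return 1;			/* kernel lacks CONFIG_SECCOMP */
	write(1, "sandboxed\n", 10);		/* still permitted */
	_exit(0);				/* any other syscall is SIGKILL */
}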
diff --git a/arch/sh/drivers/platform_early.c b/arch/sh/drivers/platform_early.c
index f3dc3f25b3ff..143747c45206 100644
--- a/arch/sh/drivers/platform_early.c
+++ b/arch/sh/drivers/platform_early.c
@@ -246,7 +246,7 @@ static int __init sh_early_platform_driver_probe_id(char *class_str,
case EARLY_PLATFORM_ID_ERROR:
pr_warn("%s: unable to parse %s parameter\n",
class_str, epdrv->pdrv->driver.name);
- /* fall-through */
+ fallthrough;
case EARLY_PLATFORM_ID_UNSET:
match = NULL;
break;
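This hunk, like the similar ones further down, replaces a fall-through comment with the fallthrough pseudo-keyword. The keyword is defined in include/linux/compiler_attributes.h approximately as below, so compilers that implement the attribute can verify every fall-through is intentional instead of pattern-matching comment text:

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif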
diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h
index 91571a42e44e..1a391e3a7659 100644
--- a/arch/sh/include/asm/checksum_32.h
+++ b/arch/sh/include/asm/checksum_32.h
@@ -30,10 +30,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
* better 64-bit) boundary
*/
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
+#define _HAVE_ARCH_CSUM_AND_COPY
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
@@ -42,23 +41,18 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* access_ok().
*/
static inline
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
- return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+ return csum_partial_copy_generic(src, dst, len);
}
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
-__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
- if (access_ok(src, len))
- return csum_partial_copy_generic((__force const void *)src, dst,
- len, sum, err_ptr, NULL);
- if (len)
- *err_ptr = -EFAULT;
- return sum;
+ if (!access_ok(src, len))
+ return 0;
+ return csum_partial_copy_generic((__force const void *)src, dst, len);
}
/*
@@ -199,16 +193,10 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
- int len, __wsum sum,
- int *err_ptr)
+ int len)
{
- if (access_ok(dst, len))
- return csum_partial_copy_generic((__force const void *)src,
- dst, len, sum, NULL, err_ptr);
-
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
+ if (!access_ok(dst, len))
+ return 0;
+ return csum_partial_copy_generic((__force const void *)src, dst, len);
}
#endif /* __ASM_SH_CHECKSUM_H */
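The new convention drops both the seed sum and the error pointers: the assembly helper seeds the sum with ~0U internally (see the checksum.S hunk below), and since a ones'-complement accumulation that starts from a nonzero value can never fold to 0, a return of 0 now unambiguously means a faulting access. A hypothetical caller (illustrative only, not from this patch) would check it like this:

#include <linux/errno.h>
#include <net/checksum.h>

static int copy_and_csum(const void __user *usrc, void *kdst, int len,
			 __wsum *out)
{
	__wsum sum = csum_and_copy_from_user(usrc, kdst, len);

	if (!sum)		/* 0 can only mean a fault */
		return -EFAULT;
	*out = sum;
	return 0;
}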
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 1a0d7cf71c10..100bf241340b 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -8,7 +8,6 @@
#ifdef CONFIG_SMP
-#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/percpu.h>
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c
index 08e1af63edd9..34e25a439c81 100644
--- a/arch/sh/kernel/disassemble.c
+++ b/arch/sh/kernel/disassemble.c
@@ -486,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
pr_cont("xd%d", rn & ~1);
break;
}
- /* else, fall through */
+ fallthrough;
case D_REG_N:
pr_cont("dr%d", rn);
break;
@@ -495,7 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
pr_cont("xd%d", rm & ~1);
break;
}
- /* else, fall through */
+ fallthrough;
case D_REG_M:
pr_cont("dr%d", rm);
break;
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index ad963104d22d..91ab2607a1ff 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -370,7 +370,6 @@ syscall_trace_entry:
nop
cmp/eq #-1, r0
bt syscall_exit
- mov.l r0, @(OFF_R0,r15) ! Save return value
! Reload R0-R4 from kernel stack, where the
! parent may have modified them using
! ptrace(POKEUSR). (Note that R0-R2 are
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 0d5f3c9d52f3..e4147efa9ec6 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -266,7 +266,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
linux_regs->pc = addr;
- /* fallthrough */
+ fallthrough;
case 'D':
case 'k':
atomic_set(&kgdb_cpu_doing_single_step, -1);
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 318296f48f1a..756100b01e84 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -204,6 +204,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *) regs->pr;
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
regs->pr = (unsigned long)kretprobe_trampoline;
@@ -302,62 +303,9 @@ static void __used kretprobe_trampoline_holder(void)
*/
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ regs->pc = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more then one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler) {
- __this_cpu_write(current_kprobe, &ri->rp->kp);
- ri->rp->handler(ri, regs);
- __this_cpu_write(current_kprobe, NULL);
- }
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
-
- regs->pc = orig_ret_address;
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
-
- return orig_ret_address;
+ return 1;
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
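The open-coded instance walk is replaced by the generic __kretprobe_trampoline_handler(), which performs the same hash-bucket traversal, handler invocation and instance recycling for every architecture; the arch code now only supplies the trampoline address and an optional frame pointer (NULL here, matching the ri->fp = NULL added in arch_prepare_kretprobe() above). None of this changes what a kretprobe user writes; for context, a hypothetical module-side consumer:

#include <linux/kprobes.h>

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("probed function returned %ld\n",
		(long)regs_return_value(regs));
	return 0;
}

static struct kretprobe my_probe = {
	.handler	= my_ret_handler,
	.kp.symbol_name	= "kernel_clone",	/* hypothetical target */
};
/* register_kretprobe(&my_probe) from module init; the trampoline above
 * fires on every return from the probed function. */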
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index b05bf92f9c32..5281685f6ad1 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -455,16 +455,11 @@ long arch_ptrace(struct task_struct *child, long request,
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret = 0;
-
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- /*
- * Tracing decided this syscall should not happen.
- * We'll return a bogus call number to get an ENOSYS
- * error, but leave the original number in regs->regs[0].
- */
- ret = -1L;
+ tracehook_report_syscall_entry(regs)) {
+ regs->regs[0] = -ENOSYS;
+ return -1;
+ }
if (secure_computing() == -1)
return -1;
@@ -475,7 +470,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
- return ret ?: regs->regs[0];
+ return 0;
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
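Storing -ENOSYS into regs->regs[0] here appears to pair with the entry-common.S hunk above: the C code now writes the bogus return value itself, so the assembly no longer needs to save the trace-enter result into the r0 slot, and an aborted syscall looks to the tracee exactly like an unknown syscall number. The userspace view of that convention (a plain illustrative program):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long ret = syscall(-1);	/* no such syscall */
	printf("ret=%ld errno=%s\n", ret, strerror(errno));
	/* prints ret=-1 errno=Function not implemented (ENOSYS), the
	 * same result a tracer-denied syscall now produces */
	return 0;
}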
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index a0fbb8427b39..4fe3f00137bc 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -418,7 +418,7 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
case -ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
- /* fallthrough */
+ fallthrough;
case -ERESTARTNOINTR:
regs->regs[0] = save_r0;
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index bde7a6c01aaf..3161b9ccd2a5 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -76,6 +76,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
}
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index 97b5c2d9fec4..3e07074e0098 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -173,47 +173,27 @@ ENTRY(csum_partial)
mov r6, r0
/*
-unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
- int sum, int *src_err_ptr, int *dst_err_ptr)
+unsigned int csum_partial_copy_generic (const char *src, char *dst, int len)
*/
/*
- * Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for all access types.
- *
- * FIXME: could someone double-check whether I haven't mixed up some SRC and
- * DST definitions? It's damn hard to trigger all cases. I hope I got
- * them all but there's no guarantee.
+ * Copy from ds while checksumming, otherwise like csum_partial with initial
+ * sum being ~0U
*/
-#define SRC(...) \
+#define EXC(...) \
9999: __VA_ARGS__ ; \
.section __ex_table, "a"; \
.long 9999b, 6001f ; \
.previous
-#define DST(...) \
- 9999: __VA_ARGS__ ; \
- .section __ex_table, "a"; \
- .long 9999b, 6002f ; \
- .previous
-
!
! r4: const char *SRC
! r5: char *DST
! r6: int LEN
-! r7: int SUM
-!
-! on stack:
-! int *SRC_ERR_PTR
-! int *DST_ERR_PTR
!
ENTRY(csum_partial_copy_generic)
- mov.l r5,@-r15
- mov.l r6,@-r15
-
+ mov #-1,r7
mov #3,r0 ! Check src and dest are equally aligned
mov r4,r1
and r0,r1
@@ -243,11 +223,11 @@ ENTRY(csum_partial_copy_generic)
clrt
.align 2
5:
-SRC( mov.b @r4+,r1 )
-SRC( mov.b @r4+,r0 )
+EXC( mov.b @r4+,r1 )
+EXC( mov.b @r4+,r0 )
extu.b r1,r1
-DST( mov.b r1,@r5 )
-DST( mov.b r0,@(1,r5) )
+EXC( mov.b r1,@r5 )
+EXC( mov.b r0,@(1,r5) )
extu.b r0,r0
add #2,r5
@@ -276,8 +256,8 @@ DST( mov.b r0,@(1,r5) )
! Handle first two bytes as a special case
.align 2
1:
-SRC( mov.w @r4+,r0 )
-DST( mov.w r0,@r5 )
+EXC( mov.w @r4+,r0 )
+EXC( mov.w r0,@r5 )
add #2,r5
extu.w r0,r0
addc r0,r7
@@ -292,32 +272,32 @@ DST( mov.w r0,@r5 )
clrt
.align 2
1:
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@r5 )
-DST( mov.l r1,@(4,r5) )
+EXC( mov.l r0,@r5 )
+EXC( mov.l r1,@(4,r5) )
addc r1,r7
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@(8,r5) )
-DST( mov.l r1,@(12,r5) )
+EXC( mov.l r0,@(8,r5) )
+EXC( mov.l r1,@(12,r5) )
addc r1,r7
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@(16,r5) )
-DST( mov.l r1,@(20,r5) )
+EXC( mov.l r0,@(16,r5) )
+EXC( mov.l r1,@(20,r5) )
addc r1,r7
-SRC( mov.l @r4+,r0 )
-SRC( mov.l @r4+,r1 )
+EXC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r1 )
addc r0,r7
-DST( mov.l r0,@(24,r5) )
-DST( mov.l r1,@(28,r5) )
+EXC( mov.l r0,@(24,r5) )
+EXC( mov.l r1,@(28,r5) )
addc r1,r7
add #32,r5
movt r0
@@ -335,9 +315,9 @@ DST( mov.l r1,@(28,r5) )
clrt
shlr2 r6
3:
-SRC( mov.l @r4+,r0 )
+EXC( mov.l @r4+,r0 )
addc r0,r7
-DST( mov.l r0,@r5 )
+EXC( mov.l r0,@r5 )
add #4,r5
movt r0
dt r6
@@ -353,8 +333,8 @@ DST( mov.l r0,@r5 )
mov #2,r1
cmp/hs r1,r6
bf 5f
-SRC( mov.w @r4+,r0 )
-DST( mov.w r0,@r5 )
+EXC( mov.w @r4+,r0 )
+EXC( mov.w r0,@r5 )
extu.w r0,r0
add #2,r5
cmp/eq r1,r6
@@ -363,8 +343,8 @@ DST( mov.w r0,@r5 )
shll16 r0
addc r0,r7
5:
-SRC( mov.b @r4+,r0 )
-DST( mov.b r0,@r5 )
+EXC( mov.b @r4+,r0 )
+EXC( mov.b r0,@r5 )
extu.b r0,r0
#ifndef __LITTLE_ENDIAN__
shll8 r0
@@ -373,42 +353,13 @@ DST( mov.b r0,@r5 )
mov #0,r0
addc r0,r7
7:
-5000:
# Exception handler:
.section .fixup, "ax"
6001:
- mov.l @(8,r15),r0 ! src_err_ptr
- mov #-EFAULT,r1
- mov.l r1,@r0
-
- ! zero the complete destination - computing the rest
- ! is too much work
- mov.l @(4,r15),r5 ! dst
- mov.l @r15,r6 ! len
- mov #0,r7
-1: mov.b r7,@r5
- dt r6
- bf/s 1b
- add #1,r5
- mov.l 8000f,r0
- jmp @r0
- nop
- .align 2
-8000: .long 5000b
-
-6002:
- mov.l @(12,r15),r0 ! dst_err_ptr
- mov #-EFAULT,r1
- mov.l r1,@r0
- mov.l 8001f,r0
- jmp @r0
- nop
- .align 2
-8001: .long 5000b
-
+ rts
+ mov #0,r0
.previous
- add #8,r15
rts
mov r7,r0
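With a single fault label, every EXC()-wrapped load or store gets an __ex_table entry pointing at 6001, which now simply returns 0 instead of distinguishing source from destination faults and zeroing the rest of the buffer. That works because the routine seeds r7 with -1 (~0U): a 32-bit ones'-complement accumulation that starts from a nonzero value can never produce 0, leaving 0 free to mean "faulted". A small C model of that arithmetic property (a sketch, not the assembly):

#include <stdint.h>

static uint32_t csum_add(uint32_t sum, uint32_t word)
{
	uint64_t t = (uint64_t)sum + word;

	return (uint32_t)t + (uint32_t)(t >> 32);	/* end-around carry */
}

static uint32_t csum_words(const uint32_t *p, int n)
{
	uint32_t sum = ~0U;		/* the "mov #-1,r7" seed */

	while (n--)
		sum = csum_add(sum, *p++);
	return sum;			/* never 0 from a nonzero seed */
}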
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 4735176ab811..3348e0c4d769 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -226,15 +226,12 @@ void __init allocate_pgdat(unsigned int nid)
static void __init do_init_bootmem(void)
{
- struct memblock_region *reg;
+ unsigned long start_pfn, end_pfn;
+ int i;
/* Add active regions with valid PFNs. */
- for_each_memblock(memory, reg) {
- unsigned long start_pfn, end_pfn;
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
__add_active_range(0, start_pfn, end_pfn);
- }
/* All of system RAM sits in node 0 for the non-NUMA case */
allocate_pgdat(0);
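for_each_mem_pfn_range() iterates the same memblock.memory regions as the removed loop, but hands each range back already clamped and converted to page-frame numbers, so the per-region base/size-to-PFN conversions go away. For reference, the same iterator can also retrieve the owning node id (illustrative use, same pattern as the hunk above; i is the iteration cursor and MAX_NUMNODES means "any node"):

#include <linux/memblock.h>

unsigned long start_pfn, end_pfn;
int i, nid;

for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
	pr_info("node %d: PFNs [%lx, %lx)\n", nid, start_pfn, end_pfn);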