From fb2e7c5e33b341699f139b2ed972dca0a463a670 Mon Sep 17 00:00:00 2001
From: Gerald Schaefer
Date: Fri, 14 Nov 2008 18:18:00 +0100
Subject: [S390] Fix range for add_active_range() in setup_memory()

add_active_range() expects start_pfn + size as end_pfn value, i.e. not
the pfn of the last page frame but the one behind that. We used the pfn
of the last page frame so far, which can lead to a BUG_ON in
move_freepages(), when the kernelcore parameter is specified
(page_zone(start_page) != page_zone(end_page)).

Signed-off-by: Gerald Schaefer
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/setup.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 62122bad1e33..400b040df7fa 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -604,13 +604,13 @@ setup_memory(void)
 		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
 		start_chunk = PFN_DOWN(memory_chunk[i].addr);
-		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
+		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
 		end_chunk = min(end_chunk, end_pfn);
 		if (start_chunk >= end_chunk)
 			continue;
 		add_active_range(0, start_chunk, end_chunk);
 		pfn = max(start_chunk, start_pfn);
-		for (; pfn <= end_chunk; pfn++)
+		for (; pfn < end_chunk; pfn++)
 			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
 	}
--
cgit v1.2.3


From af4c68740e848019d8d14c52704ed8eacceddac6 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 14 Nov 2008 18:18:03 +0100
Subject: [S390] lockdep: fix compile bug

arch/s390/kernel/built-in.o: In function `cleanup_io_leave_insn':
mem_detect.c:(.text+0x10592): undefined reference to `lockdep_sys_exit'

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/entry.S | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch')

diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index ed500ef799b7..5f0c4fba87c3 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1116,6 +1116,8 @@ cleanup_io_leave_insn:
 .Ltrace_irq_on: .long	trace_hardirqs_on
 .Ltrace_irq_off:
 		.long	trace_hardirqs_off
+#endif
+#ifdef CONFIG_LOCKDEP
 .Llockdep_sys_exit:
 		.long	lockdep_sys_exit
 #endif
--
cgit v1.2.3


From 632448f65001c4935ed0d3bb362017d773da2eca Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 14 Nov 2008 18:18:04 +0100
Subject: [S390] ftrace: disable tracing on idle psw

Disable tracing on idle psw. Otherwise it would give us huge
preempt off times for idle. Which is rather pointless.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/process.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch')

diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 3e2c05cb6a87..04f8c67a6101 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -136,9 +136,12 @@ static void default_idle(void)
 		return;
 	}
 	trace_hardirqs_on();
+	/* Don't trace preempt off for idle. */
+	stop_critical_timings();
 	/* Wait for external, I/O or machine check interrupt. */
 	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
 			PSW_MASK_IO | PSW_MASK_EXT);
+	start_critical_timings();
 }
 
 void cpu_idle(void)
--
cgit v1.2.3


From 50bec4ce5d36ebf96189dcc54e20c7fce4bf61bf Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 14 Nov 2008 18:18:05 +0100
Subject: [S390] ftrace: fix kernel stack backchain walking

With CONFIG_IRQSOFF_TRACER the trace_hardirqs_off() function includes
a call to __builtin_return_address(1).
But we call trace_hardirqs_off() from early entry code. There we have
just a single stack frame. So this results in a kernel stack backchain
walk that would walk beyond the kernel stack. Following the NULL
terminated backchain this results in a lowcore read access.

To fix this we simply call trace_hardirqs_off_caller() and pass the
current instruction pointer.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/entry.S   | 18 +++++++++++-------
 arch/s390/kernel/entry64.S | 11 +++++++----
 2 files changed, 18 insertions(+), 11 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5f0c4fba87c3..08844fc24a2e 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -61,22 +61,25 @@ STACK_SIZE = 1 << STACK_SHIFT
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 	.macro	TRACE_IRQS_ON
-	l	%r1,BASED(.Ltrace_irq_on)
+	basr	%r2,%r0
+	l	%r1,BASED(.Ltrace_irq_on_caller)
 	basr	%r14,%r1
 	.endm
 
 	.macro	TRACE_IRQS_OFF
-	l	%r1,BASED(.Ltrace_irq_off)
+	basr	%r2,%r0
+	l	%r1,BASED(.Ltrace_irq_off_caller)
 	basr	%r14,%r1
 	.endm
 
 	.macro	TRACE_IRQS_CHECK
+	basr	%r2,%r0
 	tm	SP_PSW(%r15),0x03	# irqs enabled?
 	jz	0f
-	l	%r1,BASED(.Ltrace_irq_on)
+	l	%r1,BASED(.Ltrace_irq_on_caller)
 	basr	%r14,%r1
 	j	1f
-0:	l	%r1,BASED(.Ltrace_irq_off)
+0:	l	%r1,BASED(.Ltrace_irq_off_caller)
 	basr	%r14,%r1
 1:	.endm
@@ -1113,9 +1116,10 @@ cleanup_io_leave_insn:
 .Lschedtail:	.long	schedule_tail
 .Lsysc_table:	.long	sys_call_table
 #ifdef CONFIG_TRACE_IRQFLAGS
-.Ltrace_irq_on: .long	trace_hardirqs_on
-.Ltrace_irq_off:
-		.long	trace_hardirqs_off
+.Ltrace_irq_on_caller:
+		.long	trace_hardirqs_on_caller
+.Ltrace_irq_off_caller:
+		.long	trace_hardirqs_off_caller
 #endif
 #ifdef CONFIG_LOCKDEP
 .Llockdep_sys_exit:
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index d7ce150453f2..41aca06682aa 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -61,19 +61,22 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 	.macro	TRACE_IRQS_ON
-	brasl	%r14,trace_hardirqs_on
+	basr	%r2,%r0
+	brasl	%r14,trace_hardirqs_on_caller
 	.endm
 
 	.macro	TRACE_IRQS_OFF
-	brasl	%r14,trace_hardirqs_off
+	basr	%r2,%r0
+	brasl	%r14,trace_hardirqs_off_caller
 	.endm
 
 	.macro	TRACE_IRQS_CHECK
+	basr	%r2,%r0
 	tm	SP_PSW(%r15),0x03	# irqs enabled?
 	jz	0f
-	brasl	%r14,trace_hardirqs_on
+	brasl	%r14,trace_hardirqs_on_caller
 	j	1f
-0:	brasl	%r14,trace_hardirqs_off
+0:	brasl	%r14,trace_hardirqs_off_caller
 1:	.endm
 #else
--
cgit v1.2.3


From 74af283102b358b0da545460d0d176f473e110f6 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 14 Nov 2008 18:18:07 +0100
Subject: [S390] cpu topology: fix locking

cpu_coregroup_map used to grab a mutex on s390 since it was only called
from process context.
Since c7c22e4d5c1fdebfac4dba76de7d0338c2b0d832 "block: add support for
IO CPU affinity" this is not true anymore. It now also gets called from
softirq context.

To prevent possible deadlocks change this in architecture code and use
a spinlock instead of a mutex.
Cc: stable@kernel.org
Cc: Jens Axboe
Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/topology.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 632b13e10053..a947899dcba1 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -65,18 +65,21 @@ static int machine_has_topology_irq;
 static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
+/* topology_lock protects the core linked list */
+static DEFINE_SPINLOCK(topology_lock);
 
 cpumask_t cpu_core_map[NR_CPUS];
 
 cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
 	struct core_info *core = &core_info;
+	unsigned long flags;
 	cpumask_t mask;
 
 	cpus_clear(mask);
 	if (!machine_has_topology)
 		return cpu_present_map;
-	mutex_lock(&smp_cpu_state_mutex);
+	spin_lock_irqsave(&topology_lock, flags);
 	while (core) {
 		if (cpu_isset(cpu, core->mask)) {
 			mask = core->mask;
@@ -84,7 +87,7 @@ cpumask_t cpu_coregroup_map(unsigned int cpu)
 		}
 		core = core->next;
 	}
-	mutex_unlock(&smp_cpu_state_mutex);
+	spin_unlock_irqrestore(&topology_lock, flags);
 	if (cpus_empty(mask))
 		mask = cpumask_of_cpu(cpu);
 	return mask;
@@ -133,7 +136,7 @@ static void tl_to_cores(struct tl_info *info)
 	union tl_entry *tle, *end;
 	struct core_info *core = &core_info;
 
-	mutex_lock(&smp_cpu_state_mutex);
+	spin_lock_irq(&topology_lock);
 	clear_cores();
 	tle = info->tle;
 	end = (union tl_entry *)((unsigned long)info + info->length);
@@ -157,7 +160,7 @@ static void tl_to_cores(struct tl_info *info)
 		}
 		tle = next_tle(tle);
 	}
-	mutex_unlock(&smp_cpu_state_mutex);
+	spin_unlock_irq(&topology_lock);
 }
 
 static void topology_update_polarization_simple(void)
--
cgit v1.2.3


From d2f019fe40e8fecd822f87bc759f74925a5c31d6 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Fri, 14 Nov 2008 18:18:09 +0100
Subject: [S390] fix s390x_newuname

The uname system call for 64 bit compares current->personality without
masking the upper 16 bits. If e.g. READ_IMPLIES_EXEC is set the result
of a uname system call will always be s390x even if the process uses
the s390 personality.

Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/sys_s390.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 5fdb799062b7..4fe952e557ac 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -198,7 +198,7 @@ asmlinkage long s390x_newuname(struct new_utsname __user *name)
 {
 	int ret = sys_newuname(name);
 
-	if (current->personality == PER_LINUX32 && !ret) {
+	if (personality(current->personality) == PER_LINUX32 && !ret) {
 		ret = copy_to_user(name->machine, "s390\0\0\0\0", 8);
 		if (ret)
 			ret = -EFAULT;
 	}
 }
--
cgit v1.2.3
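
To make the effect of the last fix concrete, here is a minimal user-space
sketch (not part of any patch above) of why the personality value has to be
masked before the comparison in s390x_newuname(). The constant values and the
personality() macro below are assumptions that mirror
include/linux/personality.h of that era; only the masking behaviour matters
for the illustration.

/*
 * Illustrative sketch only -- not taken from the patches above. It shows
 * that a raw compare against PER_LINUX32 fails as soon as a flag such as
 * READ_IMPLIES_EXEC is or'ed into the personality, while the masked
 * compare used by the fix still matches.
 */
#include <stdio.h>

#define PER_MASK		0x00ff
#define PER_LINUX32		0x0008
#define READ_IMPLIES_EXEC	0x0400000

#define personality(pers)	((pers) & PER_MASK)

int main(void)
{
	unsigned long pers = PER_LINUX32 | READ_IMPLIES_EXEC;

	/* Old check: the flag bit makes the compare fail, uname keeps "s390x". */
	printf("raw compare:    %d\n", pers == PER_LINUX32);
	/* Fixed check: mask the flag bits first, uname reports "s390". */
	printf("masked compare: %d\n", personality(pers) == PER_LINUX32);
	return 0;
}

Compiled with any C compiler, the first printf prints 0 and the second
prints 1, which is exactly the difference the one-line patch makes for a
31-bit process that also has flag bits set in its personality.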