author    Yong Zhang <yong.zhang0@gmail.com>    2012-05-29 12:27:33 +0400
committer Thomas Gleixner <tglx@linutronix.de>  2012-06-05 19:27:13 +0400
commit    bc6833009583bd5b096ef7aa2bb006854a5a2dce (patch)
tree      e4499ab9851f6f24d748e58889cc3c76dc57e420 /arch/sparc
parent    459165e25030c0023cb54f73c14261a3d2f4a244 (diff)
SPARC: SMP: Remove call to ipi_call_lock_irq()/ipi_call_unlock_irq()
ipi_call_lock()/ipi_call_unlock() take and release call_function.lock,
respectively. That lock protects only the call_function data structure
itself and is completely unrelated to cpu_online_mask: the mask to which
the IPIs are sent is calculated before call_function.lock is taken in
smp_call_function_many(), so the locking around set_cpu_online() is
pointless and can be removed.

Delay enabling interrupts until after set_cpu_online().

[ tglx: Massaged changelog ]

Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
Cc: ralf@linux-mips.org
Cc: sshtylyov@mvista.com
Cc: david.daney@cavium.com
Cc: nikunj@linux.vnet.ibm.com
Cc: paulmck@linux.vnet.ibm.com
Cc: axboe@kernel.dk
Cc: peterz@infradead.org
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/20120529082732.GA4250@zhy
Acked-by: "David S. Miller" <davem@davemloft.net>
Acked-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
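For context, the ordering in smp_call_function_many() that makes the lock
irrelevant to cpu_online_mask can be sketched as follows. This is a
condensed, illustrative paraphrase of kernel/smp.c from that era, not the
literal source; declarations, error handling and the wait path are trimmed,
and the variable names are assumptions:

	/* Condensed sketch -- not the literal kernel/smp.c code. */
	void smp_call_function_many(const struct cpumask *mask,
				    smp_call_func_t func, void *info, bool wait)
	{
		struct call_function_data *data = &__get_cpu_var(cfd_data);
		unsigned long flags;

		/* The set of target CPUs is snapshotted from cpu_online_mask
		 * here, BEFORE call_function.lock is taken ...
		 */
		cpumask_and(data->cpumask, mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), data->cpumask);

		/* ... so ipi_call_lock()/ipi_call_unlock(), which only
		 * serialize the call_function queue below, cannot order
		 * anything against a concurrent set_cpu_online().
		 */
		raw_spin_lock_irqsave(&call_function.lock, flags);
		list_add_rcu(&data->csd.list, &call_function.queue);
		raw_spin_unlock_irqrestore(&call_function.lock, flags);

		/* IPIs go to the snapshot taken above, whatever
		 * cpu_online_mask says by now.
		 */
		arch_send_call_function_ipi_mask(data->cpumask);
	}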
Diffstat (limited to 'arch/sparc')
 arch/sparc/kernel/smp_64.c | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f591598d92f6..781bcb10b8bd 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -103,8 +103,6 @@ void __cpuinit smp_callin(void)
 	if (cheetah_pcache_forced_on)
 		cheetah_enable_pcache();
 
-	local_irq_enable();
-
 	callin_flag = 1;
 	__asm__ __volatile__("membar #Sync\n\t"
 			     "flush %%g6" : : : "memory");
@@ -124,9 +122,8 @@ void __cpuinit smp_callin(void)
 	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
 		rmb();
 
-	ipi_call_lock_irq();
 	set_cpu_online(cpuid, true);
-	ipi_call_unlock_irq();
+	local_irq_enable();
 
 	/* idle thread is expected to have preempt disabled */
 	preempt_disable();
@@ -1308,9 +1305,7 @@ int __cpu_disable(void)
 	mdelay(1);
 	local_irq_disable();
 
-	ipi_call_lock();
 	set_cpu_online(cpu, false);
-	ipi_call_unlock();
 
 	cpu_map_rebuild();
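Condensed from the hunks above, the boot-side ordering on the incoming CPU
after this change is (comments added here for illustration only):

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);	/* mark the CPU online first ... */
	local_irq_enable();		/* ... only then start taking IRQs */

	/* idle thread is expected to have preempt disabled */
	preempt_disable();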