author		Heiko Carstens <heiko.carstens@de.ibm.com>	2007-07-27 14:29:08 +0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2007-07-27 14:29:17 +0400
commit		3bb447fc8bb6523cb1cec7a0277d831a2b0462b7 (patch)
tree		e5f11fda2ff91d5670f1c046b53a12b4dbef55aa /net/iucv/iucv.c
parent		d941cf5e373c356723fa648b9f0302a11c9b1770 (diff)
download	linux-3bb447fc8bb6523cb1cec7a0277d831a2b0462b7.tar.xz
[S390] Convert to smp_call_function_single.
smp_call_function_single now has the same semantics as s390's smp_call_function_on. Therefore convert to the *single variant and get rid of some architecture specific code.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
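The conversion itself is mechanical: smp_call_function_on() took the target cpu as its last argument, while smp_call_function_single() takes it first, followed by the function, its info pointer, and the nonatomic and wait flags. The standalone sketch below illustrates only that argument reordering; the stub prototypes are modeled on the call sites visible in this patch, not on the real kernel declarations.

/* Illustrative stubs only -- shaped after the call sites in this patch,
 * not the actual kernel prototypes. */
#include <stdio.h>

static void iucv_allow_cpu(void *info)
{
	(void) info;
	printf("iucv_allow_cpu called\n");
}

/* Old s390-only helper: the target cpu is the last parameter. */
static int smp_call_function_on(void (*func)(void *), void *info,
				int nonatomic, int wait, int cpu)
{
	printf("old form: cpu=%d nonatomic=%d wait=%d\n", cpu, nonatomic, wait);
	func(info);
	return 0;
}

/* Generic replacement: the target cpu moves to the front. */
static int smp_call_function_single(int cpu, void (*func)(void *), void *info,
				    int nonatomic, int wait)
{
	printf("new form: cpu=%d nonatomic=%d wait=%d\n", cpu, nonatomic, wait);
	func(info);
	return 0;
}

int main(void)
{
	int cpu = 1;

	/* Before the patch (s390-specific interface): */
	smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
	/* After the patch (generic interface, same semantics): */
	smp_call_function_single(cpu, iucv_allow_cpu, NULL, 0, 1);
	return 0;
}

Every hunk in the diff below performs this same reordering; no behavioral change is intended.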
Diffstat (limited to 'net/iucv/iucv.c')
-rw-r--r--	net/iucv/iucv.c	15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index ad5150b8dfa9..983058d432dc 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -479,7 +479,8 @@ static void iucv_setmask_mp(void)
 		/* Enable all cpus with a declared buffer. */
 		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
 		    !cpu_isset(cpu, iucv_irq_cpumask))
-			smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
+			smp_call_function_single(cpu, iucv_allow_cpu,
+						 NULL, 0, 1);
 	preempt_enable();
 }
@@ -497,7 +498,7 @@ static void iucv_setmask_up(void)
 	cpumask = iucv_irq_cpumask;
 	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
 	for_each_cpu_mask(cpu, cpumask)
-		smp_call_function_on(iucv_block_cpu, NULL, 0, 1, cpu);
+		smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
 }

 /**
@@ -522,7 +523,7 @@ static int iucv_enable(void)
 	rc = -EIO;
 	preempt_disable();
 	for_each_online_cpu(cpu)
-		smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
 	preempt_enable();
 	if (cpus_empty(iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
@@ -578,7 +579,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	case CPU_ONLINE_FROZEN:
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
-		smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
 		break;
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
@@ -587,10 +588,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		if (cpus_empty(cpumask))
 			/* Can't offline last IUCV enabled cpu. */
 			return NOTIFY_BAD;
-		smp_call_function_on(iucv_retrieve_cpu, NULL, 0, 1, cpu);
+		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
 		if (cpus_empty(iucv_irq_cpumask))
-			smp_call_function_on(iucv_allow_cpu, NULL, 0, 1,
-					     first_cpu(iucv_buffer_cpumask));
+			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
+						 iucv_allow_cpu, NULL, 0, 1);
 		break;
 	}
 	return NOTIFY_OK;