author		Heiko Carstens <heiko.carstens@de.ibm.com>	2007-07-27 12:29:09 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2007-07-27 12:29:17 +0200
commit		8da1aecde00b74d63123e6031155bbb1424b338d
tree		a743032ff75c67e290093a8fe38884e41bbca938
parent		3bb447fc8bb6523cb1cec7a0277d831a2b0462b7
[S390] Improve __smp_call_function_map.
There is no need to disable bottom halves while holding call_lock; worse, doing so could imply that it is legal to call smp_call_function* from bh context, which it is not. Also, check whether func will be executed locally before disabling and afterwards re-enabling interrupts, so that interrupts are not toggled on every call to __smp_call_function_map.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
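The core of the change is the choice of locking primitive: spin_lock_bh() takes the lock and additionally disables bottom-half (softirq) processing on the local CPU, which is only required when the same lock can also be acquired from bh context. call_lock never is, so the plain variant suffices, and the _bh variant would wrongly suggest that smp_call_function* is bh-safe. A minimal kernel-style sketch of the two patterns (the lock and function names here are illustrative, not taken from smp.c):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* illustrative lock, stands in for call_lock */

static void locking_patterns(void)
{
	/*
	 * spin_lock_bh(): takes the lock and disables bottom halves.
	 * Only needed if the lock may also be taken from softirq
	 * context; otherwise it adds overhead and implies a bh-safety
	 * guarantee that does not exist.
	 */
	spin_lock_bh(&example_lock);
	spin_unlock_bh(&example_lock);

	/* spin_lock(): takes the lock only; sufficient for call_lock. */
	spin_lock(&example_lock);
	spin_unlock(&example_lock);
}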
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/kernel/smp.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index aff9f853fc3..03674fbe598 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -120,7 +120,7 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
 	if (wait)
 		data.finished = CPU_MASK_NONE;
 
-	spin_lock_bh(&call_lock);
+	spin_lock(&call_lock);
 	call_data = &data;
 
 	for_each_cpu_mask(cpu, map)
@@ -129,18 +129,16 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
 	/* Wait for response */
 	while (!cpus_equal(map, data.started))
 		cpu_relax();
-
 	if (wait)
 		while (!cpus_equal(map, data.finished))
 			cpu_relax();
-
-	spin_unlock_bh(&call_lock);
-
+	spin_unlock(&call_lock);
 out:
-	local_irq_disable();
-	if (local)
+	if (local) {
+		local_irq_disable();
 		func(info);
-	local_irq_enable();
+		local_irq_enable();
+	}
 }
 
 /*
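Putting the hunks together, the tail of __smp_call_function_map() after this patch reads roughly as follows. This is a reconstruction from the hunks above for readability, not a verbatim copy of smp.c; code between the hunks stays elided, as in the diff:

static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	/* ... declarations and setup elided by the diff ... */

	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock(&call_lock);		/* no _bh: call_lock is never taken from bh context */
	call_data = &data;

	/* ... per-cpu signalling elided by the diff ... */

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();
	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();
	spin_unlock(&call_lock);
out:
	if (local) {			/* touch interrupts only when func actually runs locally */
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
}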