Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/process.c | 15
-rw-r--r--  arch/s390/kernel/smp.c     | 53
-rw-r--r--  arch/s390/kernel/time.c    |  2
3 files changed, 47 insertions(+), 23 deletions(-)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index a6a4729e0e9..1c59ec161cf 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -114,24 +114,27 @@ extern void s390_handle_mcck(void);
 static void default_idle(void)
 {
 	int cpu, rc;
+	int nr_calls = 0;
+	void *hcpu;
 #ifdef CONFIG_SMP
 	struct s390_idle_data *idle;
 #endif
 	/* CPU is going idle. */
 	cpu = smp_processor_id();
-
+	hcpu = (void *)(long)cpu;
 	local_irq_disable();
 	if (need_resched()) {
 		local_irq_enable();
 		return;
 	}
-	rc = atomic_notifier_call_chain(&idle_chain,
-					S390_CPU_IDLE, (void *)(long) cpu);
-	if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
-		BUG();
-	if (rc != NOTIFY_OK) {
+	rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
+					  &nr_calls);
+	if (rc == NOTIFY_BAD) {
+		nr_calls--;
+		__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
+					     hcpu, nr_calls, NULL);
 		local_irq_enable();
 		return;
 	}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 85060659fb1..818bd09c026 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -626,13 +626,17 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
 	if (!lowcore)
 		return -ENOMEM;
 	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-	if (!async_stack)
-		goto out_async_stack;
 	panic_stack = __get_free_page(GFP_KERNEL);
-	if (!panic_stack)
-		goto out_panic_stack;
-
-	*lowcore = S390_lowcore;
+	if (!panic_stack || !async_stack)
+		goto out;
+	/*
+	 * Only need to copy the first 512 bytes from address 0. But since
+	 * the compiler emits a warning if src == NULL for memcpy use copy_page
+	 * instead. Copies more than needed but this code is not performance
+	 * critical.
+	 */
+	copy_page(lowcore, &S390_lowcore);
+	memset((void *)lowcore + 512, 0, sizeof(*lowcore) - 512);
 	lowcore->async_stack = async_stack + ASYNC_SIZE;
 	lowcore->panic_stack = panic_stack + PAGE_SIZE;
@@ -653,9 +657,8 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
 out_save_area:
 	free_page(panic_stack);
 #endif
-out_panic_stack:
+out:
 	free_pages(async_stack, ASYNC_ORDER);
-out_async_stack:
 	free_pages((unsigned long) lowcore, lc_order);
 	return -ENOMEM;
 }
@@ -719,8 +722,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
 	cpu_lowcore->current_task = (unsigned long) idle;
 	cpu_lowcore->cpu_data.cpu_nr = cpu;
-	cpu_lowcore->softirq_pending = 0;
-	cpu_lowcore->ext_call_fast = 0;
+	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
+	cpu_lowcore->ipl_device = S390_lowcore.ipl_device;
 	eieio();
 	while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -797,23 +800,43 @@ void cpu_die(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+#ifndef CONFIG_64BIT
+	unsigned long save_area = 0;
+#endif
+	unsigned long async_stack, panic_stack;
+	struct _lowcore *lowcore;
 	unsigned int cpu;
+	int lc_order;
 	smp_detect_cpus();
 	/* request the 0x1201 emergency signal external interrupt */
 	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 		panic("Couldn't request external interrupt 0x1201");
-	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
 	print_cpu_info(&S390_lowcore.cpu_data);
-	smp_alloc_lowcore(smp_processor_id());
+	/* Reallocate current lowcore, but keep its contents. */
+	lc_order = sizeof(long) == 8 ? 1 : 0;
+	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
+	panic_stack = __get_free_page(GFP_KERNEL);
+	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
 #ifndef CONFIG_64BIT
 	if (MACHINE_HAS_IEEE)
-		ctl_set_bit(14, 29); /* enable extended save area */
+		save_area = get_zeroed_page(GFP_KERNEL);
 #endif
-	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
-
+	local_irq_disable();
+	local_mcck_disable();
+	lowcore_ptr[smp_processor_id()] = lowcore;
+	*lowcore = S390_lowcore;
+	lowcore->panic_stack = panic_stack + PAGE_SIZE;
+	lowcore->async_stack = async_stack + ASYNC_SIZE;
+#ifndef CONFIG_64BIT
+	if (MACHINE_HAS_IEEE)
+		lowcore->extended_save_area_addr = (u32) save_area;
+#endif
+	set_prefix((u32)(unsigned long) lowcore);
+	local_mcck_enable();
+	local_irq_enable();
 	for_each_possible_cpu(cpu)
 		if (cpu != smp_processor_id())
 			smp_create_idle(cpu);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 3bbac1293be..76a5dd1b4ce 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -744,7 +744,6 @@ static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
 	}
 }
-#ifdef CONFIG_SMP
 static void etr_sync_cpu_start(void *dummy)
 {
 	int *in_sync = dummy;
@@ -777,7 +776,6 @@ static void etr_sync_cpu_start(void *dummy)
 static void etr_sync_cpu_end(void *dummy)
 {
 }
-#endif /* CONFIG_SMP */
 /*
  * Sync the TOD clock using the port referred to by aibp. This port