Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c        |   7
-rw-r--r--  arch/powerpc/kernel/head_32.S            |   2
-rw-r--r--  arch/powerpc/kernel/head_40x.S           |   2
-rw-r--r--  arch/powerpc/kernel/head_44x.S           |   2
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S     |   2
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c          |  12
-rw-r--r--  arch/powerpc/kernel/process.c            |   2
-rw-r--r--  arch/powerpc/kernel/rtas.c               |  99
-rw-r--r--  arch/powerpc/kernel/time.c               |   5
-rw-r--r--  arch/powerpc/kernel/vdso.c               |  11
-rw-r--r--  arch/powerpc/kernel/vdso32/cacheflush.S  |  41
-rw-r--r--  arch/powerpc/kernel/vdso64/cacheflush.S  |  41
12 files changed, 140 insertions, 86 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2c8e756d19a..ed083feaf6f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -284,6 +284,10 @@ int main(void)
 	DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
 	DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
 	DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+	DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
+	DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
+	DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
+	DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size));
 #ifdef CONFIG_PPC64
 	DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64));
 	DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
@@ -322,8 +326,7 @@ int main(void)
 	DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START));
 #endif
 
-#ifdef CONFIG_PPC64
 	DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
-#endif
+
 	return 0;
 }
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index a5b13ae7fd2..0f4fac51202 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -1311,7 +1311,7 @@ empty_zero_page:
 
 	.globl	swapper_pg_dir
 swapper_pg_dir:
-	.space	4096
+	.space	PGD_TABLE_SIZE
 
 	.globl	intercept_table
 intercept_table:
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index cfefc2df8f2..8552e67e3a8 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -994,7 +994,7 @@ empty_zero_page:
 	.space	4096
 	.globl	swapper_pg_dir
 swapper_pg_dir:
-	.space	4096
+	.space	PGD_TABLE_SIZE
 
 
 /* Stack for handling critical exceptions from kernel mode */
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 409db612392..56aba84c1f6 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -722,7 +722,7 @@ empty_zero_page:
  */
 	.globl	swapper_pg_dir
 swapper_pg_dir:
-	.space	8192
+	.space	PGD_TABLE_SIZE
 
 /* Reserved 4k for the critical exception stack & 4k for the machine
  * check stack per CPU for kernel mode exceptions */
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 4b9822728ae..7aecb39a5a4 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -1035,7 +1035,7 @@ empty_zero_page:
 	.space	4096
 	.globl	swapper_pg_dir
 swapper_pg_dir:
-	.space	4096
+	.space	PGD_TABLE_SIZE
 
 /* Reserved 4k for the critical exception stack & 4k for the machine
  * check stack per CPU for kernel mode exceptions */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index c6b1aa3efbb..13ebeb2d71e 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -45,10 +45,6 @@
 #include <asm/signal.h>
 #include <asm/dcr.h>
 
-#ifdef CONFIG_8xx
-#include <asm/commproc.h>
-#endif
-
 #ifdef CONFIG_PPC64
 EXPORT_SYMBOL(local_irq_restore);
 #endif
@@ -172,14 +168,6 @@ EXPORT_SYMBOL(console_drivers);
 EXPORT_SYMBOL(cacheable_memcpy);
 #endif
 
-#ifdef CONFIG_8xx
-EXPORT_SYMBOL(cpm_install_handler);
-EXPORT_SYMBOL(cpm_free_handler);
-#endif /* CONFIG_8xx */
-#if defined(CONFIG_8xx)
-EXPORT_SYMBOL(__res);
-#endif
-
 #ifdef CONFIG_PPC32
 EXPORT_SYMBOL(next_mmu_context);
 EXPORT_SYMBOL(set_context);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 41e13f4cc6e..b9d88374f14 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -350,7 +350,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	local_irq_save(flags);
 
 	account_system_vtime(current);
-	account_process_tick(current, 0);
+	account_process_vtime(current);
 	calculate_steal_time();
 
 	last = _switch(old_thread, new_thread);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 21478079828..52e95c2158c 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -19,6 +19,9 @@
 #include <linux/init.h>
 #include <linux/capability.h>
 #include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/completion.h>
+#include <linux/cpumask.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -34,6 +37,8 @@
 #include <asm/lmb.h>
 #include <asm/udbg.h>
 #include <asm/syscalls.h>
+#include <asm/smp.h>
+#include <asm/atomic.h>
 
 struct rtas_t rtas = {
 	.lock = SPIN_LOCK_UNLOCKED
@@ -41,8 +46,10 @@ struct rtas_t rtas = {
 EXPORT_SYMBOL(rtas);
 
 struct rtas_suspend_me_data {
-	long waiting;
-	struct rtas_args *args;
+	atomic_t working; /* number of cpus accessing this struct */
+	int token; /* ibm,suspend-me */
+	int error;
+	struct completion *complete; /* wait on this until working == 0 */
 };
 
 DEFINE_SPINLOCK(rtas_data_buf_lock);
@@ -657,50 +664,62 @@ static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
 #ifdef CONFIG_PPC_PSERIES
 static void rtas_percpu_suspend_me(void *info)
 {
-	int i;
 	long rc;
-	long flags;
+	unsigned long msr_save;
+	int cpu;
 	struct rtas_suspend_me_data *data =
 		(struct rtas_suspend_me_data *)info;
 
-	/*
-	 * We use "waiting" to indicate our state. As long
-	 * as it is >0, we are still trying to all join up.
-	 * If it goes to 0, we have successfully joined up and
-	 * one thread got H_CONTINUE. If any error happens,
-	 * we set it to <0.
-	 */
-	local_irq_save(flags);
-	do {
-		rc = plpar_hcall_norets(H_JOIN);
-		smp_rmb();
-	} while (rc == H_SUCCESS && data->waiting > 0);
-	if (rc == H_SUCCESS)
-		goto out;
+	atomic_inc(&data->working);
+
+	/* really need to ensure MSR.EE is off for H_JOIN */
+	msr_save = mfmsr();
+	mtmsr(msr_save & ~(MSR_EE));
+
+	rc = plpar_hcall_norets(H_JOIN);
+
+	mtmsr(msr_save);
 
-	if (rc == H_CONTINUE) {
-		data->waiting = 0;
-		data->args->args[data->args->nargs] =
-			rtas_call(ibm_suspend_me_token, 0, 1, NULL);
-		for_each_possible_cpu(i)
-			plpar_hcall_norets(H_PROD,i);
+	if (rc == H_SUCCESS) {
+		/* This cpu was prodded and the suspend is complete. */
+		goto out;
+	} else if (rc == H_CONTINUE) {
+		/* All other cpus are in H_JOIN, this cpu does
+		 * the suspend.
+		 */
+		printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n",
+		       smp_processor_id());
+		data->error = rtas_call(data->token, 0, 1, NULL);
+
+		if (data->error)
+			printk(KERN_DEBUG "ibm,suspend-me returned %d\n",
+			       data->error);
 	} else {
-		data->waiting = -EBUSY;
-		printk(KERN_ERR "Error on H_JOIN hypervisor call\n");
+		printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
+		       smp_processor_id(), rc);
+		data->error = rc;
 	}
-
+	/* This cpu did the suspend or got an error; in either case,
+	 * we need to prod all other other cpus out of join state.
+	 * Extra prods are harmless.
+	 */
+	for_each_online_cpu(cpu)
+		plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
 out:
-	local_irq_restore(flags);
-	return;
+	if (atomic_dec_return(&data->working) == 0)
+		complete(data->complete);
 }
 
 static int rtas_ibm_suspend_me(struct rtas_args *args)
 {
-	int i;
 	long state;
 	long rc;
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 	struct rtas_suspend_me_data data;
+	DECLARE_COMPLETION_ONSTACK(done);
+
+	if (!rtas_service_present("ibm,suspend-me"))
+		return -ENOSYS;
 
 	/* Make sure the state is valid */
 	rc = plpar_hcall(H_VASI_STATE, retbuf,
@@ -721,25 +740,23 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
 		return 0;
 	}
 
-	data.waiting = 1;
-	data.args = args;
+	atomic_set(&data.working, 0);
+	data.token = rtas_token("ibm,suspend-me");
+	data.error = 0;
+	data.complete = &done;
 
 	/* Call function on all CPUs.  One of us will make the
 	 * rtas call
 	 */
 	if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
-		data.waiting = -EINVAL;
+		data.error = -EINVAL;
 
-	if (data.waiting != 0)
-		printk(KERN_ERR "Error doing global join\n");
+	wait_for_completion(&done);
 
-	/* Prod each CPU.  This won't hurt, and will wake
-	 * anyone we successfully put to sleep with H_JOIN.
-	 */
-	for_each_possible_cpu(i)
-		plpar_hcall_norets(H_PROD, i);
+	if (data.error != 0)
+		printk(KERN_ERR "Error doing global join\n");
 
-	return data.waiting;
+	return data.error;
 }
 #else /* CONFIG_PPC_PSERIES */
 static int rtas_ibm_suspend_me(struct rtas_args *args)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index c0d77723ba1..a925a8eae12 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -241,8 +241,9 @@ void account_system_vtime(struct task_struct *tsk)
 		/* deltascaled includes both user and system time.
 		 * Hence scale it based on the purr ratio to estimate
 		 * the system time */
-		deltascaled = deltascaled * get_paca()->system_time /
-			(get_paca()->system_time + get_paca()->user_time);
+		if (get_paca()->user_time)
+			deltascaled = deltascaled * get_paca()->system_time /
+				(get_paca()->system_time + get_paca()->user_time);
 		delta += get_paca()->system_time;
 		get_paca()->system_time = 0;
 	}
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 2322ba5cce4..3702df7dc56 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -699,11 +699,22 @@ static int __init vdso_init(void)
 	vdso_data->icache_size = ppc64_caches.isize;
 	vdso_data->icache_line_size = ppc64_caches.iline_size;
 
+	/* XXXOJN: Blocks should be added to ppc64_caches and used instead */
+	vdso_data->dcache_block_size = ppc64_caches.dline_size;
+	vdso_data->icache_block_size = ppc64_caches.iline_size;
+	vdso_data->dcache_log_block_size = ppc64_caches.log_dline_size;
+	vdso_data->icache_log_block_size = ppc64_caches.log_iline_size;
+
 	/*
 	 * Calculate the size of the 64 bits vDSO
 	 */
 	vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
 	DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages);
+#else
+	vdso_data->dcache_block_size = L1_CACHE_BYTES;
+	vdso_data->dcache_log_block_size = L1_CACHE_SHIFT;
+	vdso_data->icache_block_size = L1_CACHE_BYTES;
+	vdso_data->icache_log_block_size = L1_CACHE_SHIFT;
 #endif /* CONFIG_PPC64 */
 
 
diff --git a/arch/powerpc/kernel/vdso32/cacheflush.S b/arch/powerpc/kernel/vdso32/cacheflush.S
index 9cb319992c3..1ba6feb71b3 100644
--- a/arch/powerpc/kernel/vdso32/cacheflush.S
+++ b/arch/powerpc/kernel/vdso32/cacheflush.S
@@ -23,29 +23,46 @@
  *
  * Flushes the data cache & invalidate the instruction cache for the
  * provided range [start, end[
- *
- * Note: all CPUs supported by this kernel have a 128 bytes cache
- * line size so we don't have to peek that info from the datapage
  */
 V_FUNCTION_BEGIN(__kernel_sync_dicache)
   .cfi_startproc
-	li	r5,127
-	andc	r6,r3,r5		/* round low to line bdy */
+	mflr	r12
+  .cfi_register lr,r12
+	mr	r11,r3
+	bl	__get_datapage@local
+	mtlr	r12
+	mr	r10,r3
+
+	lwz	r7,CFG_DCACHE_BLOCKSZ(r10)
+	addi	r5,r7,-1
+	andc	r6,r11,r5		/* round low to line bdy */
 	subf	r8,r6,r4		/* compute length */
 	add	r8,r8,r5		/* ensure we get enough */
-	srwi.	r8,r8,7			/* compute line count */
+	lwz	r9,CFG_DCACHE_LOGBLOCKSZ(r10)
+	srw.	r8,r8,r9		/* compute line count */
 	crclr	cr0*4+so
 	beqlr				/* nothing to do? */
 	mtctr	r8
-	mr	r3,r6
-1:	dcbst	0,r3
-	addi	r3,r3,128
+1:	dcbst	0,r6
+	add	r6,r6,r7
 	bdnz	1b
 	sync
+
+/* Now invalidate the instruction cache */
+
+	lwz	r7,CFG_ICACHE_BLOCKSZ(r10)
+	addi	r5,r7,-1
+	andc	r6,r11,r5		/* round low to line bdy */
+	subf	r8,r6,r4		/* compute length */
+	add	r8,r8,r5
+	lwz	r9,CFG_ICACHE_LOGBLOCKSZ(r10)
+	srw.	r8,r8,r9		/* compute line count */
+	crclr	cr0*4+so
+	beqlr				/* nothing to do? */
 	mtctr	r8
-1:	icbi	0,r6
-	addi	r6,r6,128
-	bdnz	1b
+2:	icbi	0,r6
+	add	r6,r6,r7
+	bdnz	2b
 	isync
 	li	r3,0
 	blr
diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S
index 66a36d3cc6a..69c5af2b3c9 100644
--- a/arch/powerpc/kernel/vdso64/cacheflush.S
+++ b/arch/powerpc/kernel/vdso64/cacheflush.S
@@ -23,29 +23,46 @@
  *
  * Flushes the data cache & invalidate the instruction cache for the
  * provided range [start, end[
- *
- * Note: all CPUs supported by this kernel have a 128 bytes cache
- * line size so we don't have to peek that info from the datapage
  */
 V_FUNCTION_BEGIN(__kernel_sync_dicache)
   .cfi_startproc
-	li	r5,127
-	andc	r6,r3,r5		/* round low to line bdy */
+	mflr	r12
+  .cfi_register lr,r12
+	mr	r11,r3
+	bl	V_LOCAL_FUNC(__get_datapage)
+	mtlr	r12
+	mr	r10,r3
+
+	lwz	r7,CFG_DCACHE_BLOCKSZ(r10)
+	addi	r5,r7,-1
+	andc	r6,r11,r5		/* round low to line bdy */
 	subf	r8,r6,r4		/* compute length */
 	add	r8,r8,r5		/* ensure we get enough */
-	srwi.	r8,r8,7			/* compute line count */
+	lwz	r9,CFG_DCACHE_LOGBLOCKSZ(r10)
+	srw.	r8,r8,r9		/* compute line count */
 	crclr	cr0*4+so
 	beqlr				/* nothing to do? */
 	mtctr	r8
-	mr	r3,r6
-1:	dcbst	0,r3
-	addi	r3,r3,128
+1:	dcbst	0,r6
+	add	r6,r6,r7
 	bdnz	1b
 	sync
+
+/* Now invalidate the instruction cache */
+
+	lwz	r7,CFG_ICACHE_BLOCKSZ(r10)
+	addi	r5,r7,-1
+	andc	r6,r11,r5		/* round low to line bdy */
+	subf	r8,r6,r4		/* compute length */
+	add	r8,r8,r5
+	lwz	r9,CFG_ICACHE_LOGBLOCKSZ(r10)
+	srw.	r8,r8,r9		/* compute line count */
+	crclr	cr0*4+so
+	beqlr				/* nothing to do? */
 	mtctr	r8
-1:	icbi	0,r6
-	addi	r6,r6,128
-	bdnz	1b
+2:	icbi	0,r6
+	add	r6,r6,r7
+	bdnz	2b
 	isync
 	li	r3,0
 	blr
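
Note on the cacheflush.S hunks above: __kernel_sync_dicache no longer assumes a fixed 128-byte cache block; it reads the block size and its log2 from the vDSO datapage (CFG_DCACHE_BLOCKSZ / CFG_DCACHE_LOGBLOCKSZ and the icache equivalents) and derives the loop bounds from them. The standalone C sketch below mirrors that address arithmetic only; sync_range() and its parameters are invented here for illustration and are not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: round 'start' down to a cache-block boundary and
 * count how many blocks cover [start, end), the same arithmetic the new
 * assembly performs with andc/subf/add/srw on the datapage values. */
static void sync_range(uintptr_t start, uintptr_t end,
		       unsigned int block_size, unsigned int log_block_size)
{
	uintptr_t p = start & ~(uintptr_t)(block_size - 1);
	uintptr_t nblocks = (end - p + block_size - 1) >> log_block_size;

	for (uintptr_t i = 0; i < nblocks; i++, p += block_size)
		printf("flush block at 0x%lx\n", (unsigned long)p);
}

int main(void)
{
	/* e.g. a 128-byte block (log2 = 7) over the range [0x1005, 0x1203) */
	sync_range(0x1005, 0x1203, 128, 7);
	return 0;
}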