Diffstat (limited to 'arch/powerpc/kernel')
34 files changed, 365 insertions(+), 633 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index c287980b7e6..80e9fe2632b 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -12,10 +12,10 @@ endif obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ irq.o align.o signal_32.o pmc.o vdso.o \ - init_task.o process.o + init_task.o process.o systbl.o obj-y += vdso32/ obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ - signal_64.o ptrace32.o systbl.o \ + signal_64.o ptrace32.o \ paca.o cpu_setup_power4.o \ firmware.o sysfs.o idle_64.o obj-$(CONFIG_PPC64) += vdso64/ @@ -46,7 +46,7 @@ extra-$(CONFIG_8xx) := head_8xx.o extra-y += vmlinux.lds obj-y += time.o prom.o traps.o setup-common.o udbg.o -obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o +obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o obj-$(CONFIG_MODULES) += ppc_ksyms.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 840aad43a98..c9a660e4c2d 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -92,7 +92,6 @@ int main(void) DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); - DEFINE(TI_SIGFRAME, offsetof(struct thread_info, nvgprs_frame)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); #ifdef CONFIG_PPC32 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 10696456a4c..e4e81374cb9 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -53,8 +53,10 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); PPC_FEATURE_HAS_MMU) #define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64) #define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4) -#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5) -#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS) +#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\ + PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) +#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\ + PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ PPC_FEATURE_BOOKE) @@ -267,7 +269,8 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "Cell Broadband Engine", .cpu_features = CPU_FTRS_CELL, .cpu_user_features = COMMON_USER_PPC64 | - PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP, + PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_SMT, .icache_bsize = 128, .dcache_bsize = 128, .cpu_setup = __setup_cpu_be, diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 8c21d378f5d..778f22fd85d 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -134,8 +134,10 @@ static void crash_kexec_prepare_cpus(void) * the crash CPU will send an IPI and wait for other CPUs to * respond. If not, proceed the kexec boot even though we failed to * capture other CPU states. + * Delay of at least 10 seconds. 
*/ - msecs = 1000000; + printk(KERN_ALERT "Sending IPI to other cpus...\n"); + msecs = 10000; while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) { barrier(); mdelay(1); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index f20a67261ec..4827ca1ec89 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -227,7 +227,7 @@ ret_from_syscall: MTMSRD(r10) lwz r9,TI_FLAGS(r12) li r8,-_LAST_ERRNO - andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK) + andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) bne- syscall_exit_work cmplw 0,r3,r8 blt+ syscall_exit_cont @@ -287,8 +287,10 @@ syscall_dotrace: syscall_exit_work: andi. r0,r9,_TIF_RESTOREALL - bne- 2f - cmplw 0,r3,r8 + beq+ 0f + REST_NVGPRS(r1) + b 2f +0: cmplw 0,r3,r8 blt+ 1f andi. r0,r9,_TIF_NOERROR bne- 1f @@ -302,9 +304,7 @@ syscall_exit_work: 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) beq 4f - /* Clear per-syscall TIF flags if any are set, but _leave_ - _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that - yet. */ + /* Clear per-syscall TIF flags if any are set. */ li r11,_TIF_PERSYSCALL_MASK addi r12,r12,TI_FLAGS @@ -318,8 +318,13 @@ syscall_exit_work: subi r12,r12,TI_FLAGS 4: /* Anything which requires enabling interrupts? */ - andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS) - beq 7f + andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) + beq ret_from_except + + /* Re-enable interrupts */ + ori r10,r10,MSR_EE + SYNC + MTMSRD(r10) /* Save NVGPRS if they're not saved already */ lwz r4,_TRAP(r1) @@ -328,71 +333,11 @@ syscall_exit_work: SAVE_NVGPRS(r1) li r4,0xc00 stw r4,_TRAP(r1) - - /* Re-enable interrupts */ -5: ori r10,r10,MSR_EE - SYNC - MTMSRD(r10) - - andi. r0,r9,_TIF_SAVE_NVGPRS - bne save_user_nvgprs - -save_user_nvgprs_cont: - andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) - beq 7f - +5: addi r3,r1,STACK_FRAME_OVERHEAD bl do_syscall_trace_leave - REST_NVGPRS(r1) - -6: lwz r3,GPR3(r1) - LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ - SYNC - MTMSRD(r10) /* disable interrupts again */ - rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ - lwz r9,TI_FLAGS(r12) -7: - andi. r0,r9,_TIF_NEED_RESCHED - bne 8f - lwz r5,_MSR(r1) - andi. r5,r5,MSR_PR - beq ret_from_except - andi. r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK - beq ret_from_except - b do_user_signal -8: - ori r10,r10,MSR_EE - SYNC - MTMSRD(r10) /* re-enable interrupts */ - bl schedule - b 6b - -save_user_nvgprs: - lwz r8,TI_SIGFRAME(r12) - -.macro savewords start, end - 1: stw \start,4*(\start)(r8) - .section __ex_table,"a" - .align 2 - .long 1b,save_user_nvgprs_fault - .previous - .if \end - \start - savewords "(\start+1)",\end - .endif -.endm - savewords 14,31 - b save_user_nvgprs_cont - - -save_user_nvgprs_fault: - li r3,11 /* SIGSEGV */ - lwz r4,TI_TASK(r12) - bl force_sigsegv + b ret_from_except_full - rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ - lwz r9,TI_FLAGS(r12) - b save_user_nvgprs_cont - #ifdef SHOW_SYSCALLS do_show_syscall: #ifdef SHOW_SYSCALLS_TASK @@ -490,6 +435,14 @@ ppc_clone: stw r0,_TRAP(r1) /* register set saved */ b sys_clone + .globl ppc_swapcontext +ppc_swapcontext: + SAVE_NVGPRS(r1) + lwz r0,_TRAP(r1) + rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ + stw r0,_TRAP(r1) /* register set saved */ + b sys_swapcontext + /* * Top-level page fault handling. 
* This is in assembler because if do_page_fault tells us that @@ -683,7 +636,7 @@ user_exc_return: /* r10 contains MSR_KERNEL here */ /* Check current_thread_info()->flags */ rlwinm r9,r1,0,0,(31-THREAD_SHIFT) lwz r9,TI_FLAGS(r9) - andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_RESTORE_SIGMASK) + andi. r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED) bne do_work restore_user: diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 388f861b8ed..24be0cf86d7 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -160,7 +160,7 @@ syscall_exit: mtmsrd r10,1 ld r9,TI_FLAGS(r12) li r11,-_LAST_ERRNO - andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR|_TIF_RESTORE_SIGMASK) + andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) bne- syscall_exit_work cmpld r3,r11 ld r5,_CCR(r1) @@ -216,8 +216,10 @@ syscall_exit_work: If TIF_NOERROR is set, just save r3 as it is. */ andi. r0,r9,_TIF_RESTOREALL - bne- 2f - cmpld r3,r11 /* r10 is -LAST_ERRNO */ + beq+ 0f + REST_NVGPRS(r1) + b 2f +0: cmpld r3,r11 /* r10 is -LAST_ERRNO */ blt+ 1f andi. r0,r9,_TIF_NOERROR bne- 1f @@ -229,9 +231,7 @@ syscall_exit_work: 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) beq 4f - /* Clear per-syscall TIF flags if any are set, but _leave_ - _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that - yet. */ + /* Clear per-syscall TIF flags if any are set. */ li r11,_TIF_PERSYSCALL_MASK addi r12,r12,TI_FLAGS @@ -240,10 +240,9 @@ syscall_exit_work: stdcx. r10,0,r12 bne- 3b subi r12,r12,TI_FLAGS - -4: bl .save_nvgprs - /* Anything else left to do? */ - andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS) + +4: /* Anything else left to do? */ + andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) beq .ret_from_except_lite /* Re-enable interrupts */ @@ -251,26 +250,10 @@ syscall_exit_work: ori r10,r10,MSR_EE mtmsrd r10,1 - andi. r0,r9,_TIF_SAVE_NVGPRS - bne save_user_nvgprs - - /* If tracing, re-enable interrupts and do it */ -save_user_nvgprs_cont: - andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) - beq 5f - + bl .save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD bl .do_syscall_trace_leave - REST_NVGPRS(r1) - clrrdi r12,r1,THREAD_SHIFT - - /* Disable interrupts again and handle other work if any */ -5: mfmsr r10 - rldicl r10,r10,48,1 - rotldi r10,r10,16 - mtmsrd r10,1 - - b .ret_from_except_lite + b .ret_from_except /* Save non-volatile GPRs, if not already saved. */ _GLOBAL(save_nvgprs) @@ -282,51 +265,6 @@ _GLOBAL(save_nvgprs) std r0,_TRAP(r1) blr - -save_user_nvgprs: - ld r10,TI_SIGFRAME(r12) - andi. 
r0,r9,_TIF_32BIT - beq- save_user_nvgprs_64 - - /* 32-bit save to userspace */ - -.macro savewords start, end - 1: stw \start,4*(\start)(r10) - .section __ex_table,"a" - .align 3 - .llong 1b,save_user_nvgprs_fault - .previous - .if \end - \start - savewords "(\start+1)",\end - .endif -.endm - savewords 14,31 - b save_user_nvgprs_cont - -save_user_nvgprs_64: - /* 64-bit save to userspace */ - -.macro savelongs start, end - 1: std \start,8*(\start)(r10) - .section __ex_table,"a" - .align 3 - .llong 1b,save_user_nvgprs_fault - .previous - .if \end - \start - savelongs "(\start+1)",\end - .endif -.endm - savelongs 14,31 - b save_user_nvgprs_cont - -save_user_nvgprs_fault: - li r3,11 /* SIGSEGV */ - ld r4,TI_TASK(r12) - bl .force_sigsegv - - clrrdi r12,r1,THREAD_SHIFT - ld r9,TI_FLAGS(r12) - b save_user_nvgprs_cont /* * The sigsuspend and rt_sigsuspend system calls can call do_signal @@ -352,6 +290,16 @@ _GLOBAL(ppc_clone) bl .sys_clone b syscall_exit +_GLOBAL(ppc32_swapcontext) + bl .save_nvgprs + bl .compat_sys_swapcontext + b syscall_exit + +_GLOBAL(ppc64_swapcontext) + bl .save_nvgprs + bl .sys_swapcontext + b syscall_exit + _GLOBAL(ret_from_fork) bl .schedule_tail REST_NVGPRS(r1) diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index e4362dfa37f..340730fb8c9 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -66,7 +66,7 @@ _GLOBAL(load_up_fpu) #else ld r4,PACACURRENT(r13) addi r5,r4,THREAD /* Get THREAD */ - ld r4,THREAD_FPEXC_MODE(r5) + lwz r4,THREAD_FPEXC_MODE(r5) ori r12,r12,MSR_FP or r12,r12,r4 std r12,_MSR(r1) diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 03b25f9359f..a0579e859b2 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -714,6 +714,7 @@ AltiVecUnavailable: #ifdef CONFIG_ALTIVEC bne load_up_altivec /* if from user, just load it up */ #endif /* CONFIG_ALTIVEC */ + addi r3,r1,STACK_FRAME_OVERHEAD EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) PerformanceMonitor: diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 30826846634..9b65029dd2a 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -139,7 +139,7 @@ _GLOBAL(__secondary_hold) ori r24,r24,MSR_RI mtmsrd r24 /* RI on */ - /* Grab our linux cpu number */ + /* Grab our physical cpu number */ mr r24,r3 /* Tell the master cpu we're here */ @@ -153,12 +153,7 @@ _GLOBAL(__secondary_hold) cmpdi 0,r4,1 bne 100b -#ifdef CONFIG_HMT - SET_REG_IMMEDIATE(r4, .hmt_init) - mtctr r4 - bctr -#else -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init) mtctr r4 mr r3,r24 @@ -166,7 +161,6 @@ _GLOBAL(__secondary_hold) #else BUG_OPCODE #endif -#endif /* This value is used to mark exception frames on the stack. 
*/ .section ".toc","aw" @@ -321,7 +315,6 @@ exception_marker: label##_pSeries: \ HMT_MEDIUM; \ mtspr SPRN_SPRG1,r13; /* save r13 */ \ - RUNLATCH_ON(r13); \ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) #define STD_EXCEPTION_ISERIES(n, label, area) \ @@ -329,7 +322,6 @@ label##_pSeries: \ label##_iSeries: \ HMT_MEDIUM; \ mtspr SPRN_SPRG1,r13; /* save r13 */ \ - RUNLATCH_ON(r13); \ EXCEPTION_PROLOG_ISERIES_1(area); \ EXCEPTION_PROLOG_ISERIES_2; \ b label##_common @@ -339,7 +331,6 @@ label##_iSeries: \ label##_iSeries: \ HMT_MEDIUM; \ mtspr SPRN_SPRG1,r13; /* save r13 */ \ - RUNLATCH_ON(r13); \ EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ lbz r10,PACAPROCENABLED(r13); \ cmpwi 0,r10,0; \ @@ -392,6 +383,7 @@ label##_common: \ label##_common: \ EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ DISABLE_INTS; \ + bl .ppc64_runlatch_on; \ addi r3,r1,STACK_FRAME_OVERHEAD; \ bl hdlr; \ b .ret_from_except_lite @@ -409,7 +401,6 @@ __start_interrupts: _machine_check_pSeries: HMT_MEDIUM mtspr SPRN_SPRG1,r13 /* save r13 */ - RUNLATCH_ON(r13) EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) . = 0x300 @@ -436,7 +427,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB) data_access_slb_pSeries: HMT_MEDIUM mtspr SPRN_SPRG1,r13 - RUNLATCH_ON(r13) mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_DAR @@ -462,7 +452,6 @@ data_access_slb_pSeries: instruction_access_slb_pSeries: HMT_MEDIUM mtspr SPRN_SPRG1,r13 - RUNLATCH_ON(r13) mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ @@ -493,7 +482,6 @@ instruction_access_slb_pSeries: .globl system_call_pSeries system_call_pSeries: HMT_MEDIUM - RUNLATCH_ON(r9) mr r9,r13 mfmsr r10 mfspr r13,SPRN_SPRG3 @@ -577,7 +565,6 @@ slb_miss_user_pseries: system_reset_fwnmi: HMT_MEDIUM mtspr SPRN_SPRG1,r13 /* save r13 */ - RUNLATCH_ON(r13) EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) .globl machine_check_fwnmi @@ -585,7 +572,6 @@ system_reset_fwnmi: machine_check_fwnmi: HMT_MEDIUM mtspr SPRN_SPRG1,r13 /* save r13 */ - RUNLATCH_ON(r13) EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) #ifdef CONFIG_PPC_ISERIES @@ -749,11 +735,12 @@ iSeries_secondary_smp_loop: .globl decrementer_iSeries_masked decrementer_iSeries_masked: + /* We may not have a valid TOC pointer in here. 
*/ li r11,1 ld r12,PACALPPACAPTR(r13) stb r11,LPPACADECRINT(r12) - LOAD_REG_ADDRBASE(r12,tb_ticks_per_jiffy) - lwz r12,ADDROFF(tb_ticks_per_jiffy)(r12) + LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy) + lwz r12,0(r12) mtspr SPRN_DEC,r12 /* fall through */ @@ -895,7 +882,6 @@ unrecov_fer: .align 7 .globl data_access_common data_access_common: - RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */ mfspr r10,SPRN_DAR std r10,PACA_EXGEN+EX_DAR(r13) mfspr r10,SPRN_DSISR @@ -1043,6 +1029,7 @@ hardware_interrupt_common: EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) hardware_interrupt_entry: DISABLE_INTS + bl .ppc64_runlatch_on addi r3,r1,STACK_FRAME_OVERHEAD bl .do_IRQ b .ret_from_except_lite @@ -1550,6 +1537,9 @@ _STATIC(__boot_from_prom) mr r28,r6 mr r27,r7 + /* Align the stack to 16-byte boundary for broken yaboot */ + rldicr r1,r1,0,59 + /* Make sure we are running in 64 bits mode */ bl .enable_64b_mode @@ -1817,22 +1807,6 @@ _STATIC(start_here_multiplatform) ori r6,r6,MSR_RI mtmsrd r6 /* RI on */ -#ifdef CONFIG_HMT - /* Start up the second thread on cpu 0 */ - mfspr r3,SPRN_PVR - srwi r3,r3,16 - cmpwi r3,0x34 /* Pulsar */ - beq 90f - cmpwi r3,0x36 /* Icestar */ - beq 90f - cmpwi r3,0x37 /* SStar */ - beq 90f - b 91f /* HMT not supported */ -90: li r3,0 - bl .hmt_start_secondary -91: -#endif - /* The following gets the stack and TOC set up with the regs */ /* pointing to the real addr of the kernel stack. This is */ /* all done to support the C function call below which sets */ @@ -1946,77 +1920,8 @@ _STATIC(start_here_common) bl .start_kernel -_GLOBAL(hmt_init) -#ifdef CONFIG_HMT - LOAD_REG_IMMEDIATE(r5, hmt_thread_data) - mfspr r7,SPRN_PVR - srwi r7,r7,16 - cmpwi r7,0x34 /* Pulsar */ - beq 90f - cmpwi r7,0x36 /* Icestar */ - beq 91f - cmpwi r7,0x37 /* SStar */ - beq 91f - b 101f -90: mfspr r6,SPRN_PIR - andi. r6,r6,0x1f - b 92f -91: mfspr r6,SPRN_PIR - andi. r6,r6,0x3ff -92: sldi r4,r24,3 - stwx r6,r5,r4 - bl .hmt_start_secondary - b 101f - -__hmt_secondary_hold: - LOAD_REG_IMMEDIATE(r5, hmt_thread_data) - clrldi r5,r5,4 - li r7,0 - mfspr r6,SPRN_PIR - mfspr r8,SPRN_PVR - srwi r8,r8,16 - cmpwi r8,0x34 - bne 93f - andi. r6,r6,0x1f - b 103f -93: andi. r6,r6,0x3f - -103: lwzx r8,r5,r7 - cmpw r8,r6 - beq 104f - addi r7,r7,8 - b 103b - -104: addi r7,r7,4 - lwzx r9,r5,r7 - mr r24,r9 -101: -#endif - mr r3,r24 - b .pSeries_secondary_smp_init - -#ifdef CONFIG_HMT -_GLOBAL(hmt_start_secondary) - LOAD_REG_IMMEDIATE(r4,__hmt_secondary_hold) - clrldi r4,r4,4 - mtspr SPRN_NIADORM, r4 - mfspr r4, SPRN_MSRDORM - li r5, -65 - and r4, r4, r5 - mtspr SPRN_MSRDORM, r4 - lis r4,0xffef - ori r4,r4,0x7403 - mtspr SPRN_TSC, r4 - li r4,0x1f4 - mtspr SPRN_TST, r4 - mfspr r4, SPRN_HID0 - ori r4, r4, 0x1 - mtspr SPRN_HID0, r4 - mfspr r4, SPRN_CTRLF - oris r4, r4, 0x40 - mtspr SPRN_CTRLT, r4 - blr -#endif + /* Not reached */ + BUG_OPCODE /* * We put a few things here that have to be page-aligned. 
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 4d9b4388918..946f3219fd2 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -334,9 +334,6 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, spin_unlock_irqrestore(&(tbl->it_lock), flags); - /* Make sure updates are seen by hardware */ - mb(); - DBG("mapped %d elements:\n", outcount); /* For the sake of iommu_unmap_sg, we clear out the length in the @@ -347,6 +344,10 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, outs->dma_address = DMA_ERROR_CODE; outs->dma_length = 0; } + + /* Make sure updates are seen by hardware */ + mb(); + return outcount; failure: @@ -358,6 +359,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr) >> PAGE_SHIFT; __iommu_free(tbl, vaddr, npages); + s->dma_address = DMA_ERROR_CODE; + s->dma_length = 0; } } spin_unlock_irqrestore(&(tbl->it_lock), flags); diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 1ae96a8ed7e..e789fef4eb8 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -341,7 +341,7 @@ static int lparcfg_data(struct seq_file *m, void *v) const char *system_id = ""; unsigned int *lp_index_ptr, lp_index = 0; struct device_node *rtas_node; - int *lrdrp; + int *lrdrp = NULL; rootdn = find_path_device("/"); if (rootdn) { @@ -362,7 +362,9 @@ static int lparcfg_data(struct seq_file *m, void *v) seq_printf(m, "partition_id=%d\n", (int)lp_index); rtas_node = find_path_device("/rtas"); - lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL); + if (rtas_node) + lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", + NULL); if (lrdrp == NULL) { partition_potential_processors = vdso_data->processorCount; diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index d6431440c54..ee166c58664 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -26,8 +26,6 @@ #include <asm/prom.h> #include <asm/smp.h> -#define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */ - int default_machine_kexec_prepare(struct kimage *image) { int i; @@ -61,7 +59,7 @@ int default_machine_kexec_prepare(struct kimage *image) */ if (htab_address) { low = __pa(htab_address); - high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE; + high = low + htab_size_bytes; for (i = 0; i < image->nr_segments; i++) { begin = image->segment[i].mem; @@ -294,7 +292,7 @@ void default_machine_kexec(struct kimage *image) } /* Values we need to export to the second kernel via the device tree. 
*/ -static unsigned long htab_base, htab_size, kernel_end; +static unsigned long htab_base, kernel_end; static struct property htab_base_prop = { .name = "linux,htab-base", @@ -305,7 +303,7 @@ static struct property htab_base_prop = { static struct property htab_size_prop = { .name = "linux,htab-size", .length = sizeof(unsigned long), - .value = (unsigned char *)&htab_size, + .value = (unsigned char *)&htab_size_bytes, }; static struct property kernel_end_prop = { @@ -331,8 +329,6 @@ static void __init export_htab_values(void) htab_base = __pa(htab_address); prom_add_property(node, &htab_base_prop); - - htab_size = 1UL << ppc64_pft_size; prom_add_property(node, &htab_size_prop); out: diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index c367520bc1c..ba92bab7cc2 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -589,7 +589,6 @@ void __devinit scan_phb(struct pci_controller *hose) #endif /* CONFIG_PPC_MULTIPLATFORM */ if (mode == PCI_PROBE_NORMAL) hose->last_busno = bus->subordinate = pci_scan_child_bus(bus); - pci_bus_add_devices(bus); } static int __init pcibios_init(void) @@ -608,8 +607,10 @@ static int __init pcibios_init(void) printk("PCI: Probing PCI hardware\n"); /* Scan all of the recorded PCI controllers. */ - list_for_each_entry_safe(hose, tmp, &hose_list, list_node) + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { scan_phb(hose); + pci_bus_add_devices(hose->bus); + } #ifndef CONFIG_PPC_ISERIES if (pci_probe_only) diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index d9a459c144d..63ecbec0520 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -79,15 +79,8 @@ EXPORT_SYMBOL(sys_sigreturn); EXPORT_SYMBOL(strcpy); EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strncat); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(strpbrk); -EXPORT_SYMBOL(strstr); EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(strcasecmp); EXPORT_SYMBOL(csum_partial); @@ -117,15 +110,6 @@ EXPORT_SYMBOL(_insw_ns); EXPORT_SYMBOL(_outsw_ns); EXPORT_SYMBOL(_insl_ns); EXPORT_SYMBOL(_outsl_ns); -EXPORT_SYMBOL(ioremap); -#ifdef CONFIG_44x -EXPORT_SYMBOL(ioremap64); -#endif -EXPORT_SYMBOL(__ioremap); -EXPORT_SYMBOL(iounmap); -#ifdef CONFIG_PPC32 -EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ -#endif #if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)) EXPORT_SYMBOL(ppc_ide_md); @@ -168,7 +152,6 @@ EXPORT_SYMBOL(__flush_icache_range); EXPORT_SYMBOL(flush_dcache_range); #ifdef CONFIG_SMP -EXPORT_SYMBOL(smp_call_function); #ifdef CONFIG_PPC32 EXPORT_SYMBOL(smp_hw_index); #endif @@ -185,9 +168,6 @@ EXPORT_SYMBOL(adb_try_handler_change); EXPORT_SYMBOL(cuda_request); EXPORT_SYMBOL(cuda_poll); #endif /* CONFIG_ADB_CUDA */ -#ifdef CONFIG_PPC_PMAC -EXPORT_SYMBOL(sys_ctrler); -#endif #ifdef CONFIG_VT EXPORT_SYMBOL(kd_mksound); #endif @@ -205,7 +185,6 @@ EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(memscan); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memchr); @@ -214,7 +193,6 @@ EXPORT_SYMBOL(screen_info); #endif #ifdef CONFIG_PPC32 -EXPORT_SYMBOL(__delay); EXPORT_SYMBOL(timer_interrupt); EXPORT_SYMBOL(irq_desc); EXPORT_SYMBOL(tb_ticks_per_jiffy); @@ -222,10 +200,6 @@ EXPORT_SYMBOL(console_drivers); EXPORT_SYMBOL(cacheable_memcpy); #endif -EXPORT_SYMBOL(__up); -EXPORT_SYMBOL(__down); 
-EXPORT_SYMBOL(__down_interruptible); - #ifdef CONFIG_8xx EXPORT_SYMBOL(cpm_install_handler); EXPORT_SYMBOL(cpm_free_handler); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 57703994a06..c225cf154bf 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -888,3 +888,35 @@ void dump_stack(void) show_stack(current, NULL); } EXPORT_SYMBOL(dump_stack); + +#ifdef CONFIG_PPC64 +void ppc64_runlatch_on(void) +{ + unsigned long ctrl; + + if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) { + HMT_medium(); + + ctrl = mfspr(SPRN_CTRLF); + ctrl |= CTRL_RUNLATCH; + mtspr(SPRN_CTRLT, ctrl); + + set_thread_flag(TIF_RUNLATCH); + } +} + +void ppc64_runlatch_off(void) +{ + unsigned long ctrl; + + if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) { + HMT_medium(); + + clear_thread_flag(TIF_RUNLATCH); + + ctrl = mfspr(SPRN_CTRLF); + ctrl &= ~CTRL_RUNLATCH; + mtspr(SPRN_CTRLT, ctrl); + } +} +#endif diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index d50c8df0183..6dbd2172677 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -491,7 +491,12 @@ void __init finish_device_tree(void) size = 16; finish_node(allnodes, &size, 1); size -= 16; - end = start = (unsigned long) __va(lmb_alloc(size, 128)); + + if (0 == size) + end = start = 0; + else + end = start = (unsigned long)__va(lmb_alloc(size, 128)); + finish_node(allnodes, &end, 0); BUG_ON(end != start + size); @@ -811,8 +816,6 @@ void __init unflatten_device_tree(void) { unsigned long start, mem, size; struct device_node **allnextp = &allnodes; - char *p = NULL; - int l = 0; DBG(" -> unflatten_device_tree()\n"); @@ -852,19 +855,6 @@ void __init unflatten_device_tree(void) if (of_chosen == NULL) of_chosen = of_find_node_by_path("/chosen@0"); - /* Retreive command line */ - if (of_chosen != NULL) { - p = (char *)get_property(of_chosen, "bootargs", &l); - if (p != NULL && l > 0) - strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE)); - } -#ifdef CONFIG_CMDLINE - if (l == 0 || (l == 1 && (*p) == 0)) - strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#endif /* CONFIG_CMDLINE */ - - DBG("Command line is: %s\n", cmd_line); - DBG(" <- unflatten_device_tree()\n"); } @@ -935,6 +925,8 @@ static int __init early_init_dt_scan_chosen(unsigned long node, { u32 *prop; unsigned long *lprop; + unsigned long l; + char *p; DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); @@ -999,6 +991,41 @@ static int __init early_init_dt_scan_chosen(unsigned long node, crashk_res.end = crashk_res.start + *lprop - 1; #endif + /* Retreive command line */ + p = of_get_flat_dt_prop(node, "bootargs", &l); + if (p != NULL && l > 0) + strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE)); + +#ifdef CONFIG_CMDLINE + if (l == 0 || (l == 1 && (*p) == 0)) + strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE); +#endif /* CONFIG_CMDLINE */ + + DBG("Command line is: %s\n", cmd_line); + + if (strstr(cmd_line, "mem=")) { + char *p, *q; + unsigned long maxmem = 0; + + for (q = cmd_line; (p = strstr(q, "mem=")) != 0; ) { + q = p + 4; + if (p > cmd_line && p[-1] != ' ') + continue; + maxmem = simple_strtoul(q, &q, 0); + if (*q == 'k' || *q == 'K') { + maxmem <<= 10; + ++q; + } else if (*q == 'm' || *q == 'M') { + maxmem <<= 20; + ++q; + } else if (*q == 'g' || *q == 'G') { + maxmem <<= 30; + ++q; + } + } + memory_limit = maxmem; + } + /* break now */ return 1; } @@ -1119,7 +1146,7 @@ static void __init early_reserve_mem(void) size_32 = *(reserve_map_32++); if 
(size_32 == 0) break; - DBG("reserving: %lx -> %lx\n", base_32, size_32); + DBG("reserving: %x -> %x\n", base_32, size_32); lmb_reserve(base_32, size_32); } return; @@ -1398,8 +1425,8 @@ struct device_node *of_find_node_by_name(struct device_node *from, read_lock(&devtree_lock); np = from ? from->allnext : allnodes; - for (; np != 0; np = np->allnext) - if (np->name != 0 && strcasecmp(np->name, name) == 0 + for (; np != NULL; np = np->allnext) + if (np->name != NULL && strcasecmp(np->name, name) == 0 && of_node_get(np)) break; if (from) @@ -1917,3 +1944,30 @@ int prom_update_property(struct device_node *np, return 0; } + +#ifdef CONFIG_KEXEC +/* We may have allocated the flat device tree inside the crash kernel region + * in prom_init. If so we need to move it out into regular memory. */ +void kdump_move_device_tree(void) +{ + unsigned long start, end; + struct boot_param_header *new; + + start = __pa((unsigned long)initial_boot_params); + end = start + initial_boot_params->totalsize; + + if (end < crashk_res.start || start > crashk_res.end) + return; + + new = (struct boot_param_header*) + __va(lmb_alloc(initial_boot_params->totalsize, PAGE_SIZE)); + + memcpy(new, initial_boot_params, initial_boot_params->totalsize); + + initial_boot_params = new; + + DBG("Flat device tree blob moved to %p\n", initial_boot_params); + + /* XXX should we unreserve the old DT? */ +} +#endif /* CONFIG_KEXEC */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 7881ec96ef1..813c2cd194c 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -205,14 +205,6 @@ static cell_t __initdata regbuf[1024]; #define MAX_CPU_THREADS 2 -/* TO GO */ -#ifdef CONFIG_HMT -struct { - unsigned int pir; - unsigned int threadid; -} hmt_thread_data[NR_CPUS]; -#endif /* CONFIG_HMT */ - /* * Error results ... some OF calls will return "-1" on error, some * will return 0, some will return either. To simplify, here are @@ -986,7 +978,7 @@ static void __init prom_init_mem(void) if (size == 0) continue; prom_debug(" %x %x\n", base, size); - if (base == 0) + if (base == 0 && (RELOC(of_platform) & PLATFORM_LPAR)) RELOC(rmo_top) = size; if ((base + size) > RELOC(ram_top)) RELOC(ram_top) = base + size; @@ -1319,10 +1311,6 @@ static void __init prom_hold_cpus(void) */ *spinloop = 0; -#ifdef CONFIG_HMT - for (i = 0; i < NR_CPUS; i++) - RELOC(hmt_thread_data)[i].pir = 0xdeadbeef; -#endif /* look for cpus */ for (node = 0; prom_next_node(&node); ) { type[0] = 0; @@ -1389,32 +1377,6 @@ static void __init prom_hold_cpus(void) /* Reserve cpu #s for secondary threads. They start later. */ cpuid += cpu_threads; } -#ifdef CONFIG_HMT - /* Only enable HMT on processors that provide support. */ - if (__is_processor(PV_PULSAR) || - __is_processor(PV_ICESTAR) || - __is_processor(PV_SSTAR)) { - prom_printf(" starting secondary threads\n"); - - for (i = 0; i < NR_CPUS; i += 2) { - if (!cpu_online(i)) - continue; - - if (i == 0) { - unsigned long pir = mfspr(SPRN_PIR); - if (__is_processor(PV_PULSAR)) { - RELOC(hmt_thread_data)[i].pir = - pir & 0x1f; - } else { - RELOC(hmt_thread_data)[i].pir = - pir & 0x3ff; - } - } - } - } else { - prom_printf("Processor is not HMT capable\n"); - } -#endif if (cpuid > NR_CPUS) prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS) @@ -2098,6 +2060,10 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, */ prom_init_stdout(); + /* Bail if this is a kdump kernel. 
*/ + if (PHYSICAL_START > 0) + prom_panic("Error: You can't boot a kdump kernel from OF!\n"); + /* * Check for an initrd */ diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index a8099c80615..3934c227549 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c @@ -465,8 +465,10 @@ u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, if (parent == NULL) return NULL; bus = of_match_bus(parent); - if (strcmp(bus->name, "pci")) + if (strcmp(bus->name, "pci")) { + of_node_put(parent); return NULL; + } bus->count_cells(dev, &na, &ns); of_node_put(parent); if (!OF_CHECK_COUNTS(na, ns)) diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 400793c7130..bcb83574335 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -561,10 +561,7 @@ void do_syscall_trace_leave(struct pt_regs *regs) regs->result); if ((test_thread_flag(TIF_SYSCALL_TRACE) -#ifdef CONFIG_PPC64 - || test_thread_flag(TIF_SINGLESTEP) -#endif - ) + || test_thread_flag(TIF_SINGLESTEP)) && (current->ptrace & PT_PTRACED)) do_syscall_trace(); } diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c index 635d3b9a881..34d073fb609 100644 --- a/arch/powerpc/kernel/rtas-rtc.c +++ b/arch/powerpc/kernel/rtas-rtc.c @@ -52,7 +52,7 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm) error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret); if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) { if (in_interrupt() && printk_ratelimit()) { - memset(&rtc_tm, 0, sizeof(struct rtc_time)); + memset(rtc_tm, 0, sizeof(struct rtc_time)); printk(KERN_WARNING "error: reading clock" " would delay interrupt\n"); return; /* delay not allowed */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 7fe4a5c944c..b5b2add7ad1 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -22,6 +22,7 @@ #include <asm/prom.h> #include <asm/rtas.h> +#include <asm/hvcall.h> #include <asm/semaphore.h> #include <asm/machdep.h> #include <asm/page.h> @@ -565,6 +566,7 @@ static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE; #ifdef CONFIG_PPC_PSERIES static void rtas_percpu_suspend_me(void *info) { + int i; long rc; long flags; struct rtas_suspend_me_data *data = @@ -587,18 +589,16 @@ static void rtas_percpu_suspend_me(void *info) if (rc == H_Continue) { data->waiting = 0; - rtas_call(ibm_suspend_me_token, 0, 1, - data->args->args); + data->args->args[data->args->nargs] = + rtas_call(ibm_suspend_me_token, 0, 1, NULL); + for_each_cpu(i) + plpar_hcall_norets(H_PROD,i); } else { data->waiting = -EBUSY; printk(KERN_ERR "Error on H_Join hypervisor call\n"); } out: - /* before we restore interrupts, make sure we don't - * generate a spurious soft lockup errors - */ - touch_softlockup_watchdog(); local_irq_restore(flags); return; } diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 50500093c97..aaf384c3f04 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -672,8 +672,7 @@ static void rtas_flash_firmware(int reboot_type) static void remove_flash_pde(struct proc_dir_entry *dp) { if (dp) { - if (dp->data != NULL) - kfree(dp->data); + kfree(dp->data); dp->owner = NULL; remove_proc_entry(dp->name, dp->parent); } diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 5579f655991..7442775ef2a 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -280,8 +280,7 @@ 
static int phb_set_bus_ranges(struct device_node *dev, return 0; } -static int __devinit setup_phb(struct device_node *dev, - struct pci_controller *phb) +int __devinit setup_phb(struct device_node *dev, struct pci_controller *phb) { if (is_python(dev)) python_countermeasures(dev); @@ -359,27 +358,6 @@ unsigned long __init find_and_init_phbs(void) return 0; } -struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) -{ - struct pci_controller *phb; - int primary; - - primary = list_empty(&hose_list); - phb = pcibios_alloc_controller(dn); - if (!phb) - return NULL; - setup_phb(dn, phb); - pci_process_bridge_OF_ranges(phb, dn, primary); - - pci_setup_phb_io_dynamic(phb, primary); - - pci_devs_phb_init_dynamic(phb); - scan_phb(phb); - - return phb; -} -EXPORT_SYMBOL(init_phb_dynamic); - /* RPA-specific bits for removing PHBs */ int pcibios_remove_root_bus(struct pci_controller *phb) { diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index e29b275e09e..f96c49b03ba 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -311,8 +311,6 @@ void smp_release_cpus(void) DBG(" <- smp_release_cpus()\n"); } -#else -#define smp_release_cpus() #endif /* CONFIG_SMP || CONFIG_KEXEC */ /* @@ -398,6 +396,9 @@ void __init setup_system(void) { DBG(" -> setup_system()\n"); +#ifdef CONFIG_KEXEC + kdump_move_device_tree(); +#endif /* * Unflatten the device-tree passed by prom_init or kexec */ @@ -470,10 +471,12 @@ void __init setup_system(void) check_smt_enabled(); smp_setup_cpu_maps(); +#ifdef CONFIG_SMP /* Release secondary cpus out of their spinloops at 0x60 now that * we can map physical -> logical CPU ids */ smp_release_cpus(); +#endif printk("Starting Linux PPC64 %s\n", system_utsname.version); diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index c6d0595da6b..d7a4e814974 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -142,11 +142,7 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka, return 0; } -static inline compat_uptr_t to_user_ptr(void *kp) -{ - return (compat_uptr_t)(u64)kp; -} - +#define to_user_ptr(p) ptr_to_compat(p) #define from_user_ptr(p) compat_ptr(p) static inline int save_general_regs(struct pt_regs *regs, @@ -155,10 +151,7 @@ static inline int save_general_regs(struct pt_regs *regs, elf_greg_t64 *gregs = (elf_greg_t64 *)regs; int i; - if (!FULL_REGS(regs)) { - set_thread_flag(TIF_SAVE_NVGPRS); - current_thread_info()->nvgprs_frame = frame->mc_gregs; - } + WARN_ON(!FULL_REGS(regs)); for (i = 0; i <= PT_RESULT; i ++) { if (i == 14 && !FULL_REGS(regs)) @@ -213,21 +206,13 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka, return 0; } -#define to_user_ptr(p) (p) -#define from_user_ptr(p) (p) +#define to_user_ptr(p) ((unsigned long)(p)) +#define from_user_ptr(p) ((void __user *)(p)) static inline int save_general_regs(struct pt_regs *regs, struct mcontext __user *frame) { - if (!FULL_REGS(regs)) { - /* Zero out the unsaved GPRs to avoid information - leak, and set TIF_SAVE_NVGPRS to ensure that the - registers do actually get saved later. */ - memset(®s->gpr[14], 0, 18 * sizeof(unsigned long)); - current_thread_info()->nvgprs_frame = &frame->mc_gregs; - set_thread_flag(TIF_SAVE_NVGPRS); - } - + WARN_ON(!FULL_REGS(regs)); return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE); } @@ -526,7 +511,7 @@ long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act, ret = do_sigaction(sig, act ? 
&new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { - ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler); + ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler); ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask); ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); } @@ -675,8 +660,8 @@ long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo int compat_sys_sigaltstack(u32 __new, u32 __old, int r5, int r6, int r7, int r8, struct pt_regs *regs) { - stack_32_t __user * newstack = (stack_32_t __user *)(long) __new; - stack_32_t __user * oldstack = (stack_32_t __user *)(long) __old; + stack_32_t __user * newstack = compat_ptr(__new); + stack_32_t __user * oldstack = compat_ptr(__old); stack_t uss, uoss; int ret; mm_segment_t old_fs; @@ -708,7 +693,7 @@ int compat_sys_sigaltstack(u32 __new, u32 __old, int r5, set_fs(old_fs); /* Copy the stack information to the user output buffer */ if (!ret && oldstack && - (put_user((long)uoss.ss_sp, &oldstack->ss_sp) || + (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) || __put_user(uoss.ss_flags, &oldstack->ss_flags) || __put_user(uoss.ss_size, &oldstack->ss_size))) return -EFAULT; @@ -830,8 +815,8 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int } long sys_swapcontext(struct ucontext __user *old_ctx, - struct ucontext __user *new_ctx, - int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) + struct ucontext __user *new_ctx, + int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) { unsigned char tmp; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index b3193116e68..4324f8a8ba2 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -60,8 +60,8 @@ struct rt_sigframe { struct ucontext uc; unsigned long _unused[2]; unsigned int tramp[TRAMP_SIZE]; - struct siginfo *pinfo; - void *puc; + struct siginfo __user *pinfo; + void __user *puc; struct siginfo info; /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ char abigap[288]; @@ -118,14 +118,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, err |= __put_user(0, &sc->v_regs); #endif /* CONFIG_ALTIVEC */ err |= __put_user(&sc->gp_regs, &sc->regs); - if (!FULL_REGS(regs)) { - /* Zero out the unsaved GPRs to avoid information - leak, and set TIF_SAVE_NVGPRS to ensure that the - registers do actually get saved later. 
*/ - memset(®s->gpr[14], 0, 18 * sizeof(unsigned long)); - set_thread_flag(TIF_SAVE_NVGPRS); - current_thread_info()->nvgprs_frame = &sc->gp_regs; - } + WARN_ON(!FULL_REGS(regs)); err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE); err |= __copy_to_user(&sc->fp_regs, ¤t->thread.fpr, FP_REGS_SIZE); err |= __put_user(signr, &sc->signal); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index c8458c531b2..13595a64f01 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -540,6 +540,9 @@ int __devinit start_secondary(void *unused) if (smp_ops->take_timebase) smp_ops->take_timebase(); + if (system_state > SYSTEM_BOOTING) + per_cpu(last_jiffy, cpu) = get_tb(); + spin_lock(&call_lock); cpu_set(cpu, cpu_online_map); spin_unlock(&call_lock); diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 475249dc235..cd75ab2908f 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -176,7 +176,6 @@ struct timex32 { }; extern int do_adjtimex(struct timex *); -extern void ppc_adjtimex(void); asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) { @@ -209,9 +208,6 @@ asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) ret = do_adjtimex(&txc); - /* adjust the conversion of TB to time of day to track adjtimex */ - ppc_adjtimex(); - if(put_user(txc.modes, &utp->modes) || __put_user(txc.offset, &utp->offset) || __put_user(txc.freq, &utp->freq) || diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 007b15ee36d..1ad55f0466f 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -36,8 +36,6 @@ #ifdef CONFIG_PPC64 #define sys_sigpending sys_ni_syscall #define sys_old_getrlimit sys_ni_syscall -#else -#define ppc_rtas sys_ni_syscall #endif _GLOBAL(sys_call_table) @@ -290,7 +288,7 @@ COMPAT_SYS(clock_settime) COMPAT_SYS(clock_gettime) COMPAT_SYS(clock_getres) COMPAT_SYS(clock_nanosleep) -COMPAT_SYS(swapcontext) +SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext) COMPAT_SYS(tgkill) COMPAT_SYS(utimes) COMPAT_SYS(statfs64) @@ -323,3 +321,4 @@ SYSCALL(spu_run) SYSCALL(spu_create) COMPAT_SYS(pselect6) COMPAT_SYS(ppoll) +SYSCALL(unshare) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index c4a294d657b..86f7e3d154d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -50,6 +50,7 @@ #include <linux/security.h> #include <linux/percpu.h> #include <linux/rtc.h> +#include <linux/jiffies.h> #include <asm/io.h> #include <asm/processor.h> @@ -99,7 +100,15 @@ EXPORT_SYMBOL(tb_ticks_per_usec); unsigned long tb_ticks_per_sec; u64 tb_to_xs; unsigned tb_to_us; -unsigned long processor_freq; + +#define TICKLEN_SCALE (SHIFT_SCALE - 10) +u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ +u64 ticklen_to_xs; /* 0.64 fraction */ + +/* If last_tick_len corresponds to about 1/HZ seconds, then + last_tick_len << TICKLEN_SHIFT will be about 2^63. 
*/ +#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ) + DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL_GPL(rtc_lock); @@ -113,10 +122,6 @@ extern unsigned long wall_jiffies; extern struct timezone sys_tz; static long timezone_offset; -void ppc_adjtimex(void); - -static unsigned adjusting_time = 0; - unsigned long ppc_proc_freq; unsigned long ppc_tb_freq; @@ -178,8 +183,7 @@ static __inline__ void timer_check_rtc(void) */ if (ppc_md.set_rtc_time && ntp_synced() && xtime.tv_sec - last_rtc_update >= 659 && - abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && - jiffies - wall_jiffies == 1) { + abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) { struct rtc_time tm; to_tm(xtime.tv_sec + 1 + timezone_offset, &tm); tm.tm_year -= 1900; @@ -226,15 +230,14 @@ void do_gettimeofday(struct timeval *tv) if (__USE_RTC()) { /* do this the old way */ unsigned long flags, seq; - unsigned int sec, nsec, usec, lost; + unsigned int sec, nsec, usec; do { seq = read_seqbegin_irqsave(&xtime_lock, flags); sec = xtime.tv_sec; nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp); - lost = jiffies - wall_jiffies; } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - usec = nsec / 1000 + lost * (1000000 / HZ); + usec = nsec / 1000; while (usec >= 1000000) { usec -= 1000000; ++sec; @@ -248,23 +251,6 @@ void do_gettimeofday(struct timeval *tv) EXPORT_SYMBOL(do_gettimeofday); -/* Synchronize xtime with do_gettimeofday */ - -static inline void timer_sync_xtime(unsigned long cur_tb) -{ -#ifdef CONFIG_PPC64 - /* why do we do this? */ - struct timeval my_tv; - - __do_gettimeofday(&my_tv, cur_tb); - - if (xtime.tv_sec <= my_tv.tv_sec) { - xtime.tv_sec = my_tv.tv_sec; - xtime.tv_nsec = my_tv.tv_usec * 1000; - } -#endif -} - /* * There are two copies of tb_to_xs and stamp_xsec so that no * lock is needed to access and use these values in @@ -297,9 +283,9 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec, * the two values of tb_update_count match and are even then the * tb_to_xs and stamp_xsec values are consistent. If not, then it * loops back and reads them again until this criteria is met. + * We expect the caller to have done the first increment of + * vdso_data->tb_update_count already. */ - ++(vdso_data->tb_update_count); - smp_wmb(); vdso_data->tb_orig_stamp = new_tb_stamp; vdso_data->stamp_xsec = new_stamp_xsec; vdso_data->tb_to_xs = new_tb_to_xs; @@ -323,15 +309,40 @@ static __inline__ void timer_recalc_offset(u64 cur_tb) { unsigned long offset; u64 new_stamp_xsec; + u64 tlen, t2x; + u64 tb, xsec_old, xsec_new; + struct gettimeofday_vars *varp; if (__USE_RTC()) return; + tlen = current_tick_length(); offset = cur_tb - do_gtod.varp->tb_orig_stamp; - if ((offset & 0x80000000u) == 0) + if (tlen == last_tick_len && offset < 0x80000000u) return; - new_stamp_xsec = do_gtod.varp->stamp_xsec - + mulhdu(offset, do_gtod.varp->tb_to_xs); - update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs); + if (tlen != last_tick_len) { + t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs); + last_tick_len = tlen; + } else + t2x = do_gtod.varp->tb_to_xs; + new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; + do_div(new_stamp_xsec, 1000000000); + new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; + + ++vdso_data->tb_update_count; + smp_mb(); + + /* + * Make sure time doesn't go backwards for userspace gettimeofday. 
+ */ + tb = get_tb(); + varp = do_gtod.varp; + xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs) + + varp->stamp_xsec; + xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec; + if (xsec_new < xsec_old) + new_stamp_xsec += xsec_old - xsec_new; + + update_gtod(cur_tb, new_stamp_xsec, t2x); } #ifdef CONFIG_SMP @@ -462,13 +473,10 @@ void timer_interrupt(struct pt_regs * regs) write_seqlock(&xtime_lock); tb_last_jiffy += tb_ticks_per_jiffy; tb_last_stamp = per_cpu(last_jiffy, cpu); - timer_recalc_offset(tb_last_jiffy); do_timer(regs); - timer_sync_xtime(tb_last_jiffy); + timer_recalc_offset(tb_last_jiffy); timer_check_rtc(); write_sequnlock(&xtime_lock); - if (adjusting_time && (time_adjust == 0)) - ppc_adjtimex(); } next_dec = tb_ticks_per_jiffy - ticks; @@ -492,16 +500,18 @@ void timer_interrupt(struct pt_regs * regs) void wakeup_decrementer(void) { - int i; + unsigned long ticks; - set_dec(tb_ticks_per_jiffy); /* - * We don't expect this to be called on a machine with a 601, - * so using get_tbl is fine. + * The timebase gets saved on sleep and restored on wakeup, + * so all we need to do is to reset the decrementer. */ - tb_last_stamp = tb_last_jiffy = get_tb(); - for_each_cpu(i) - per_cpu(last_jiffy, i) = tb_last_stamp; + ticks = tb_ticks_since(__get_cpu_var(last_jiffy)); + if (ticks < tb_ticks_per_jiffy) + ticks = tb_ticks_per_jiffy - ticks; + else + ticks = 1; + set_dec(ticks); } #ifdef CONFIG_SMP @@ -541,8 +551,8 @@ int do_settimeofday(struct timespec *tv) time_t wtm_sec, new_sec = tv->tv_sec; long wtm_nsec, new_nsec = tv->tv_nsec; unsigned long flags; - long int tb_delta; - u64 new_xsec, tb_delta_xs; + u64 new_xsec; + unsigned long tb_delta; if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; @@ -563,9 +573,23 @@ int do_settimeofday(struct timespec *tv) first_settimeofday = 0; } #endif + + /* Make userspace gettimeofday spin until we're done. */ + ++vdso_data->tb_update_count; + smp_mb(); + + /* + * Subtract off the number of nanoseconds since the + * beginning of the last tick. + * Note that since we don't increment jiffies_64 anywhere other + * than in do_timer (since we don't have a lost tick problem), + * wall_jiffies will always be the same as jiffies, + * and therefore the (jiffies - wall_jiffies) computation + * has been removed. 
+ */ tb_delta = tb_ticks_since(tb_last_stamp); - tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; - tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); + tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */ + new_nsec -= SCALE_XSEC(tb_delta, 1000000000); wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); @@ -580,12 +604,12 @@ int do_settimeofday(struct timespec *tv) ntp_clear(); - new_xsec = 0; - if (new_nsec != 0) { - new_xsec = (u64)new_nsec * XSEC_PER_SEC; + new_xsec = xtime.tv_nsec; + if (new_xsec != 0) { + new_xsec *= XSEC_PER_SEC; do_div(new_xsec, NSEC_PER_SEC); } - new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs; + new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC; update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; @@ -612,10 +636,10 @@ void __init generic_calibrate_decr(void) ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ node_found = 0; - if (cpu != 0) { + if (cpu) { fp = (unsigned int *)get_property(cpu, "timebase-frequency", NULL); - if (fp != 0) { + if (fp) { node_found = 1; ppc_tb_freq = *fp; } @@ -626,10 +650,10 @@ void __init generic_calibrate_decr(void) ppc_proc_freq = DEFAULT_PROC_FREQ; node_found = 0; - if (cpu != 0) { + if (cpu) { fp = (unsigned int *)get_property(cpu, "clock-frequency", NULL); - if (fp != 0) { + if (fp) { node_found = 1; ppc_proc_freq = *fp; } @@ -671,7 +695,7 @@ void __init time_init(void) unsigned long flags; unsigned long tm = 0; struct div_result res; - u64 scale; + u64 scale, x; unsigned shift; if (ppc_md.time_init != NULL) @@ -693,11 +717,42 @@ void __init time_init(void) } tb_ticks_per_jiffy = ppc_tb_freq / HZ; - tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; + tb_ticks_per_sec = ppc_tb_freq; tb_ticks_per_usec = ppc_tb_freq / 1000000; tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); - div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res); - tb_to_xs = res.result_low; + + /* + * Calculate the length of each tick in ns. It will not be + * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ. + * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq, + * rounded up. + */ + x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1; + do_div(x, ppc_tb_freq); + tick_nsec = x; + last_tick_len = x << TICKLEN_SCALE; + + /* + * Compute ticklen_to_xs, which is a factor which gets multiplied + * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value. + * It is computed as: + * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9) + * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT + * which turns out to be N = 51 - SHIFT_HZ. + * This gives the result as a 0.64 fixed-point fraction. + * That value is reduced by an offset amounting to 1 xsec per + * 2^31 timebase ticks to avoid problems with time going backwards + * by 1 xsec when we do timer_recalc_offset due to losing the + * fractional xsec. That offset is equal to ppc_tb_freq/2^51 + * since there are 2^20 xsec in a second. + */ + div128_by_32((1ULL << 51) - ppc_tb_freq, 0, + tb_ticks_per_jiffy << SHIFT_HZ, &res); + div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res); + ticklen_to_xs = res.result_low; + + /* Compute tb_to_xs from tick_nsec */ + tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs); /* * Compute scale factor for sched_clock. 
@@ -724,6 +779,14 @@ void __init time_init(void) tm = get_boot_time(); write_seqlock_irqsave(&xtime_lock, flags); + + /* If platform provided a timezone (pmac), we correct the time */ + if (timezone_offset) { + sys_tz.tz_minuteswest = -timezone_offset / 60; + sys_tz.tz_dsttime = 0; + tm -= timezone_offset; + } + xtime.tv_sec = tm; xtime.tv_nsec = 0; do_gtod.varp = &do_gtod.vars[0]; @@ -738,18 +801,11 @@ void __init time_init(void) vdso_data->tb_orig_stamp = tb_last_jiffy; vdso_data->tb_update_count = 0; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; - vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; + vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; vdso_data->tb_to_xs = tb_to_xs; time_freq = 0; - /* If platform provided a timezone (pmac), we correct the time */ - if (timezone_offset) { - sys_tz.tz_minuteswest = -timezone_offset / 60; - sys_tz.tz_dsttime = 0; - xtime.tv_sec -= timezone_offset; - } - last_rtc_update = xtime.tv_sec; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); @@ -759,126 +815,6 @@ void __init time_init(void) set_dec(tb_ticks_per_jiffy); } -/* - * After adjtimex is called, adjust the conversion of tb ticks - * to microseconds to keep do_gettimeofday synchronized - * with ntpd. - * - * Use the time_adjust, time_freq and time_offset computed by adjtimex to - * adjust the frequency. - */ - -/* #define DEBUG_PPC_ADJTIMEX 1 */ - -void ppc_adjtimex(void) -{ -#ifdef CONFIG_PPC64 - unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, - new_tb_to_xs, new_xsec, new_stamp_xsec; - unsigned long tb_ticks_per_sec_delta; - long delta_freq, ltemp; - struct div_result divres; - unsigned long flags; - long singleshot_ppm = 0; - - /* - * Compute parts per million frequency adjustment to - * accomplish the time adjustment implied by time_offset to be - * applied over the elapsed time indicated by time_constant. - * Use SHIFT_USEC to get it into the same units as - * time_freq. - */ - if ( time_offset < 0 ) { - ltemp = -time_offset; - ltemp <<= SHIFT_USEC - SHIFT_UPDATE; - ltemp >>= SHIFT_KG + time_constant; - ltemp = -ltemp; - } else { - ltemp = time_offset; - ltemp <<= SHIFT_USEC - SHIFT_UPDATE; - ltemp >>= SHIFT_KG + time_constant; - } - - /* If there is a single shot time adjustment in progress */ - if ( time_adjust ) { -#ifdef DEBUG_PPC_ADJTIMEX - printk("ppc_adjtimex: "); - if ( adjusting_time == 0 ) - printk("starting "); - printk("single shot time_adjust = %ld\n", time_adjust); -#endif - - adjusting_time = 1; - - /* - * Compute parts per million frequency adjustment - * to match time_adjust - */ - singleshot_ppm = tickadj * HZ; - /* - * The adjustment should be tickadj*HZ to match the code in - * linux/kernel/timer.c, but experiments show that this is too - * large. 
3/4 of tickadj*HZ seems about right - */ - singleshot_ppm -= singleshot_ppm / 4; - /* Use SHIFT_USEC to get it into the same units as time_freq */ - singleshot_ppm <<= SHIFT_USEC; - if ( time_adjust < 0 ) - singleshot_ppm = -singleshot_ppm; - } - else { -#ifdef DEBUG_PPC_ADJTIMEX - if ( adjusting_time ) - printk("ppc_adjtimex: ending single shot time_adjust\n"); -#endif - adjusting_time = 0; - } - - /* Add up all of the frequency adjustments */ - delta_freq = time_freq + ltemp + singleshot_ppm; - - /* - * Compute a new value for tb_ticks_per_sec based on - * the frequency adjustment - */ - den = 1000000 * (1 << (SHIFT_USEC - 8)); - if ( delta_freq < 0 ) { - tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den; - new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta; - } - else { - tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den; - new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta; - } - -#ifdef DEBUG_PPC_ADJTIMEX - printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm); - printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec); -#endif - - /* - * Compute a new value of tb_to_xs (used to convert tb to - * microseconds) and a new value of stamp_xsec which is the - * time (in 1/2^20 second units) corresponding to - * tb_orig_stamp. This new value of stamp_xsec compensates - * for the change in frequency (implied by the new tb_to_xs) - * which guarantees that the current time remains the same. - */ - write_seqlock_irqsave( &xtime_lock, flags ); - tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp; - div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres); - new_tb_to_xs = divres.result_low; - new_xsec = mulhdu(tb_ticks, new_tb_to_xs); - - old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs); - new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec; - - update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs); - - write_sequnlock_irqrestore( &xtime_lock, flags ); -#endif /* CONFIG_PPC64 */ -} - #define FEBRUARY 2 #define STARTOFTIME 1970 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 7509aa6474f..98660aedeeb 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -814,6 +814,8 @@ void __kprobes program_check_exception(struct pt_regs *regs) return; } + local_irq_enable(); + /* Try to emulate it if we should. */ if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { switch (emulate_instruction(regs)) { diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index 2da65a9c93f..5d29dcca523 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -144,7 +144,7 @@ unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock) } #ifdef CONFIG_PPC_MAPLE -void udbg_maple_real_putc(unsigned char c) +void udbg_maple_real_putc(char c) { if (udbg_comport) { while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0) diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index f0c47dab090..04f7df39ffb 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -182,8 +182,8 @@ static struct page * vdso_vma_nopage(struct vm_area_struct * vma, unsigned long offset = address - vma->vm_start; struct page *pg; #ifdef CONFIG_PPC64 - void *vbase = test_thread_flag(TIF_32BIT) ? 
- vdso32_kbase : vdso64_kbase; + void *vbase = (vma->vm_mm->task_size > TASK_SIZE_USER32) ? + vdso64_kbase : vdso32_kbase; #else void *vbase = vdso32_kbase; #endif diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S index ccaeda5136d..4ee871f1cad 100644 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S @@ -225,9 +225,9 @@ V_FUNCTION_BEGIN(__do_get_xsec) .cfi_startproc /* check for update count & load values */ 1: ld r8,CFG_TB_UPDATE_COUNT(r3) - andi. r0,r4,1 /* pending update ? loop */ + andi. r0,r8,1 /* pending update ? loop */ bne- 1b - xor r0,r4,r4 /* create dependency */ + xor r0,r8,r8 /* create dependency */ add r3,r3,r0 /* Get TB & offset it */ |