Diffstat (limited to 'arch/powerpc/kernel')
52 files changed, 673 insertions(+), 372 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index e8b981897d4..ce4f7f17911 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -76,6 +76,7 @@ obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o obj-$(CONFIG_44x) += cpu_setup_44x.o obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o extra-y := head_$(CONFIG_WORD_SIZE).o extra-$(CONFIG_40x) := head_40x.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 36e1c8a29be..5f078bc2063 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -82,6 +82,9 @@ int main(void) DEFINE(KSP, offsetof(struct thread_struct, ksp)); DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit)); DEFINE(PT_REGS, offsetof(struct thread_struct, regs)); +#ifdef CONFIG_BOOKE + DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0])); +#endif DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode)); DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0])); DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr)); @@ -128,6 +131,7 @@ int main(void) DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); /* paca */ DEFINE(PACA_SIZE, sizeof(struct paca_struct)); + DEFINE(PACA_LOCK_TOKEN, offsetof(struct paca_struct, lock_token)); DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index)); DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start)); DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); @@ -187,7 +191,9 @@ int main(void) DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); + DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use)); DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); + DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count)); DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); #endif /* CONFIG_PPC_STD_MMU_64 */ DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); @@ -198,11 +204,6 @@ int main(void) DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); -#ifdef CONFIG_KVM_BOOK3S_64_HANDLER - DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu)); - DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb)); - DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max)); -#endif #endif /* CONFIG_PPC64 */ /* RTAS */ @@ -397,67 +398,160 @@ int main(void) DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); + DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr)); + DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr)); +#ifdef CONFIG_ALTIVEC + DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr)); + DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr)); +#endif +#ifdef CONFIG_VSX + DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr)); +#endif + DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); + DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); + DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); + DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); + 
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); +#ifdef CONFIG_KVM_BOOK3S_64_HV + DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr)); + DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0)); + DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1)); + DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0)); + DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1)); + DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2)); + DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3)); +#endif DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); + DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1)); DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); + DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); /* book3s */ +#ifdef CONFIG_KVM_BOOK3S_64_HV + DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); + DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); + DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); + DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); + DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1)); + DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock)); + DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter)); + DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu)); + DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr)); + DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor)); + DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); + DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); +#endif #ifdef CONFIG_PPC_BOOK3S + DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); + DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip)); DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr)); - DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); + DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); + DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); + DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr)); + DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr)); + DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor)); + DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); + DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem)); DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter)); DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler)); DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall)); DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); + DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec)); + DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires)); + DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions)); + DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa)); + DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); + DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); + DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); + DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); + 
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); + DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu)); + DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr)); + DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); + DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); + DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); + DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid)); + DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count)); + DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); + DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) - offsetof(struct kvmppc_vcpu_book3s, vcpu)); - DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr)); - DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer)); - DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr)); - DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr)); - DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc)); - DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0])); - DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1])); - DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2])); - DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3])); - DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4])); - DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5])); - DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6])); - DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7])); - DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8])); - DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9])); - DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10])); - DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11])); - DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12])); - DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13])); - DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1)); - DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2)); - DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu, - vmhandler)); - DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu, - scratch0)); - DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu, - scratch1)); - DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu, - in_guest)); - DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu, - fault_dsisr)); - DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu, - fault_dar)); - DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu, - last_inst)); - DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu, - shadow_srr1)); + DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); + DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); + DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); + +#ifdef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_KVM_BOOK3S_PR +# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f)) +#else +# define SVCPU_FIELD(x, f) +#endif +# define HSTATE_FIELD(x, f) DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f)) +#else /* 32-bit */ +# define SVCPU_FIELD(x, f) DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f)) +# define HSTATE_FIELD(x, f) DEFINE(x, 
offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f)) +#endif + + SVCPU_FIELD(SVCPU_CR, cr); + SVCPU_FIELD(SVCPU_XER, xer); + SVCPU_FIELD(SVCPU_CTR, ctr); + SVCPU_FIELD(SVCPU_LR, lr); + SVCPU_FIELD(SVCPU_PC, pc); + SVCPU_FIELD(SVCPU_R0, gpr[0]); + SVCPU_FIELD(SVCPU_R1, gpr[1]); + SVCPU_FIELD(SVCPU_R2, gpr[2]); + SVCPU_FIELD(SVCPU_R3, gpr[3]); + SVCPU_FIELD(SVCPU_R4, gpr[4]); + SVCPU_FIELD(SVCPU_R5, gpr[5]); + SVCPU_FIELD(SVCPU_R6, gpr[6]); + SVCPU_FIELD(SVCPU_R7, gpr[7]); + SVCPU_FIELD(SVCPU_R8, gpr[8]); + SVCPU_FIELD(SVCPU_R9, gpr[9]); + SVCPU_FIELD(SVCPU_R10, gpr[10]); + SVCPU_FIELD(SVCPU_R11, gpr[11]); + SVCPU_FIELD(SVCPU_R12, gpr[12]); + SVCPU_FIELD(SVCPU_R13, gpr[13]); + SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr); + SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar); + SVCPU_FIELD(SVCPU_LAST_INST, last_inst); + SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1); #ifdef CONFIG_PPC_BOOK3S_32 - DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr)); + SVCPU_FIELD(SVCPU_SR, sr); #endif -#else +#ifdef CONFIG_PPC64 + SVCPU_FIELD(SVCPU_SLB, slb); + SVCPU_FIELD(SVCPU_SLB_MAX, slb_max); +#endif + + HSTATE_FIELD(HSTATE_HOST_R1, host_r1); + HSTATE_FIELD(HSTATE_HOST_R2, host_r2); + HSTATE_FIELD(HSTATE_HOST_MSR, host_msr); + HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); + HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); + HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); + HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); + +#ifdef CONFIG_KVM_BOOK3S_64_HV + HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); + HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); + HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); + HSTATE_FIELD(HSTATE_MMCR, host_mmcr); + HSTATE_FIELD(HSTATE_PMC, host_pmc); + HSTATE_FIELD(HSTATE_PURR, host_purr); + HSTATE_FIELD(HSTATE_SPURR, host_spurr); + HSTATE_FIELD(HSTATE_DSCR, host_dscr); + HSTATE_FIELD(HSTATE_DABR, dabr); + HSTATE_FIELD(HSTATE_DECEXP, dec_expires); +#endif /* CONFIG_KVM_BOOK3S_64_HV */ + +#else /* CONFIG_PPC_BOOK3S */ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); @@ -467,7 +561,7 @@ int main(void) DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); #endif /* CONFIG_PPC_BOOK3S */ -#endif +#endif /* CONFIG_KVM */ #ifdef CONFIG_KVM_GUEST DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared, @@ -497,6 +591,13 @@ int main(void) DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7)); #endif +#if defined(CONFIG_KVM) && defined(CONFIG_SPE) + DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0])); + DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc)); + DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr)); + DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); +#endif + #ifdef CONFIG_KVM_EXIT_TIMING DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, arch.timing_exit.tv32.tbu)); diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S index 4f9a93fcfe0..76797c5105d 100644 --- a/arch/powerpc/kernel/cpu_setup_power7.S +++ b/arch/powerpc/kernel/cpu_setup_power7.S @@ -45,12 +45,12 @@ _GLOBAL(__restore_cpu_power7) blr __init_hvmode_206: - /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */ + /* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */ mfmsr r3 rldicl. 
r0,r3,4,63 bnelr ld r5,CPU_SPEC_FEATURES(r4) - LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206) + LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE) xor r5,r5,r6 std r5,CPU_SPEC_FEATURES(r4) blr @@ -61,19 +61,23 @@ __init_LPCR: * LPES = 0b01 (HSRR0/1 used for 0x500) * PECE = 0b111 * DPFD = 4 + * HDICE = 0 + * VC = 0b100 (VPM0=1, VPM1=0, ISL=0) + * VRMASD = 0b10000 (L=1, LP=00) * * Other bits untouched for now */ mfspr r3,SPRN_LPCR - ori r3,r3,(LPCR_LPES0|LPCR_LPES1) - xori r3,r3, LPCR_LPES0 + li r5,1 + rldimi r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2 ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2) - li r5,7 - sldi r5,r5,LPCR_DPFD_SH - andc r3,r3,r5 li r5,4 - sldi r5,r5,LPCR_DPFD_SH - or r3,r3,r5 + rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3 + clrrdi r3,r3,1 /* clear HDICE */ + li r5,4 + rldimi r3,r5, LPCR_VC_SH, 0 + li r5,0x10 + rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5 mtspr SPRN_LPCR,r3 isync blr diff --git a/arch/powerpc/kernel/cpu_setup_ppc970.S b/arch/powerpc/kernel/cpu_setup_ppc970.S index 27f2507279d..12fac8df01c 100644 --- a/arch/powerpc/kernel/cpu_setup_ppc970.S +++ b/arch/powerpc/kernel/cpu_setup_ppc970.S @@ -76,7 +76,7 @@ _GLOBAL(__setup_cpu_ppc970) /* Do nothing if not running in HV mode */ mfmsr r0 rldicl. r0,r0,4,63 - beqlr + beq no_hv_mode mfspr r0,SPRN_HID0 li r11,5 /* clear DOZE and SLEEP */ @@ -90,7 +90,7 @@ _GLOBAL(__setup_cpu_ppc970MP) /* Do nothing if not running in HV mode */ mfmsr r0 rldicl. r0,r0,4,63 - beqlr + beq no_hv_mode mfspr r0,SPRN_HID0 li r11,0x15 /* clear DOZE and SLEEP */ @@ -109,6 +109,14 @@ load_hids: sync isync + /* Try to set LPES = 01 in HID4 */ + mfspr r0,SPRN_HID4 + clrldi r0,r0,1 /* clear LPES0 */ + ori r0,r0,HID4_LPES1 /* set LPES1 */ + sync + mtspr SPRN_HID4,r0 + isync + /* Save away cpu state */ LOAD_REG_ADDR(r5,cpu_state_storage) @@ -117,11 +125,21 @@ load_hids: std r3,CS_HID0(r5) mfspr r3,SPRN_HID1 std r3,CS_HID1(r5) - mfspr r3,SPRN_HID4 - std r3,CS_HID4(r5) + mfspr r4,SPRN_HID4 + std r4,CS_HID4(r5) mfspr r3,SPRN_HID5 std r3,CS_HID5(r5) + /* See if we successfully set LPES1 to 1; if not we are in Apple mode */ + andi. 
r4,r4,HID4_LPES1 + bnelr + +no_hv_mode: + /* Disable CPU_FTR_HVMODE and exit, since we don't have HV mode */ + ld r5,CPU_SPEC_FEATURES(r4) + LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE) + andc r5,r5,r6 + std r5,CPU_SPEC_FEATURES(r4) blr /* Called with no MMU context (typically MSR:IR/DR off) to diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 9fb933248ab..fa44ff53886 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -2051,7 +2051,8 @@ static struct cpu_spec __initdata cpu_specs[] = { static struct cpu_spec the_cpu_spec; -static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) +static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, + struct cpu_spec *s) { struct cpu_spec *t = &the_cpu_spec; struct cpu_spec old; @@ -2114,6 +2115,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) t->cpu_setup(offset, t); } #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ + + return t; } struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) @@ -2124,10 +2127,8 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) s = PTRRELOC(s); for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) { - if ((pvr & s->pvr_mask) == s->pvr_value) { - setup_cpu_spec(offset, s); - return s; - } + if ((pvr & s->pvr_mask) == s->pvr_value) + return setup_cpu_spec(offset, s); } BUG(); diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 4e6ee944495..cc6a9d5d69a 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -242,12 +242,8 @@ static void crash_kexec_wait_realmode(int cpu) while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) { barrier(); - if (!cpu_possible(i)) { + if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0)) break; - } - if (!cpu_online(i)) { - break; - } msecs--; mdelay(1); } diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index d238c082c3c..4f0959fbfbe 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -161,9 +161,7 @@ int dma_set_mask(struct device *dev, u64 dma_mask) if (ppc_md.dma_set_mask) return ppc_md.dma_set_mask(dev, dma_mask); - if (unlikely(dma_ops == NULL)) - return -EIO; - if (dma_ops->set_dma_mask != NULL) + if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) return dma_ops->set_dma_mask(dev, dma_mask); if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index d24d4400cc7..429983c06f9 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -120,6 +120,12 @@ std r14,PACA_EXMC+EX_R14(r13); \ std r15,PACA_EXMC+EX_R15(r13) +#define PROLOG_ADDITION_DOORBELL_GEN \ + lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ + cmpwi cr0,r11,0; /* yes -> go out of line */ \ + beq masked_doorbell_book3e + + /* Core exception code for all exceptions except TLB misses. 
* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */ @@ -522,7 +528,13 @@ kernel_dbg_exc: MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE) /* Doorbell interrupt */ - MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE) + START_EXCEPTION(doorbell) + NORMAL_EXCEPTION_PROLOG(0x2070, PROLOG_ADDITION_DOORBELL) + EXCEPTION_COMMON(0x2070, PACA_EXGEN, INTS_DISABLE_ALL) + CHECK_NAPPING() + addi r3,r1,STACK_FRAME_OVERHEAD + bl .doorbell_exception + b .ret_from_except_lite /* Doorbell critical Interrupt */ START_EXCEPTION(doorbell_crit); @@ -545,8 +557,16 @@ kernel_dbg_exc: * An interrupt came in while soft-disabled; clear EE in SRR1, * clear paca->hard_enabled and return. */ +masked_doorbell_book3e: + mtcr r10 + /* Resend the doorbell to fire again when ints enabled */ + mfspr r10,SPRN_PIR + PPC_MSGSND(r10) + b masked_interrupt_book3e_common + masked_interrupt_book3e: mtcr r10 +masked_interrupt_book3e_common: stb r11,PACAHARDIRQEN(r13) mfspr r10,SPRN_SRR1 rldicl r11,r10,48,1 /* clear MSR_EE */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index a85f4874cba..41b02c792aa 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -40,7 +40,6 @@ __start_interrupts: .globl system_reset_pSeries; system_reset_pSeries: HMT_MEDIUM; - DO_KVM 0x100; SET_SCRATCH0(r13) #ifdef CONFIG_PPC_P7_NAP BEGIN_FTR_SECTION @@ -50,82 +49,73 @@ BEGIN_FTR_SECTION * state loss at this time. */ mfspr r13,SPRN_SRR1 - rlwinm r13,r13,47-31,30,31 - cmpwi cr0,r13,1 - bne 1f - b .power7_wakeup_noloss -1: cmpwi cr0,r13,2 - bne 1f - b .power7_wakeup_loss + rlwinm. r13,r13,47-31,30,31 + beq 9f + + /* waking up from powersave (nap) state */ + cmpwi cr1,r13,2 /* Total loss of HV state is fatal, we could try to use the * PIR to locate a PACA, then use an emergency stack etc... * but for now, let's just stay stuck here */ -1: cmpwi cr0,r13,3 - beq . -END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206) + bgt cr1,. + GET_PACA(r13) + +#ifdef CONFIG_KVM_BOOK3S_64_HV + lbz r0,PACAPROCSTART(r13) + cmpwi r0,0x80 + bne 1f + li r0,0 + stb r0,PACAPROCSTART(r13) + b kvm_start_guest +1: +#endif + + beq cr1,2f + b .power7_wakeup_noloss +2: b .power7_wakeup_loss +9: +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) #endif /* CONFIG_PPC_P7_NAP */ - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD) + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, + NOTEST, 0x100) . = 0x200 -_machine_check_pSeries: - HMT_MEDIUM - DO_KVM 0x200 - SET_SCRATCH0(r13) - EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD) +machine_check_pSeries_1: + /* This is moved out of line as it can be patched by FW, but + * some code path might still want to branch into the original + * vector + */ + b machine_check_pSeries . 
= 0x300 .globl data_access_pSeries data_access_pSeries: HMT_MEDIUM - DO_KVM 0x300 SET_SCRATCH0(r13) +#ifndef CONFIG_POWER4_ONLY BEGIN_FTR_SECTION - GET_PACA(r13) - std r9,PACA_EXSLB+EX_R9(r13) - std r10,PACA_EXSLB+EX_R10(r13) - mfspr r10,SPRN_DAR - mfspr r9,SPRN_DSISR - srdi r10,r10,60 - rlwimi r10,r9,16,0x20 - mfcr r9 - cmpwi r10,0x2c - beq do_stab_bolted_pSeries - ld r10,PACA_EXSLB+EX_R10(r13) - std r11,PACA_EXGEN+EX_R11(r13) - ld r11,PACA_EXSLB+EX_R9(r13) - std r12,PACA_EXGEN+EX_R12(r13) - GET_SCRATCH0(r12) - std r10,PACA_EXGEN+EX_R10(r13) - std r11,PACA_EXGEN+EX_R9(r13) - std r12,PACA_EXGEN+EX_R13(r13) - EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD) -FTR_SECTION_ELSE - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD) -ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB) + b data_access_check_stab +data_access_not_stab: +END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) +#endif + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD, + KVMTEST_PR, 0x300) . = 0x380 .globl data_access_slb_pSeries data_access_slb_pSeries: HMT_MEDIUM - DO_KVM 0x380 SET_SCRATCH0(r13) - GET_PACA(r13) + EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380) std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_DAR - std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ - mfcr r9 #ifdef __DISABLED__ /* Keep that around for when we re-implement dynamic VSIDs */ cmpdi r3,0 bge slb_miss_user_pseries #endif /* __DISABLED__ */ - std r10,PACA_EXSLB+EX_R10(r13) - std r11,PACA_EXSLB+EX_R11(r13) - std r12,PACA_EXSLB+EX_R12(r13) - GET_SCRATCH0(r10) - std r10,PACA_EXSLB+EX_R13(r13) - mfspr r12,SPRN_SRR1 /* and SRR1 */ + mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE b .slb_miss_realmode #else @@ -147,24 +137,16 @@ data_access_slb_pSeries: .globl instruction_access_slb_pSeries instruction_access_slb_pSeries: HMT_MEDIUM - DO_KVM 0x480 SET_SCRATCH0(r13) - GET_PACA(r13) + EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480) std r3,PACA_EXSLB+EX_R3(r13) mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ - std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ - mfcr r9 #ifdef __DISABLED__ /* Keep that around for when we re-implement dynamic VSIDs */ cmpdi r3,0 bge slb_miss_user_pseries #endif /* __DISABLED__ */ - std r10,PACA_EXSLB+EX_R10(r13) - std r11,PACA_EXSLB+EX_R11(r13) - std r12,PACA_EXSLB+EX_R12(r13) - GET_SCRATCH0(r10) - std r10,PACA_EXSLB+EX_R13(r13) - mfspr r12,SPRN_SRR1 /* and SRR1 */ + mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE b .slb_miss_realmode #else @@ -184,26 +166,46 @@ instruction_access_slb_pSeries: hardware_interrupt_pSeries: hardware_interrupt_hv: BEGIN_FTR_SECTION - _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD) + _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, + EXC_HV, SOFTEN_TEST_HV) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502) FTR_SECTION_ELSE - _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV) - ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206) + _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, + EXC_STD, SOFTEN_TEST_HV_201) + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500) + ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) STD_EXCEPTION_PSERIES(0x600, 0x600, alignment) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600) + STD_EXCEPTION_PSERIES(0x700, 0x700, program_check) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700) + STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800) MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer) - MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer) + MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer) 
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00) + STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00) . = 0xc00 .globl system_call_pSeries system_call_pSeries: HMT_MEDIUM - DO_KVM 0xc00 +#ifdef CONFIG_KVM_BOOK3S_64_HANDLER + SET_SCRATCH0(r13) + GET_PACA(r13) + std r9,PACA_EXGEN+EX_R9(r13) + std r10,PACA_EXGEN+EX_R10(r13) + mfcr r9 + KVMTEST(0xc00) + GET_SCRATCH0(r13) +#endif BEGIN_FTR_SECTION cmpdi r0,0x1ebe beq- 1f @@ -220,6 +222,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) rfid b . /* prevent speculative execution */ + KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00) + /* Fast LE/BE switch system call */ 1: mfspr r12,SPRN_SRR1 xori r12,r12,MSR_LE @@ -228,6 +232,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) b . STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00) /* At 0xe??? we have a bunch of hypervisor exceptions, we branch * out of line to handle them @@ -262,30 +267,93 @@ vsx_unavailable_pSeries_1: #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error) + KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202) #endif /* CONFIG_CBE_RAS */ + STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) + KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300) + #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) + KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602) #endif /* CONFIG_CBE_RAS */ + STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700) + #ifdef CONFIG_CBE_RAS STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) + KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802) #endif /* CONFIG_CBE_RAS */ . = 0x3000 /*** Out of line interrupts support ***/ + /* moved from 0x200 */ +machine_check_pSeries: + .globl machine_check_fwnmi +machine_check_fwnmi: + HMT_MEDIUM + SET_SCRATCH0(r13) /* save r13 */ + EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, + EXC_STD, KVMTEST, 0x200) + KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200) + +#ifndef CONFIG_POWER4_ONLY + /* moved from 0x300 */ +data_access_check_stab: + GET_PACA(r13) + std r9,PACA_EXSLB+EX_R9(r13) + std r10,PACA_EXSLB+EX_R10(r13) + mfspr r10,SPRN_DAR + mfspr r9,SPRN_DSISR + srdi r10,r10,60 + rlwimi r10,r9,16,0x20 +#ifdef CONFIG_KVM_BOOK3S_PR + lbz r9,HSTATE_IN_GUEST(r13) + rlwimi r10,r9,8,0x300 +#endif + mfcr r9 + cmpwi r10,0x2c + beq do_stab_bolted_pSeries + mtcrf 0x80,r9 + ld r9,PACA_EXSLB+EX_R9(r13) + ld r10,PACA_EXSLB+EX_R10(r13) + b data_access_not_stab +do_stab_bolted_pSeries: + std r11,PACA_EXSLB+EX_R11(r13) + std r12,PACA_EXSLB+EX_R12(r13) + GET_SCRATCH0(r10) + std r10,PACA_EXSLB+EX_R13(r13) + EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) +#endif /* CONFIG_POWER4_ONLY */ + + KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300) + KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400) + KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982) + + .align 7 /* moved from 0xe00 */ - STD_EXCEPTION_HV(., 0xe00, h_data_storage) - STD_EXCEPTION_HV(., 0xe20, h_instr_storage) - STD_EXCEPTION_HV(., 0xe40, emulation_assist) - STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? 
*/ + STD_EXCEPTION_HV(., 0xe02, h_data_storage) + KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02) + STD_EXCEPTION_HV(., 0xe22, h_instr_storage) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22) + STD_EXCEPTION_HV(., 0xe42, emulation_assist) + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42) + STD_EXCEPTION_HV(., 0xe62, hmi_exception) /* need to flush cache ? */ + KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62) /* moved from 0xf00 */ STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00) STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20) STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable) + KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40) /* * An interrupt came in while soft-disabled; clear EE in SRR1, @@ -317,14 +385,6 @@ masked_Hinterrupt: hrfid b . - .align 7 -do_stab_bolted_pSeries: - std r11,PACA_EXSLB+EX_R11(r13) - std r12,PACA_EXSLB+EX_R12(r13) - GET_SCRATCH0(r10) - std r10,PACA_EXSLB+EX_R13(r13) - EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) - #ifdef CONFIG_PPC_PSERIES /* * Vectors for the FWNMI option. Share common code. @@ -334,14 +394,8 @@ do_stab_bolted_pSeries: system_reset_fwnmi: HMT_MEDIUM SET_SCRATCH0(r13) /* save r13 */ - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD) - - .globl machine_check_fwnmi - .align 7 -machine_check_fwnmi: - HMT_MEDIUM - SET_SCRATCH0(r13) /* save r13 */ - EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD) + EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, + NOTEST, 0x100) #endif /* CONFIG_PPC_PSERIES */ @@ -376,7 +430,11 @@ slb_miss_user_pseries: /* KVM's trampoline code needs to be close to the interrupt handlers */ #ifdef CONFIG_KVM_BOOK3S_64_HANDLER +#ifdef CONFIG_KVM_BOOK3S_PR #include "../kvm/book3s_rmhandlers.S" +#else +#include "../kvm/book3s_hv_rmhandlers.S" +#endif #endif .align 7 diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 5e12b741ba5..f8e971ba94f 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -93,6 +93,30 @@ _ENTRY(_start); bl early_init +#ifdef CONFIG_RELOCATABLE + /* + * r25 will contain RPN/ERPN for the start address of memory + * + * Add the difference between KERNELBASE and PAGE_OFFSET to the + * start of physical memory to get kernstart_addr. + */ + lis r3,kernstart_addr@ha + la r3,kernstart_addr@l(r3) + + lis r4,KERNELBASE@h + ori r4,r4,KERNELBASE@l + lis r5,PAGE_OFFSET@h + ori r5,r5,PAGE_OFFSET@l + subf r4,r5,r4 + + rlwinm r6,r25,0,28,31 /* ERPN */ + rlwinm r7,r25,0,0,3 /* RPN - assuming 256 MB page size */ + add r7,r7,r4 + + stw r6,0(r3) + stw r7,4(r3) +#endif + /* * Decide what sort of machine this is and initialize the MMU. */ @@ -1001,9 +1025,6 @@ clear_utlb_entry: lis r3,PAGE_OFFSET@h ori r3,r3,PAGE_OFFSET@l - /* Kernel is at the base of RAM */ - li r4, 0 /* Load the kernel physical address */ - /* Load the kernel PID = 0 */ li r0,0 mtspr SPRN_PID,r0 @@ -1013,9 +1034,8 @@ clear_utlb_entry: clrrwi r3,r3,12 /* Mask off the effective page number */ ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M - /* Word 1 */ - clrrwi r4,r4,12 /* Mask off the real page number */ - /* ERPN is 0 for first 4GB page */ + /* Word 1 - use r25. 
RPN is the same as the original entry */ + /* Word 2 */ li r5,0 ori r5,r5,PPC47x_TLB2_S_RWX @@ -1026,7 +1046,7 @@ clear_utlb_entry: /* We write to way 0 and bolted 0 */ lis r0,0x8800 tlbwe r3,r0,0 - tlbwe r4,r0,1 + tlbwe r25,r0,1 tlbwe r5,r0,2 /* @@ -1124,7 +1144,13 @@ head_start_common: lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ mtspr SPRN_IVPR,r4 - addis r22,r22,KERNELBASE@h + /* + * If the kernel was loaded at a non-zero 256 MB page, we need to + * mask off the most significant 4 bits to get the relative address + * from the start of physical memory + */ + rlwinm r22,r22,0,4,31 + addis r22,r22,PAGE_OFFSET@h mtlr r22 isync blr diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index ba504099844..3564c49c683 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -255,7 +255,7 @@ generic_secondary_common_init: mtctr r23 bctrl -3: LOAD_REG_ADDR(r3, boot_cpu_count) /* Decrement boot_cpu_count */ +3: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */ lwarx r4,0,r3 subi r4,r4,1 stwcx. r4,0,r3 diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index a0bf158c8b4..fc921bf62e1 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -20,33 +20,43 @@ addi reg,reg,val@l #endif +/* + * Macro used to get to thread save registers. + * Note that entries 0-3 are used for the prolog code, and the remaining + * entries are available for specific exception use in the event a handler + * requires more than 4 scratch registers. + */ +#define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) + #define NORMAL_EXCEPTION_PROLOG \ - mtspr SPRN_SPRG_WSCRATCH0,r10;/* save two registers to work with */\ - mtspr SPRN_SPRG_WSCRATCH1,r11; \ - mtspr SPRN_SPRG_WSCRATCH2,r1; \ - mfcr r10; /* save CR in r10 for now */\ + mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ + mfspr r10, SPRN_SPRG_THREAD; \ + stw r11, THREAD_NORMSAVE(0)(r10); \ + stw r13, THREAD_NORMSAVE(2)(r10); \ + mfcr r13; /* save CR in r13 for now */\ mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ andi. r11,r11,MSR_PR; \ + mr r11, r1; \ beq 1f; \ - mfspr r1,SPRN_SPRG_THREAD; /* if from user, start at top of */\ - lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ - ALLOC_STACK_FRAME(r1, THREAD_SIZE); \ -1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ - mr r11,r1; \ - stw r10,_CCR(r11); /* save various registers */\ + /* if from user, start at top of this thread's kernel stack */ \ + lwz r11, THREAD_INFO-THREAD(r10); \ + ALLOC_STACK_FRAME(r11, THREAD_SIZE); \ +1 : subi r11, r11, INT_FRAME_SIZE; /* Allocate exception frame */ \ + stw r13, _CCR(r11); /* save various registers */ \ stw r12,GPR12(r11); \ stw r9,GPR9(r11); \ - mfspr r10,SPRN_SPRG_RSCRATCH0; \ - stw r10,GPR10(r11); \ - mfspr r12,SPRN_SPRG_RSCRATCH1; \ + mfspr r13, SPRN_SPRG_RSCRATCH0; \ + stw r13, GPR10(r11); \ + lwz r12, THREAD_NORMSAVE(0)(r10); \ stw r12,GPR11(r11); \ + lwz r13, THREAD_NORMSAVE(2)(r10); /* restore r13 */ \ mflr r10; \ stw r10,_LINK(r11); \ - mfspr r10,SPRN_SPRG_RSCRATCH2; \ mfspr r12,SPRN_SRR0; \ - stw r10,GPR1(r11); \ + stw r1, GPR1(r11); \ mfspr r9,SPRN_SRR1; \ - stw r10,0(r11); \ + stw r1, 0(r11); \ + mr r1, r11; \ rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) 
*/\ stw r0,GPR0(r11); \ lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 5ecf54cfa7d..50845924b7d 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -346,11 +346,12 @@ interrupt_base: /* Data TLB Error Interrupt */ START_EXCEPTION(DataTLBError) mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ - mtspr SPRN_SPRG_WSCRATCH1, r11 - mtspr SPRN_SPRG_WSCRATCH2, r12 - mtspr SPRN_SPRG_WSCRATCH3, r13 - mfcr r11 - mtspr SPRN_SPRG_WSCRATCH4, r11 + mfspr r10, SPRN_SPRG_THREAD + stw r11, THREAD_NORMSAVE(0)(r10) + stw r12, THREAD_NORMSAVE(1)(r10) + stw r13, THREAD_NORMSAVE(2)(r10) + mfcr r13 + stw r13, THREAD_NORMSAVE(3)(r10) mfspr r10, SPRN_DEAR /* Get faulting address */ /* If we are faulting a kernel address, we have to use the @@ -416,11 +417,12 @@ interrupt_base: /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ - mfspr r11, SPRN_SPRG_RSCRATCH4 + mfspr r10, SPRN_SPRG_THREAD + lwz r11, THREAD_NORMSAVE(3)(r10) mtcr r11 - mfspr r13, SPRN_SPRG_RSCRATCH3 - mfspr r12, SPRN_SPRG_RSCRATCH2 - mfspr r11, SPRN_SPRG_RSCRATCH1 + lwz r13, THREAD_NORMSAVE(2)(r10) + lwz r12, THREAD_NORMSAVE(1)(r10) + lwz r11, THREAD_NORMSAVE(0)(r10) mfspr r10, SPRN_SPRG_RSCRATCH0 b DataStorage @@ -432,11 +434,12 @@ interrupt_base: */ START_EXCEPTION(InstructionTLBError) mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ - mtspr SPRN_SPRG_WSCRATCH1, r11 - mtspr SPRN_SPRG_WSCRATCH2, r12 - mtspr SPRN_SPRG_WSCRATCH3, r13 - mfcr r11 - mtspr SPRN_SPRG_WSCRATCH4, r11 + mfspr r10, SPRN_SPRG_THREAD + stw r11, THREAD_NORMSAVE(0)(r10) + stw r12, THREAD_NORMSAVE(1)(r10) + stw r13, THREAD_NORMSAVE(2)(r10) + mfcr r13 + stw r13, THREAD_NORMSAVE(3)(r10) mfspr r10, SPRN_SRR0 /* Get faulting address */ /* If we are faulting a kernel address, we have to use the @@ -496,11 +499,12 @@ interrupt_base: /* The bailout. Restore registers to pre-exception conditions * and call the heavyweights to help us out. */ - mfspr r11, SPRN_SPRG_RSCRATCH4 + mfspr r10, SPRN_SPRG_THREAD + lwz r11, THREAD_NORMSAVE(3)(r10) mtcr r11 - mfspr r13, SPRN_SPRG_RSCRATCH3 - mfspr r12, SPRN_SPRG_RSCRATCH2 - mfspr r11, SPRN_SPRG_RSCRATCH1 + lwz r13, THREAD_NORMSAVE(2)(r10) + lwz r12, THREAD_NORMSAVE(1)(r10) + lwz r11, THREAD_NORMSAVE(0)(r10) mfspr r10, SPRN_SPRG_RSCRATCH0 b InstructionStorage @@ -621,11 +625,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) tlbwe /* Done...restore registers and get out of here. 
*/ - mfspr r11, SPRN_SPRG_RSCRATCH4 + mfspr r10, SPRN_SPRG_THREAD + lwz r11, THREAD_NORMSAVE(3)(r10) mtcr r11 - mfspr r13, SPRN_SPRG_RSCRATCH3 - mfspr r12, SPRN_SPRG_RSCRATCH2 - mfspr r11, SPRN_SPRG_RSCRATCH1 + lwz r13, THREAD_NORMSAVE(2)(r10) + lwz r12, THREAD_NORMSAVE(1)(r10) + lwz r11, THREAD_NORMSAVE(0)(r10) mfspr r10, SPRN_SPRG_RSCRATCH0 rfi /* Force context change */ @@ -656,7 +661,7 @@ load_up_spe: cmpi 0,r4,0 beq 1f addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ - SAVE_32EVRS(0,r10,r4) + SAVE_32EVRS(0,r10,r4,THREAD_EVR0) evxor evr10, evr10, evr10 /* clear out evr10 */ evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ li r5,THREAD_ACC @@ -676,7 +681,7 @@ load_up_spe: stw r4,THREAD_USED_SPE(r5) evlddx evr4,r10,r5 evmra evr4,evr4 - REST_32EVRS(0,r10,r5) + REST_32EVRS(0,r10,r5,THREAD_EVR0) #ifndef CONFIG_SMP subi r4,r5,THREAD stw r4,last_task_used_spe@l(r3) @@ -787,13 +792,11 @@ _GLOBAL(giveup_spe) addi r3,r3,THREAD /* want THREAD of task */ lwz r5,PT_REGS(r3) cmpi 0,r5,0 - SAVE_32EVRS(0, r4, r3) + SAVE_32EVRS(0, r4, r3, THREAD_EVR0) evxor evr6, evr6, evr6 /* clear out evr6 */ evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ li r4,THREAD_ACC evstddx evr6, r4, r3 /* save off accumulator */ - mfspr r6,SPRN_SPEFSCR - stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */ beq 1f lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) lis r3,MSR_SPE@h diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S index 47a1a983ff8..3e2b95c6ae6 100644 --- a/arch/powerpc/kernel/idle_e500.S +++ b/arch/powerpc/kernel/idle_e500.S @@ -26,6 +26,17 @@ _GLOBAL(e500_idle) ori r4,r4,_TLF_NAPPING /* so when we take an exception */ stw r4,TI_LOCAL_FLAGS(r3) /* it will return to our caller */ +#ifdef CONFIG_E500MC + wrteei 1 +1: wait + + /* + * Guard against spurious wakeups (e.g. from a hypervisor) -- + * any real interrupt will cause us to return to LR due to + * _TLF_NAPPING. + */ + b 1b +#else /* Check if we can nap or doze, put HID0 mask in r3 */ lis r3,0 BEGIN_FTR_SECTION @@ -72,6 +83,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L2CSR|CPU_FTR_CAN_NAP) mtmsr r7 isync 2: b 2b +#endif /* !E500MC */ /* * Return from NAP/DOZE mode, restore some CPU specific registers, diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index f8f0bc7f1d4..3a70845a51c 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -73,7 +73,6 @@ _GLOBAL(power7_idle) b . 
_GLOBAL(power7_wakeup_loss) - GET_PACA(r13) ld r1,PACAR1(r13) REST_NVGPRS(r1) REST_GPR(2, r1) @@ -87,7 +86,6 @@ _GLOBAL(power7_wakeup_loss) rfid _GLOBAL(power7_wakeup_noloss) - GET_PACA(r13) ld r1,PACAR1(r13) ld r4,_MSR(r1) ld r5,_NIP(r1) diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index 1577434f408..b25f6325fc7 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -117,6 +117,7 @@ void ioport_unmap(void __iomem *addr) EXPORT_SYMBOL(ioport_map); EXPORT_SYMBOL(ioport_unmap); +#ifdef CONFIG_PCI void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) { resource_size_t start = pci_resource_start(dev, bar); @@ -146,3 +147,4 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr) EXPORT_SYMBOL(pci_iomap); EXPORT_SYMBOL(pci_iounmap); +#endif /* CONFIG_PCI */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5b428e30866..d281fb6f12f 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -157,12 +157,6 @@ notrace void arch_local_irq_restore(unsigned long en) if (get_hard_enabled()) return; -#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP) - /* Check for pending doorbell interrupts and resend to ourself */ - if (cpu_has_feature(CPU_FTR_DBELL)) - smp_muxed_ipi_resend(); -#endif - /* * Need to hard-enable interrupts here. Since currently disabled, * no need to take further asm precautions against preemption; but @@ -457,11 +451,18 @@ static inline void do_softirq_onstack(void) curtp = current_thread_info(); irqtp = softirq_ctx[smp_processor_id()]; irqtp->task = curtp->task; + irqtp->flags = 0; current->thread.ksp_limit = (unsigned long)irqtp + _ALIGN_UP(sizeof(struct thread_info), 16); call_do_softirq(irqtp); current->thread.ksp_limit = saved_sp_limit; irqtp->task = NULL; + + /* Set any flag that may have been set on the + * alternate stack + */ + if (irqtp->flags) + set_bits(irqtp->flags, &curtp->flags); } void do_softirq(void) @@ -750,7 +751,7 @@ unsigned int irq_create_mapping(struct irq_host *host, if (irq_setup_virq(host, virq, hwirq)) return NO_IRQ; - printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n", + pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n", hwirq, host->of_node ? host->of_node->full_name : "null", virq); return virq; @@ -882,6 +883,41 @@ unsigned int irq_find_mapping(struct irq_host *host, } EXPORT_SYMBOL_GPL(irq_find_mapping); +#ifdef CONFIG_SMP +int irq_choose_cpu(const struct cpumask *mask) +{ + int cpuid; + + if (cpumask_equal(mask, cpu_all_mask)) { + static int irq_rover; + static DEFINE_RAW_SPINLOCK(irq_rover_lock); + unsigned long flags; + + /* Round-robin distribution... 
*/ +do_round_robin: + raw_spin_lock_irqsave(&irq_rover_lock, flags); + + irq_rover = cpumask_next(irq_rover, cpu_online_mask); + if (irq_rover >= nr_cpu_ids) + irq_rover = cpumask_first(cpu_online_mask); + + cpuid = irq_rover; + + raw_spin_unlock_irqrestore(&irq_rover_lock, flags); + } else { + cpuid = cpumask_first_and(mask, cpu_online_mask); + if (cpuid >= nr_cpu_ids) + goto do_round_robin; + } + + return get_hard_smp_processor_id(cpuid); +} +#else +int irq_choose_cpu(const struct cpumask *mask) +{ + return hard_smp_processor_id(); +} +#endif unsigned int irq_radix_revmap_lookup(struct irq_host *host, irq_hw_number_t hwirq) diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c new file mode 100644 index 00000000000..368d158d665 --- /dev/null +++ b/arch/powerpc/kernel/jump_label.c @@ -0,0 +1,23 @@ +/* + * Copyright 2010 Michael Ellerman, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/jump_label.h> +#include <asm/code-patching.h> + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + u32 *addr = (u32 *)(unsigned long)entry->code; + + if (type == JUMP_LABEL_ENABLE) + patch_branch(addr, entry->target, 0); + else + patch_instruction(addr, PPC_INST_NOP); +} diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 7ee50f0547c..9ce1672afb5 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -126,7 +126,7 @@ void __init reserve_crashkernel(void) /* We might have got these values via the command line or the * device tree, either way sanitise them now. */ - crash_size = crashk_res.end - crashk_res.start + 1; + crash_size = resource_size(&crashk_res); #ifndef CONFIG_RELOCATABLE if (crashk_res.start != KDUMP_KERNELBASE) @@ -136,12 +136,16 @@ void __init reserve_crashkernel(void) crashk_res.start = KDUMP_KERNELBASE; #else if (!crashk_res.start) { +#ifdef CONFIG_PPC64 /* - * unspecified address, choose a region of specified size - * can overlap with initrd (ignoring corruption when retained) - * ppc64 requires kernel and some stacks to be in first segemnt + * On 64bit we split the RMO in half but cap it at half of + * a small SLB (128MB) since the crash kernel needs to place + * itself and some stacks to be in the first segment. 
*/ + crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2)); +#else crashk_res.start = KDUMP_KERNELBASE; +#endif } crash_base = PAGE_ALIGN(crashk_res.start); @@ -222,7 +226,7 @@ static void __init export_crashk_values(struct device_node *node) if (crashk_res.start != 0) { prom_add_property(node, &crashk_base_prop); - crashk_size = crashk_res.end - crashk_res.start + 1; + crashk_size = resource_size(&crashk_res); prom_add_property(node, &crashk_size_prop); } } diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index e89df59cdc5..616921ef143 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -339,7 +339,7 @@ _GLOBAL(real_205_writeb) #endif /* CONFIG_PPC_PASEMI */ -#ifdef CONFIG_CPU_FREQ_PMAC64 +#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE) /* * SCOM access functions for 970 (FX only for now) * @@ -408,7 +408,7 @@ _GLOBAL(scom970_write) /* restore interrupts */ mtmsrd r5,1 blr -#endif /* CONFIG_CPU_FREQ_PMAC64 */ +#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */ /* diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 49cee9df225..a1cd701b575 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -31,20 +31,6 @@ LIST_HEAD(module_bug_list); -void *module_alloc(unsigned long size) -{ - if (size == 0) - return NULL; - - return vmalloc_exec(size); -} - -/* Free memory returned from module_alloc */ -void module_free(struct module *mod, void *module_region) -{ - vfree(module_region); -} - static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, const char *name) @@ -93,7 +79,3 @@ int module_finalize(const Elf_Ehdr *hdr, return 0; } - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index f832773fc28..0b6d79617d7 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c @@ -174,17 +174,6 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, return 0; } -int apply_relocate(Elf32_Shdr *sechdrs, - const char *strtab, - unsigned int symindex, - unsigned int relsec, - struct module *module) -{ - printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", - module->name); - return -ENOEXEC; -} - static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) { if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 8fbb12508bf..9f44a775a10 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -243,16 +243,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr, return 0; } -int apply_relocate(Elf64_Shdr *sechdrs, - const char *strtab, - unsigned int symindex, - unsigned int relsec, - struct module *me) -{ - printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name); - return -ENOEXEC; -} - /* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives the value maximum span in an instruction which uses a signed offset) */ diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c index 845a5847889..fe21b515ca4 100644 --- a/arch/powerpc/kernel/mpc7450-pmu.c +++ b/arch/powerpc/kernel/mpc7450-pmu.c @@ -410,7 +410,7 @@ struct power_pmu mpc7450_pmu = { .cache_events = &mpc7450_cache_events, }; -static int init_mpc7450_pmu(void) +static int __init init_mpc7450_pmu(void) { if (!cur_cpu_spec->oprofile_cpu_type || strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450")) diff --git 
a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index 24582181b6e..59dbf6abaaf 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -26,7 +26,7 @@ #include <asm/topology.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #ifdef CONFIG_PPC_OF_PLATFORM_PCI diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index efeb8818418..0a5a899846b 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -167,7 +167,7 @@ void setup_paca(struct paca_struct *new_paca) * if we do a GET_PACA() before the feature fixups have been * applied */ - if (cpu_has_feature(CPU_FTR_HVMODE_206)) + if (cpu_has_feature(CPU_FTR_HVMODE)) mtspr(SPRN_SPRG_HPACA, local_paca); #endif mtspr(SPRN_SPRG_PACA, local_paca); diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index a3c92770e42..32656f10525 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -50,7 +50,7 @@ static int global_phb_number; /* Global phb counter */ resource_size_t isa_mem_base; /* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */ -unsigned int ppc_pci_flags = 0; +unsigned int pci_flags = 0; static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; @@ -107,7 +107,7 @@ static resource_size_t pcibios_io_size(const struct pci_controller *hose) #ifdef CONFIG_PPC64 return hose->pci_io_size; #else - return hose->io_resource.end - hose->io_resource.start + 1; + return resource_size(&hose->io_resource); #endif } @@ -842,9 +842,9 @@ int pci_proc_domain(struct pci_bus *bus) { struct pci_controller *hose = pci_bus_to_host(bus); - if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS)) + if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS)) return 0; - if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0) + if (pci_has_flag(PCI_COMPAT_DOMAIN_0)) return hose->global_number != 0; return 1; } @@ -920,13 +920,13 @@ static void __devinit pcibios_fixup_resources(struct pci_dev *dev) struct resource *res = dev->resource + i; if (!res->flags) continue; - /* On platforms that have PPC_PCI_PROBE_ONLY set, we don't + /* On platforms that have PCI_PROBE_ONLY set, we don't * consider 0 as an unassigned BAR value. It's technically * a valid value, but linux doesn't like it... so when we can * re-assign things, we do so, but if we can't, we keep it * around and hope for the best... */ - if (res->start == 0 && !(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) { + if (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY)) { pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] is unassigned\n", pci_name(dev), i, (unsigned long long)res->start, @@ -973,7 +973,7 @@ static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus, int i; /* We don't do anything if PCI_PROBE_ONLY is set */ - if (ppc_pci_flags & PPC_PCI_PROBE_ONLY) + if (pci_has_flag(PCI_PROBE_ONLY)) return 0; /* Job is a bit different between memory and IO */ @@ -1143,7 +1143,7 @@ void __devinit pci_fixup_cardbus(struct pci_bus *bus) static int skip_isa_ioresource_align(struct pci_dev *dev) { - if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) && + if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) && !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA)) return 1; return 0; @@ -1271,7 +1271,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus) * and as such ensure proper re-allocation * later. 
 	 */
-	if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)
+	if (pci_has_flag(PCI_REASSIGN_ALL_RSRC))
 		goto clear_resource;
 	pr = pci_find_parent_resource(bus->self, res);
 	if (pr == res) {
@@ -1456,7 +1456,7 @@ void __init pcibios_resource_survey(void)
 	list_for_each_entry(b, &pci_root_buses, node)
 		pcibios_allocate_bus_resources(b);
 
-	if (!(ppc_pci_flags & PPC_PCI_REASSIGN_ALL_RSRC)) {
+	if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
 		pcibios_allocate_resources(0);
 		pcibios_allocate_resources(1);
 	}
@@ -1465,7 +1465,7 @@ void __init pcibios_resource_survey(void)
 	 * the low IO area and the VGA memory area if they intersect the
 	 * bus available resources to avoid allocating things on top of them
 	 */
-	if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
+	if (!pci_has_flag(PCI_PROBE_ONLY)) {
 		list_for_each_entry(b, &pci_root_buses, node)
 			pcibios_reserve_legacy_regions(b);
 	}
@@ -1473,7 +1473,7 @@ void __init pcibios_resource_survey(void)
 	/* Now, if the platform didn't decide to blindly trust the firmware,
 	 * we proceed to assigning things that were left unassigned
 	 */
-	if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
+	if (!pci_has_flag(PCI_PROBE_ONLY)) {
 		pr_debug("PCI: Assigning unassigned resources...\n");
 		pci_assign_unassigned_resources();
 	}
@@ -1731,3 +1731,21 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
 	if (mode == PCI_PROBE_NORMAL)
 		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
 }
+
+static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
+{
+	int i, class = dev->class >> 8;
+
+	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
+	     class == PCI_CLASS_BRIDGE_OTHER) &&
+	    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
+	    (dev->bus->parent == NULL)) {
+		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+			dev->resource[i].start = 0;
+			dev->resource[i].end = 0;
+			dev->resource[i].flags = 0;
+		}
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 86585508e9c..bb154511db5 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -51,25 +51,6 @@ struct pci_dev *isa_bridge_pcidev;
 EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
 
 static void
-fixup_hide_host_resource_fsl(struct pci_dev *dev)
-{
-	int i, class = dev->class >> 8;
-
-	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
-	     class == PCI_CLASS_BRIDGE_OTHER) &&
-	    (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
-	    (dev->bus->parent == NULL)) {
-		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-			dev->resource[i].start = 0;
-			dev->resource[i].end = 0;
-			dev->resource[i].flags = 0;
-		}
-	}
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
-
-static void
 fixup_cpc710_pci64(struct pci_dev* dev)
 {
 	/* Hide the PCI64 BARs from the kernel as their content doesn't
@@ -249,7 +230,7 @@ static int __init pcibios_init(void)
 
 	printk(KERN_INFO "PCI: Probing PCI hardware\n");
 
-	if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS)
+	if (pci_has_flag(PCI_REASSIGN_ALL_BUS))
 		pci_assign_all_buses = 1;
 
 	/* Scan all of the recorded PCI controllers.  */
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index fc6452b6be9..ab34046752b 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -55,12 +55,12 @@ static int __init pcibios_init(void)
 	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
 
 	if (pci_probe_only)
-		ppc_pci_flags |= PPC_PCI_PROBE_ONLY;
+		pci_add_flags(PCI_PROBE_ONLY);
 
 	/* On ppc64, we always enable PCI domains and we keep domain 0
 	 * backward compatible in /proc for video cards
 	 */
-	ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0;
+	pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
 
 	/* Scan all of the recorded PCI controllers.  */
 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index d05ae4204bb..564c1d8bdb5 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -154,8 +154,12 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
 	    ((unsigned long)ptr & 7))
 		return -EFAULT;
 
-	if (!__get_user_inatomic(*ret, ptr))
+	pagefault_disable();
+	if (!__get_user_inatomic(*ret, ptr)) {
+		pagefault_enable();
 		return 0;
+	}
+	pagefault_enable();
 
 	return read_user_stack_slow(ptr, ret, 8);
 }
@@ -166,8 +170,12 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 	    ((unsigned long)ptr & 3))
 		return -EFAULT;
 
-	if (!__get_user_inatomic(*ret, ptr))
+	pagefault_disable();
+	if (!__get_user_inatomic(*ret, ptr)) {
+		pagefault_enable();
 		return 0;
+	}
+	pagefault_enable();
 
 	return read_user_stack_slow(ptr, ret, 4);
 }
@@ -294,11 +302,17 @@ static inline int current_is_64bit(void)
  */
 static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 {
+	int rc;
+
 	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
 	    ((unsigned long)ptr & 3))
 		return -EFAULT;
 
-	return __get_user_inatomic(*ret, ptr);
+	pagefault_disable();
+	rc = __get_user_inatomic(*ret, ptr);
+	pagefault_enable();
+
+	return rc;
 }
 
 static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 14967de9887..10a140f82cb 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -1408,7 +1408,7 @@ power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu
 	return NOTIFY_OK;
 }
 
-int register_power_pmu(struct power_pmu *pmu)
+int __cpuinit register_power_pmu(struct power_pmu *pmu)
 {
 	if (ppmu)
 		return -EBUSY;		/* something's already registered */
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index e9dbc2d35c9..b4f1dda4d08 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -609,7 +609,7 @@ static struct power_pmu power4_pmu = {
 	.cache_events		= &power4_cache_events,
 };
 
-static int init_power4_pmu(void)
+static int __init init_power4_pmu(void)
 {
 	if (!cur_cpu_spec->oprofile_cpu_type ||
 	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4"))
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index f58a2bd41b5..a8757baa28f 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -677,7 +677,7 @@ static struct power_pmu power5p_pmu = {
 	.cache_events		= &power5p_cache_events,
 };
 
-static int init_power5p_pmu(void)
+static int __init init_power5p_pmu(void)
 {
 	if (!cur_cpu_spec->oprofile_cpu_type ||
 	    (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index b1acab68414..e7f06eb7a86 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -617,7 +617,7 @@ static struct power_pmu power5_pmu = {
 	.cache_events		= &power5_cache_events,
 };
 
-static int init_power5_pmu(void)
+static int __init init_power5_pmu(void)
 {
 	if (!cur_cpu_spec->oprofile_cpu_type ||
 	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index b24a3a23d07..03b95e2c6d6 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -540,7 +540,7 @@ static struct power_pmu power6_pmu = {
 	.cache_events		= &power6_cache_events,
 };
 
-static int init_power6_pmu(void)
+static int __init init_power6_pmu(void)
 {
 	if (!cur_cpu_spec->oprofile_cpu_type ||
 	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 6d9dccb2ea5..de83d6060dd 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -365,7 +365,7 @@ static struct power_pmu power7_pmu = {
 	.cache_events		= &power7_cache_events,
 };
 
-static int init_power7_pmu(void)
+static int __init init_power7_pmu(void)
 {
 	if (!cur_cpu_spec->oprofile_cpu_type ||
 	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index b121de9658e..8c219020696 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -489,7 +489,7 @@ static struct power_pmu ppc970_pmu = {
 	.cache_events		= &ppc970_cache_events,
 };
 
-static int init_ppc970_pmu(void)
+static int __init init_ppc970_pmu(void)
 {
 	if (!cur_cpu_spec->oprofile_cpu_type ||
 	    (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 7d28f540200..f5ae872a2ef 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -18,7 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/checksum.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 91e52df3d81..8f53954e75a 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,6 +96,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_fp_to_thread);
 
 void enable_kernel_fp(void)
 {
@@ -145,6 +146,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
@@ -186,6 +188,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
@@ -213,6 +216,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
 #ifdef CONFIG_SMP
 		BUG_ON(tsk != current);
 #endif
+		tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
 		giveup_spe(tsk);
 	}
 	preempt_enable();
@@ -650,6 +654,8 @@ void show_regs(struct pt_regs * regs)
 	printbits(regs->msr, msr_bits);
 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
 	trap = TRAP(regs);
+	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
+		printk("CFAR: "REG"\n", regs->orig_gpr3);
 	if (trap == 0x300 || trap == 0x600)
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
@@ -831,8 +837,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
 #endif
 
-	set_fs(USER_DS);
-
 	/*
 	 * If we exec out of a kernel thread then thread.regs will not be
 	 * set.  Do it now.
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8c3112a57cf..174e1e96175 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -69,6 +69,7 @@ unsigned long tce_alloc_start, tce_alloc_end;
 u64 ppc64_rma_size;
 #endif
 static phys_addr_t first_memblock_size;
+static int __initdata boot_cpu_count;
 
 static int __init early_parse_mem(char *p)
 {
@@ -769,6 +770,13 @@ void __init early_init_devtree(void *params)
 	 */
 	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
+	/* We'll later wait for secondaries to check in; there are
+	 * NCPUS-1 non-boot CPUs  :-)
+	 */
+	spinning_secondaries = boot_cpu_count - 1;
+#endif
+
 	DBG(" <- early_init_devtree()\n");
 }
 
@@ -862,16 +870,14 @@ static int prom_reconfig_notifier(struct notifier_block *nb,
 	switch (action) {
 	case PSERIES_RECONFIG_ADD:
 		err = of_finish_dynamic_node(node);
-		if (err < 0) {
+		if (err < 0)
 			printk(KERN_ERR "finish_node returned %d\n", err);
-			err = NOTIFY_BAD;
-		}
 		break;
 	default:
-		err = NOTIFY_DONE;
+		err = 0;
 		break;
 	}
-	return err;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block prom_reconfig_nb = {
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index c016033ba78..a909f4e9343 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1020,7 +1020,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
 	}
 	if (addr == 0)
 		return 0;
-	RELOC(alloc_bottom) = addr;
+	RELOC(alloc_bottom) = addr + size;
 
 	prom_debug(" -> %x\n", addr);
 	prom_debug("  alloc_bottom : %x\n", RELOC(alloc_bottom));
@@ -1830,11 +1830,13 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
 		if (room > DEVTREE_CHUNK_SIZE)
 			room = DEVTREE_CHUNK_SIZE;
 		if (room < PAGE_SIZE)
-			prom_panic("No memory for flatten_device_tree (no room)");
+			prom_panic("No memory for flatten_device_tree "
+				   "(no room)\n");
 		chunk = alloc_up(room, 0);
 		if (chunk == 0)
-			prom_panic("No memory for flatten_device_tree (claim failed)");
-		*mem_end = RELOC(alloc_top);
+			prom_panic("No memory for flatten_device_tree "
+				   "(claim failed)\n");
+		*mem_end = chunk + room;
 	}
 
 	ret = (void *)*mem_start;
@@ -2042,7 +2044,7 @@ static void __init flatten_device_tree(void)
 
 	/*
 	 * Check how much room we have between alloc top & bottom (+/- a
-	 * few pages), crop to 4Mb, as this is our "chuck" size
+	 * few pages), crop to 1MB, as this is our "chunk" size
 	 */
 	room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
 	if (room > DEVTREE_CHUNK_SIZE)
@@ -2053,7 +2055,7 @@ static void __init flatten_device_tree(void)
 	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
 	if (mem_start == 0)
 		prom_panic("Can't allocate initial device-tree chunk\n");
-	mem_end = RELOC(alloc_top);
+	mem_end = mem_start + room;
 
 	/* Get root of tree */
 	root = call_prom("peer", 1, 1, (phandle)0);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 271ff6318ed..d5ca8236315 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -24,6 +24,7 @@
 #include <linux/cpumask.h>
 #include <linux/memblock.h>
 #include <linux/slab.h>
+#include <linux/reboot.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -38,7 +39,7 @@
 #include <asm/udbg.h>
 #include <asm/syscalls.h>
 #include <asm/smp.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/time.h>
 #include <asm/mmu.h>
 #include <asm/topology.h>
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index bf5f5ce3a7b..e037c7494fd 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
+#include <linux/reboot.h>
 #include <asm/delay.h>
 #include <asm/uaccess.h>
 #include <asm/rtas.h>
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 54e66da8f74..6cd8f0196b6 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -291,7 +291,7 @@ void __init find_and_init_phbs(void)
 		prop = of_get_property(of_chosen,
 				"linux,pci-assign-all-buses", NULL);
 		if (prop && *prop)
-			ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+			pci_add_flags(PCI_REASSIGN_ALL_BUS);
 #endif /* CONFIG_PPC32 */
 	}
 }
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 67f6c3b5135..481ef064c8f 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -27,7 +27,7 @@
 #include <asm/rtas.h>
 #include <asm/prom.h>
 #include <asm/nvram.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/machdep.h>
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 79fca2651b6..b1d738d1289 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -375,6 +375,9 @@ void __init check_for_initrd(void)
 
 int threads_per_core, threads_shift;
 cpumask_t threads_core_mask;
+EXPORT_SYMBOL_GPL(threads_per_core);
+EXPORT_SYMBOL_GPL(threads_shift);
+EXPORT_SYMBOL_GPL(threads_core_mask);
 
 static void __init cpu_init_thread_core_maps(int tpc)
 {
@@ -704,29 +707,14 @@ static int powerpc_debugfs_init(void)
 arch_initcall(powerpc_debugfs_init);
 #endif
 
-static int ppc_dflt_bus_notify(struct notifier_block *nb,
-			       unsigned long action, void *data)
+void ppc_printk_progress(char *s, unsigned short hex)
 {
-	struct device *dev = data;
-
-	/* We are only intereted in device addition */
-	if (action != BUS_NOTIFY_ADD_DEVICE)
-		return 0;
-
-	set_dma_ops(dev, &dma_direct_ops);
-
-	return NOTIFY_DONE;
+	pr_info("%s\n", s);
 }
 
-static struct notifier_block ppc_dflt_plat_bus_notifier = {
-	.notifier_call = ppc_dflt_bus_notify,
-	.priority = INT_MAX,
-};
-
-static int __init setup_bus_notifier(void)
+void arch_setup_pdev_archdata(struct platform_device *pdev)
 {
-	bus_register_notifier(&platform_bus_type, &ppc_dflt_plat_bus_notifier);
-	return 0;
+	pdev->archdata.dma_mask = DMA_BIT_MASK(32);
+	pdev->dev.dma_mask = &pdev->archdata.dma_mask;
+	set_dma_ops(&pdev->dev, &dma_direct_ops);
 }
-
-arch_initcall(setup_bus_notifier);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 620d792b52e..209135af0a4 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -48,8 +48,8 @@ extern void bootx_init(unsigned long r4, unsigned long phys);
 
 int boot_cpuid = -1;
 EXPORT_SYMBOL_GPL(boot_cpuid);
-int __initdata boot_cpu_count;
 int boot_cpuid_phys;
+EXPORT_SYMBOL_GPL(boot_cpuid_phys);
 
 int smp_hw_index[NR_CPUS];
 
@@ -127,6 +127,8 @@ notrace void __init machine_init(unsigned long dt_ptr)
 	/* Do some early initialization based on the flat device tree */
 	early_init_devtree(__va(dt_ptr));
 
+	early_init_mmu();
+
 	probe_machine();
 
 	setup_kdump_trampoline();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a88bf2713d4..aebef1320ed 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -63,6 +63,7 @@
 #include <asm/kexec.h>
 #include <asm/mmu_context.h>
 #include <asm/code-patching.h>
+#include <asm/kvm_ppc.h>
 
 #include "setup.h"
 
@@ -73,7 +74,7 @@
 #endif
 
 int boot_cpuid = 0;
-int __initdata boot_cpu_count;
+int __initdata spinning_secondaries;
 u64 ppc64_pft_size;
 
 /* Pick defaults since we might want to patch instructions
@@ -253,11 +254,11 @@ void smp_release_cpus(void)
 	for (i = 0; i < 100000; i++) {
 		mb();
 		HMT_low();
-		if (boot_cpu_count == 0)
+		if (spinning_secondaries == 0)
 			break;
 		udelay(1);
 	}
-	DBG("boot_cpu_count = %d\n", boot_cpu_count);
+	DBG("spinning_secondaries = %d\n", spinning_secondaries);
 
 	DBG(" <- smp_release_cpus()\n");
 }
@@ -580,6 +581,8 @@ void __init setup_arch(char **cmdline_p)
 	/* Initialize the MMU context management stuff */
 	mmu_context_init();
 
+	kvm_rma_init();
+
 	ppc64_boot_msg(0x15, "Setup Done");
 }
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index 03e45c4a9ef..640de836e46 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -11,7 +11,7 @@
 #include <linux/unistd.h>
 #include <linux/init.h>
 #include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/smp.h>
 #include <asm/time.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc6700b98..7bf2187dfd9 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -33,7 +33,7 @@
 #include <linux/topology.h>
 
 #include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <asm/irq.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -202,14 +202,6 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
 	smp_ops->cause_ipi(cpu, info->data);
 }
 
-void smp_muxed_ipi_resend(void)
-{
-	struct cpu_messages *info = &__get_cpu_var(ipi_message);
-
-	if (info->messages)
-		smp_ops->cause_ipi(smp_processor_id(), info->data);
-}
-
 irqreturn_t smp_ipi_demux(void)
 {
 	struct cpu_messages *info = &__get_cpu_var(ipi_message);
@@ -238,15 +230,26 @@ irqreturn_t smp_ipi_demux(void)
 }
 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
 
+static inline void do_message_pass(int cpu, int msg)
+{
+	if (smp_ops->message_pass)
+		smp_ops->message_pass(cpu, msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+	else
+		smp_muxed_ipi_message_pass(cpu, msg);
+#endif
+}
+
 void smp_send_reschedule(int cpu)
 {
 	if (likely(smp_ops))
-		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
+		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
+	do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -254,7 +257,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 	unsigned int cpu;
 
 	for_each_cpu(cpu, mask)
-		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
+		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 }
 
 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
@@ -268,7 +271,7 @@ void smp_send_debugger_break(void)
 
 	for_each_online_cpu(cpu)
 		if (cpu != me)
-			smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
 }
 #endif
 
@@ -303,6 +306,10 @@ struct thread_info *current_set[NR_CPUS];
 static void __devinit smp_store_cpu_info(int id)
 {
 	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	per_cpu(next_tlbcam_idx, id)
+		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
+#endif
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1a0141426cd..f19d9777d3c 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1387,10 +1387,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
 	int code = 0;
 	int err;
 
-	preempt_disable();
-	if (regs->msr & MSR_SPE)
-		giveup_spe(current);
-	preempt_enable();
+	flush_spe_to_thread(current);
 
 	spefscr = current->thread.spefscr;
 	fpexc_mode = current->thread.fpexc_mode;
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 23d65abbedc..faa82c1f3f6 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -31,6 +31,9 @@ void __init udbg_early_init(void)
 #if defined(CONFIG_PPC_EARLY_DEBUG_LPAR)
 	/* For LPAR machines that have an HVC console on vterm 0 */
 	udbg_init_debug_lpar();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_LPAR_HVSI)
+	/* For LPAR machines that have an HVSI console on vterm 0 */
+	udbg_init_debug_lpar_hvsi();
#elif defined(CONFIG_PPC_EARLY_DEBUG_G5)
 	/* For use on Apple G5 machines */
 	udbg_init_pmac_realmode();
@@ -68,6 +71,8 @@ void __init udbg_early_init(void)
 
 #ifdef CONFIG_PPC_EARLY_DEBUG
 	console_loglevel = 10;
+
+	register_early_udbg_console();
 #endif
 }
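
Editor's note: the change that recurs across pci-common.c, pci_32.c, pci_64.c and rtas_pci.c above replaces the powerpc-private ppc_pci_flags bitmask with the generic pci_flags helpers. A minimal sketch of the new idiom, assuming the helpers come from <asm-generic/pci-bridge.h> (the function name example_pci_flag_setup is hypothetical, for illustration only; it is not part of the patch):

	/* Sketch only: pci_add_flags() ORs bits into the global pci_flags
	 * word and pci_has_flag() tests a single bit, replacing direct
	 * manipulation of the old ppc_pci_flags variable.
	 */
	#include <linux/init.h>
	#include <linux/printk.h>
	#include <asm-generic/pci-bridge.h>

	static void __init example_pci_flag_setup(void)
	{
		/* was: ppc_pci_flags |= PPC_PCI_PROBE_ONLY | PPC_PCI_REASSIGN_ALL_BUS; */
		pci_add_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_BUS);

		/* was: if (ppc_pci_flags & PPC_PCI_PROBE_ONLY) */
		if (pci_has_flag(PCI_PROBE_ONLY))
			pr_info("PCI: trusting firmware resource assignment\n");
	}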