Diffstat (limited to 'arch/powerpc/kernel')
38 files changed, 1323 insertions, 450 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c09138d150d..496cc5b3984 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -50,6 +50,9 @@
 #endif
 #ifdef CONFIG_KVM
 #include <linux/kvm_host.h>
+#ifndef CONFIG_BOOKE
+#include <asm/kvm_book3s.h>
+#endif
 #endif
 
 #ifdef CONFIG_PPC32
@@ -105,6 +108,9 @@ int main(void)
 	DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
 #endif /* CONFIG_SPE */
 #endif /* CONFIG_PPC64 */
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
+	DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
+#endif
 
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -183,6 +189,7 @@ int main(void)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
 	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+	DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
 	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
 	DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
 	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
@@ -190,33 +197,9 @@ int main(void)
 	DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
 	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-	DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
-	DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
-	DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
-	DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
-	DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
-	DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
-	DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
-	DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
-	DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
-	DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
-	DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
-	DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
-	DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
-	DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
-	DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
-	DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
-	DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
-	DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
-	DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
-	DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
-	DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
-	DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
-					    shadow_vcpu.vmhandler));
-	DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
-					   shadow_vcpu.scratch0));
-	DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
-					   shadow_vcpu.scratch1));
+	DEFINE(PACA_KVM_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
+	DEFINE(SVCPU_SLB, offsetof(struct kvmppc_book3s_shadow_vcpu, slb));
+	DEFINE(SVCPU_SLB_MAX, offsetof(struct kvmppc_book3s_shadow_vcpu, slb_max));
 #endif
 #endif /* CONFIG_PPC64 */
 
@@ -227,8 +210,8 @@ int main(void)
 	/* Interrupt register frame */
 	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
 	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
-#ifdef CONFIG_PPC64
 	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+#ifdef CONFIG_PPC64
 	/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
 	DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
 	DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
@@ -411,9 +394,6 @@ int main(void)
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
-	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
-	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
 	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
 	DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
@@ -421,32 +401,81 @@ int main(void)
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
 
-	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
-	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
-	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
-
-	/* book3s_64 */
-#ifdef CONFIG_PPC64
-	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+	/* book3s */
+#ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
-	DEFINE(VCPU_HOST_R2, offsetof(struct kvm_vcpu, arch.host_r2));
 	DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
-	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
 	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
+			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
+	DEFINE(SVCPU_CR, offsetof(struct kvmppc_book3s_shadow_vcpu, cr));
+	DEFINE(SVCPU_XER, offsetof(struct kvmppc_book3s_shadow_vcpu, xer));
+	DEFINE(SVCPU_CTR, offsetof(struct kvmppc_book3s_shadow_vcpu, ctr));
+	DEFINE(SVCPU_LR, offsetof(struct kvmppc_book3s_shadow_vcpu, lr));
+	DEFINE(SVCPU_PC, offsetof(struct kvmppc_book3s_shadow_vcpu, pc));
+	DEFINE(SVCPU_R0, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[0]));
+	DEFINE(SVCPU_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[1]));
+	DEFINE(SVCPU_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[2]));
+	DEFINE(SVCPU_R3, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[3]));
+	DEFINE(SVCPU_R4, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[4]));
+	DEFINE(SVCPU_R5, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[5]));
+	DEFINE(SVCPU_R6, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[6]));
+	DEFINE(SVCPU_R7, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[7]));
+	DEFINE(SVCPU_R8, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[8]));
+	DEFINE(SVCPU_R9, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[9]));
+	DEFINE(SVCPU_R10, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[10]));
+	DEFINE(SVCPU_R11, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[11]));
+	DEFINE(SVCPU_R12, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[12]));
+	DEFINE(SVCPU_R13, offsetof(struct kvmppc_book3s_shadow_vcpu, gpr[13]));
+	DEFINE(SVCPU_HOST_R1, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r1));
+	DEFINE(SVCPU_HOST_R2, offsetof(struct kvmppc_book3s_shadow_vcpu, host_r2));
+	DEFINE(SVCPU_VMHANDLER, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					 vmhandler));
+	DEFINE(SVCPU_SCRATCH0, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					scratch0));
+	DEFINE(SVCPU_SCRATCH1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					scratch1));
+	DEFINE(SVCPU_IN_GUEST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					in_guest));
+	DEFINE(SVCPU_FAULT_DSISR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					   fault_dsisr));
+	DEFINE(SVCPU_FAULT_DAR, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					 fault_dar));
+	DEFINE(SVCPU_LAST_INST, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					 last_inst));
+	DEFINE(SVCPU_SHADOW_SRR1, offsetof(struct kvmppc_book3s_shadow_vcpu,
+					   shadow_srr1));
+#ifdef CONFIG_PPC_BOOK3S_32
+	DEFINE(SVCPU_SR, offsetof(struct kvmppc_book3s_shadow_vcpu, sr));
+#endif
 #else
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
-#endif /* CONFIG_PPC64 */
+	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+#endif /* CONFIG_PPC_BOOK3S */
 #endif
 
 #ifdef CONFIG_44x
 	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
 	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
 #endif
+#ifdef CONFIG_FSL_BOOKE
+	DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
+	DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
+	DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
+	DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
+	DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
+	DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
+#endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
					       arch.timing_exit.tv32.tbu));
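All of these DEFINE()/offsetof() lines exist so that assembly code can address C structure members by numeric offset: asm-offsets.c is compiled to assembler, the build extracts the marked values into the generated asm-offsets.h, and files such as head_44x.S or the KVM handlers then use the resulting constants. A minimal standalone sketch of the technique follows (structure layout and file names here are simplified stand-ins, not the kernel's full definitions):

	/* sketch.c - build with "cc -S sketch.c"; the build system greps
	 * the "->" marker lines out of sketch.s to produce a header of
	 * numeric constants usable from assembly. */
	#include <stddef.h>

	struct paca_struct {		/* stand-in; the real struct is larger */
		unsigned short hw_cpu_id;
		unsigned char kexec_state;
	};

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	int main(void)
	{
		DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
		return 0;
	}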
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 8af4949434b..9556be903e9 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1701,6 +1701,35 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check		= machine_check_440A,
 		.platform		= "ppc440",
 	},
+	{ /* 476 core */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x11a50000,
+		.cpu_name		= "476",
+		.cpu_features		= CPU_FTRS_47X,
+		.cpu_user_features	= COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_FPU,
+		.mmu_features		= MMU_FTR_TYPE_47x |
+			MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 128,
+		.machine_check		= machine_check_47x,
+		.platform		= "ppc470",
+	},
+	{ /* 476 iss */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x00050000,
+		.cpu_name		= "476",
+		.cpu_features		= CPU_FTRS_47X,
+		.cpu_user_features	= COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_FPU,
+		.mmu_features		= MMU_FTR_TYPE_47x |
+			MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 128,
+		.machine_check		= machine_check_47x,
+		.platform		= "ppc470",
+	},
 	{ /* default match */
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
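(The "476 iss" entry originally carried a duplicated .cpu_user_features initializer; only the COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU assignment belongs there.) The two new entries differ only in pvr_value: 0x11a50000 matches 476 silicon, 0x00050000 the 476 ISS simulation model, each compared against the PVR through pvr_mask. A standalone sketch of the first-match lookup this table relies on (simplified; the kernel's identify_cpu() also patches feature sections):

	/* How a cpu_specs[]-style table is searched: mask the PVR, compare,
	 * first hit wins; the all-zero "default match" entry ends the table
	 * because mask 0 matches any PVR. */
	struct cpu_spec_sketch {
		unsigned int pvr_mask;
		unsigned int pvr_value;
		const char *cpu_name;
	};

	static const struct cpu_spec_sketch *
	identify_cpu_sketch(const struct cpu_spec_sketch *s, unsigned int pvr)
	{
		for (;; s++)
			if ((pvr & s->pvr_mask) == s->pvr_value)
				return s;
	}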
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 6f4613dd05e..8c066d6a8e4 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -162,6 +162,32 @@ static void crash_kexec_prepare_cpus(int cpu)
 	/* Leave the IPI callback set */
 }
 
+/* wait for all the CPUs to hit real mode but timeout if they don't come in */
+static void crash_kexec_wait_realmode(int cpu)
+{
+	unsigned int msecs;
+	int i;
+
+	msecs = 10000;
+	for (i=0; i < NR_CPUS && msecs > 0; i++) {
+		if (i == cpu)
+			continue;
+
+		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
+			barrier();
+			if (!cpu_possible(i)) {
+				break;
+			}
+			if (!cpu_online(i)) {
+				break;
+			}
+			msecs--;
+			mdelay(1);
+		}
+	}
+	mb();
+}
+
 /*
  * This function will be called by secondary cpus or by kexec cpu
  * if soft-reset is activated to stop some CPUs.
@@ -347,10 +373,12 @@ int crash_shutdown_unregister(crash_shutdown_t handler)
 EXPORT_SYMBOL(crash_shutdown_unregister);
 
 static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
+static int crash_shutdown_cpu = -1;
 
 static int handle_fault(struct pt_regs *regs)
 {
-	longjmp(crash_shutdown_buf, 1);
+	if (crash_shutdown_cpu == smp_processor_id())
+		longjmp(crash_shutdown_buf, 1);
 	return 0;
 }
 
@@ -375,11 +403,14 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	for_each_irq(i) {
 		struct irq_desc *desc = irq_to_desc(i);
 
+		if (!desc || !desc->chip || !desc->chip->eoi)
+			continue;
+
 		if (desc->status & IRQ_INPROGRESS)
 			desc->chip->eoi(i);
 
 		if (!(desc->status & IRQ_DISABLED))
-			desc->chip->disable(i);
+			desc->chip->shutdown(i);
 	}
 
 	/*
@@ -388,6 +419,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	 */
 	old_handler = __debugger_fault_handler;
 	__debugger_fault_handler = handle_fault;
+	crash_shutdown_cpu = smp_processor_id();
 	for (i = 0; crash_shutdown_handles[i]; i++) {
 		if (setjmp(crash_shutdown_buf) == 0) {
 			/*
@@ -401,6 +433,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 			asm volatile("sync; isync");
 		}
 	}
+	crash_shutdown_cpu = -1;
 	__debugger_fault_handler = old_handler;
 
 	/*
@@ -412,6 +445,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 		crash_kexec_prepare_cpus(crashing_cpu);
 		cpu_set(crashing_cpu, cpus_in_crash);
 		crash_kexec_stop_spus();
+		crash_kexec_wait_realmode(crashing_cpu);
 		if (ppc_md.kexec_cpu_down)
 			ppc_md.kexec_cpu_down(1, 0);
 	}
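The crash.c hunks harden the crash path in two ways: secondary CPUs are now polled via paca[i].kexec_state until they reach KEXEC_STATE_REAL_MODE, and the setjmp()/longjmp() fence around the registered crash-shutdown handlers is keyed to crash_shutdown_cpu, so a fault taken on another CPU can no longer longjmp into the crashing CPU's stack. A standalone model of that fence (illustrative names; in the kernel the fence is installed through __debugger_fault_handler):

	#include <setjmp.h>

	static jmp_buf shutdown_buf;
	static int shutdown_cpu = -1;		/* -1: fence disarmed */

	static int handle_fault_sketch(int faulting_cpu)
	{
		if (shutdown_cpu == faulting_cpu)
			longjmp(shutdown_buf, 1);	/* unwind only our own fault */
		return 0;
	}

	static void run_shutdown_handler(void (*handler)(void), int my_cpu)
	{
		shutdown_cpu = my_cpu;			/* arm the fence */
		if (setjmp(shutdown_buf) == 0)
			handler();			/* may fault; we resume here */
		shutdown_cpu = -1;			/* disarm */
	}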
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 4ff4da2c238..e7fe218b869 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -39,8 +39,8 @@ struct dma_map_ops swiotlb_dma_ops = {
 	.dma_supported = swiotlb_dma_supported,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
-	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
-	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
 	.mapping_error = swiotlb_dma_mapping_error,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 6c1df5757cd..8d1de6f31d5 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -127,11 +127,11 @@ static inline void dma_direct_sync_sg(struct device *dev,
 		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }
 
-static inline void dma_direct_sync_single_range(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
+static inline void dma_direct_sync_single(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 {
-	__dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 #endif
 
@@ -144,8 +144,8 @@ struct dma_map_ops dma_direct_ops = {
 	.map_page = dma_direct_map_page,
 	.unmap_page = dma_direct_unmap_page,
 #ifdef CONFIG_NOT_COHERENT_CACHE
-	.sync_single_range_for_cpu = dma_direct_sync_single_range,
-	.sync_single_range_for_device = dma_direct_sync_single_range,
+	.sync_single_for_cpu = dma_direct_sync_single,
+	.sync_single_for_device = dma_direct_sync_single,
 	.sync_sg_for_cpu = dma_direct_sync_sg,
 	.sync_sg_for_device = dma_direct_sync_sg,
 #endif
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 1175a8539e6..ed4aeb96398 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -373,11 +373,13 @@ syscall_exit_cont:
 	bnel-	load_dbcr0
 #endif
 #ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
 	lis	r4,icache_44x_need_flush@ha
 	lwz	r5,icache_44x_need_flush@l(r4)
 	cmplwi	cr0,r5,0
 	bne-	2f
1:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
 #endif /* CONFIG_44x */
 BEGIN_FTR_SECTION
 	lwarx	r7,0,r1
@@ -848,6 +850,9 @@ resume_kernel:
 /* interrupts are hard-disabled at this point */
restore:
 #ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+	b	1f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 	lis	r4,icache_44x_need_flush@ha
 	lwz	r5,icache_44x_need_flush@l(r4)
 	cmplwi	cr0,r5,0
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e3be98ffe2a..3e423fbad6b 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -735,8 +735,11 @@ _STATIC(do_hash_page)
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 
-	andis.	r0,r4,0xa450		/* weird error? */
+	andis.	r0,r4,0xa410		/* weird error? */
 	bne-	handle_page_fault	/* if not, try to insert a HPTE */
+	andis.	r0,r4,DSISR_DABRMATCH@h
+	bne-	handle_dabr_fault
+
BEGIN_FTR_SECTION
 	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
 	bne-	do_ste_alloc		/* If so handle it */
@@ -823,6 +826,14 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 	bl	.raw_local_irq_restore
 	b	11f
 
+/* We have a data breakpoint exception - handle it */
+handle_dabr_fault:
+	ld	r4,_DAR(r1)
+	ld	r5,_DSISR(r1)
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_dabr
+	b	.ret_from_except_lite
+
 /* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
	ENABLE_INTS
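The exceptions-64s.S change narrows the "weird error" DSISR mask from 0xa450 to 0xa410 precisely because DSISR_DABRMATCH (0x00400000, which is 0x0040 in the high halfword tested by andis.) is no longer treated as an error: it gets its own test and branches to the new handle_dabr_fault, which calls do_dabr(). The arithmetic, checked in C (mask values as in the hunk; DSISR_DABRMATCH per the kernel headers):

	#include <assert.h>

	#define DSISR_DABRMATCH 0x00400000u	/* data address breakpoint match */

	int main(void)
	{
		unsigned int weird_old = 0xa450u << 16;	/* andis. shifts imm by 16 */
		unsigned int weird_new = 0xa410u << 16;

		/* the DABR bit was carved out of the "weird error" mask... */
		assert(weird_new == (weird_old & ~DSISR_DABRMATCH));
		/* ...and is now dispatched separately to handle_dabr_fault */
		return 0;
	}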
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index e025e89fe93..98c4b29a56f 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -33,6 +33,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/bug.h>
+#include <asm/kvm_book3s_asm.h>
 
 /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
 #define LOAD_BAT(n, reg, RA, RB)	\
@@ -303,6 +304,7 @@ __secondary_hold_acknowledge:
  */
 #define EXCEPTION(n, label, hdlr, xfer)	\
 	. = n;				\
+	DO_KVM n;			\
label:					\
 	EXCEPTION_PROLOG;		\
 	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
@@ -358,6 +360,7 @@ i##n:	\
  * -- paulus.
  */
 	. = 0x200
+	DO_KVM  0x200
 	mtspr	SPRN_SPRG_SCRATCH0,r10
 	mtspr	SPRN_SPRG_SCRATCH1,r11
 	mfcr	r10
@@ -381,6 +384,7 @@ i##n:	\
 
 /* Data access exception. */
 	. = 0x300
+	DO_KVM  0x300
DataAccess:
 	EXCEPTION_PROLOG
 	mfspr	r10,SPRN_DSISR
@@ -397,6 +401,7 @@ DataAccess:
 
 /* Instruction access exception. */
 	. = 0x400
+	DO_KVM  0x400
InstructionAccess:
 	EXCEPTION_PROLOG
 	andis.	r0,r9,0x4000		/* no pte found? */
@@ -413,6 +418,7 @@ InstructionAccess:
 
 /* Alignment exception */
 	. = 0x600
+	DO_KVM  0x600
Alignment:
 	EXCEPTION_PROLOG
 	mfspr	r4,SPRN_DAR
@@ -427,6 +433,7 @@ Alignment:
 
 /* Floating-point unavailable */
 	. = 0x800
+	DO_KVM  0x800
FPUnavailable:
BEGIN_FTR_SECTION
 /*
@@ -450,6 +457,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
 
 /* System call */
 	. = 0xc00
+	DO_KVM  0xc00
SystemCall:
 	EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0xc00, DoSyscall)
@@ -467,9 +475,11 @@ SystemCall:
 * by executing an altivec instruction.
 */
 	. = 0xf00
+	DO_KVM  0xf00
 	b	PerformanceMonitor
 
 	. = 0xf20
+	DO_KVM  0xf20
 	b	AltiVecUnavailable
 
 /*
@@ -882,6 +892,10 @@ __secondary_start:
 	RFI
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+#include "../kvm/book3s_rmhandlers.S"
+#endif
+
 /*
 * Those generic dummy functions are kept for CPUs not
 * included in CONFIG_6xx
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 711368b993f..5ab484ef06a 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/synch.h>
 #include "head_booke.h"
 
@@ -69,165 +70,7 @@ _ENTRY(_start);
 	mr	r27,r7
 	li	r24,0		/* CPU number */
 
-/*
- * In case the firmware didn't do it, we apply some workarounds
- * that are good for all 440 core variants here
- */
-	mfspr	r3,SPRN_CCR0
-	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
-	isync
-	mtspr	SPRN_CCR0,r3
-	isync
-	sync
-
-/*
- * Set up the initial MMU state
- *
- * We are still executing code at the virtual address
- * mappings set by the firmware for the base of RAM.
- *
- * We first invalidate all TLB entries but the one
- * we are running from.  We then load the KERNELBASE
- * mappings so we can begin to use kernel addresses
- * natively and so the interrupt vector locations are
- * permanently pinned (necessary since Book E
- * implementations always have translation enabled).
- *
- * TODO: Use the known TLB entry we are running from to
- *	 determine which physical region we are located
- *	 in.  This can be used to determine where in RAM
- *	 (on a shared CPU system) or PCI memory space
- *	 (on a DRAMless system) we are located.
- *	 For now, we assume a perfect world which means
- *	 we are located at the base of DRAM (physical 0).
- */
-
-/*
- * Search TLB for entry that we are currently using.
- * Invalidate all entries but the one we are using.
- */
-	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
-	mfspr	r3,SPRN_PID			/* Get PID */
-	mfmsr	r4				/* Get MSR */
-	andi.	r4,r4,MSR_IS@l			/* TS=1? */
-	beq	wmmucr				/* If not, leave STS=0 */
-	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
-wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
-	sync
-
-	bl	invstr				/* Find our address */
-invstr:	mflr	r5				/* Make it accessible */
-	tlbsx	r23,0,r5			/* Find entry we are in */
-	li	r4,0				/* Start at TLB entry 0 */
-	li	r3,0				/* Set PAGEID inval value */
-1:	cmpw	r23,r4				/* Is this our entry? */
-	beq	skpinv				/* If so, skip the inval */
-	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
-skpinv:	addi	r4,r4,1				/* Increment */
-	cmpwi	r4,64				/* Are we done? */
-	bne	1b				/* If not, repeat */
-	isync					/* If so, context change */
-
-/*
- * Configure and load pinned entry into TLB slot 63.
- */
-
-	lis	r3,PAGE_OFFSET@h
-	ori	r3,r3,PAGE_OFFSET@l
-
-	/* Kernel is at the base of RAM */
-	li	r4, 0			/* Load the kernel physical address */
-
-	/* Load the kernel PID = 0 */
-	li	r0,0
-	mtspr	SPRN_PID,r0
-	sync
-
-	/* Initialize MMUCR */
-	li	r5,0
-	mtspr	SPRN_MMUCR,r5
-	sync
-
-	/* pageid fields */
-	clrrwi	r3,r3,10		/* Mask off the effective page number */
-	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
-
-	/* xlat fields */
-	clrrwi	r4,r4,10		/* Mask off the real page number */
-	/* ERPN is 0 for first 4GB page */
-
-	/* attrib fields */
-	/* Added guarded bit to protect against speculative loads/stores */
-	li	r5,0
-	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
-
-	li	r0,63			/* TLB slot 63 */
-
-	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
-	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
-	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
-
-	/* Force context change */
-	mfmsr	r0
-	mtspr	SPRN_SRR1, r0
-	lis	r0,3f@h
-	ori	r0,r0,3f@l
-	mtspr	SPRN_SRR0,r0
-	sync
-	rfi
-
-	/* If necessary, invalidate original entry we used */
-3:	cmpwi	r23,63
-	beq	4f
-	li	r6,0
-	tlbwe	r6,r23,PPC44x_TLB_PAGEID
-	isync
-
-4:
-#ifdef CONFIG_PPC_EARLY_DEBUG_44x
-	/* Add UART mapping for early debug. */
-
-	/* pageid fields */
-	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
-	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
-
-	/* xlat fields */
-	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
-	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
-
-	/* attrib fields */
-	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
-	li	r0,62			/* TLB slot 0 */
-
-	tlbwe	r3,r0,PPC44x_TLB_PAGEID
-	tlbwe	r4,r0,PPC44x_TLB_XLAT
-	tlbwe	r5,r0,PPC44x_TLB_ATTRIB
-
-	/* Force context change */
-	isync
-#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
-
-	/* Establish the interrupt vector offsets */
-	SET_IVOR(0, CriticalInput);
-	SET_IVOR(1, MachineCheck);
-	SET_IVOR(2, DataStorage);
-	SET_IVOR(3, InstructionStorage);
-	SET_IVOR(4, ExternalInput);
-	SET_IVOR(5, Alignment);
-	SET_IVOR(6, Program);
-	SET_IVOR(7, FloatingPointUnavailable);
-	SET_IVOR(8, SystemCall);
-	SET_IVOR(9, AuxillaryProcessorUnavailable);
-	SET_IVOR(10, Decrementer);
-	SET_IVOR(11, FixedIntervalTimer);
-	SET_IVOR(12, WatchdogTimer);
-	SET_IVOR(13, DataTLBError);
-	SET_IVOR(14, InstructionTLBError);
-	SET_IVOR(15, DebugCrit);
-
-	/* Establish the interrupt vector base */
-	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
-	mtspr	SPRN_IVPR,r4
+	bl	init_cpu_state
 
 /*
  * This is where the main kernel code starts.
@@ -349,7 +192,7 @@ interrupt_base:
 #endif
 
 	/* Data TLB Error Interrupt */
-	START_EXCEPTION(DataTLBError)
+	START_EXCEPTION(DataTLBError44x)
 	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
 	mtspr	SPRN_SPRG_WSCRATCH1, r11
 	mtspr	SPRN_SPRG_WSCRATCH2, r12
@@ -440,7 +283,7 @@ tlb_44x_patch_hwater_D:
 	mfspr	r10,SPRN_DEAR
 
 	/* Jump to common tlb load */
-	b	finish_tlb_load
+	b	finish_tlb_load_44x
2:
 	/* The bailout.  Restore registers to pre-exception conditions
@@ -460,7 +303,7 @@ tlb_44x_patch_hwater_D:
 	 * information from different registers and bailout
 	 * to a different point.
 	 */
-	START_EXCEPTION(InstructionTLBError)
+	START_EXCEPTION(InstructionTLBError44x)
 	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
 	mtspr	SPRN_SPRG_WSCRATCH1, r11
 	mtspr	SPRN_SPRG_WSCRATCH2, r12
@@ -536,7 +379,7 @@ tlb_44x_patch_hwater_I:
 	mfspr	r10,SPRN_SRR0
 
 	/* Jump to common TLB load point */
-	b	finish_tlb_load
+	b	finish_tlb_load_44x
 
2:
 	/* The bailout.  Restore registers to pre-exception conditions
@@ -550,15 +393,7 @@ tlb_44x_patch_hwater_I:
 	mfspr	r10, SPRN_SPRG_RSCRATCH0
 	b	InstructionStorage
 
-	/* Debug Interrupt */
-	DEBUG_CRIT_EXCEPTION
-
-/*
- * Local functions
- */
-
 /*
- * Both the instruction and data TLB miss get to this
  * point to load the TLB.
  * r10 - EA of fault
  * r11 - PTE high word value
  * r12 - PTE low word value
  * r13 - TLB index
  * MMUCR - loaded with proper value when we get here
  * Upon exit, we reload everything and RFI.
  */
-finish_tlb_load:
+finish_tlb_load_44x:
 	/* Combine RPN & ERPN an write WS 0 */
 	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
 	tlbwe	r11,r13,PPC44x_TLB_XLAT
@@ -601,6 +436,227 @@ finish_tlb_load:
 	mfspr	r10, SPRN_SPRG_RSCRATCH0
 	rfi			/* Force context change */
 
+/* TLB error interrupts for 476
+ */
+#ifdef CONFIG_PPC_47x
+	START_EXCEPTION(DataTLBError47x)
+	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
+	mtspr	SPRN_SPRG_WSCRATCH1,r11
+	mtspr	SPRN_SPRG_WSCRATCH2,r12
+	mtspr	SPRN_SPRG_WSCRATCH3,r13
+	mfcr	r11
+	mtspr	SPRN_SPRG_WSCRATCH4,r11
+	mfspr	r10,SPRN_DEAR		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11,PAGE_OFFSET@h
+	cmplw	cr0,r10,r11
+	blt+	3f
+	lis	r11,swapper_pg_dir@h
+	ori	r11,r11, swapper_pg_dir@l
+	li	r12,0			/* MMUCR = 0 */
+	b	4f
+
+	/* Get the PGD for the current thread and setup MMUCR */
+3:	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+	mfspr	r12,SPRN_PID		/* Get PID */
+4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
+
+	/* Mask of required permission bits. Note that while we
+	 * do copy ESR:ST to _PAGE_RW position as trying to write
+	 * to an RO page is pretty common, we don't do it with
+	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
+	 * event so I'd rather take the overhead when it happens
+	 * rather than adding an instruction here. We should measure
+	 * whether the whole thing is worth it in the first place
+	 * as we could avoid loading SPRN_ESR completely in the first
+	 * place...
+	 *
+	 * TODO: Is it worth doing that mfspr & rlwimi in the first
+	 *       place or can we save a couple of instructions here ?
+	 */
+	mfspr	r12,SPRN_ESR
+	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
+	rlwimi	r13,r12,10,30,30
+
+	/* Load the PTE */
+	/* Compute pgdir/pmd offset */
+	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
+
+	/* Word 0 is EPN,V,TS,DSIZ */
+	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
+	li	r12,0
+	tlbwe	r10,r12,0
+
+	/* XXX can we do better ? Need to make sure tlbwe has established
+	 * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+	isync
+#endif
+
+	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
+	/* Compute pte address */
+	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+	beq	2f			/* Bail if no table */
+	lwz	r11,0(r12)		/* Get high word of pte entry */
+
+	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+	 * bottom of r12 to create a data dependency... We can also use r10
+	 * as destination nowadays
+	 */
+#ifdef CONFIG_SMP
+	lwsync
+#endif
+	lwz	r12,4(r12)		/* Get low word of pte entry */
+
+	andc.	r13,r13,r12		/* Check permission */
+
+	/* Jump to common tlb load */
+	beq	finish_tlb_load_47x
+
+2:	/* The bailout. Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11,SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13,SPRN_SPRG_RSCRATCH3
+	mfspr	r12,SPRN_SPRG_RSCRATCH2
+	mfspr	r11,SPRN_SPRG_RSCRATCH1
+	mfspr	r10,SPRN_SPRG_RSCRATCH0
+	b	DataStorage
+
+	/* Instruction TLB Error Interrupt */
+	/*
+	 * Nearly the same as above, except we get our
+	 * information from different registers and bailout
+	 * to a different point.
+	 */
+	START_EXCEPTION(InstructionTLBError47x)
+	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
+	mtspr	SPRN_SPRG_WSCRATCH1,r11
+	mtspr	SPRN_SPRG_WSCRATCH2,r12
+	mtspr	SPRN_SPRG_WSCRATCH3,r13
+	mfcr	r11
+	mtspr	SPRN_SPRG_WSCRATCH4,r11
+	mfspr	r10,SPRN_SRR0		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11,PAGE_OFFSET@h
+	cmplw	cr0,r10,r11
+	blt+	3f
+	lis	r11,swapper_pg_dir@h
+	ori	r11,r11, swapper_pg_dir@l
+	li	r12,0			/* MMUCR = 0 */
+	b	4f
+
+	/* Get the PGD for the current thread and setup MMUCR */
+3:	mfspr	r11,SPRN_SPRG_THREAD
+	lwz	r11,PGDIR(r11)
+	mfspr	r12,SPRN_PID		/* Get PID */
+4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
+
+	/* Make up the required permissions */
+	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+
+	/* Load PTE */
+	/* Compute pgdir/pmd offset */
+	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
+
+	/* Word 0 is EPN,V,TS,DSIZ */
+	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
+	li	r12,0
+	tlbwe	r10,r12,0
+
+	/* XXX can we do better ? Need to make sure tlbwe has established
+	 * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+	isync
+#endif
+
+	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
+	/* Compute pte address */
+	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+	beq	2f			/* Bail if no table */
+
+	lwz	r11,0(r12)		/* Get high word of pte entry */
+	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+	 * bottom of r12 to create a data dependency... We can also use r10
+	 * as destination nowadays
+	 */
+#ifdef CONFIG_SMP
+	lwsync
+#endif
+	lwz	r12,4(r12)		/* Get low word of pte entry */
+
+	andc.	r13,r13,r12		/* Check permission */
+
+	/* Jump to common TLB load point */
+	beq	finish_tlb_load_47x
+
+2:	/* The bailout. Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11, SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG_RSCRATCH3
+	mfspr	r12, SPRN_SPRG_RSCRATCH2
+	mfspr	r11, SPRN_SPRG_RSCRATCH1
+	mfspr	r10, SPRN_SPRG_RSCRATCH0
+	b	InstructionStorage
+
+/*
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ * 	r10 - free to use
+ * 	r11 - PTE high word value
+ * 	r12 - PTE low word value
+ * 	r13 - free to use
+ * 	MMUCR - loaded with proper value when we get here
+ * 	Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load_47x:
+	/* Combine RPN & ERPN an write WS 1 */
+	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
+	tlbwe	r11,r13,1
+
+	/* And make up word 2 */
+	li	r10,0xf85			/* Mask to apply from PTE */
+	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
+	and	r11,r12,r10			/* Mask PTE bits to keep */
+	andi.	r10,r12,_PAGE_USER		/* User page ? */
+	beq	1f				/* nope, leave U bits empty */
+	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
+1:	tlbwe	r11,r13,2
+
+	/* Done...restore registers and get out of here.
+	 */
+	mfspr	r11, SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG_RSCRATCH3
+	mfspr	r12, SPRN_SPRG_RSCRATCH2
+	mfspr	r11, SPRN_SPRG_RSCRATCH1
+	mfspr	r10, SPRN_SPRG_RSCRATCH0
+	rfi
+
+#endif /* CONFIG_PPC_47x */
+
+	/* Debug Interrupt */
+	/*
+	 * This statement needs to exist at the end of the IVPR
+	 * definition just in case you end up taking a debug
+	 * exception within another exception.
+	 */
+	DEBUG_CRIT_EXCEPTION
+
 /*
  * Global functions
  */
@@ -647,6 +703,428 @@ _GLOBAL(set_context)
 	blr
 
 /*
+ * Init CPU state. This is called at boot time or for secondary CPUs
+ * to setup initial TLB entries, setup IVORs, etc...
+ *
+ */
+_GLOBAL(init_cpu_state)
+	mflr	r22
+#ifdef CONFIG_PPC_47x
+	/* We use the PVR to differenciate 44x cores from 476 */
+	mfspr	r3,SPRN_PVR
+	srwi	r3,r3,16
+	cmplwi	cr0,r3,PVR_476@h
+	beq	head_start_47x
+	cmplwi	cr0,r3,PVR_476_ISS@h
+	beq	head_start_47x
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * In case the firmware didn't do it, we apply some workarounds
+ * that are good for all 440 core variants here
+ */
+	mfspr	r3,SPRN_CCR0
+	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
+	isync
+	mtspr	SPRN_CCR0,r3
+	isync
+	sync
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ *
+ * We first invalidate all TLB entries but the one
+ * we are running from.  We then load the KERNELBASE
+ * mappings so we can begin to use kernel addresses
+ * natively and so the interrupt vector locations are
+ * permanently pinned (necessary since Book E
+ * implementations always have translation enabled).
+ *
+ * TODO: Use the known TLB entry we are running from to
+ *	 determine which physical region we are located
+ *	 in.  This can be used to determine where in RAM
+ *	 (on a shared CPU system) or PCI memory space
+ *	 (on a DRAMless system) we are located.
+ *	 For now, we assume a perfect world which means
+ *	 we are located at the base of DRAM (physical 0).
+ */
+
+/*
+ * Search TLB for entry that we are currently using.
+ * Invalidate all entries but the one we are using.
+ */
+	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+	mfspr	r3,SPRN_PID			/* Get PID */
+	mfmsr	r4				/* Get MSR */
+	andi.	r4,r4,MSR_IS@l			/* TS=1? */
+	beq	wmmucr				/* If not, leave STS=0 */
+	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
+wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
+	sync
+
+	bl	invstr				/* Find our address */
+invstr:	mflr	r5				/* Make it accessible */
+	tlbsx	r23,0,r5			/* Find entry we are in */
+	li	r4,0				/* Start at TLB entry 0 */
+	li	r3,0				/* Set PAGEID inval value */
+1:	cmpw	r23,r4				/* Is this our entry? */
+	beq	skpinv				/* If so, skip the inval */
+	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
+skpinv:	addi	r4,r4,1				/* Increment */
+	cmpwi	r4,64				/* Are we done? */
+	bne	1b				/* If not, repeat */
+	isync					/* If so, context change */
+
+/*
+ * Configure and load pinned entry into TLB slot 63.
+ */
+
+	lis	r3,PAGE_OFFSET@h
+	ori	r3,r3,PAGE_OFFSET@l
+
+	/* Kernel is at the base of RAM */
+	li	r4, 0			/* Load the kernel physical address */
+
+	/* Load the kernel PID = 0 */
+	li	r0,0
+	mtspr	SPRN_PID,r0
+	sync
+
+	/* Initialize MMUCR */
+	li	r5,0
+	mtspr	SPRN_MMUCR,r5
+	sync
+
+	/* pageid fields */
+	clrrwi	r3,r3,10		/* Mask off the effective page number */
+	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
+
+	/* xlat fields */
+	clrrwi	r4,r4,10		/* Mask off the real page number */
+	/* ERPN is 0 for first 4GB page */
+
+	/* attrib fields */
+	/* Added guarded bit to protect against speculative loads/stores */
+	li	r5,0
+	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
+
+	li	r0,63			/* TLB slot 63 */
+
+	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
+	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
+	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
+
+	/* Force context change */
+	mfmsr	r0
+	mtspr	SPRN_SRR1, r0
+	lis	r0,3f@h
+	ori	r0,r0,3f@l
+	mtspr	SPRN_SRR0,r0
+	sync
+	rfi
+
+	/* If necessary, invalidate original entry we used */
+3:	cmpwi	r23,63
+	beq	4f
+	li	r6,0
+	tlbwe	r6,r23,PPC44x_TLB_PAGEID
+	isync
+
+4:
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+	/* Add UART mapping for early debug. */
+
+	/* pageid fields */
+	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
+
+	/* xlat fields */
+	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+	/* attrib fields */
+	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
+	li	r0,62			/* TLB slot 0 */
+
+	tlbwe	r3,r0,PPC44x_TLB_PAGEID
+	tlbwe	r4,r0,PPC44x_TLB_XLAT
+	tlbwe	r5,r0,PPC44x_TLB_ATTRIB
+
+	/* Force context change */
+	isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+	/* Establish the interrupt vector offsets */
+	SET_IVOR(0, CriticalInput);
+	SET_IVOR(1, MachineCheck);
+	SET_IVOR(2, DataStorage);
+	SET_IVOR(3, InstructionStorage);
+	SET_IVOR(4, ExternalInput);
+	SET_IVOR(5, Alignment);
+	SET_IVOR(6, Program);
+	SET_IVOR(7, FloatingPointUnavailable);
+	SET_IVOR(8, SystemCall);
+	SET_IVOR(9, AuxillaryProcessorUnavailable);
+	SET_IVOR(10, Decrementer);
+	SET_IVOR(11, FixedIntervalTimer);
+	SET_IVOR(12, WatchdogTimer);
+	SET_IVOR(13, DataTLBError44x);
+	SET_IVOR(14, InstructionTLBError44x);
+	SET_IVOR(15, DebugCrit);
+
+	b	head_start_common
+
+
+#ifdef CONFIG_PPC_47x
+
+#ifdef CONFIG_SMP
+
+/* Entry point for secondary 47x processors */
+_GLOBAL(start_secondary_47x)
+	mr	r24,r3		/* CPU number */
+
+	bl	init_cpu_state
+
+	/* Now we need to bolt the rest of kernel memory which
+	 * is done in C code. We must be careful because our task
+	 * struct or our stack can (and will probably) be out
+	 * of reach of the initial 256M TLB entry, so we use a
+	 * small temporary stack in .bss for that. This works
+	 * because only one CPU at a time can be in this code
+	 */
+	lis	r1,temp_boot_stack@h
+	ori	r1,r1,temp_boot_stack@l
+	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
+	li	r0,0
+	stw	r0,0(r1)
+	bl	mmu_init_secondary
+
+	/* Now we can get our task struct and real stack pointer */
+
+	/* Get current_thread_info and current */
+	lis	r1,secondary_ti@ha
+	lwz	r1,secondary_ti@l(r1)
+	lwz	r2,TI_TASK(r1)
+
+	/* Current stack pointer */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r0,0
+	stw	r0,0(r1)
+
+	/* Kernel stack for exception entry in SPRG3 */
+	addi	r4,r2,THREAD	/* init task's THREAD */
+	mtspr	SPRN_SPRG3,r4
+
+	b	start_secondary
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Set up the initial MMU state for 44x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ */
+
+head_start_47x:
+	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+	mfspr	r3,SPRN_PID			/* Get PID */
+	mfmsr	r4				/* Get MSR */
+	andi.	r4,r4,MSR_IS@l			/* TS=1? */
+	beq	1f				/* If not, leave STS=0 */
+	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
+1:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
+	sync
+
+	/* Find the entry we are running from */
+	bl	1f
+1:	mflr	r23
+	tlbsx	r23,0,r23
+	tlbre	r24,r23,0
+	tlbre	r25,r23,1
+	tlbre	r26,r23,2
+
+/*
+ * Cleanup time
+ */
+
+	/* Initialize MMUCR */
+	li	r5,0
+	mtspr	SPRN_MMUCR,r5
+	sync
+
+clear_all_utlb_entries:
+
+	#; Set initial values.
+
+	addis		r3,0,0x8000
+	addi		r4,0,0
+	addi		r5,0,0
+	b		clear_utlb_entry
+
+	#; Align the loop to speed things up.
+
+	.align		6
+
+clear_utlb_entry:
+
+	tlbwe		r4,r3,0
+	tlbwe		r5,r3,1
+	tlbwe		r5,r3,2
+	addis		r3,r3,0x2000
+	cmpwi		r3,0
+	bne		clear_utlb_entry
+	addis		r3,0,0x8000
+	addis		r4,r4,0x100
+	cmpwi		r4,0
+	bne		clear_utlb_entry
+
+	#; Restore original entry.
+
+	oris	r23,r23,0x8000	/* specify the way */
+	tlbwe		r24,r23,0
+	tlbwe		r25,r23,1
+	tlbwe		r26,r23,2
+
+/*
+ * Configure and load pinned entry into TLB for the kernel core
+ */
+
+	lis	r3,PAGE_OFFSET@h
+	ori	r3,r3,PAGE_OFFSET@l
+
+	/* Kernel is at the base of RAM */
+	li	r4, 0			/* Load the kernel physical address */
+
+	/* Load the kernel PID = 0 */
+	li	r0,0
+	mtspr	SPRN_PID,r0
+	sync
+
+	/* Word 0 */
+	clrrwi	r3,r3,12		/* Mask off the effective page number */
+	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
+
+	/* Word 1 */
+	clrrwi	r4,r4,12		/* Mask off the real page number */
+	/* ERPN is 0 for first 4GB page */
+	/* Word 2 */
+	li	r5,0
+	ori	r5,r5,PPC47x_TLB2_S_RWX
+#ifdef CONFIG_SMP
+	ori	r5,r5,PPC47x_TLB2_M
+#endif
+
+	/* We write to way 0 and bolted 0 */
+	lis	r0,0x8800
+	tlbwe	r3,r0,0
+	tlbwe	r4,r0,1
+	tlbwe	r5,r0,2
+
+/*
+ * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
+ * them up later
+ */
+	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
+	mtspr	SPRN_SSPCR,r3
+	mtspr	SPRN_USPCR,r3
+	LOAD_REG_IMMEDIATE(r3, 0x12345670)
+	mtspr	SPRN_ISPCR,r3
+
+	/* Force context change */
+	mfmsr	r0
+	mtspr	SPRN_SRR1, r0
+	lis	r0,3f@h
+	ori	r0,r0,3f@l
+	mtspr	SPRN_SRR0,r0
+	sync
+	rfi
+
+	/* Invalidate original entry we used */
+3:
+	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
+	tlbwe	r24,r23,0
+	addi	r24,0,0
+	tlbwe	r24,r23,1
+	tlbwe	r24,r23,2
+	isync			/* Clear out the shadow TLB entries */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+	/* Add UART mapping for early debug. */
+
+	/* Word 0 */
+	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
+
+	/* Word 1 */
+	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+	/* Word 2 */
+	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
+
+	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
+	 * congruence class as the kernel, we need to make sure of it at
+	 * some point
+	 */
+	lis	r0,0x8d00
+	tlbwe	r3,r0,0
+	tlbwe	r4,r0,1
+	tlbwe	r5,r0,2
+
+	/* Force context change */
+	isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+	/* Establish the interrupt vector offsets */
+	SET_IVOR(0, CriticalInput);
+	SET_IVOR(1, MachineCheckA);
+	SET_IVOR(2, DataStorage);
+	SET_IVOR(3, InstructionStorage);
+	SET_IVOR(4, ExternalInput);
+	SET_IVOR(5, Alignment);
+	SET_IVOR(6, Program);
+	SET_IVOR(7, FloatingPointUnavailable);
+	SET_IVOR(8, SystemCall);
+	SET_IVOR(9, AuxillaryProcessorUnavailable);
+	SET_IVOR(10, Decrementer);
+	SET_IVOR(11, FixedIntervalTimer);
+	SET_IVOR(12, WatchdogTimer);
+	SET_IVOR(13, DataTLBError47x);
+	SET_IVOR(14, InstructionTLBError47x);
+	SET_IVOR(15, DebugCrit);
+
+	/* We configure icbi to invalidate 128 bytes at a time since the
+	 * current 32-bit kernel code isn't too happy with icache != dcache
+	 * block size
+	 */
+	mfspr	r3,SPRN_CCR0
+	oris	r3,r3,0x0020
+	mtspr	SPRN_CCR0,r3
+	isync
+
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * Here we are back to code that is common between 44x and 47x
+ *
+ * We proceed to further kernel initialization and return to the
+ * main kernel entry
+ */
+head_start_common:
+	/* Establish the interrupt vector base */
+	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
+	mtspr	SPRN_IVPR,r4
+
+	addis	r22,r22,KERNELBASE@h
+	mtlr	r22
+	isync
+	blr
+
+/*
  * We put a few things here that have to be page-aligned. This stuff
  * goes at the beginning of the data segment, which is page-aligned.
  */
@@ -671,3 +1149,9 @@ swapper_pg_dir:
  */
abatron_pteptrs:
 	.space	8
+
+#ifdef CONFIG_SMP
+	.align	12
+temp_boot_stack:
+	.space	1024
+#endif /* CONFIG_SMP */
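The 47x DataTLBError47x/InstructionTLBError47x handlers above are a software page-table walk: pick swapper_pg_dir for kernel addresses or the thread's PGD otherwise, read the level-1 entry, bail to the generic storage exception if there is no page table, then fetch the two-word PTE and check permissions before finish_tlb_load_47x writes the TLB words. Roughly, in C (illustrative constants and shifts; the real masks come from the PPC44x_* definitions used in the assembly):

	typedef unsigned int u32;

	#define PAGE_OFFSET_SK	0xc0000000u	/* kernel linear mapping base */
	#define PGD_SHIFT_SK	22		/* one level-1 slot per region (sketch) */

	/* Returns the PTE location for 'ea', or 0 to bail out to
	 * DataStorage/InstructionStorage like the assembly's "beq 2f". */
	static u32 *tlbmiss_walk_sketch(u32 *thread_pgd, u32 *swapper_pgd, u32 ea)
	{
		u32 *pgd = (ea >= PAGE_OFFSET_SK) ? swapper_pgd : thread_pgd;
		u32 pmd = pgd[ea >> PGD_SHIFT_SK];	/* lwzx r11,r12,r11 */

		if (!(pmd & ~0x7ffu))			/* rlwinm. ...; beq 2f */
			return 0;			/* no table: heavyweight path */
		return (u32 *)(pmd & ~0x7ffu) + ((ea >> 12) & 0x3ffu);
	}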
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index bed9a29ee38..844a44b6447 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -37,7 +37,7 @@
 #include <asm/firmware.h>
 #include <asm/page_64.h>
 #include <asm/irqflags.h>
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
 
 /* The physical memory is layed out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -169,7 +169,7 @@ exception_marker:
 /* KVM trampoline code needs to be close to the interrupt handlers */
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#include "../kvm/book3s_64_rmhandlers.S"
+#include "../kvm/book3s_rmhandlers.S"
 #endif
 
_GLOBAL(generic_secondary_thread_init)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3ef743fa5d7..1f1a04b5c2a 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -71,9 +71,6 @@ _ENTRY(_start);
 * in the first level table, but that would require many changes to the
 * Linux page directory/table functions that I don't want to do right now.
 *
- * I used to use SPRG2 for a temporary register in the TLB handler, but it
- * has since been put to other uses. I now use a hack to save a register
- * and the CCR at memory location 0.....Someday I'll fix this.....
 *	-- Dan
 */
 	.globl	__start
@@ -302,8 +299,13 @@ InstructionTLBMiss:
 	DO_8xx_CPU6(0x3f80, r3)
 	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
 	mfcr	r10
+#ifdef CONFIG_8xx_CPU6
 	stw	r10, 0(r0)
 	stw	r11, 4(r0)
+#else
+	mtspr	SPRN_DAR, r10
+	mtspr	SPRN_SPRG2, r11
+#endif
 	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
 #ifdef CONFIG_8xx_CPU15
 	addi	r11, r10, 0x1000
@@ -318,12 +320,16 @@ InstructionTLBMiss:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
+#ifdef CONFIG_MODULES
+	/* Only modules will cause ITLB Misses as we always
+	 * pin the first 8MB of kernel memory */
 	andi.	r11, r10, 0x0800	/* Address >= 0x80000000 */
 	beq	3f
 	lis	r11, swapper_pg_dir@h
 	ori	r11, r11, swapper_pg_dir@l
 	rlwimi	r10, r11, 0, 2, 19
3:
+#endif
 	lwz	r11, 0(r10)	/* Get the level 1 entry */
 	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
 	beq	2f		/* If zero, don't try to find a pte */
@@ -339,31 +345,35 @@ InstructionTLBMiss:
 	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
 	lwz	r10, 0(r11)	/* Get the pte */
 
+#ifdef CONFIG_SWAP
 	andi.	r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
 	cmpwi	cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
 	bne-	cr0, 2f
-
-	/* Clear PP lsb, 0x400 */
-	rlwinm	r10, r10, 0, 22, 20
-
+#endif
 	/* The Linux PTE won't go exactly into the MMU TLB.
-	 * Software indicator bits 22 and 28 must be clear.
+	 * Software indicator bits 21 and 28 must be clear.
 	 * Software indicator bits 24, 25, 26, and 27 must be
 	 * set.  All other Linux PTE bits control the behavior
 	 * of the MMU.
 	 */
 	li	r11, 0x00f0
-	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
+	rlwimi	r10, r11, 0, 0x07f8	/* Set 24-27, clear 21-23,28 */
 	DO_8xx_CPU6(0x2d80, r3)
 	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */
 
-	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	/* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+	mfspr	r10, SPRN_DAR
+	mtcr	r10
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r11, SPRN_SPRG2
+#else
 	lwz	r11, 0(r0)
 	mtcr	r11
 	lwz	r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
 	lwz	r3, 8(r0)
 #endif
+	mfspr	r10, SPRN_M_TW
 	rfi
2:
 	mfspr	r11, SPRN_SRR1
@@ -373,13 +383,20 @@ InstructionTLBMiss:
 	rlwinm	r11, r11, 0, 0xffff
 	mtspr	SPRN_SRR1, r11
 
-	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	/* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+	mfspr	r10, SPRN_DAR
+	mtcr	r10
+	li	r11, 0x00f0
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r11, SPRN_SPRG2
+#else
 	lwz	r11, 0(r0)
 	mtcr	r11
 	lwz	r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
 	lwz	r3, 8(r0)
 #endif
+	mfspr	r10, SPRN_M_TW
 	b	InstructionAccess
 
 	. = 0x1200
@@ -390,8 +407,13 @@ DataStoreTLBMiss:
 	DO_8xx_CPU6(0x3f80, r3)
 	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
 	mfcr	r10
+#ifdef CONFIG_8xx_CPU6
 	stw	r10, 0(r0)
 	stw	r11, 4(r0)
+#else
+	mtspr	SPRN_DAR, r10
+	mtspr	SPRN_SPRG2, r11
+#endif
 	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */
 
 	/* If we are faulting a kernel address, we have to use the
@@ -438,15 +460,14 @@ DataStoreTLBMiss:
 	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
 	 * r10 = (r10 & ~PRESENT) | r11;
 	 */
+#ifdef CONFIG_SWAP
 	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
 	and	r11, r11, r10
 	rlwimi	r10, r11, 0, _PAGE_PRESENT
-
+#endif
 	/* Honour kernel RO, User NA */
 	/* 0x200 == Extended encoding, bit 22 */
-	/* r11 =  (r10 & _PAGE_USER) >> 2 */
-	rlwinm	r11, r10, 32-2, 0x200
-	or	r10, r11, r10
+	rlwimi	r10, r10, 32-2, 0x200	/* Copy USER to bit 22, 0x200 */
 	/* r11 =  (r10 & _PAGE_RW) >> 1 */
 	rlwinm	r11, r10, 32-1, 0x200
 	or	r10, r11, r10
@@ -460,18 +481,24 @@ DataStoreTLBMiss:
 	 * of the MMU.
 	 */
2:	li	r11, 0x00f0
-	mtspr	SPRN_DAR,r11	/* Tag DAR */
 	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
 	DO_8xx_CPU6(0x3d80, r3)
 	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */
 
-	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	/* Restore registers */
+#ifndef CONFIG_8xx_CPU6
+	mfspr	r10, SPRN_DAR
+	mtcr	r10
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
+	mfspr	r11, SPRN_SPRG2
+#else
+	mtspr	SPRN_DAR, r11	/* Tag DAR */
 	lwz	r11, 0(r0)
 	mtcr	r11
 	lwz	r11, 4(r0)
-#ifdef CONFIG_8xx_CPU6
 	lwz	r3, 8(r0)
 #endif
+	mfspr	r10, SPRN_M_TW
 	rfi
 
 /* This is an instruction TLB error on the MPC8xx.  This could be due
@@ -683,9 +710,6 @@ start_here:
 	tophys(r4,r2)
 	addi	r4,r4,THREAD	/* init task's THREAD */
 	mtspr	SPRN_SPRG_THREAD,r4
-	li	r3,0
-	/* XXX What is that for ? SPRG2 appears otherwise unused on 8xx */
-	mtspr	SPRN_SPRG2,r3	/* 0 => r1 has kernel sp */
 
 	/* stack */
 	lis	r1,init_thread_union@ha
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 50504ae39cb..a0bf158c8b4 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -1,6 +1,7 @@
 #ifndef __HEAD_BOOKE_H__
 #define __HEAD_BOOKE_H__
 
+#include <asm/ptrace.h>	/* for STACK_FRAME_REGS_MARKER */
 /*
  * Macros used for common Book-e exception handling
  */
@@ -48,6 +49,9 @@
 	stw	r10,0(r11);						     \
 	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
 	stw	r0,GPR0(r11);						     \
+	lis	r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
+	addi	r10, r10, STACK_FRAME_REGS_MARKER@l;			     \
+	stw	r10, 8(r11);						     \
 	SAVE_4GPRS(3, r11);						     \
 	SAVE_2GPRS(7, r11)
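The head_booke.h addition stamps STACK_FRAME_REGS_MARKER into word 2 (byte offset 8) of every Book-E exception frame, which is what lets the stack tracer flag frames that contain a saved pt_regs in backtraces. A sketch of the consumer side (marker value assumed to be the 32-bit ASCII "regs" constant from asm/ptrace.h):

	#define STACK_FRAME_REGS_MARKER	0x72656773ul	/* "regs" */

	/* Does this 32-bit stack frame hold a saved register set? The
	 * exception prolog stored the marker at offset 8, i.e. frame[2]. */
	static int frame_has_regs_sketch(const unsigned long *frame)
	{
		return frame[2] == STACK_FRAME_REGS_MARKER;
	}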
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 72552654799..edd4a57fd29 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -639,6 +639,13 @@ interrupt_base:
 	rlwinm	r12,r12,0,16,1
 	mtspr	SPRN_MAS1,r12
 
+	/* Make up the required permissions for kernel code */
+#ifdef CONFIG_PTE_64BIT
+	li	r13,_PAGE_PRESENT | _PAGE_BAP_SX
+	oris	r13,r13,_PAGE_ACCESSED@h
+#else
+	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+#endif
 	b	4f
 
 	/* Get the PGD for the current thread */
@@ -646,15 +653,15 @@ interrupt_base:
 	mfspr	r11,SPRN_SPRG_THREAD
 	lwz	r11,PGDIR(r11)
 
-4:
-	/* Make up the required permissions */
+	/* Make up the required permissions for user code */
 #ifdef CONFIG_PTE_64BIT
-	li	r13,_PAGE_PRESENT | _PAGE_EXEC
+	li	r13,_PAGE_PRESENT | _PAGE_BAP_UX
 	oris	r13,r13,_PAGE_ACCESSED@h
 #else
 	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 #endif
 
+4:
 	FIND_PTE
 	andc.	r13,r13,r11		/* Check permission */
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 71cf280da18..21266abfbda 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -140,14 +140,14 @@ static struct dma_map_ops ibmebus_dma_ops = {
 
 static int ibmebus_match_path(struct device *dev, void *data)
 {
-	struct device_node *dn = to_of_device(dev)->node;
+	struct device_node *dn = to_of_device(dev)->dev.of_node;
 	return (dn->full_name &&
 		(strcasecmp((char *)data, dn->full_name) == 0));
 }
 
 static int ibmebus_match_node(struct device *dev, void *data)
 {
-	return to_of_device(dev)->node == data;
+	return to_of_device(dev)->dev.of_node == data;
 }
 
 static int ibmebus_create_device(struct device_node *dn)
@@ -202,7 +202,7 @@ static int ibmebus_create_devices(const struct of_device_id *matches)
 int ibmebus_register_driver(struct of_platform_driver *drv)
 {
 	/* If the driver uses devices that ibmebus doesn't know, add them */
-	ibmebus_create_devices(drv->match_table);
+	ibmebus_create_devices(drv->driver.of_match_table);
 
 	return of_register_driver(drv, &ibmebus_bus_type);
 }
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ec94f906ea4..d5839179ec7 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -43,20 +43,9 @@
 #define DBG(...)
 
 static int novmerge;
-static int protect4gb = 1;
 
 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
 
-static int __init setup_protect4gb(char *str)
-{
-	if (strcmp(str, "on") == 0)
-		protect4gb = 1;
-	else if (strcmp(str, "off") == 0)
-		protect4gb = 0;
-
-	return 1;
-}
-
 static int __init setup_iommu(char *str)
 {
 	if (!strcmp(str, "novmerge"))
@@ -66,7 +55,6 @@ static int __init setup_iommu(char *str)
 	return 1;
 }
 
-__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);
 
 static unsigned long iommu_range_alloc(struct device *dev,
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 066bd31551d..30817d9b20c 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -284,30 +284,33 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(cpumask_t map)
+void fixup_irqs(const struct cpumask *map)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	static int warned;
+	cpumask_var_t mask;
 
-	for_each_irq(irq) {
-		cpumask_t mask;
+	alloc_cpumask_var(&mask, GFP_KERNEL);
 
+	for_each_irq(irq) {
 		desc = irq_to_desc(irq);
 		if (desc && desc->status & IRQ_PER_CPU)
 			continue;
 
-		cpumask_and(&mask, desc->affinity, &map);
-		if (any_online_cpu(mask) == NR_CPUS) {
+		cpumask_and(mask, desc->affinity, map);
+		if (cpumask_any(mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
-			mask = map;
+			cpumask_copy(mask, map);
 		}
 		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, &mask);
+			desc->chip->set_affinity(irq, mask);
 		else if (desc->action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
 
+	free_cpumask_var(mask);
+
 	local_irq_enable();
 	mdelay(1);
 	local_irq_disable();
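fixup_irqs() is converted from a by-value cpumask_t (which lives on the stack and grows with NR_CPUS) to the cpumask_var_t API: allocate once before the loop, operate through pointers, free afterwards. The empty-intersection test becomes cpumask_any(mask) >= nr_cpu_ids in place of any_online_cpu(mask) == NR_CPUS. A standalone model of that emptiness test (bitmap helpers reduced to plain C):

	/* cpumask_any() semantics: index of some set bit, or >= nbits when
	 * the mask is empty - the caller compares against nr_cpu_ids. */
	static unsigned int any_set_bit_sketch(const unsigned long *mask,
					       unsigned int nbits)
	{
		unsigned int bit, bpw = 8 * sizeof(unsigned long);

		for (bit = 0; bit < nbits; bit++)
			if (mask[bit / bpw] & (1UL << (bit % bpw)))
				return bit;
		return nbits;		/* empty: "no CPU left" */
	}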
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 41bada0298c..82a7b228c81 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -20,6 +20,7 @@
 #include <linux/smp.h>
 #include <linux/signal.h>
 #include <linux/ptrace.h>
+#include <linux/kdebug.h>
 #include <asm/current.h>
 #include <asm/processor.h>
 #include <asm/machdep.h>
@@ -115,7 +116,8 @@ void kgdb_roundup_cpus(unsigned long flags)
 /* KGDB functions to use existing PowerPC64 hooks. */
 static int kgdb_debugger(struct pt_regs *regs)
 {
-	return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
+	return !kgdb_handle_exception(1, computeSignal(TRAP(regs)),
+				      DIE_OOPS, regs);
 }
 
 static int kgdb_handle_breakpoint(struct pt_regs *regs)
@@ -123,7 +125,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
 	if (user_mode(regs))
 		return 0;
 
-	if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+	if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
 		return 0;
 
 	if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
@@ -309,6 +311,11 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 		       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
 }
 
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+	regs->nip = pc;
+}
+
 /*
  * This function does PowerPC specific procesing for interfacing to gdb.
  */
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index b36f074524a..c533525ca56 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -114,6 +114,9 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	regs->msr &= ~MSR_CE;
 	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+#ifdef CONFIG_PPC_47x
+	isync();
+#endif
 #endif
 
 	/*
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index c2c70e1b32c..50362b6ef6e 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -38,7 +38,7 @@
 #include <asm/vio.h>
 #include <asm/mmu.h>
 
-#define MODULE_VERS "1.8"
+#define MODULE_VERS "1.9"
 #define MODULE_NAME "lparcfg"
 
 /* #define LPARCFG_DEBUG */
@@ -487,6 +487,14 @@ static void splpar_dispatch_data(struct seq_file *m)
 	seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
 }
 
+static void parse_em_data(struct seq_file *m)
+{
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+	if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+		seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
+}
+
 static int pseries_lparcfg_data(struct seq_file *m, void *v)
 {
 	int partition_potential_processors;
@@ -541,6 +549,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
 
 	seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 
+	parse_em_data(m);
+
 	return 0;
 }
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 040bd1de8d9..26f9900f773 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -155,33 +155,38 @@ void kexec_copy_flush(struct kimage *image)
 
 #ifdef CONFIG_SMP
 
-/* FIXME: we should schedule this function to be called on all cpus based
- * on calling the interrupts, but we would like to call it off irq level
- * so that the interrupt controller is clean.
- */
+static int kexec_all_irq_disabled = 0;
+
 static void kexec_smp_down(void *arg)
 {
+	local_irq_disable();
+	mb(); /* make sure our irqs are disabled before we say they are */
+	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+	while(kexec_all_irq_disabled == 0)
+		cpu_relax();
+	mb(); /* make sure all irqs are disabled before this */
+	/*
+	 * Now every CPU has IRQs off, we can clear out any pending
+	 * IPIs and be sure that no more will come in after this.
+	 */
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 1);
 
-	local_irq_disable();
 	kexec_smp_wait();
 	/* NOTREACHED */
 }
 
-static void kexec_prepare_cpus(void)
+static void kexec_prepare_cpus_wait(int wait_state)
 {
 	int my_cpu, i, notified=-1;
 
-	smp_call_function(kexec_smp_down, NULL, /* wait */0);
 	my_cpu = get_cpu();
-
-	/* check the others cpus are now down (via paca hw cpu id == -1) */
+	/* Make sure each CPU has atleast made it to the state we need */
 	for (i=0; i < NR_CPUS; i++) {
 		if (i == my_cpu)
 			continue;
 
-		while (paca[i].hw_cpu_id != -1) {
+		while (paca[i].kexec_state < wait_state) {
 			barrier();
 			if (!cpu_possible(i)) {
 				printk("kexec: cpu %d hw_cpu_id %d is not"
@@ -201,20 +206,35 @@ static void kexec_prepare_cpus(void)
 			}
 			if (i != notified) {
 				printk( "kexec: waiting for cpu %d (physical"
-						" %d) to go down\n",
-						i, paca[i].hw_cpu_id);
+						" %d) to enter %i state\n",
+					i, paca[i].hw_cpu_id, wait_state);
 				notified = i;
 			}
 		}
 	}
+	mb();
+}
+
+static void kexec_prepare_cpus(void)
+{
+
+	smp_call_function(kexec_smp_down, NULL, /* wait */0);
+	local_irq_disable();
+	mb(); /* make sure IRQs are disabled before we say they are */
+	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
+
+	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
+	/* we are sure every CPU has IRQs off at this point */
+	kexec_all_irq_disabled = 1;
 
 	/* after we tell the others to go down */
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 0);
 
-	put_cpu();
+	/* Before removing MMU mapings make sure all CPUs have entered real mode */
+	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
 
-	local_irq_disable();
+	put_cpu();
 }
 
 #else /* ! SMP */
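Taken together, the paca, kexec and crash hunks replace the old "hw_cpu_id = -1" handshake with an explicit per-CPU state machine: every CPU starts at KEXEC_STATE_NONE, advertises KEXEC_STATE_IRQS_OFF from kexec_smp_down(), and finally KEXEC_STATE_REAL_MODE from kexec_wait, while the master spins in kexec_prepare_cpus_wait()/crash_kexec_wait_realmode() until everyone has reached the requested stage. A standalone model of the handshake (volatile stands in for the paca field plus the kernel's explicit barriers):

	enum kexec_state_sketch {
		KEXEC_STATE_NONE,
		KEXEC_STATE_IRQS_OFF,
		KEXEC_STATE_REAL_MODE,
	};

	#define NCPUS_SK 4
	static volatile enum kexec_state_sketch cpu_state_sk[NCPUS_SK];

	/* Master side: wait until every other CPU has reached 'wanted';
	 * the real code adds cpu_possible()/timeout escapes to this loop. */
	static void wait_for_state_sketch(int my_cpu,
					  enum kexec_state_sketch wanted)
	{
		int i;

		for (i = 0; i < NCPUS_SK; i++)
			while (i != my_cpu && cpu_state_sk[i] < wanted)
				;	/* spin: cpu_relax()/barrier() in the kernel */
	}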
We turn off the MMU so we can just use @@ -490,6 +494,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) mtmsr r10 /* restore DR */ isync blr +#endif /* CONFIG_BOOKE */ /* * Clear pages using the dcbz instruction, which doesn't cause any diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index a5cf9c1356a..a2b18dffa03 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -24,6 +24,7 @@ #include <asm/asm-offsets.h> #include <asm/cputable.h> #include <asm/thread_info.h> +#include <asm/kexec.h> .text @@ -471,6 +472,10 @@ _GLOBAL(kexec_wait) 1: mflr r5 addi r5,r5,kexec_flag-1b + li r4,KEXEC_STATE_REAL_MODE + stb r4,PACAKEXECSTATE(r13) + SYNC + 99: HMT_LOW #ifdef CONFIG_KEXEC /* use no memory without kexec */ lwz r4,0(r5) @@ -494,14 +499,11 @@ kexec_flag: * note: this is a terminal routine, it does not save lr * * get phys id from paca - * set paca id to -1 to say we got here * switch to real mode * join other cpus in kexec_wait(phys_id) */ _GLOBAL(kexec_smp_wait) lhz r3,PACAHWCPUID(r13) - li r4,-1 - sth r4,PACAHWCPUID(r13) /* let others know we left */ bl real_mode b .kexec_wait diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c index a359cb08e90..df78e0236a0 100644 --- a/arch/powerpc/kernel/of_device.c +++ b/arch/powerpc/kernel/of_device.c @@ -13,7 +13,7 @@ static void of_device_make_bus_id(struct of_device *dev) { static atomic_t bus_no_reg_magic; - struct device_node *node = dev->node; + struct device_node *node = dev->dev.of_node; const u32 *reg; u64 addr; int magic; @@ -69,11 +69,10 @@ struct of_device *of_device_alloc(struct device_node *np, if (!dev) return NULL; - dev->node = of_node_get(np); - dev->dev.dma_mask = &dev->dma_mask; + dev->dev.of_node = of_node_get(np); + dev->dev.dma_mask = &dev->archdata.dma_mask; dev->dev.parent = parent; dev->dev.release = of_release_dev; - dev->dev.archdata.of_node = np; if (bus_id) dev_set_name(&dev->dev, "%s", bus_id); @@ -95,17 +94,17 @@ int of_device_uevent(struct device *dev, struct kobj_uevent_env *env) ofdev = to_of_device(dev); - if (add_uevent_var(env, "OF_NAME=%s", ofdev->node->name)) + if (add_uevent_var(env, "OF_NAME=%s", ofdev->dev.of_node->name)) return -ENOMEM; - if (add_uevent_var(env, "OF_TYPE=%s", ofdev->node->type)) + if (add_uevent_var(env, "OF_TYPE=%s", ofdev->dev.of_node->type)) return -ENOMEM; /* Since the compatible field can contain pretty much anything * it's not really legal to split it out with commas. We split it * up using a number of environment variables instead. 
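
Every of_device/of_platform hunk in this patch makes the same mechanical move: the device-tree node pointer migrates from struct of_device (and from dev.archdata) into the generic struct device as dev.of_node. A hypothetical one-liner showing the before and after:

	/* Hypothetical helper: locating the DT node after this patch. */
	static struct device_node *demo_get_node(struct of_device *ofdev)
	{
		/* before: return ofdev->node;  (or dev->archdata.of_node) */
		return ofdev->dev.of_node;
	}
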
*/ - compat = of_get_property(ofdev->node, "compatible", &cplen); + compat = of_get_property(ofdev->dev.of_node, "compatible", &cplen); while (compat && *compat && cplen > 0) { if (add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat)) return -ENOMEM; diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index 6c1dfc3ff8b..487a98851ba 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -74,7 +74,7 @@ struct of_device* of_platform_device_create(struct device_node *np, if (!dev) return NULL; - dev->dma_mask = 0xffffffffUL; + dev->archdata.dma_mask = 0xffffffffUL; dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); dev->dev.bus = &of_platform_bus_type; @@ -195,7 +195,7 @@ EXPORT_SYMBOL(of_platform_bus_probe); static int of_dev_node_match(struct device *dev, void *data) { - return to_of_device(dev)->node == data; + return to_of_device(dev)->dev.of_node == data; } struct of_device *of_find_device_by_node(struct device_node *np) @@ -213,7 +213,7 @@ EXPORT_SYMBOL(of_find_device_by_node); static int of_dev_phandle_match(struct device *dev, void *data) { phandle *ph = data; - return to_of_device(dev)->node->phandle == *ph; + return to_of_device(dev)->dev.of_node->phandle == *ph; } struct of_device *of_find_device_by_phandle(phandle ph) @@ -246,10 +246,10 @@ static int __devinit of_pci_phb_probe(struct of_device *dev, if (ppc_md.pci_setup_phb == NULL) return -ENODEV; - printk(KERN_INFO "Setting up PCI bus %s\n", dev->node->full_name); + pr_info("Setting up PCI bus %s\n", dev->dev.of_node->full_name); /* Alloc and setup PHB data structure */ - phb = pcibios_alloc_controller(dev->node); + phb = pcibios_alloc_controller(dev->dev.of_node); if (!phb) return -ENODEV; @@ -263,19 +263,19 @@ static int __devinit of_pci_phb_probe(struct of_device *dev, } /* Process "ranges" property */ - pci_process_bridge_OF_ranges(phb, dev->node, 0); + pci_process_bridge_OF_ranges(phb, dev->dev.of_node, 0); /* Init pci_dn data structures */ pci_devs_phb_init_dynamic(phb); /* Register devices with EEH */ #ifdef CONFIG_EEH - if (dev->node->child) - eeh_add_device_tree_early(dev->node); + if (dev->dev.of_node->child) + eeh_add_device_tree_early(dev->dev.of_node); #endif /* CONFIG_EEH */ /* Scan the bus */ - pcibios_scan_phb(phb, dev->node); + pcibios_scan_phb(phb, dev->dev.of_node); if (phb->bus == NULL) return -ENXIO; @@ -306,10 +306,11 @@ static struct of_device_id of_pci_phb_ids[] = { }; static struct of_platform_driver of_pci_phb_driver = { - .match_table = of_pci_phb_ids, .probe = of_pci_phb_probe, .driver = { .name = "of-pci", + .owner = THIS_MODULE, + .of_match_table = of_pci_phb_ids, }, }; diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 0c40c6f476f..f88acf0218d 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -18,6 +18,7 @@ #include <asm/pgtable.h> #include <asm/iseries/lpar_map.h> #include <asm/iseries/hv_types.h> +#include <asm/kexec.h> /* This symbol is provided by the linker - let it fill in the paca * field correctly */ @@ -97,6 +98,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) new_paca->kernelbase = (unsigned long) _stext; new_paca->kernel_msr = MSR_KERNEL; new_paca->hw_cpu_id = 0xffff; + new_paca->kexec_state = KEXEC_STATE_NONE; new_paca->__current = &init_task; #ifdef CONFIG_PPC_STD_MMU_64 new_paca->slb_shadow_ptr = &slb_shadow[cpu]; diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 0c0567e5840..6646005dffb 100644 --- 
a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -1097,8 +1097,8 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) if (dev->is_added) continue; - /* Setup OF node pointer in archdata */ - sd->of_node = pci_device_to_OF_node(dev); + /* Setup OF node pointer in the device */ + dev->dev.of_node = pci_device_to_OF_node(dev); /* Fixup NUMA node as it may not be setup yet by the generic * code and is needed by the DMA init diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index cd11d5ca80d..6ddb795f83e 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -310,6 +310,8 @@ static void __devinit __of_scan_bus(struct device_node *node, /* Scan direct children */ for_each_child_of_node(node, child) { pr_debug(" * %s\n", child->full_name); + if (!of_device_is_available(child)) + continue; reg = of_get_property(child, "reg", &reglen); if (reg == NULL || reglen < 20) continue; diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index ab3e392ac63..bc9f39d2598 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -101,6 +101,10 @@ EXPORT_SYMBOL(pci_dram_offset); EXPORT_SYMBOL(start_thread); EXPORT_SYMBOL(kernel_thread); +#ifndef CONFIG_BOOKE +EXPORT_SYMBOL_GPL(cvt_df); +EXPORT_SYMBOL_GPL(cvt_fd); +#endif EXPORT_SYMBOL(giveup_fpu); #ifdef CONFIG_ALTIVEC EXPORT_SYMBOL(giveup_altivec); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index e4d71ced97e..9d255b4f0a0 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -371,6 +371,9 @@ int set_dabr(unsigned long dabr) /* XXX should we have a CPU_FTR_HAS_DABR ? */ #ifdef CONFIG_PPC_ADV_DEBUG_REGS mtspr(SPRN_DAC1, dabr); +#ifdef CONFIG_PPC_47x + isync(); +#endif #elif defined(CONFIG_PPC_BOOK3S) mtspr(SPRN_DABR, dabr); #endif diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index ed2cfe17d25..7a0c0199ea2 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -39,6 +39,109 @@ #include <asm/system.h> /* + * The parameter save area on the stack is used to store arguments being passed + * to the callee function and is located at a fixed offset from the stack pointer.
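
As a quick illustration of what that offset is for (a hypothetical helper, not part of the patch): the address of the nth stack-passed argument is just the stack pointer plus the PARAMETER_SAVE_AREA_OFFSET constant defined right below — valid only for arguments the caller actually spilled to its save area:

	static unsigned long *demo_param_slot(struct pt_regs *regs, int n)
	{
		/* r1 is the stack pointer on powerpc */
		return (unsigned long *)(regs->gpr[1] +
					 PARAMETER_SAVE_AREA_OFFSET) + n;
	}
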
+ */ +#ifdef CONFIG_PPC32 +#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */ +#else /* CONFIG_PPC32 */ +#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */ +#endif + +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define STR(s) #s /* convert to string */ +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} +#define GPR_OFFSET_NAME(num) \ + {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { + GPR_OFFSET_NAME(0), + GPR_OFFSET_NAME(1), + GPR_OFFSET_NAME(2), + GPR_OFFSET_NAME(3), + GPR_OFFSET_NAME(4), + GPR_OFFSET_NAME(5), + GPR_OFFSET_NAME(6), + GPR_OFFSET_NAME(7), + GPR_OFFSET_NAME(8), + GPR_OFFSET_NAME(9), + GPR_OFFSET_NAME(10), + GPR_OFFSET_NAME(11), + GPR_OFFSET_NAME(12), + GPR_OFFSET_NAME(13), + GPR_OFFSET_NAME(14), + GPR_OFFSET_NAME(15), + GPR_OFFSET_NAME(16), + GPR_OFFSET_NAME(17), + GPR_OFFSET_NAME(18), + GPR_OFFSET_NAME(19), + GPR_OFFSET_NAME(20), + GPR_OFFSET_NAME(21), + GPR_OFFSET_NAME(22), + GPR_OFFSET_NAME(23), + GPR_OFFSET_NAME(24), + GPR_OFFSET_NAME(25), + GPR_OFFSET_NAME(26), + GPR_OFFSET_NAME(27), + GPR_OFFSET_NAME(28), + GPR_OFFSET_NAME(29), + GPR_OFFSET_NAME(30), + GPR_OFFSET_NAME(31), + REG_OFFSET_NAME(nip), + REG_OFFSET_NAME(msr), + REG_OFFSET_NAME(ctr), + REG_OFFSET_NAME(link), + REG_OFFSET_NAME(xer), + REG_OFFSET_NAME(ccr), +#ifdef CONFIG_PPC64 + REG_OFFSET_NAME(softe), +#else + REG_OFFSET_NAME(mq), +#endif + REG_OFFSET_NAME(trap), + REG_OFFSET_NAME(dar), + REG_OFFSET_NAME(dsisr), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_query_register_name() - query register name from its offset + * @offset: the offset of a register in struct pt_regs. + * + * regs_query_register_name() returns the name of a register from its + * offset in struct pt_regs. If the @offset is invalid, this returns NULL; + */ +const char *regs_query_register_name(unsigned int offset) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (roff->offset == offset) + return roff->name; + return NULL; +} + +/* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 74367841615..0e1ec6f746f 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -691,10 +691,14 @@ void rtas_os_term(char *str) { int status; - if (panic_timeout) - return; - - if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term")) + /* + * Firmware with the ibm,extended-os-term property is guaranteed + * to always return from an ibm,os-term call. Earlier versions without + * this property may terminate the partition which we want to avoid + * since it interferes with panic_timeout. 
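
The two query helpers are inverses over the table above; a hypothetical caller reading a GPR out of a pt_regs by name would combine the offset lookup with pointer arithmetic:

	static unsigned long demo_read_reg(struct pt_regs *regs, const char *name)
	{
		int off = regs_query_register_offset(name);	/* e.g. "gpr3" */

		if (off < 0)
			return 0;	/* unknown register name */
		return *(unsigned long *)((char *)regs + off);
	}
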
+ */ + if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") || + RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term")) return; snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str); @@ -705,8 +709,7 @@ void rtas_os_term(char *str) } while (rtas_busy_delay(status)); if (status != 0) - printk(KERN_EMERG "ibm,os-term call failed %d\n", - status); + printk(KERN_EMERG "ibm,os-term call failed %d\n", status); } static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE; diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 4190eae7850..638883e23e3 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -411,9 +411,9 @@ static void rtas_event_scan(struct work_struct *w) get_online_cpus(); - cpu = next_cpu(smp_processor_id(), cpu_online_map); - if (cpu == NR_CPUS) { - cpu = first_cpu(cpu_online_map); + cpu = cpumask_next(smp_processor_id(), cpu_online_mask); + if (cpu >= nr_cpu_ids) { + cpu = cpumask_first(cpu_online_mask); if (first_pass) { first_pass = 0; @@ -466,8 +466,8 @@ static void start_event_scan(void) /* Retreive errors from nvram if any */ retreive_nvram_error_log(); - schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work, - event_scan_delay); + schedule_delayed_work_on(cpumask_first(cpu_online_mask), + &event_scan_work, event_scan_delay); } static int __init rtas_init(void) @@ -490,6 +490,12 @@ static int __init rtas_init(void) return -ENODEV; } + if (!rtas_event_scan_rate) { + /* Broken firmware: take a rate of zero to mean don't scan */ + printk(KERN_DEBUG "rtasd: scan rate is 0, not scanning\n"); + return 0; + } + /* Make room for the sequence number */ rtas_error_log_max = rtas_get_error_log_max(); rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 48f0a008b20..5e4d852f640 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -161,45 +161,44 @@ extern u32 cpu_temp_both(unsigned long cpu); DEFINE_PER_CPU(unsigned int, cpu_pvr); #endif -static int show_cpuinfo(struct seq_file *m, void *v) +static void show_cpuinfo_summary(struct seq_file *m) { - unsigned long cpu_id = (unsigned long)v - 1; - unsigned int pvr; - unsigned short maj; - unsigned short min; - - if (cpu_id == NR_CPUS) { - struct device_node *root; - const char *model = NULL; + struct device_node *root; + const char *model = NULL; #if defined(CONFIG_SMP) && defined(CONFIG_PPC32) - unsigned long bogosum = 0; - int i; - for_each_online_cpu(i) - bogosum += loops_per_jiffy; - seq_printf(m, "total bogomips\t: %lu.%02lu\n", - bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); + unsigned long bogosum = 0; + int i; + for_each_online_cpu(i) + bogosum += loops_per_jiffy; + seq_printf(m, "total bogomips\t: %lu.%02lu\n", + bogosum/(500000/HZ), bogosum/(5000/HZ) % 100); #endif /* CONFIG_SMP && CONFIG_PPC32 */ - seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); - if (ppc_md.name) - seq_printf(m, "platform\t: %s\n", ppc_md.name); - root = of_find_node_by_path("/"); - if (root) - model = of_get_property(root, "model", NULL); - if (model) - seq_printf(m, "model\t\t: %s\n", model); - of_node_put(root); - - if (ppc_md.show_cpuinfo != NULL) - ppc_md.show_cpuinfo(m); + seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq); + if (ppc_md.name) + seq_printf(m, "platform\t: %s\n", ppc_md.name); + root = of_find_node_by_path("/"); + if (root) + model = of_get_property(root, "model", NULL); + if (model) + seq_printf(m, "model\t\t: %s\n", model); + 
of_node_put(root); + + if (ppc_md.show_cpuinfo != NULL) + ppc_md.show_cpuinfo(m); #ifdef CONFIG_PPC32 - /* Display the amount of memory */ - seq_printf(m, "Memory\t\t: %d MB\n", - (unsigned int)(total_memory / (1024 * 1024))); + /* Display the amount of memory */ + seq_printf(m, "Memory\t\t: %d MB\n", + (unsigned int)(total_memory / (1024 * 1024))); #endif +} - return 0; - } +static int show_cpuinfo(struct seq_file *m, void *v) +{ + unsigned long cpu_id = (unsigned long)v - 1; + unsigned int pvr; + unsigned short maj; + unsigned short min; /* We only show online cpus: disable preempt (overzealous, I * knew) to prevent cpu going down. */ @@ -308,19 +307,28 @@ static int show_cpuinfo(struct seq_file *m, void *v) #endif preempt_enable(); + + /* If this is the last cpu, print the summary */ + if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) + show_cpuinfo_summary(m); + return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { - unsigned long i = *pos; - - return i <= NR_CPUS ? (void *)(i + 1) : NULL; + if (*pos == 0) /* just in case, cpu 0 is not the first */ + *pos = cpumask_first(cpu_online_mask); + else + *pos = cpumask_next(*pos - 1, cpu_online_mask); + if ((*pos) < nr_cpu_ids) + return (void *)(unsigned long)(*pos + 1); + return NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { - ++*pos; + (*pos)++; return c_start(m, pos); } @@ -386,14 +394,14 @@ static void __init cpu_init_thread_core_maps(int tpc) /** * setup_cpu_maps - initialize the following cpu maps: - * cpu_possible_map - * cpu_present_map + * cpu_possible_mask + * cpu_present_mask * * Having the possible map set up early allows us to restrict allocations * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. * * We do not initialize the online map here; cpus set their own bits in - * cpu_online_map as they come up. + * cpu_online_mask as they come up. * * This function is valid only for Open Firmware systems. finish_device_tree * must be called before using this. diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 914389158a9..f3fb5a79de5 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -424,9 +424,18 @@ void __init setup_system(void) DBG(" <- setup_system()\n"); } +static u64 slb0_limit(void) +{ + if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { + return 1UL << SID_SHIFT_1T; + } + return 1UL << SID_SHIFT; +} + #ifdef CONFIG_IRQSTACKS static void __init irqstack_early_init(void) { + u64 limit = slb0_limit(); unsigned int i; /* @@ -436,10 +445,10 @@ static void __init irqstack_early_init(void) for_each_possible_cpu(i) { softirq_ctx[i] = (struct thread_info *) __va(lmb_alloc_base(THREAD_SIZE, - THREAD_SIZE, 0x10000000)); + THREAD_SIZE, limit)); hardirq_ctx[i] = (struct thread_info *) __va(lmb_alloc_base(THREAD_SIZE, - THREAD_SIZE, 0x10000000)); + THREAD_SIZE, limit)); } } #else @@ -470,7 +479,7 @@ static void __init exc_lvl_early_init(void) */ static void __init emergency_stack_init(void) { - unsigned long limit; + u64 limit; unsigned int i; /* @@ -482,7 +491,7 @@ static void __init emergency_stack_init(void) * bringup, we need to get at them in real mode. This means they * must also be within the RMO region. 
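
For reference, the new bound is simply the size of the region mapped by the first kernel segment. Assuming the usual powerpc values SID_SHIFT = 28 (256MB segments) and SID_SHIFT_1T = 40 (1TB segments), slb0_limit() reduces to one of two constants, which the hunks here then cap against the RMO:

	/* Equivalent of slb0_limit() above, under the assumption that
	 * SID_SHIFT = 28 and SID_SHIFT_1T = 40. */
	static u64 demo_slb0_limit(void)
	{
		return cpu_has_feature(CPU_FTR_1T_SEGMENT) ? (1UL << 40)
							   : (1UL << 28);
	}
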
*/ - limit = min(0x10000000ULL, lmb.rmo_size); + limit = min(slb0_limit(), lmb.rmo_size); for_each_possible_cpu(i) { unsigned long sp; @@ -573,12 +582,6 @@ void ppc64_boot_msg(unsigned int src, const char *msg) printk("[boot]%04x %s\n", src, msg); } -void cpu_die(void) -{ - if (ppc_md.cpu_die) - ppc_md.cpu_die(); -} - #ifdef CONFIG_SMP #define PCPU_DYN_SIZE () diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index c2ee1449807..5c196d1086d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -59,8 +59,8 @@ struct thread_info *secondary_ti; -DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; -DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; +DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); +DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map); @@ -271,6 +271,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus) smp_store_cpu_info(boot_cpuid); cpu_callin_map[boot_cpuid] = 1; + for_each_possible_cpu(cpu) { + zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), + GFP_KERNEL, cpu_to_node(cpu)); + zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), + GFP_KERNEL, cpu_to_node(cpu)); + } + + cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid)); + cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); + if (smp_ops) if (smp_ops->probe) max_cpus = smp_ops->probe(); @@ -289,10 +299,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) void __devinit smp_prepare_boot_cpu(void) { BUG_ON(smp_processor_id() != boot_cpuid); - - set_cpu_online(boot_cpuid, true); - cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid)); - cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid)); #ifdef CONFIG_PPC64 paca[boot_cpuid].__current = current; #endif @@ -313,7 +319,7 @@ int generic_cpu_disable(void) set_cpu_online(cpu, false); #ifdef CONFIG_PPC64 vdso_data->processorCount--; - fixup_irqs(cpu_online_map); + fixup_irqs(cpu_online_mask); #endif return 0; } @@ -333,7 +339,7 @@ int generic_cpu_enable(unsigned int cpu) cpu_relax(); #ifdef CONFIG_PPC64 - fixup_irqs(cpu_online_map); + fixup_irqs(cpu_online_mask); /* counter the irq disable in fixup_irqs */ local_irq_enable(); #endif @@ -462,7 +468,7 @@ out: return id; } -/* Must be called when no change can occur to cpu_present_map, +/* Must be called when no change can occur to cpu_present_mask, * i.e. during cpu online or offline. */ static struct device_node *cpu_to_l2cache(int cpu) @@ -495,6 +501,14 @@ int __devinit start_secondary(void *unused) current->active_mm = &init_mm; smp_store_cpu_info(cpu); + +#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) + /* Clear any pending timer interrupts */ + mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); + + /* Enable decrementer interrupt */ + mtspr(SPRN_TCR, TCR_DIE); +#endif set_dec(tb_ticks_per_jiffy); preempt_disable(); cpu_callin_map[cpu] = 1; @@ -517,15 +531,15 @@ int __devinit start_secondary(void *unused) for (i = 0; i < threads_per_core; i++) { if (cpu_is_offline(base + i)) continue; - cpu_set(cpu, per_cpu(cpu_sibling_map, base + i)); - cpu_set(base + i, per_cpu(cpu_sibling_map, cpu)); + cpumask_set_cpu(cpu, cpu_sibling_mask(base + i)); + cpumask_set_cpu(base + i, cpu_sibling_mask(cpu)); /* cpu_core_map should be a superset of * cpu_sibling_map even if we don't have cache * information, so update the former here, too. 
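
The smp.c hunks are the standard cpumask_var_t conversion recipe: the per-CPU masks become dynamically allocated (zeroed, node-local), and open-coded cpu_set()/cpu_clear() calls become cpumask_set_cpu()/cpumask_clear_cpu() through an accessor. A condensed sketch of the allocation half, using a hypothetical mask name:

	DEFINE_PER_CPU(cpumask_var_t, demo_mask);	/* hypothetical */

	static void __init demo_init_masks(void)
	{
		int cpu;

		for_each_possible_cpu(cpu)
			zalloc_cpumask_var_node(&per_cpu(demo_mask, cpu),
						GFP_KERNEL, cpu_to_node(cpu));

		/* was: cpu_set(boot_cpuid, per_cpu(demo_mask, boot_cpuid)); */
		cpumask_set_cpu(boot_cpuid, per_cpu(demo_mask, boot_cpuid));
	}
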
*/ - cpu_set(cpu, per_cpu(cpu_core_map, base +i)); - cpu_set(base + i, per_cpu(cpu_core_map, cpu)); + cpumask_set_cpu(cpu, cpu_core_mask(base + i)); + cpumask_set_cpu(base + i, cpu_core_mask(cpu)); } l2_cache = cpu_to_l2cache(cpu); for_each_online_cpu(i) { @@ -533,8 +547,8 @@ int __devinit start_secondary(void *unused) if (!np) continue; if (np == l2_cache) { - cpu_set(cpu, per_cpu(cpu_core_map, i)); - cpu_set(i, per_cpu(cpu_core_map, cpu)); + cpumask_set_cpu(cpu, cpu_core_mask(i)); + cpumask_set_cpu(i, cpu_core_mask(cpu)); } of_node_put(np); } @@ -554,19 +568,22 @@ int setup_profiling_timer(unsigned int multiplier) void __init smp_cpus_done(unsigned int max_cpus) { - cpumask_t old_mask; + cpumask_var_t old_mask; /* We want the setup_cpu() here to be called from CPU 0, but our * init thread may have been "borrowed" by another CPU in the meantime * so we pin us down to CPU 0 for a short while */ - old_mask = current->cpus_allowed; - set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid)); + alloc_cpumask_var(&old_mask, GFP_NOWAIT); + cpumask_copy(old_mask, &current->cpus_allowed); + set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); if (smp_ops && smp_ops->setup_cpu) smp_ops->setup_cpu(boot_cpuid); - set_cpus_allowed(current, old_mask); + set_cpus_allowed_ptr(current, old_mask); + + free_cpumask_var(old_mask); snapshot_timebases(); @@ -591,10 +608,10 @@ int __cpu_disable(void) /* Update sibling maps */ base = cpu_first_thread_in_core(cpu); for (i = 0; i < threads_per_core; i++) { - cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i)); - cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu)); - cpu_clear(cpu, per_cpu(cpu_core_map, base +i)); - cpu_clear(base + i, per_cpu(cpu_core_map, cpu)); + cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); + cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); + cpumask_clear_cpu(cpu, cpu_core_mask(base + i)); + cpumask_clear_cpu(base + i, cpu_core_mask(cpu)); } l2_cache = cpu_to_l2cache(cpu); @@ -603,8 +620,8 @@ int __cpu_disable(void) if (!np) continue; if (np == l2_cache) { - cpu_clear(cpu, per_cpu(cpu_core_map, i)); - cpu_clear(i, per_cpu(cpu_core_map, cpu)); + cpumask_clear_cpu(cpu, cpu_core_mask(i)); + cpumask_clear_cpu(i, cpu_core_mask(cpu)); } of_node_put(np); } @@ -631,4 +648,10 @@ void cpu_hotplug_driver_unlock() { mutex_unlock(&powerpc_cpu_hotplug_driver_mutex); } + +void cpu_die(void) +{ + if (ppc_md.cpu_die) + ppc_md.cpu_die(); +} #endif diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index e235e52dc4f..c0d8c2006bf 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -35,7 +35,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); #ifdef CONFIG_PPC64 /* Time in microseconds we delay before sleeping in the idle loop */ -DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 }; +DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 }; static ssize_t store_smt_snooze_delay(struct sys_device *dev, struct sysdev_attribute *attr, @@ -44,9 +44,9 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev, { struct cpu *cpu = container_of(dev, struct cpu, sysdev); ssize_t ret; - unsigned long snooze; + long snooze; - ret = sscanf(buf, "%lu", &snooze); + ret = sscanf(buf, "%ld", &snooze); if (ret != 1) return -EINVAL; @@ -61,53 +61,23 @@ static ssize_t show_smt_snooze_delay(struct sys_device *dev, { struct cpu *cpu = container_of(dev, struct cpu, sysdev); - return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id)); + return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id)); } static
SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, store_smt_snooze_delay); -/* Only parse OF options if the matching cmdline option was not specified */ -static int smt_snooze_cmdline; - -static int __init smt_setup(void) -{ - struct device_node *options; - const unsigned int *val; - unsigned int cpu; - - if (!cpu_has_feature(CPU_FTR_SMT)) - return -ENODEV; - - options = of_find_node_by_path("/options"); - if (!options) - return -ENODEV; - - val = of_get_property(options, "ibm,smt-snooze-delay", NULL); - if (!smt_snooze_cmdline && val) { - for_each_possible_cpu(cpu) - per_cpu(smt_snooze_delay, cpu) = *val; - } - - of_node_put(options); - return 0; -} -__initcall(smt_setup); - static int __init setup_smt_snooze_delay(char *str) { unsigned int cpu; - int snooze; + long snooze; if (!cpu_has_feature(CPU_FTR_SMT)) return 1; - smt_snooze_cmdline = 1; - - if (get_option(&str, &snooze)) { - for_each_possible_cpu(cpu) - per_cpu(smt_snooze_delay, cpu) = snooze; - } + snooze = simple_strtol(str, NULL, 10); + for_each_possible_cpu(cpu) + per_cpu(smt_snooze_delay, cpu) = snooze; return 1; } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 29d128eb6c4..3031fc712ad 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -380,6 +380,46 @@ int machine_check_440A(struct pt_regs *regs) } return 0; } + +int machine_check_47x(struct pt_regs *regs) +{ + unsigned long reason = get_mc_reason(regs); + u32 mcsr; + + printk(KERN_ERR "Machine check in kernel mode.\n"); + if (reason & ESR_IMCP) { + printk(KERN_ERR + "Instruction Synchronous Machine Check exception\n"); + mtspr(SPRN_ESR, reason & ~ESR_IMCP); + return 0; + } + mcsr = mfspr(SPRN_MCSR); + if (mcsr & MCSR_IB) + printk(KERN_ERR "Instruction Read PLB Error\n"); + if (mcsr & MCSR_DRB) + printk(KERN_ERR "Data Read PLB Error\n"); + if (mcsr & MCSR_DWB) + printk(KERN_ERR "Data Write PLB Error\n"); + if (mcsr & MCSR_TLBP) + printk(KERN_ERR "TLB Parity Error\n"); + if (mcsr & MCSR_ICP) { + flush_instruction_cache(); + printk(KERN_ERR "I-Cache Parity Error\n"); + } + if (mcsr & MCSR_DCSP) + printk(KERN_ERR "D-Cache Search Parity Error\n"); + if (mcsr & PPC47x_MCSR_GPR) + printk(KERN_ERR "GPR Parity Error\n"); + if (mcsr & PPC47x_MCSR_FPR) + printk(KERN_ERR "FPR Parity Error\n"); + if (mcsr & PPC47x_MCSR_IPR) + printk(KERN_ERR "Machine Check exception is imprecise\n"); + + /* Clear MCSR */ + mtspr(SPRN_MCSR, mcsr); + + return 0; +} #elif defined(CONFIG_E500) int machine_check_e500(struct pt_regs *regs) { @@ -815,12 +855,15 @@ void __kprobes program_check_exception(struct pt_regs *regs) return; } if (reason & REASON_TRAP) { + /* Debugger is first in line to stop recursive faults in + * rcu_lock, notify_die, or atomic_notifier_call_chain */ + if (debugger_bpt(regs)) + return; + /* trap exception */ if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) return; - if (debugger_bpt(regs)) - return; if (!(regs->msr & MSR_PR) && /* not user-mode */ report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 82237176a2a..00b9436f765 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -645,8 +645,10 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) found = 1; break; } - if (!found) + if (!found) { + spin_unlock_irqrestore(&vio_cmo.lock, flags); return; + } /* Increase/decrease in desired device entitlement */ if (desired >= viodev->cmo.desired) { @@ -705,7 +707,7 @@ static int 
vio_cmo_bus_probe(struct vio_dev *viodev) * Check to see that device has a DMA window and configure * entitlement for the device. */ - if (of_get_property(viodev->dev.archdata.of_node, + if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) { /* Check that the driver is CMO enabled and get desired DMA */ if (!viodrv->get_desired_dma) { @@ -958,9 +960,12 @@ viodev_cmo_rd_attr(allocated); static ssize_t name_show(struct device *, struct device_attribute *, char *); static ssize_t devspec_show(struct device *, struct device_attribute *, char *); +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf); static struct device_attribute vio_cmo_dev_attrs[] = { __ATTR_RO(name), __ATTR_RO(devspec), + __ATTR_RO(modalias), __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, viodev_cmo_desired_show, viodev_cmo_desired_set), __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL), @@ -1049,7 +1054,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) if (firmware_has_feature(FW_FEATURE_ISERIES)) return vio_build_iommu_table_iseries(dev); - dma_window = of_get_property(dev->dev.archdata.of_node, + dma_window = of_get_property(dev->dev.of_node, "ibm,my-dma-window", NULL); if (!dma_window) return NULL; @@ -1058,7 +1063,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) if (tbl == NULL) return NULL; - of_parse_dma_window(dev->dev.archdata.of_node, dma_window, + of_parse_dma_window(dev->dev.of_node, dma_window, &tbl->it_index, &offset, &size); /* TCE table size - measured in tce entries */ @@ -1086,7 +1091,7 @@ static const struct vio_device_id *vio_match_device( { while (ids->type[0] != '\0') { if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) && - of_device_is_compatible(dev->dev.archdata.of_node, + of_device_is_compatible(dev->dev.of_node, ids->compat)) return ids; ids++; @@ -1179,7 +1184,7 @@ EXPORT_SYMBOL(vio_unregister_driver); static void __devinit vio_dev_release(struct device *dev) { /* XXX should free TCE table */ - of_node_put(dev->archdata.of_node); + of_node_put(dev->of_node); kfree(to_vio_dev(dev)); } @@ -1230,7 +1235,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) if (unit_address != NULL) viodev->unit_address = *unit_address; } - viodev->dev.archdata.of_node = of_node_get(of_node); + viodev->dev.of_node = of_node_get(of_node); if (firmware_has_feature(FW_FEATURE_CMO)) vio_cmo_set_dma_ops(viodev); @@ -1315,14 +1320,32 @@ static ssize_t name_show(struct device *dev, static ssize_t devspec_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct device_node *of_node = dev->archdata.of_node; + struct device_node *of_node = dev->of_node; return sprintf(buf, "%s\n", of_node ? 
of_node->full_name : "none"); } +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const struct vio_dev *vio_dev = to_vio_dev(dev); + struct device_node *dn; + const char *cp; + + dn = dev->of_node; + if (!dn) + return -ENODEV; + cp = of_get_property(dn, "compatible", NULL); + if (!cp) + return -ENODEV; + + return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); +} + static struct device_attribute vio_dev_attrs[] = { __ATTR_RO(name), __ATTR_RO(devspec), + __ATTR_RO(modalias), __ATTR_NULL }; @@ -1347,7 +1370,7 @@ static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) struct device_node *dn; const char *cp; - dn = dev->archdata.of_node; + dn = dev->of_node; if (!dn) return -ENODEV; cp = of_get_property(dn, "compatible", NULL); @@ -1365,6 +1388,7 @@ static struct bus_type vio_bus_type = { .match = vio_bus_match, .probe = vio_bus_probe, .remove = vio_bus_remove, + .pm = GENERIC_SUBSYS_PM_OPS, }; /** @@ -1378,7 +1402,7 @@ static struct bus_type vio_bus_type = { */ const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length) { - return of_get_property(vdev->dev.archdata.of_node, which, length); + return of_get_property(vdev->dev.of_node, which, length); } EXPORT_SYMBOL(vio_get_attribute);
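
A closing note on the vio changes: the new modalias attribute exports a "vio:T<type>S<compat>" string built from the device type and its first "compatible" entry — presumably the same alias format the vio_hotplug() uevent path emits — so udev can also autoload vio drivers from sysfs. The format itself, as a sketch:

	/* Sketch of the alias emitted by modalias_show() above,
	 * e.g. "vio:TnetworkSIBM,l-lan" (hypothetical device). */
	static int demo_modalias(char *buf, size_t len,
				 const char *type, const char *compat)
	{
		return snprintf(buf, len, "vio:T%sS%s", type, compat);
	}
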