Diffstat (limited to 'arch/powerpc/kernel')
49 files changed, 869 insertions, 588 deletions
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 625942ae558..60b3e377b1e 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void)
 
 /* This function can be used to enable the early boot text when doing
  * OF booting or within bootx init. It must be followed by a btext_unmap()
- * call before the logical address becomes unuseable
+ * call before the logical address becomes unusable
  */
 void __init btext_setup_display(int width, int height, int depth, int pitch,
 				unsigned long address)
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 55cba4a8a95..f8cd9fba4d3 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -18,7 +18,7 @@
 #include <asm/mmu.h>
 
 _GLOBAL(__setup_cpu_603)
-	mflr	r4
+	mflr	r5
 BEGIN_MMU_FTR_SECTION
 	li	r10,0
 	mtspr	SPRN_SPRG_603_LRU,r10	/* init SW LRU tracking */
@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
 	bl	__init_fpu_registers
 END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
 	bl	setup_common_caches
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_604)
-	mflr	r4
+	mflr	r5
 	bl	setup_common_caches
 	bl	setup_604_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750cx)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750cx
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750fx)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750fx
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_7400)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_7400_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_7410)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_7410_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	li	r3,0
 	mtspr	SPRN_L2CR2,r3
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_745x)
-	mflr	r4
+	mflr	r5
 	bl	setup_common_caches
 	bl	setup_745x_specifics
-	mtlr	r4
+	mtlr	r5
 	blr
 
 /* Enable caches for 603's, 604, 750 & 7400 */
@@ -194,10 +194,10 @@ setup_750cx:
 	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
 	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
 	bnelr
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
 	li	r7,CPU_FTR_CAN_NAP
 	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 	blr
 
 /* 750fx specific
@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
 	andis.	r11,r11,L3CR_L3E@h
 	beq	1f
 END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
 	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
 	beq	1f
 	li	r7,CPU_FTR_CAN_NAP
 	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 1:
 	mfspr	r11,SPRN_HID0
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 894e64fa481..913611105c1 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -64,6 +64,12 @@ _GLOBAL(__setup_cpu_e500v2)
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
 	bl	__setup_e500_ivors
+#ifdef CONFIG_FSL_RIO
+	/* Ensure that RFXE is set */
+	mfspr	r3,SPRN_HID1
+	oris	r3,r3,HID1_RFXE@h
+	mtspr	SPRN_HID1,r3
+#endif
 	mtlr	r4
 	blr
 _GLOBAL(__setup_cpu_e500mc)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index be5ab18b03b..b9602ee06de 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -116,7 +116,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power3",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "power3",
 	},
 	{	/* Power3+ */
@@ -132,7 +131,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power3",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "power3",
 	},
 	{	/* Northstar */
@@ -148,7 +146,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* Pulsar */
@@ -164,7 +161,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* I-star */
@@ -180,7 +176,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* S-star */
@@ -196,7 +191,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* Power4 */
@@ -212,7 +206,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power4",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "power4",
 	},
 	{	/* Power4+ */
@@ -228,7 +221,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power4",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "power4",
 	},
 	{	/* PPC970 */
@@ -247,7 +239,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970FX */
@@ -266,7 +257,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@@ -285,7 +275,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970MP",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970MP */
@@ -304,7 +293,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970MP",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970GX */
@@ -322,7 +310,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* Power5 GR */
@@ -343,7 +330,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		 */
 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
-		.machine_check		= machine_check_generic,
 		.platform		= "power5",
 	},
 	{	/* Power5++ */
@@ -360,7 +346,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
-		.machine_check		= machine_check_generic,
 		.platform		= "power5+",
 	},
 	{	/* Power5 GS */
@@ -378,7 +363,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
-		.machine_check		= machine_check_generic,
 		.platform		= "power5+",
 	},
 	{	/* POWER6 in P5+ mode; 2.04-compliant processor */
@@ -390,7 +374,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.mmu_features		= MMU_FTR_HPTE_TABLE,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.machine_check		= machine_check_generic,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "power5+",
@@ -413,7 +396,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.oprofile_mmcra_sipr	= POWER6_MMCRA_SIPR,
 		.oprofile_mmcra_clear	= POWER6_MMCRA_THRM |
 					  POWER6_MMCRA_OTHER,
-		.machine_check		= machine_check_generic,
 		.platform		= "power6x",
 	},
 	{	/* 2.05-compliant processor, i.e. Power6 "architected" mode */
@@ -425,7 +407,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.mmu_features		= MMU_FTR_HPTE_TABLE,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.machine_check		= machine_check_generic,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "power6",
@@ -440,7 +421,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 					  MMU_FTR_TLBIE_206,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.machine_check		= machine_check_generic,
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.platform		= "power7",
@@ -492,7 +472,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/cell-be",
 		.oprofile_type		= PPC_OPROFILE_CELL,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc-cell-be",
 	},
 	{	/* PA Semi PA6T */
@@ -510,7 +489,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_restore		= __restore_cpu_pa6t,
 		.oprofile_cpu_type	= "ppc64/pa6t",
 		.oprofile_type		= PPC_OPROFILE_PA6T,
-		.machine_check		= machine_check_generic,
 		.platform		= "pa6t",
 	},
 	{	/* default match */
@@ -524,7 +502,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
 		.pmc_type		= PPC_PMC_IBM,
-		.machine_check		= machine_check_generic,
 		.platform		= "power4",
 	}
 #endif	/* CONFIG_PPC_BOOK3S_64 */
@@ -1834,11 +1811,11 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check		= machine_check_440A,
 		.platform		= "ppc440",
 	},
-	{ /* 476 core */
-		.pvr_mask		= 0xffff0000,
-		.pvr_value		= 0x11a50000,
+	{ /* 476 DD2 core */
+		.pvr_mask		= 0xffffffff,
+		.pvr_value		= 0x11a52080,
 		.cpu_name		= "476",
-		.cpu_features		= CPU_FTRS_47X,
+		.cpu_features		= CPU_FTRS_47X | CPU_FTR_476_DD2,
 		.cpu_user_features	= COMMON_USER_BOOKE |
 			PPC_FEATURE_HAS_FPU,
 		.mmu_features		= MMU_FTR_TYPE_47x |
@@ -1862,6 +1839,20 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check		= machine_check_47x,
 		.platform		= "ppc470",
 	},
+	{ /* 476 others */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x11a50000,
+		.cpu_name		= "476",
+		.cpu_features		= CPU_FTRS_47X,
+		.cpu_user_features	= COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_FPU,
+		.mmu_features		= MMU_FTR_TYPE_47x |
+			MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 128,
+		.machine_check		= machine_check_47x,
+		.platform		= "ppc470",
+	},
 	{ /* default match */
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
@@ -1982,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x80240000,
 		.cpu_name		= "e5500",
-		.cpu_features		= CPU_FTRS_E500MC,
+		.cpu_features		= CPU_FTRS_E5500,
 		.cpu_user_features	= COMMON_USER_BOOKE,
 		.mmu_features		= MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
 			MMU_FTR_USE_TLBILX,
@@ -2099,8 +2090,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
 	 * pointer on ppc64 and booke as we are running at 0 in real mode
 	 * on ppc64 and reloc_offset is always 0 on booke.
 	 */
-	if (s->cpu_setup) {
-		s->cpu_setup(offset, s);
+	if (t->cpu_setup) {
+		t->cpu_setup(offset, t);
 	}
 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */
 }
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 832c8c4db25..5b5e1f002a8 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -48,7 +48,7 @@ int crashing_cpu = -1;
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 cpumask_t cpus_in_sr = CPU_MASK_NONE;
 
-#define CRASH_HANDLER_MAX 2
+#define CRASH_HANDLER_MAX 3
 /* NULL terminated list of shutdown handles */
 static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
 static DEFINE_SPINLOCK(crash_handlers_lock);
@@ -125,7 +125,7 @@ static void crash_kexec_prepare_cpus(int cpu)
 	smp_wmb();
 
 	/*
-	 * FIXME: Until we will have the way to stop other CPUSs reliabally,
+	 * FIXME: Until we will have the way to stop other CPUs reliably,
 	 * the crash CPU will send an IPI and wait for other CPUs to
 	 * respond.
	 * Delay of at least 10 seconds.
@@ -188,7 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
 		}
 		mb();
 	}
-#endif
+#endif	/* CONFIG_PPC_STD_MMU_64 */
 
 /*
  * This function will be called by secondary cpus or by kexec cpu
@@ -233,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
 	crash_ipi_callback(regs);
 }
 
-#else
+#else	/* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
 static void crash_kexec_prepare_cpus(int cpu)
 {
 	/*
@@ -253,73 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
 {
 	cpus_in_sr = CPU_MASK_NONE;
 }
-#endif
-#ifdef CONFIG_SPU_BASE
-
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-
-struct crash_spu_info {
-	struct spu *spu;
-	u32 saved_spu_runcntl_RW;
-	u32 saved_spu_status_R;
-	u32 saved_spu_npc_RW;
-	u64 saved_mfc_sr1_RW;
-	u64 saved_mfc_dar;
-	u64 saved_mfc_dsisr;
-};
-
-#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
-static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
-
-static void crash_kexec_stop_spus(void)
-{
-	struct spu *spu;
-	int i;
-	u64 tmp;
-
-	for (i = 0; i < CRASH_NUM_SPUS; i++) {
-		if (!crash_spu_info[i].spu)
-			continue;
-
-		spu = crash_spu_info[i].spu;
-
-		crash_spu_info[i].saved_spu_runcntl_RW =
-			in_be32(&spu->problem->spu_runcntl_RW);
-		crash_spu_info[i].saved_spu_status_R =
-			in_be32(&spu->problem->spu_status_R);
-		crash_spu_info[i].saved_spu_npc_RW =
-			in_be32(&spu->problem->spu_npc_RW);
-
-		crash_spu_info[i].saved_mfc_dar    = spu_mfc_dar_get(spu);
-		crash_spu_info[i].saved_mfc_dsisr  = spu_mfc_dsisr_get(spu);
-		tmp = spu_mfc_sr1_get(spu);
-		crash_spu_info[i].saved_mfc_sr1_RW = tmp;
-
-		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
-		spu_mfc_sr1_set(spu, tmp);
-
-		__delay(200);
-	}
-}
-
-void crash_register_spus(struct list_head *list)
-{
-	struct spu *spu;
-
-	list_for_each_entry(spu, list, full_list) {
-		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
-			continue;
-
-		crash_spu_info[spu->number].spu = spu;
-	}
-}
-
-#else
-static inline void crash_kexec_stop_spus(void)
-{
-}
-#endif /* CONFIG_SPU_BASE */
+#endif	/* CONFIG_SMP */
 
 /*
  * Register a function to be called on shutdown.  Only use this if you
@@ -410,9 +346,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crash_save_cpu(regs, crashing_cpu);
 	crash_kexec_prepare_cpus(crashing_cpu);
 	cpu_set(crashing_cpu, cpus_in_crash);
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
 	crash_kexec_wait_realmode(crashing_cpu);
-#endif
 	machine_kexec_mask_interrupts();
 
@@ -439,8 +373,6 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 	crash_shutdown_cpu = -1;
 	__debugger_fault_handler = old_handler;
 
-	crash_kexec_stop_spus();
-
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 0);
 }
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 0a2af50243c..424afb6b8fb 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -28,9 +28,6 @@
 #define DBG(fmt...)
 #endif
 
-/* Stores the physical address of elf header of crash image. */
-unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
-
 #ifndef CONFIG_RELOCATABLE
 void __init reserve_kdump_trampoline(void)
 {
@@ -72,20 +69,6 @@ void __init setup_kdump_trampoline(void)
 }
 #endif /* CONFIG_RELOCATABLE */
 
-/*
- * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
- * is_kdump_kernel() to determine if we are booting after a panic. Hence
- * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
- */
-static int __init parse_elfcorehdr(char *p)
-{
-	if (p)
-		elfcorehdr_addr = memparse(p, &p);
-
-	return 1;
-}
-__setup("elfcorehdr=", parse_elfcorehdr);
-
 static int __init parse_savemaxmem(char *p)
 {
 	if (p)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index cf02cad62d9..d238c082c3c 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -179,3 +179,21 @@ static int __init dma_init(void)
 	return 0;
 }
 fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+	pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+	return remap_pfn_range(vma, vma->vm_start,
+			       pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index c22dc1ec1c9..56212bc0ab0 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -880,7 +880,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 	 */
 	andi.	r10,r9,MSR_EE
 	beq	1f
+	/*
+	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+	 * which is the stack frame here, we need to force a stack frame
+	 * in case we came from user space.
+	 */
+	stwu	r1,-32(r1)
+	mflr	r0
+	stw	r0,4(r1)
+	stwu	r1,-32(r1)
 	bl	trace_hardirqs_on
+	lwz	r1,0(r1)
+	lwz	r1,0(r1)
 	lwz	r9,_MSR(r1)
 1:
 #endif /* CONFIG_TRACE_IRQFLAGS */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 5c43063d250..9651acc3504 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -379,7 +379,7 @@ interrupt_end_book3e:
 	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
 	b	system_call_common
 
-/* Auxillary Processor Unavailable Interrupt */
+/* Auxiliary Processor Unavailable Interrupt */
 	START_EXCEPTION(ap_unavailable);
 	NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 8a817995b4c..aeb739e1876 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -5,7 +5,7 @@
  * handling and other fixed offset specific things.
  *
  * This file is meant to be #included from head_64.S due to
- * position dependant assembly.
+ * position dependent assembly.
  *
  * Most of this originates from head_64.S and thus has the same
  * copyright history.
@@ -977,20 +977,6 @@ _GLOBAL(do_stab_bolted)
 	rfid
 	b	.	/* prevent speculative execution */
 
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on).  The address is given to the hv
- * as a page number (see xLparMap below), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
-	. = STAB0_OFFSET	/* 0x6000 */
-	.globl initial_stab
-initial_stab:
-	.space	4096
-
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Data area reserved for FWNMI option.
@@ -1027,3 +1013,17 @@ xLparMap:
 #ifdef CONFIG_PPC_PSERIES
 	. = 0x8000
 #endif /* CONFIG_PPC_PSERIES */
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on).  The address is given to the hv
+ * as a page number (see xLparMap above), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+	. = STAB0_OFFSET	/* 0x8000 */
+	.globl initial_stab
+initial_stab:
+	.space	4096
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 98c4b29a56f..c5c24beb838 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -890,6 +890,15 @@ __secondary_start:
 	mtspr	SPRN_SRR1,r4
 	SYNC
 	RFI
+
+_GLOBAL(start_secondary_resume)
+	/* Reset stack */
+	rlwinm	r1,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r3,0
+	std	r3,0(r1)	/* Zero the stack frame pointer */
+	bl	start_secondary
+	b	.
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 9dd21a8c4d5..a91626d87fc 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -766,7 +766,7 @@ DataAccess:
 	 * miss get to this point to load the TLB.
 	 *	r10 - TLB_TAG value
 	 *	r11 - Linux PTE
-	 *	r12, r9 - avilable to use
+	 *	r12, r9 - available to use
 	 *	PID - loaded with proper value when we get here
 	 *	Upon exit, we reload everything and RFI.
	 * Actually, it will fit now, but oh well.....a common place
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index cbb3436b592..5e12b741ba5 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -178,7 +178,7 @@ interrupt_base:
 	NORMAL_EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
-	/* Auxillary Processor Unavailable Interrupt */
+	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 782f23df7c8..3a319f9c9d3 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -40,7 +40,7 @@
 #include <asm/kvm_book3s_asm.h>
 #include <asm/ptrace.h>
 
-/* The physical memory is layed out such that the secondary processor
+/* The physical memory is laid out such that the secondary processor
 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
 * using the layout described in exceptions-64s.S
 */
@@ -536,6 +536,13 @@ _GLOBAL(pmac_secondary_start)
 	add	r13,r13,r4		/* for this processor.		*/
 	mtspr	SPRN_SPRG_PACA,r13	/* Save vaddr of paca in an SPRG*/
 
+	/* Mark interrupts soft and hard disabled (they might be enabled
+	 * in the PACA when doing hotplug)
+	 */
+	li	r0,0
+	stb	r0,PACASOFTIRQEN(r13)
+	stb	r0,PACAHARDIRQEN(r13)
+
 	/* Create a temp kernel stack for use before relocation is on.	*/
 	ld	r1,PACAEMERGSP(r13)
 	subi	r1,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3e02710d956..5ecf54cfa7d 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -326,7 +326,7 @@ interrupt_base:
 	NORMAL_EXCEPTION_PROLOG
 	EXC_XFER_EE_LITE(0x0c00, DoSyscall)
 
-	/* Auxillary Processor Unavailable Interrupt */
+	/* Auxiliary Processor Unavailable Interrupt */
 	EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
 
 	/* Decrementer Interrupt */
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index f62efdfd176..28581f1ad2c 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -201,13 +201,14 @@ int ibmebus_register_driver(struct of_platform_driver *drv)
 	/* If the driver uses devices that ibmebus doesn't know, add them */
 	ibmebus_create_devices(drv->driver.of_match_table);
 
-	return of_register_driver(drv, &ibmebus_bus_type);
+	drv->driver.bus = &ibmebus_bus_type;
+	return driver_register(&drv->driver);
 }
 EXPORT_SYMBOL(ibmebus_register_driver);
 
 void ibmebus_unregister_driver(struct of_platform_driver *drv)
 {
-	of_unregister_driver(drv);
+	driver_unregister(&drv->driver);
 }
 EXPORT_SYMBOL(ibmebus_unregister_driver);
 
@@ -308,15 +309,410 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus,
 	}
 }
 
+
 static struct bus_attribute ibmebus_bus_attrs[] = {
 	__ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe),
 	__ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove),
 	__ATTR_NULL
 };
 
+static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv)
+{
+	const struct of_device_id *matches = drv->of_match_table;
+
+	if (!matches)
+		return 0;
+
+	return of_match_device(matches, dev) != NULL;
+}
+
+static int ibmebus_bus_device_probe(struct device *dev)
+{
+	int error = -ENODEV;
+	struct of_platform_driver *drv;
+	struct platform_device *of_dev;
+	const struct of_device_id *match;
+
+	drv = to_of_platform_driver(dev->driver);
+	of_dev = to_platform_device(dev);
+
+	if (!drv->probe)
+		return error;
+
+	of_dev_get(of_dev);
+
+	match = of_match_device(drv->driver.of_match_table, dev);
+	if (match)
+		error = drv->probe(of_dev, match);
+	if (error)
+		of_dev_put(of_dev);
+
+	return error;
+}
+
+static int ibmebus_bus_device_remove(struct device *dev)
+{
+	struct platform_device *of_dev = to_platform_device(dev);
+	struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+
+	if (dev->driver && drv->remove)
+		drv->remove(of_dev);
+	return 0;
+}
+
+static void ibmebus_bus_device_shutdown(struct device *dev)
+{
+	struct platform_device *of_dev = to_platform_device(dev);
+	struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+
+	if (dev->driver && drv->shutdown)
+		drv->shutdown(of_dev);
+}
+
+/*
+ * ibmebus_bus_device_attrs
+ */
+static ssize_t devspec_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct platform_device *ofdev;
+
+	ofdev = to_platform_device(dev);
+	return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name);
+}
+
+static ssize_t name_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct platform_device *ofdev;
+
+	ofdev = to_platform_device(dev);
+	return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
+}
+
+static ssize_t modalias_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2);
+	buf[len] = '\n';
+	buf[len+1] = 0;
+	return len+1;
+}
+
+struct device_attribute ibmebus_bus_device_attrs[] = {
+	__ATTR_RO(devspec),
+	__ATTR_RO(name),
+	__ATTR_RO(modalias),
+	__ATTR_NULL
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+	struct platform_device *of_dev = to_platform_device(dev);
+	struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+	int ret = 0;
+
+	if (dev->driver && drv->suspend)
+		ret = drv->suspend(of_dev, mesg);
+	return ret;
+}
+
+static int ibmebus_bus_legacy_resume(struct device *dev)
+{
+	struct platform_device *of_dev = to_platform_device(dev);
+	struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+	int ret = 0;
+
+	if (dev->driver && drv->resume)
+		ret = drv->resume(of_dev);
+	return ret;
+}
+
+static int ibmebus_bus_pm_prepare(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (drv && drv->pm && drv->pm->prepare)
+		ret = drv->pm->prepare(dev);
+
+	return ret;
+}
+
+static void ibmebus_bus_pm_complete(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+
+	if (drv && drv->pm && drv->pm->complete)
+		drv->pm->complete(dev);
+}
+
+#ifdef CONFIG_SUSPEND
+
+static int ibmebus_bus_pm_suspend(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->suspend)
+			ret = drv->pm->suspend(dev);
+	} else {
+		ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND);
+	}
+
+	return ret;
+}
+
+static int ibmebus_bus_pm_suspend_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->suspend_noirq)
+			ret = drv->pm->suspend_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int ibmebus_bus_pm_resume(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->resume)
+			ret = drv->pm->resume(dev);
+	} else {
+		ret = ibmebus_bus_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+static int ibmebus_bus_pm_resume_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->resume_noirq)
+			ret = drv->pm->resume_noirq(dev);
+	}
+
+	return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define ibmebus_bus_pm_suspend		NULL
+#define ibmebus_bus_pm_resume		NULL
+#define ibmebus_bus_pm_suspend_noirq	NULL
+#define ibmebus_bus_pm_resume_noirq	NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+
+static int ibmebus_bus_pm_freeze(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->freeze)
+			ret = drv->pm->freeze(dev);
+	} else {
+		ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE);
+	}
+
+	return ret;
+}
+
+static int ibmebus_bus_pm_freeze_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->freeze_noirq)
+			ret = drv->pm->freeze_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int ibmebus_bus_pm_thaw(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->thaw)
+			ret = drv->pm->thaw(dev);
+	} else {
+		ret = ibmebus_bus_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+static int ibmebus_bus_pm_thaw_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->thaw_noirq)
+			ret = drv->pm->thaw_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int ibmebus_bus_pm_poweroff(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->poweroff)
+			ret = drv->pm->poweroff(dev);
+	} else {
+		ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE);
+	}
+
+	return ret;
+}
+
+static int ibmebus_bus_pm_poweroff_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->poweroff_noirq)
+			ret = drv->pm->poweroff_noirq(dev);
+	}
+
+	return ret;
+}
+
+static int ibmebus_bus_pm_restore(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->restore)
+			ret = drv->pm->restore(dev);
+	} else {
+		ret = ibmebus_bus_legacy_resume(dev);
+	}
+
+	return ret;
+}
+
+static int ibmebus_bus_pm_restore_noirq(struct device *dev)
+{
+	struct device_driver *drv = dev->driver;
+	int ret = 0;
+
+	if (!drv)
+		return 0;
+
+	if (drv->pm) {
+		if (drv->pm->restore_noirq)
+			ret = drv->pm->restore_noirq(dev);
+	}
+
+	return ret;
+}
+
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
+
+#define ibmebus_bus_pm_freeze		NULL
+#define ibmebus_bus_pm_thaw		NULL
+#define ibmebus_bus_pm_poweroff		NULL
+#define ibmebus_bus_pm_restore		NULL
+#define ibmebus_bus_pm_freeze_noirq	NULL
+#define ibmebus_bus_pm_thaw_noirq	NULL
+#define ibmebus_bus_pm_poweroff_noirq	NULL
+#define ibmebus_bus_pm_restore_noirq	NULL
+
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
+
+static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
+	.prepare = ibmebus_bus_pm_prepare,
+	.complete = ibmebus_bus_pm_complete,
+	.suspend = ibmebus_bus_pm_suspend,
+	.resume = ibmebus_bus_pm_resume,
+	.freeze = ibmebus_bus_pm_freeze,
+	.thaw = ibmebus_bus_pm_thaw,
+	.poweroff = ibmebus_bus_pm_poweroff,
+	.restore = ibmebus_bus_pm_restore,
+	.suspend_noirq = ibmebus_bus_pm_suspend_noirq,
+	.resume_noirq = ibmebus_bus_pm_resume_noirq,
+	.freeze_noirq = ibmebus_bus_pm_freeze_noirq,
+	.thaw_noirq = ibmebus_bus_pm_thaw_noirq,
+	.poweroff_noirq = ibmebus_bus_pm_poweroff_noirq,
+	.restore_noirq = ibmebus_bus_pm_restore_noirq,
+};
+
+#define IBMEBUS_BUS_PM_OPS_PTR	(&ibmebus_bus_dev_pm_ops)
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define IBMEBUS_BUS_PM_OPS_PTR	NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
 struct bus_type ibmebus_bus_type = {
+	.name      = "ibmebus",
 	.uevent    = of_device_uevent,
-	.bus_attrs = ibmebus_bus_attrs
+	.bus_attrs = ibmebus_bus_attrs,
+	.match     = ibmebus_bus_bus_match,
+	.probe     = ibmebus_bus_device_probe,
+	.remove    = ibmebus_bus_device_remove,
+	.shutdown  = ibmebus_bus_device_shutdown,
+	.dev_attrs = ibmebus_bus_device_attrs,
+	.pm        = IBMEBUS_BUS_PM_OPS_PTR,
 };
 EXPORT_SYMBOL(ibmebus_bus_type);
 
@@ -326,7 +722,7 @@ static int __init ibmebus_bus_init(void)
 
 	printk(KERN_INFO "IBM eBus Device Driver\n");
 
-	err = of_bus_type_init(&ibmebus_bus_type, "ibmebus");
+	err = bus_register(&ibmebus_bus_type);
 	if (err) {
 		printk(KERN_ERR "%s: failed to register IBM eBus.\n",
 		       __func__);
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 5328709eeed..ba319547860 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	isync
 	b	1b
 
-_GLOBAL(power4_cpu_offline_powersave)
-	/* Go to NAP now */
-	mfmsr	r7
-	rldicl	r0,r7,48,1
-	rotldi	r0,r0,16
-	mtmsrd	r0,1			/* hard-disable interrupts */
-	li	r0,1
-	li	r6,0
-	stb	r0,PACAHARDIRQEN(r13)	/* we'll hard-enable shortly */
-	stb	r6,PACASOFTIRQEN(r13)	/* soft-disable irqs */
-BEGIN_FTR_SECTION
-	DSSALL
-	sync
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-	ori	r7,r7,MSR_EE
-	oris	r7,r7,MSR_POW@h
-	sync
-	isync
-	mtmsrd	r7
-	isync
-	blr
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ce557f6f00f..f621b7d2d86 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -195,7 +195,7 @@ notrace void arch_local_irq_restore(unsigned long en)
 EXPORT_SYMBOL(arch_local_irq_restore);
 #endif /* CONFIG_PPC64 */
 
-static int show_other_interrupts(struct seq_file *p, int prec)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
 	int j;
 
@@ -231,63 +231,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	return 0;
 }
 
-int show_interrupts(struct seq_file *p, void *v)
-{
-	unsigned long flags, any_count = 0;
-	int i = *(loff_t *) v, j, prec;
-	struct irqaction *action;
-	struct irq_desc *desc;
-
-	if (i > nr_irqs)
-		return 0;
-
-	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
-		j *= 10;
-
-	if (i == nr_irqs)
-		return show_other_interrupts(p, prec);
-
-	/* print header */
-	if (i == 0) {
-		seq_printf(p, "%*s", prec + 8, "");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%-8d", j);
-		seq_putc(p, '\n');
-	}
-
-	desc = irq_to_desc(i);
-	if (!desc)
-		return 0;
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	for_each_online_cpu(j)
-		any_count |= kstat_irqs_cpu(i, j);
-	action = desc->action;
-	if (!action && !any_count)
-		goto out;
-
-	seq_printf(p, "%*d: ", prec, i);
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-
-	if (desc->chip)
-		seq_printf(p, "  %-16s", desc->chip->name);
-	else
-		seq_printf(p, "  %-16s", "None");
-	seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
-
-	if (action) {
-		seq_printf(p, "     %s", action->name);
-		while ((action = action->next) != NULL)
-			seq_printf(p, ", %s", action->name);
-	}
-
-	seq_putc(p, '\n');
-out:
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	return 0;
-}
-
 /*
  * /proc/stat helpers
  */
@@ -303,30 +246,37 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(const struct cpumask *map)
+void migrate_irqs(void)
 {
 	struct irq_desc *desc;
 	unsigned int irq;
 	static int warned;
 	cpumask_var_t mask;
+	const struct cpumask *map = cpu_online_mask;
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
 	for_each_irq(irq) {
+		struct irq_data *data;
+		struct irq_chip *chip;
+
 		desc = irq_to_desc(irq);
 		if (!desc)
 			continue;
 
-		if (desc->status & IRQ_PER_CPU)
+		data = irq_desc_get_irq_data(desc);
+		if (irqd_is_per_cpu(data))
 			continue;
 
-		cpumask_and(mask, desc->affinity, map);
+		chip = irq_data_get_irq_chip(data);
+
+		cpumask_and(mask, data->affinity, map);
 		if (cpumask_any(mask) >= nr_cpu_ids) {
 			printk("Breaking affinity for irq %i\n", irq);
 			cpumask_copy(mask, map);
 		}
-		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, mask);
+		if (chip->irq_set_affinity)
+			chip->irq_set_affinity(data, mask, true);
 		else if (desc->action && !(warned++))
 			printk("Cannot set affinity for irq %i\n", irq);
 	}
@@ -612,7 +562,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
 			smp_wmb();
 
 			/* Clear norequest flags */
-			irq_to_desc(i)->status &= ~IRQ_NOREQUEST;
+			irq_clear_status_flags(i, IRQ_NOREQUEST);
 
 			/* Legacy flags are left to default at this point,
 			 * one can then use irq_create_mapping() to
@@ -678,16 +628,15 @@ void irq_set_virq_count(unsigned int count)
 static int irq_setup_virq(struct irq_host *host, unsigned int virq,
 			    irq_hw_number_t hwirq)
 {
-	struct irq_desc *desc;
+	int res;
 
-	desc = irq_to_desc_alloc_node(virq, 0);
-	if (!desc) {
+	res = irq_alloc_desc_at(virq, 0);
+	if (res != virq) {
 		pr_debug("irq: -> allocating desc failed\n");
 		goto error;
 	}
 
-	/* Clear IRQ_NOREQUEST flag */
-	desc->status &= ~IRQ_NOREQUEST;
+	irq_clear_status_flags(virq, IRQ_NOREQUEST);
 
 	/* map it */
 	smp_wmb();
@@ -696,11 +645,13 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq,
 
 	if (host->ops->map(host, virq, hwirq)) {
 		pr_debug("irq: -> mapping failed, freeing\n");
-		goto error;
+		goto errdesc;
 	}
 
 	return 0;
 
+errdesc:
+	irq_free_descs(virq, 1);
 error:
 	irq_free_virt(virq, 1);
 	return -1;
@@ -820,8 +771,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 
 	/* Set type if specified and different than the current one */
 	if (type != IRQ_TYPE_NONE &&
-	    type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
-		set_irq_type(virq, type);
+	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
+		irq_set_irq_type(virq, type);
 	return virq;
 }
 EXPORT_SYMBOL_GPL(irq_create_of_mapping);
@@ -844,7 +795,7 @@ void irq_dispose_mapping(unsigned int virq)
 		return;
 
 	/* remove chip and handler */
-	set_irq_chip_and_handler(virq, NULL, NULL);
+	irq_set_chip_and_handler(virq, NULL, NULL);
 
 	/* Make sure it's completed */
 	synchronize_irq(virq);
@@ -879,9 +830,9 @@ void irq_dispose_mapping(unsigned int virq)
 	smp_mb();
 	irq_map[virq].hwirq = host->inval_irq;
 
-	/* Set some flags */
-	irq_to_desc(virq)->status |= IRQ_NOREQUEST;
+	irq_set_status_flags(virq, IRQ_NOREQUEST);
 
+	irq_free_descs(virq, 1);
 	/* Free it */
 	irq_free_virt(virq, 1);
 }
@@ -1074,21 +1025,6 @@ void irq_free_virt(unsigned int virq, unsigned int count)
 
 int arch_early_irq_init(void)
 {
-	struct irq_desc *desc;
-	int i;
-
-	for (i = 0; i < NR_IRQS; i++) {
-		desc = irq_to_desc(i);
-		if (desc)
-			desc->status |= IRQ_NOREQUEST;
-	}
-
-	return 0;
-}
-
-int arch_init_chip_data(struct irq_desc *desc, int node)
-{
-	desc->status |= IRQ_NOREQUEST;
 	return 0;
 }
 
@@ -1159,11 +1095,14 @@ static int virq_debug_show(struct seq_file *m, void *private)
 		raw_spin_lock_irqsave(&desc->lock, flags);
 
 		if (desc->action && desc->action->handler) {
+			struct irq_chip *chip;
+
 			seq_printf(m, "%5d  ", i);
 			seq_printf(m, "0x%05lx  ", virq_to_hw(i));
 
-			if (desc->chip && desc->chip->name)
-				p = desc->chip->name;
+			chip = irq_desc_get_chip(desc);
+			if (chip && chip->name)
+				p = chip->name;
 			else
 				p = none;
 			seq_printf(m, "%-15s  ", p);
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 2a2f3c3f6d8..97ec8557f97 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	/**** Might be a good idea to set L2DO here - to prevent instructions
 	      from getting into the cache.  But since we invalidate
 	      the next time we enable the cache it doesn't really matter.
-	      Don't do this unless you accomodate all processor variations.
+	      Don't do this unless you accommodate all processor variations.
 	      The bit moved on the 7450.....
 	  ****/
 
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c834757bebc..2b97b80d6d7 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
 		if (!parent)
 			continue;
 		if (of_match_node(legacy_serial_parents, parent) != NULL) {
-			index = add_legacy_soc_port(np, np);
-			if (index >= 0 && np == stdout)
-				legacy_serial_console = index;
+			if (of_device_is_available(np)) {
+				index = add_legacy_soc_port(np, np);
+				if (index >= 0 && np == stdout)
+					legacy_serial_console = index;
+			}
 		}
 		of_node_put(parent);
 	}
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 16468362ad5..301db65f05a 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -262,7 +262,7 @@ static void parse_ppp_data(struct seq_file *m)
 	seq_printf(m, "system_active_processors=%d\n",
 		   ppp_data.active_system_procs);
 
-	/* pool related entries are apropriate for shared configs */
+	/* pool related entries are appropriate for shared configs */
 	if (lppaca_of(0).shared_proc) {
 		unsigned long pool_idle_time, pool_procs;
 
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index df7e20c191c..7ee50f0547c 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
 #include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/irq.h>
+#include <linux/ftrace.h>
 
 #include <asm/machdep.h>
 #include <asm/prom.h>
@@ -25,29 +26,29 @@ void machine_kexec_mask_interrupts(void) {
 	for_each_irq(i) {
 		struct irq_desc *desc = irq_to_desc(i);
+		struct irq_chip *chip;
 
-		if (!desc || !desc->chip)
+		if (!desc)
 			continue;
 
-		if (desc->chip->eoi &&
-		    desc->status & IRQ_INPROGRESS)
-			desc->chip->eoi(i);
+		chip = irq_desc_get_chip(desc);
+		if (!chip)
+			continue;
+
+		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
+			chip->irq_eoi(&desc->irq_data);
 
-		if (desc->chip->mask)
-			desc->chip->mask(i);
+		if (chip->irq_mask)
+			chip->irq_mask(&desc->irq_data);
 
-		if (desc->chip->disable &&
-		    !(desc->status & IRQ_DISABLED))
-			desc->chip->disable(i);
+		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+			chip->irq_disable(&desc->irq_data);
 	}
 }
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
-	if (ppc_md.machine_crash_shutdown)
-		ppc_md.machine_crash_shutdown(regs);
-	else
-		default_machine_crash_shutdown(regs);
+	default_machine_crash_shutdown(regs);
 }
 
 /*
@@ -65,8 +66,6 @@ int machine_kexec_prepare(struct kimage *image)
 
 void machine_kexec_cleanup(struct kimage *image)
 {
-	if (ppc_md.machine_kexec_cleanup)
-		ppc_md.machine_kexec_cleanup(image);
 }
 
 void arch_crash_save_vmcoreinfo(void)
@@ -87,11 +86,17 @@ void arch_crash_save_vmcoreinfo(void)
  */
 void machine_kexec(struct kimage *image)
 {
+	int save_ftrace_enabled;
+
+	save_ftrace_enabled = __ftrace_enabled_save();
+
 	if (ppc_md.machine_kexec)
 		ppc_md.machine_kexec(image);
 	else
 		default_machine_kexec(image);
 
+	__ftrace_enabled_restore(save_ftrace_enabled);
+
 	/* Fall back to normal restart if we're still alive. */
 	machine_restart(NULL);
 	for(;;);
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index bb12b3248f1..bec1e930ed7 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -237,22 +237,45 @@ static unsigned char __init nvram_checksum(struct nvram_header *p)
 	return c_sum;
 }
 
+/*
+ * Per the criteria passed via nvram_remove_partition(), should this
+ * partition be removed?  1=remove, 0=keep
+ */
+static int nvram_can_remove_partition(struct nvram_partition *part,
+		const char *name, int sig, const char *exceptions[])
+{
+	if (part->header.signature != sig)
+		return 0;
+	if (name) {
+		if (strncmp(name, part->header.name, 12))
+			return 0;
+	} else if (exceptions) {
+		const char **except;
+		for (except = exceptions; *except; except++) {
+			if (!strncmp(*except, part->header.name, 12))
+				return 0;
+		}
+	}
+	return 1;
+}
+
 /**
  * nvram_remove_partition - Remove one or more partitions in nvram
  * @name: name of the partition to remove, or NULL for a
  *        signature only match
  * @sig: signature of the partition(s) to remove
+ * @exceptions: When removing all partitions with a matching signature,
+ *        leave these alone.
  */
-int __init nvram_remove_partition(const char *name, int sig)
+int __init nvram_remove_partition(const char *name, int sig,
+						const char *exceptions[])
 {
 	struct nvram_partition *part, *prev, *tmp;
 	int rc;
 
 	list_for_each_entry(part, &nvram_partitions, partition) {
-		if (part->header.signature != sig)
-			continue;
-		if (name && strncmp(name, part->header.name, 12))
+		if (!nvram_can_remove_partition(part, name, sig, exceptions))
 			continue;
 
 		/* Make partition a free partition */
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index b2c363ef38a..24582181b6e 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -36,8 +36,7 @@
  * lacking some bits needed here.
  */
 
-static int __devinit of_pci_phb_probe(struct platform_device *dev,
-				      const struct of_device_id *match)
+static int __devinit of_pci_phb_probe(struct platform_device *dev)
 {
 	struct pci_controller *phb;
 
@@ -74,7 +73,7 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev,
 #endif /* CONFIG_EEH */
 
 	/* Scan the bus */
-	pcibios_scan_phb(phb, dev->dev.of_node);
+	pcibios_scan_phb(phb);
 	if (phb->bus == NULL)
 		return -ENXIO;
 
@@ -104,7 +103,7 @@ static struct of_device_id of_pci_phb_ids[] = {
 	{}
 };
 
-static struct of_platform_driver of_pci_phb_driver = {
+static struct platform_driver of_pci_phb_driver = {
 	.probe = of_pci_phb_probe,
 	.driver = {
 		.name = "of-pci",
@@ -115,7 +114,7 @@ static struct of_platform_driver of_pci_phb_driver = {
 
 static __init int of_pci_phb_init(void)
 {
-	return of_register_platform_driver(&of_pci_phb_driver);
+	return platform_driver_register(&of_pci_phb_driver);
 }
 
 device_initcall(of_pci_phb_init);
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ebf9846f3c3..10f0aadee95 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -27,20 +27,6 @@ extern unsigned long __toc_start;
 #ifdef CONFIG_PPC_BOOK3S
 
 /*
- * We only have to have statically allocated lppaca structs on
- * legacy iSeries, which supports at most 64 cpus.
- */
-#ifdef CONFIG_PPC_ISERIES
-#if NR_CPUS < 64
-#define NR_LPPACAS NR_CPUS
-#else
-#define NR_LPPACAS 64
-#endif
-#else /* not iSeries */
-#define NR_LPPACAS 1
-#endif
-
-/*
  * The structure which the hypervisor knows about - this structure
  * should not cross a page boundary.  The vpa_init/register_vpa call
  * is now known to fail if the lppaca structure crosses a page
@@ -217,7 +203,7 @@ void __init free_unused_pacas(void)
 {
 	int new_size;
 
-	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus());
+	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
 
 	if (new_size >= paca_size)
 		return;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 10a44e68ef1..893af2a9cd0 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/of_address.h>
+#include <linux/of_pci.h>
 #include <linux/mm.h>
 #include <linux/list.h>
 #include <linux/syscalls.h>
@@ -260,7 +261,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 
 		virq = irq_create_mapping(NULL, line);
 		if (virq != NO_IRQ)
-			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
+			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
 	} else {
 		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
 			 oirq.size, oirq.specifier[0], oirq.specifier[1],
@@ -1687,13 +1688,8 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
 /**
  * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
  * @hose: Pointer to the PCI host controller instance structure
- * @sysdata: value to use for sysdata pointer.  ppc32 and ppc64 differ here
- *
- * Note: the 'data' pointer is a temporary measure.  As 32 and 64 bit
- * pci code gets merged, this parameter should become unnecessary because
- * both will use the same value.
 */
-void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata)
+void __devinit pcibios_scan_phb(struct pci_controller *hose)
 {
 	struct pci_bus *bus;
 	struct device_node *node = hose->dn;
@@ -1703,13 +1699,13 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata)
 		 node ? node->full_name : "<NO NAME>");
 
 	/* Create an empty bus for the toplevel */
-	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops,
-			     sysdata);
+	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
 	if (bus == NULL) {
 		pr_err("Failed to create bus for PCI domain %04x\n",
 			hose->global_number);
 		return;
 	}
+	bus->dev.of_node = of_node_get(node);
 	bus->secondary = hose->first_busno;
 	hose->bus = bus;
 
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index e7db5b48004..bedb370459f 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -381,7 +381,7 @@ static int __init pcibios_init(void)
 		if (pci_assign_all_buses)
 			hose->first_busno = next_busno;
 		hose->last_busno = 0xff;
-		pcibios_scan_phb(hose, hose);
+		pcibios_scan_phb(hose);
 		pci_bus_add_devices(hose->bus);
 		if (pci_assign_all_buses || next_busno <= hose->last_busno)
 			next_busno = hose->last_busno + pcibios_assign_bus_offset;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 851577608a7..fc6452b6be9 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -64,7 +64,7 @@ static int __init pcibios_init(void)
 
 	/* Scan all of the recorded PCI controllers.  */
 	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
-		pcibios_scan_phb(hose, hose->dn);
+		pcibios_scan_phb(hose);
 		pci_bus_add_devices(hose->bus);
 	}
 
@@ -242,10 +242,10 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
 			break;
 		bus = NULL;
 	}
-	if (bus == NULL || bus->sysdata == NULL)
+	if (bus == NULL || bus->dev.of_node == NULL)
 		return -ENODEV;
 
-	hose_node = (struct device_node *)bus->sysdata;
+	hose_node = bus->dev.of_node;
 	hose = PCI_DN(hose_node)->phb;
 
 	switch (which) {
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index d56b35ee7f7..d225d99fe39 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -161,7 +161,7 @@ static void *is_devfn_node(struct device_node *dn, void *data)
 /*
  * This is the "slow" path for looking up a device_node from a
  * pci_dev.  It will hunt for the device under its parent's
- * phb and then update sysdata for a future fastpath.
+ * phb and then update of_node pointer.
 *
 * It may also do fixups on the actual device since this happens
 * on the first read/write.
@@ -170,16 +170,22 @@ static void *is_devfn_node(struct device_node *dn, void *data)
 * In this case it may probe for real hardware ("just in case")
 * and add a device_node to the device tree if necessary.
 *
+ * Is this function necessary anymore now that dev->dev.of_node is
+ * used to store the node pointer?
+ *
 */
 struct device_node *fetch_dev_dn(struct pci_dev *dev)
 {
-	struct device_node *orig_dn = dev->sysdata;
+	struct pci_controller *phb = dev->sysdata;
 	struct device_node *dn;
 	unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
 
-	dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval);
+	if (WARN_ON(!phb))
+		return NULL;
+
+	dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval);
 	if (dn)
-		dev->sysdata = dn;
+		dev->dev.of_node = dn;
 	return dn;
 }
 EXPORT_SYMBOL(fetch_dev_dn);
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index e751506323b..1e89a72fd03 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -135,7 +135,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 	pr_debug("    create device, devfn: %x, type: %s\n", devfn, type);
 
 	dev->bus = bus;
-	dev->sysdata = node;
+	dev->dev.of_node = of_node_get(node);
 	dev->dev.parent = bus->bridge;
 	dev->dev.bus = &pci_bus_type;
 	dev->devfn = devfn;
@@ -238,7 +238,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
 	bus->primary = dev->bus->number;
 	bus->subordinate = busrange[1];
 	bus->bridge_ctl = 0;
-	bus->sysdata = node;
+	bus->dev.of_node = of_node_get(node);
 
 	/* parse ranges property */
 	/* PCI #address-cells == 3 and #size-cells == 2 always */
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 56748070578..822f63008ae 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
 	return 0;
 }
 
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+	u64 delta = (val - prev) & 0xfffffffful;
+
+	/*
+	 * POWER7 can roll back counter values, if the new value is smaller
+	 * than the previous value it will cause the delta and the counter to
+	 * have bogus values unless we rolled a counter over.  If a coutner is
+	 * rolled back, it will be smaller, but within 256, which is the maximum
+	 * number of events to rollback at once.  If we dectect a rollback
+	 * return 0.  This can lead to a small lack of precision in the
+	 * counters.
+	 */
+	if (prev > val && (prev - val) < 256)
+		delta = 0;
+
+	return delta;
+}
+
 static void power_pmu_read(struct perf_event *event)
 {
 	s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
 		prev = local64_read(&event->hw.prev_count);
 		barrier();
 		val = read_pmc(event->hw.idx);
+		delta = check_and_compute_delta(prev, val);
+		if (!delta)
+			return;
 	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
-	/* The counters are only 32 bits wide */
-	delta = (val - prev) & 0xfffffffful;
 	local64_add(delta, &event->count);
 	local64_sub(delta, &event->hw.period_left);
 }
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
 		prev = local64_read(&event->hw.prev_count);
 		event->hw.idx = 0;
-		delta = (val - prev) & 0xfffffffful;
-		local64_add(delta, &event->count);
+		delta = check_and_compute_delta(prev, val);
+		if (delta)
+			local64_add(delta, &event->count);
 	}
 }
 
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 				  unsigned long pmc5, unsigned long pmc6)
 {
 	struct perf_event *event;
-	u64 val;
+	u64 val, prev;
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
 		event = cpuhw->limited_counter[i];
 		event->hw.idx = cpuhw->limited_hwidx[i];
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
-		local64_set(&event->hw.prev_count, val);
+		prev = local64_read(&event->hw.prev_count);
+		if (check_and_compute_delta(prev, val))
+			local64_set(&event->hw.prev_count, val);
 		perf_event_update_userpage(event);
 	}
 }
@@ -759,7 +782,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
 
 	/*
 	 * If group events scheduling transaction was started,
-	 * skip the schedulability test here, it will be peformed
+	 * skip the schedulability test here, it will be performed
	 * at commit time(->commit_txn) as a whole
	 */
	if (cpuhw->group_flag & PERF_EVENT_TXN)
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
-	delta = (val - prev) & 0xfffffffful;
+	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);
 
	/*
@@ -1212,6 +1235,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
			if (left <= 0)
				left = period;
			record = 1;
+			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
@@ -1268,6 +1292,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
	return ip;
 }
 
+static bool pmc_overflow(unsigned long val)
+{
+	if ((int)val < 0)
+		return true;
+
+	/*
+	 * Events on POWER7 can roll back if a speculative event doesn't
+	 * eventually complete. Unfortunately in some rare cases they will
+	 * raise a performance monitor exception. We need to catch this to
+	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
+	 * cycles from overflow.
+	 *
+	 * We only do this if the first pass fails to find any overflowing
+	 * PMCs because a user might set a period of less than 256 and we
+	 * don't want to mistakenly reset them.
+	 */
+	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+		return true;
+
+	return false;
+}
+
 /*
  * Performance monitor interrupt stuff
  */
@@ -1315,7 +1361,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
			if (is_limited_pmc(i + 1))
				continue;
			val = read_pmc(i + 1);
-			if ((int)val < 0)
+			if (pmc_overflow(val))
				write_pmc(i + 1, 0);
		}
	}
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 4dcf5f831e9..b0dc8f7069c 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -596,6 +596,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
			if (left <= 0)
				left = period;
			record = 1;
+			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index e83ba3f078e..1b1787d5289 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -15,7 +15,7 @@
 
 /*
 * Grab the register values as they are now.
- * This won't do a particularily good job because we really
+ * This won't do a particularly good job because we really
 * want our caller's caller's registers, and our caller has
 * already executed its prologue.
* ToDo: We could reach back into the caller's save area to do diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 84906d3fc86..f74f355a961 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread) prime_debug_regs(new_thread); } #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ +#ifndef CONFIG_HAVE_HW_BREAKPOINT static void set_debug_reg_defaults(struct thread_struct *thread) { if (thread->dabr) { @@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) set_dabr(0); } } +#endif /* !CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ int set_dabr(unsigned long dabr) @@ -631,7 +633,7 @@ void show_regs(struct pt_regs * regs) #ifdef CONFIG_PPC_ADV_DEBUG_REGS printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); #else - printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); + printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); #endif printk("TASK = %p[%d] '%s' THREAD: %p", current, task_pid_nr(current), current->comm, task_thread_info(current)); @@ -670,11 +672,11 @@ void flush_thread(void) { discard_lazy_cpu_state(); -#ifdef CONFIG_HAVE_HW_BREAKPOINTS +#ifdef CONFIG_HAVE_HW_BREAKPOINT flush_ptrace_hw_breakpoint(current); -#else /* CONFIG_HAVE_HW_BREAKPOINTS */ +#else /* CONFIG_HAVE_HW_BREAKPOINT */ set_debug_reg_defaults(&current->thread); -#endif /* CONFIG_HAVE_HW_BREAKPOINTS */ +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ } void @@ -1216,11 +1218,11 @@ void __ppc64_runlatch_off(void) static struct kmem_cache *thread_info_cache; -struct thread_info *alloc_thread_info(struct task_struct *tsk) +struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) { struct thread_info *ti; - ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); + ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node); if (unlikely(ti == NULL)) return NULL; #ifdef CONFIG_DEBUG_STACK_USAGE diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 9e3132db718..e74fa12afc8 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -97,7 +97,7 @@ static void __init move_device_tree(void) start = __pa(initial_boot_params); size = be32_to_cpu(initial_boot_params->totalsize); - if ((memory_limit && (start + size) > memory_limit) || + if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) || overlaps_crashkernel(start, size)) { p = __va(memblock_alloc(size, PAGE_SIZE)); memcpy(p, initial_boot_params, size); @@ -519,9 +519,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) memblock_add(base, size); } -u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) +void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) { - return memblock_alloc(size, align); + return __va(memblock_alloc(size, align)); } #ifdef CONFIG_BLK_DEV_INITRD @@ -683,7 +683,7 @@ void __init early_init_devtree(void *params) #endif #ifdef CONFIG_PHYP_DUMP - /* scan tree to see if dump occured during last boot */ + /* scan tree to see if dump occurred during last boot */ of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); #endif @@ -739,7 +739,7 @@ void __init early_init_devtree(void *params) DBG("Scanning CPUs ...\n"); - /* Retreive CPU related informations from the flat tree + /* Retrieve CPU related informations from the flat tree * (altivec support, boot CPU ID, ...)
*/ of_scan_flat_dt(early_init_dt_scan_cpus, NULL); diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index c2b7a07cc3d..47187cc2cf0 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c @@ -2,95 +2,11 @@ #include <linux/kernel.h> #include <linux/string.h> -#include <linux/pci_regs.h> #include <linux/module.h> #include <linux/ioport.h> #include <linux/etherdevice.h> #include <linux/of_address.h> #include <asm/prom.h> -#include <asm/pci-bridge.h> - -#ifdef CONFIG_PCI -int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) -{ - struct device_node *dn, *ppnode; - struct pci_dev *ppdev; - u32 lspec; - u32 laddr[3]; - u8 pin; - int rc; - - /* Check if we have a device node, if yes, fallback to standard OF - * parsing - */ - dn = pci_device_to_OF_node(pdev); - if (dn) { - rc = of_irq_map_one(dn, 0, out_irq); - if (!rc) - return rc; - } - - /* Ok, we don't, time to have fun. Let's start by building up an - * interrupt spec. we assume #interrupt-cells is 1, which is standard - * for PCI. If you do different, then don't use that routine. - */ - rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); - if (rc != 0) - return rc; - /* No pin, exit */ - if (pin == 0) - return -ENODEV; - - /* Now we walk up the PCI tree */ - lspec = pin; - for (;;) { - /* Get the pci_dev of our parent */ - ppdev = pdev->bus->self; - - /* Ouch, it's a host bridge... */ - if (ppdev == NULL) { -#ifdef CONFIG_PPC64 - ppnode = pci_bus_to_OF_node(pdev->bus); -#else - struct pci_controller *host; - host = pci_bus_to_host(pdev->bus); - ppnode = host ? host->dn : NULL; -#endif - /* No node for host bridge ? give up */ - if (ppnode == NULL) - return -EINVAL; - } else - /* We found a P2P bridge, check if it has a node */ - ppnode = pci_device_to_OF_node(ppdev); - - /* Ok, we have found a parent with a device-node, hand over to - * the OF parsing code. - * We build a unit address from the linux device to be used for - * resolution. Note that we use the linux bus number which may - * not match your firmware bus numbering. - * Fortunately, in most cases, interrupt-map-mask doesn't include - * the bus number as part of the matching. - * You should still be careful about that though if you intend - * to rely on this function (you ship a firmware that doesn't - * create device nodes for all PCI devices). - */ - if (ppnode) - break; - - /* We can only get here if we hit a P2P bridge with no node, - * let's do standard swizzling and try again - */ - lspec = pci_swizzle_interrupt_pin(pdev, lspec); - pdev = ppdev; - } - - laddr[0] = (pdev->bus->number << 16) - | (pdev->devfn << 8); - laddr[1] = laddr[2] = 0; - return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq); -} -EXPORT_SYMBOL_GPL(of_irq_map_pci); -#endif /* CONFIG_PCI */ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, unsigned long *busno, unsigned long *phys, unsigned long *size) diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 90653699829..55613e33e26 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -229,12 +229,16 @@ static int gpr_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { - int ret; + int i, ret; if (target->thread.regs == NULL) return -EIO; - CHECK_FULL_REGS(target->thread.regs); + if (!FULL_REGS(target->thread.regs)) { + /* We have a partial register set. 
Fill 14-31 with bogus values */ + for (i = 14; i < 32; i++) + target->thread.regs->gpr[i] = NV_REG_POISON; + } ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, target->thread.regs, @@ -459,7 +463,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, #ifdef CONFIG_VSX /* * Currently to set and get all the vsx state, you need to call - * the fp and VMX calls aswell. This only get/sets the lower 32 + * the fp and VMX calls as well. This only get/sets the lower 32 * 128bit VSX registers. */ @@ -641,11 +645,16 @@ static int gpr32_get(struct task_struct *target, compat_ulong_t *k = kbuf; compat_ulong_t __user *u = ubuf; compat_ulong_t reg; + int i; if (target->thread.regs == NULL) return -EIO; - CHECK_FULL_REGS(target->thread.regs); + if (!FULL_REGS(target->thread.regs)) { + /* We have a partial register set. Fill 14-31 with bogus values */ + for (i = 14; i < 32; i++) + target->thread.regs->gpr[i] = NV_REG_POISON; + } pos /= sizeof(reg); count /= sizeof(reg); diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 2b442e6c21e..bf5f5ce3a7b 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -256,31 +256,16 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf, struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); struct rtas_update_flash_t *uf; char msg[RTAS_MSG_MAXLEN]; - int msglen; - uf = (struct rtas_update_flash_t *) dp->data; + uf = dp->data; if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) { get_flash_status_msg(uf->status, msg); } else { /* FIRMWARE_UPDATE_NAME */ sprintf(msg, "%d\n", uf->status); } - msglen = strlen(msg); - if (msglen > count) - msglen = count; - - if (ppos && *ppos != 0) - return 0; /* be cheap */ - - if (!access_ok(VERIFY_WRITE, buf, msglen)) - return -EINVAL; - if (copy_to_user(buf, msg, msglen)) - return -EFAULT; - - if (ppos) - *ppos = msglen; - return msglen; + return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg)); } /* constructor for flash_block_cache */ @@ -394,26 +379,13 @@ static ssize_t manage_flash_read(struct file *file, char __user *buf, char msg[RTAS_MSG_MAXLEN]; int msglen; - args_buf = (struct rtas_manage_flash_t *) dp->data; + args_buf = dp->data; if (args_buf == NULL) return 0; msglen = sprintf(msg, "%d\n", args_buf->status); - if (msglen > count) - msglen = count; - if (ppos && *ppos != 0) - return 0; /* be cheap */ - - if (!access_ok(VERIFY_WRITE, buf, msglen)) - return -EINVAL; - - if (copy_to_user(buf, msg, msglen)) - return -EFAULT; - - if (ppos) - *ppos = msglen; - return msglen; + return simple_read_from_buffer(buf, count, ppos, msg, msglen); } static ssize_t manage_flash_write(struct file *file, const char __user *buf, @@ -495,24 +467,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf, char msg[RTAS_MSG_MAXLEN]; int msglen; - args_buf = (struct rtas_validate_flash_t *) dp->data; + args_buf = dp->data; - if (ppos && *ppos != 0) - return 0; /* be cheap */ - msglen = get_validate_flash_msg(args_buf, msg); - if (msglen > count) - msglen = count; - - if (!access_ok(VERIFY_WRITE, buf, msglen)) - return -EINVAL; - - if (copy_to_user(buf, msg, msglen)) - return -EFAULT; - if (ppos) - *ppos = msglen; - return msglen; + return simple_read_from_buffer(buf, count, ppos, msg, msglen); } static ssize_t validate_flash_write(struct file *file, const char __user *buf,
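The three conversions above all collapse the same open-coded offset/bounds/copy sequence into the kernel's simple_read_from_buffer() helper. A userspace approximation of what that helper does (memcpy() stands in for copy_to_user(), a plain -1 for -EINVAL, and the partial-copy handling of the real helper is omitted; model_read_from_buffer() is an illustrative name):

#include <string.h>
#include <sys/types.h>

static ssize_t model_read_from_buffer(char *to, size_t count, off_t *ppos,
                                      const char *from, size_t available)
{
	off_t pos = *ppos;

	if (pos < 0)
		return -1;			/* kernel: -EINVAL */
	if ((size_t)pos >= available || !count)
		return 0;			/* EOF, or nothing asked for */
	if (count > available - pos)
		count = available - pos;	/* clamp to what is left */
	memcpy(to, from + pos, count);		/* kernel: copy_to_user() */
	*ppos = pos + count;			/* advance the file offset */
	return count;
}

Note one behavioral improvement over the removed code, which returned 0 for any read at a non-zero *ppos ("be cheap"): the helper supports short reads that resume at the saved offset.

diff --git a/arch/powerpc/kernel/rtasd.c index 0438f819fe6..67f6c3b5135 100644 ---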
a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -160,7 +160,7 @@ static int log_rtas_len(char * buf) /* rtas fixed header */ len = 8; err = (struct rtas_error_log *)buf; - if (err->extended_log_length) { + if (err->extended && err->extended_log_length) { /* extended header */ len += err->extended_log_length; @@ -412,7 +412,8 @@ static void rtas_event_scan(struct work_struct *w) get_online_cpus(); - cpu = cpumask_next(smp_processor_id(), cpu_online_mask); + /* raw_ OK because just using CPU as starting point. */ + cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); if (cpu >= nr_cpu_ids) { cpu = cpumask_first(cpu_online_mask); @@ -464,7 +465,7 @@ static void start_event_scan(void) pr_debug("rtasd: will sleep for %d milliseconds\n", (30000 / rtas_event_scan_rate)); - /* Retreive errors from nvram if any */ + /* Retrieve errors from nvram if any */ retreive_nvram_error_log(); schedule_delayed_work_on(cpumask_first(cpu_online_mask), diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 9d4882a4664..21f30cb6807 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void) */ cpu_init_thread_core_maps(nthreads); + /* Now that possible cpus are set, set nr_cpu_ids for later use */ + nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; + free_unused_pacas(); } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 98136050917..cbdbb14be4b 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -57,6 +57,25 @@ #define DBG(fmt...) #endif + +/* Store all idle threads, this can be reused instead of creating +* a new thread. Also avoids complicated thread destroy functionality +* for idle threads. +*/ +#ifdef CONFIG_HOTPLUG_CPU +/* + * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is + * removed after init for !CONFIG_HOTPLUG_CPU. 
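A brief aside on the smp_setup_cpu_maps() hunk above: nr_cpu_ids is derived from the highest possible CPU number plus one, not from how many CPUs are possible; the distinction matters for sparse possible masks. A userspace model, where model_find_last_bit() is an illustrative single-word stand-in for the kernel's find_last_bit(), which returns the index of the highest set bit or the bitmap size if none is set:

#include <stdio.h>

static unsigned int model_find_last_bit(unsigned long mask, unsigned int size)
{
	unsigned int i;

	for (i = size; i-- > 0; )
		if (mask & (1UL << i))
			return i;	/* highest set bit */
	return size;			/* no bits set */
}

int main(void)
{
	/* CPUs 0, 1 and 6 possible: nr_cpu_ids must be 7, not 3 */
	unsigned long cpu_possible = 0x43;

	printf("nr_cpu_ids = %u\n", model_find_last_bit(cpu_possible, 32) + 1);
	return 0;
}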
+ */ +static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); +#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) +#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) +#else +static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; +#define get_idle_for_cpu(x) (idle_thread_array[(x)]) +#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) +#endif + struct thread_info *secondary_ti; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); @@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id) per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); } -static void __init smp_create_idle(unsigned int cpu) -{ - struct task_struct *p; - - /* create a process for the processor */ - p = fork_idle(cpu); - if (IS_ERR(p)) - panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); -#ifdef CONFIG_PPC64 - paca[cpu].__current = p; - paca[cpu].kstack = (unsigned long) task_thread_info(p) - + THREAD_SIZE - STACK_FRAME_OVERHEAD; -#endif - current_set[cpu] = task_thread_info(p); - task_thread_info(p)->cpu = cpu; -} - void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int cpu; @@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) max_cpus = NR_CPUS; else max_cpus = 1; - - for_each_possible_cpu(cpu) - if (cpu != boot_cpuid) - smp_create_idle(cpu); } void __devinit smp_prepare_boot_cpu(void) @@ -305,7 +303,7 @@ void __devinit smp_prepare_boot_cpu(void) #ifdef CONFIG_HOTPLUG_CPU /* State of each CPU during hotplug phases */ -DEFINE_PER_CPU(int, cpu_state) = { 0 }; +static DEFINE_PER_CPU(int, cpu_state) = { 0 }; int generic_cpu_disable(void) { @@ -317,30 +315,8 @@ int generic_cpu_disable(void) set_cpu_online(cpu, false); #ifdef CONFIG_PPC64 vdso_data->processorCount--; - fixup_irqs(cpu_online_mask); -#endif - return 0; -} - -int generic_cpu_enable(unsigned int cpu) -{ - /* Do the normal bootup if we haven't - * already bootstrapped. */ - if (system_state != SYSTEM_RUNNING) - return -ENOSYS; - - /* get the target out of it's holding state */ - per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; - smp_wmb(); - - while (!cpu_online(cpu)) - cpu_relax(); - -#ifdef CONFIG_PPC64 - fixup_irqs(cpu_online_mask); - /* counter the irq disable in fixup_irqs */ - local_irq_enable(); #endif + migrate_irqs(); return 0; } @@ -362,37 +338,89 @@ void generic_mach_cpu_die(void) unsigned int cpu; local_irq_disable(); + idle_task_exit(); cpu = smp_processor_id(); printk(KERN_DEBUG "CPU%d offline\n", cpu); __get_cpu_var(cpu_state) = CPU_DEAD; smp_wmb(); while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) cpu_relax(); - set_cpu_online(cpu, true); - local_irq_enable(); +} + +void generic_set_cpu_dead(unsigned int cpu) +{ + per_cpu(cpu_state, cpu) = CPU_DEAD; } #endif -static int __devinit cpu_enable(unsigned int cpu) +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) { - if (smp_ops && smp_ops->cpu_enable) - return smp_ops->cpu_enable(cpu); + struct create_idle *c_idle = + container_of(work, struct create_idle, work); + + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); +} + +static int __cpuinit create_idle(unsigned int cpu) +{ + struct thread_info *ti; + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); + + c_idle.idle = get_idle_for_cpu(cpu); + + /* We can't use kernel_thread since we must avoid + * rescheduling the child.
We use a workqueue because + * we want to fork from a kernel thread, not whatever + * userspace process happens to be trying to online us. + */ + if (!c_idle.idle) { + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + } else + init_idle(c_idle.idle, cpu); + if (IS_ERR(c_idle.idle)) { + pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); + return PTR_ERR(c_idle.idle); + } + ti = task_thread_info(c_idle.idle); + +#ifdef CONFIG_PPC64 + paca[cpu].__current = c_idle.idle; + paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; +#endif + ti->cpu = cpu; + current_set[cpu] = ti; - return -ENOSYS; + return 0; }
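create_idle() and do_fork_idle() above are an instance of the stock completion-on-stack pattern for running one function in a clean kernel-thread context and waiting for the result. A condensed sketch of just that pattern, using the same kernel APIs (work_struct, completion, INIT_WORK_ONSTACK); on_stack_job and run_in_kthread_context() are illustrative names, not kernel interfaces:

#include <linux/workqueue.h>
#include <linux/completion.h>

struct on_stack_job {
	struct work_struct work;
	struct completion done;
	long result;
};

static void job_fn(struct work_struct *work)
{
	struct on_stack_job *job = container_of(work, struct on_stack_job, work);

	job->result = 42;		/* stands in for fork_idle(cpu) */
	complete(&job->done);		/* wake the waiter */
}

static long run_in_kthread_context(void)
{
	struct on_stack_job job = {
		.done = COMPLETION_INITIALIZER_ONSTACK(job.done),
	};

	INIT_WORK_ONSTACK(&job.work, job_fn);
	schedule_work(&job.work);	/* job_fn runs in a workqueue kernel thread */
	wait_for_completion(&job.done);	/* job lives on our stack, so we must wait */
	return job.result;
}

The bounce through the workqueue matters because fork_idle() clones its caller: without it, the new idle thread would inherit context from whatever user process happened to initiate the CPU-online operation, which is exactly what the comment above warns about. The listing continues:

 int __cpuinit __cpu_up(unsigned int cpu) { - int c; + int rc, c; secondary_ti = current_set[cpu]; - if (!cpu_enable(cpu)) - return 0; if (smp_ops == NULL || (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) return -EINVAL; + /* Make sure we have an idle thread */ + rc = create_idle(cpu); + if (rc) + return rc; + /* Make sure callin-map entry is 0 (can be left over from a CPU * hotplug */ @@ -502,7 +530,7 @@ static struct device_node *cpu_to_l2cache(int cpu) } /* Activate a secondary processor. */ -int __devinit start_secondary(void *unused) +void __devinit start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); struct device_node *l2_cache; @@ -523,6 +551,10 @@ void __devinit start_secondary(void *unused) secondary_cpu_time_init(); +#ifdef CONFIG_PPC64 + if (system_state == SYSTEM_RUNNING) + vdso_data->processorCount++; +#endif ipi_call_lock(); notify_cpu_starting(cpu); set_cpu_online(cpu, true); @@ -558,7 +590,8 @@ int __devinit start_secondary(void *unused) local_irq_enable(); cpu_idle(); - return 0; + + BUG(); } int setup_profiling_timer(unsigned int multiplier) @@ -585,7 +618,11 @@ void __init smp_cpus_done(unsigned int max_cpus) free_cpumask_var(old_mask); + if (smp_ops && smp_ops->bringup_done) + smp_ops->bringup_done(); + dump_numa_cpu_topology(); + } int arch_sd_sibling_asym_packing(void) @@ -660,5 +697,9 @@ void cpu_die(void) { if (ppc_md.cpu_die) ppc_md.cpu_die(); + + /* If we return, we re-enter start_secondary */ + start_secondary_resume(); } + #endif diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index b0754e23743..ba4dee3d233 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S @@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* Disable MSR:DR to make sure we don't take a TLB or * hash miss during the copy, as our hash table will - * for a while be unuseable. For .text, we assume we are + * for a while be unusable. For .text, we assume we are * covered by a BAT. This works only for non-G5 at this * point.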
G5 will need a better approach, possibly using * a small temporary hash table filled with large mappings, diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 09e4dea4a85..f33acfd872a 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb) u64 stolen = 0; u64 dtb; + if (!dtl) + return 0; + if (i == vpa->dtl_idx) return 0; while (i < vpa->dtl_idx) { @@ -265,11 +268,26 @@ void accumulate_stolen_time(void) { u64 sst, ust; - sst = scan_dispatch_log(get_paca()->starttime_user); - ust = scan_dispatch_log(get_paca()->starttime); - get_paca()->system_time -= sst; - get_paca()->user_time -= ust; - get_paca()->stolen_time += ust + sst; + u8 save_soft_enabled = local_paca->soft_enabled; + u8 save_hard_enabled = local_paca->hard_enabled; + + /* We are called early in the exception entry, before + * soft/hard_enabled are sync'ed to the expected state + * for the exception. We are hard disabled but the PACA + * needs to reflect that so various debug stuff doesn't + * complain + */ + local_paca->soft_enabled = 0; + local_paca->hard_enabled = 0; + + sst = scan_dispatch_log(local_paca->starttime_user); + ust = scan_dispatch_log(local_paca->starttime); + local_paca->system_time -= sst; + local_paca->user_time -= ust; + local_paca->stolen_time += ust + sst; + + local_paca->soft_enabled = save_soft_enabled; + local_paca->hard_enabled = save_hard_enabled; } static inline u64 calculate_stolen_time(u64 stop_tb) @@ -341,7 +359,7 @@ void account_system_vtime(struct task_struct *tsk) } get_paca()->user_time_scaled += user_scaled; - if (in_irq() || idle_task(smp_processor_id()) != tsk) { + if (in_interrupt() || idle_task(smp_processor_id()) != tsk) { account_system_time(tsk, 0, delta, sys_scaled); if (stolen) account_steal_time(stolen); @@ -562,14 +580,21 @@ void timer_interrupt(struct pt_regs * regs) struct clock_event_device *evt = &decrementer->event; u64 now; + /* Ensure a positive value is written to the decrementer, or else + * some CPUs will continue to take decrementer exceptions. 
+ */ + set_dec(DECREMENTER_MAX); + + /* Some implementations of hotplug will get timer interrupts while + * offline, just ignore these + */ + if (!cpu_online(smp_processor_id())) + return; + trace_timer_interrupt_entry(regs); __get_cpu_var(irq_stat).timer_irqs++; - /* Ensure a positive value is written to the decrementer, or else - * some CPUs will continuue to take decrementer exceptions */ - set_dec(DECREMENTER_MAX); - #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) if (atomic_read(&ppc_n_lost_interrupts) != 0) do_IRQ(regs); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 1b2cdc8eec9..5ddb801bc15 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -626,12 +626,6 @@ void machine_check_exception(struct pt_regs *regs) if (recover > 0) return; - if (user_mode(regs)) { - regs->msr |= MSR_RI; - _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); - return; - } - #if defined(CONFIG_8xx) && defined(CONFIG_PCI) /* the qspan pci read routines can cause machine checks -- Cort * @@ -643,16 +637,12 @@ void machine_check_exception(struct pt_regs *regs) return; #endif - if (debugger_fault_handler(regs)) { - regs->msr |= MSR_RI; + if (debugger_fault_handler(regs)) return; - } if (check_io_access(regs)) return; - if (debugger_fault_handler(regs)) - return; die("Machine check", regs, SIGBUS); /* Must die if the interrupt is not recoverable */ @@ -969,7 +959,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) * ESR_DST (!?) or 0. In the process of chasing this with the * hardware people - not sure if it can happen on any illegal * instruction or only on FP instructions, whether there is a - * pattern to occurences etc. -dgibson 31/Mar/2003 */ + * pattern to occurrences etc. -dgibson 31/Mar/2003 */ switch (do_mathemu(regs)) { case 0: emulate_single_step(regs); diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index b4b167b3364..baa33a7517b 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -1,5 +1,5 @@ /* - * udbg for NS16550 compatable serial ports + * udbg for NS16550 compatible serial ports * * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp * diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index fd8728729ab..142ab1008c3 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -820,17 +820,17 @@ static int __init vdso_init(void) } arch_initcall(vdso_init); -int in_gate_area_no_task(unsigned long addr) +int in_gate_area_no_mm(unsigned long addr) { return 0; } -int in_gate_area(struct task_struct *task, unsigned long addr) +int in_gate_area(struct mm_struct *mm, unsigned long addr) { return 0; } -struct vm_area_struct *get_gate_vma(struct task_struct *tsk) +struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { return NULL; } diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S index 68d49dd71dc..cf0c9c9c24f 100644 --- a/arch/powerpc/kernel/vdso32/sigtramp.S +++ b/arch/powerpc/kernel/vdso32/sigtramp.S @@ -19,7 +19,7 @@ /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from the return address to get an address in the middle of the presumed - call instruction. Since we don't have a call here, we artifically + call instruction. Since we don't have a call here, we artificially extend the range covered by the unwind info by adding a nop before the real start. 
*/ nop diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S index 59eb59bb408..45ea281e9a2 100644 --- a/arch/powerpc/kernel/vdso64/sigtramp.S +++ b/arch/powerpc/kernel/vdso64/sigtramp.S @@ -20,7 +20,7 @@ /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from the return address to get an address in the middle of the presumed - call instruction. Since we don't have a call here, we artifically + call instruction. Since we don't have a call here, we artificially extend the range covered by the unwind info by padding before the real start. */ nop diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 8a0deefac08..b9150f07d26 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -160,7 +160,7 @@ SECTIONS INIT_RAM_FS } - PERCPU(PAGE_SIZE) + PERCPU(L1_CACHE_BYTES, PAGE_SIZE) . = ALIGN(8); .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { |