Diffstat (limited to 'arch/powerpc')
85 files changed, 372 insertions, 317 deletions
diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts index 22f64b62d7f..e0668f87779 100644 --- a/arch/powerpc/boot/dts/p1020rdb.dts +++ b/arch/powerpc/boot/dts/p1020rdb.dts @@ -1,7 +1,7 @@ /* * P1020 RDB Device Tree Source * - * Copyright 2009 Freescale Semiconductor Inc. + * Copyright 2009-2011 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -553,7 +553,7 @@ reg = <0 0xffe09000 0 0x1000>; bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>; + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <16 2>; @@ -580,8 +580,8 @@ #address-cells = <3>; reg = <0 0xffe0a000 0 0x1000>; bus-range = <0 255>; - ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <16 2>; @@ -590,8 +590,8 @@ #size-cells = <2>; #address-cells = <3>; device_type = "pci"; - ranges = <0x2000000 0x0 0xc0000000 - 0x2000000 0x0 0xc0000000 + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x0 diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts index da4cb0d8d21..e2d48fd4416 100644 --- a/arch/powerpc/boot/dts/p2020rdb.dts +++ b/arch/powerpc/boot/dts/p2020rdb.dts @@ -1,7 +1,7 @@ /* * P2020 RDB Device Tree Source * - * Copyright 2009 Freescale Semiconductor Inc. + * Copyright 2009-2011 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -537,7 +537,7 @@ reg = <0 0xffe09000 0 0x1000>; bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>; + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <25 2>; @@ -564,8 +564,8 @@ #address-cells = <3>; reg = <0 0xffe0a000 0 0x1000>; bus-range = <0 255>; - ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <26 2>; @@ -574,8 +574,8 @@ #size-cells = <2>; #address-cells = <3>; device_type = "pci"; - ranges = <0x2000000 0x0 0xc0000000 - 0x2000000 0x0 0xc0000000 + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x0 diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts index 0fe93d0c8b2..b69c3a5dc85 100644 --- a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts +++ b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts @@ -6,7 +6,7 @@ * This dts file allows core0 to have memory, l2, i2c, spi, gpio, dma1, usb, * eth1, eth2, sdhc, crypto, global-util, pci0. * - * Copyright 2009 Freescale Semiconductor Inc. + * Copyright 2009-2011 Freescale Semiconductor Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -342,7 +342,7 @@ reg = <0 0xffe09000 0 0x1000>; bus-range = <0 255>; ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>; + 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <25 2>; diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts index e95a5128532..7a31d46c01b 100644 --- a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts +++ b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts @@ -7,7 +7,7 @@ * * Please note to add "-b 1" for core1's dts compiling. * - * Copyright 2009 Freescale Semiconductor Inc. + * Copyright 2009-2011 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -162,8 +162,8 @@ #address-cells = <3>; reg = <0 0xffe0a000 0 0x1000>; bus-range = <0 255>; - ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 - 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; + ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 + 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; clock-frequency = <33333333>; interrupt-parent = <&mpic>; interrupts = <26 2>; @@ -172,8 +172,8 @@ #size-cells = <2>; #address-cells = <3>; device_type = "pci"; - ranges = <0x2000000 0x0 0xc0000000 - 0x2000000 0x0 0xc0000000 + ranges = <0x2000000 0x0 0x80000000 + 0x2000000 0x0 0x80000000 0x0 0x20000000 0x1000000 0x0 0x0 diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 2e561876fc8..f18c6d9b951 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -209,8 +209,8 @@ static __inline__ unsigned long ffz(unsigned long x) return BITS_PER_LONG; /* - * Calculate the bit position of the least signficant '1' bit in x - * (since x has been changed this will actually be the least signficant + * Calculate the bit position of the least significant '1' bit in x + * (since x has been changed this will actually be the least significant * '0' bit in * the original x). Note: (x & -x) gives us a mask that * is the least significant * (RIGHT-most) 1-bit of the value in x. 
*/ diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 2296112e247..91010e8f847 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -140,7 +140,7 @@ static inline void __user *arch_compat_alloc_user_space(long len) unsigned long usp = regs->gpr[1]; /* - * We cant access below the stack pointer in the 32bit ABI and + * We can't access below the stack pointer in the 32bit ABI and * can access 288 bytes in the 64bit ABI */ if (!is_32bit_task()) diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h index e50323fe941..4398a6cdcf5 100644 --- a/arch/powerpc/include/asm/cpm.h +++ b/arch/powerpc/include/asm/cpm.h @@ -98,7 +98,7 @@ typedef struct cpm_buf_desc { #define BD_SC_INTRPT (0x1000) /* Interrupt on change */ #define BD_SC_LAST (0x0800) /* Last buffer in frame */ #define BD_SC_TC (0x0400) /* Transmit CRC */ -#define BD_SC_CM (0x0200) /* Continous mode */ +#define BD_SC_CM (0x0200) /* Continuous mode */ #define BD_SC_ID (0x0100) /* Rec'd too many idles */ #define BD_SC_P (0x0100) /* xmt preamble */ #define BD_SC_BR (0x0020) /* Break received */ diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h index bd07650dca5..8ee4211ca0c 100644 --- a/arch/powerpc/include/asm/cpm1.h +++ b/arch/powerpc/include/asm/cpm1.h @@ -4,7 +4,7 @@ * * This file contains structures and information for the communication * processor channels. Some CPM control and status is available - * throught the MPC8xx internal memory map. See immap.h for details. + * through the MPC8xx internal memory map. See immap.h for details. * This file only contains what I need for the moment, not the total * CPM capabilities. I (or someone else) will add definitions as they * are needed. -- Dan diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index ec089acfa56..8edec710cc6 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -122,7 +122,7 @@ #define H_DABRX_KERNEL (1UL<<(63-62)) #define H_DABRX_USER (1UL<<(63-63)) -/* Each control block has to be on a 4K bondary */ +/* Each control block has to be on a 4K boundary */ #define H_CB_ALIGNMENT 4096 /* pSeries hypervisor opcodes */ diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index d0e7701fa1f..be0171afdc0 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -50,7 +50,7 @@ typedef unsigned int kprobe_opcode_t; * Handle cases where: * - User passes a <.symbol> or <module:.symbol> * - User passes a <symbol> or <module:symbol> - * - User passes a non-existant symbol, kallsyms_lookup_name + * - User passes a non-existent symbol, kallsyms_lookup_name * returns 0. Don't deref the NULL pointer in that case */ #define kprobe_lookup_name(name, addr) \ diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 26b8c807f8f..a077adc0b35 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -105,7 +105,7 @@ struct lppaca { // processing of external interrupts. Note that PLIC will store the // XIRR directly into the xXirrValue field so that another XIRR will // not be presented until this one clears. The layout of the low - // 4-bytes of this Dword is upto SLIC - PLIC just checks whether the + // 4-bytes of this Dword is up to SLIC - PLIC just checks whether the // entire Dword is zero or not. 
A non-zero value in the low order // 2-bytes will result in SLIC being granted the highest thread // priority upon return. A 0 will return to SLIC as medium priority. diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index fe56a23e1ff..e4f01915fbb 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -35,9 +35,9 @@ struct smp_ops_t { int (*probe)(void); void (*kick_cpu)(int nr); void (*setup_cpu)(int nr); + void (*bringup_done)(void); void (*take_timebase)(void); void (*give_timebase)(void); - int (*cpu_enable)(unsigned int nr); int (*cpu_disable)(void); void (*cpu_die)(unsigned int nr); int (*cpu_bootable)(unsigned int nr); @@ -267,7 +267,6 @@ struct machdep_calls { extern void e500_idle(void); extern void power4_idle(void); -extern void power4_cpu_offline_powersave(void); extern void ppc6xx_idle(void); extern void book3e_idle(void); diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index 932f88dcf6f..812b2cd80ae 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -169,7 +169,7 @@ do { \ /* * This is the default if a program doesn't have a PT_GNU_STACK * program header entry. The PPC64 ELF ABI has a non executable stack - * stack by default, so in the absense of a PT_GNU_STACK program header + * stack by default, so in the absence of a PT_GNU_STACK program header * we turn execute permission off. */ #define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ diff --git a/arch/powerpc/include/asm/pasemi_dma.h b/arch/powerpc/include/asm/pasemi_dma.h index 19fd7933e2d..eafa5a5f56d 100644 --- a/arch/powerpc/include/asm/pasemi_dma.h +++ b/arch/powerpc/include/asm/pasemi_dma.h @@ -522,7 +522,7 @@ extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size, extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, dma_addr_t *handle); -/* Routines to allocate flags (events) for channel syncronization */ +/* Routines to allocate flags (events) for channel synchronization */ extern int pasemi_dma_alloc_flag(void); extern void pasemi_dma_free_flag(int flag); extern void pasemi_dma_set_flag(int flag); diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 5e156e034fe..b90dbf8e5cd 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -106,7 +106,7 @@ struct pci_controller { * Used for variants of PCI indirect handling and possible quirks: * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1 * EXT_REG - provides access to PCI-e extended registers - * SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS + * SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS * on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS * to determine which bus number to match on when generating type0 * config cycles diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h index 00eedc5a4e6..10902c9375d 100644 --- a/arch/powerpc/include/asm/pmac_feature.h +++ b/arch/powerpc/include/asm/pmac_feature.h @@ -53,8 +53,8 @@ /* Here is the infamous serie of OHare based machines */ -#define PMAC_TYPE_COMET 0x20 /* Beleived to be PowerBook 2400 */ -#define PMAC_TYPE_HOOPER 0x21 /* Beleived to be PowerBook 3400 */ +#define PMAC_TYPE_COMET 0x20 /* Believed to be PowerBook 2400 */ +#define PMAC_TYPE_HOOPER 0x21 /* Believed to be PowerBook 3400 */ #define PMAC_TYPE_KANGA 0x22 /* 
PowerBook 3500 (first G3) */ #define PMAC_TYPE_ALCHEMY 0x23 /* Alchemy motherboard base */ #define PMAC_TYPE_GAZELLE 0x24 /* Spartacus, some 5xxx/6xxx */ diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 76bb195e4f2..811f04ac366 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -86,7 +86,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); #define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1)) #endif -/* _PAGE_CHG_MASK masks of bits that are to be preserved accross +/* _PAGE_CHG_MASK masks of bits that are to be preserved across * pgprot changes */ #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ @@ -174,7 +174,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); /* * Don't just check for any non zero bits in __PAGE_USER, since for book3e * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in - * _PAGE_USER. Need to explictly match _PAGE_BAP_UR bit in that case too. + * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too. */ #define pte_user(val) ((val & _PAGE_USER) == _PAGE_USER) diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 3b1a9b70736..b316794aa2b 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -2,7 +2,7 @@ * Contains register definitions common to the Book E PowerPC * specification. Notice that while the IBM-40x series of CPUs * are not true Book E PowerPCs, they borrowed a number of features - * before Book E was finalized, and are included here as well. Unfortunatly, + * before Book E was finalized, and are included here as well. Unfortunately, * they sometimes used different locations than true Book E CPUs did. * * This program is free software; you can redistribute it and/or diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 66e237bbe15..a902a0d3ae0 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -36,15 +36,16 @@ extern void cpu_die(void); extern void smp_send_debugger_break(int cpu); extern void smp_message_recv(int); +extern void start_secondary_resume(void); DECLARE_PER_CPU(unsigned int, cpu_pvr); #ifdef CONFIG_HOTPLUG_CPU -extern void fixup_irqs(const struct cpumask *map); +extern void migrate_irqs(void); int generic_cpu_disable(void); -int generic_cpu_enable(unsigned int cpu); void generic_cpu_die(unsigned int cpu); void generic_mach_cpu_die(void); +void generic_set_cpu_dead(unsigned int cpu); #endif #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/spu_priv1.h b/arch/powerpc/include/asm/spu_priv1.h index 25020a34ce7..d8f5c60f61c 100644 --- a/arch/powerpc/include/asm/spu_priv1.h +++ b/arch/powerpc/include/asm/spu_priv1.h @@ -223,7 +223,7 @@ spu_disable_spu (struct spu_context *ctx) } /* - * The declarations folowing are put here for convenience + * The declarations following are put here for convenience * and only intended to be used by the platform setup code. */ diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h index f737732c386..ae9c899c8a6 100644 --- a/arch/powerpc/include/asm/uninorth.h +++ b/arch/powerpc/include/asm/uninorth.h @@ -60,7 +60,7 @@ * * Obviously, the GART is not cache coherent and so any change to it * must be flushed to memory (or maybe just make the GART space non - * cachable). AGP memory itself doens't seem to be cache coherent neither. + * cachable). 
AGP memory itself does't seem to be cache coherent neither. * * In order to invalidate the GART (which is probably necessary to inval * the bridge internal TLBs), the following sequence has to be written, diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index 25e39220e89..b73a8199f16 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h @@ -57,7 +57,7 @@ struct vdso_data { } version; /* Note about the platform flags: it now only contains the lpar - * bit. The actual platform number is dead and burried + * bit. The actual platform number is dead and buried */ __u32 platform; /* Platform flags 0x18 */ __u32 processor; /* Processor type 0x1C */ diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 625942ae558..60b3e377b1e 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void) /* This function can be used to enable the early boot text when doing * OF booting or within bootx init. It must be followed by a btext_unmap() - * call before the logical address becomes unuseable + * call before the logical address becomes unusable */ void __init btext_setup_display(int width, int height, int depth, int pitch, unsigned long address) diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 3d569e2aff1..3d3d416339d 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu) } /* wait for all the CPUs to hit real mode but timeout if they don't come in */ -#ifdef CONFIG_PPC_STD_MMU_64 +#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) static void crash_kexec_wait_realmode(int cpu) { unsigned int msecs; @@ -188,6 +188,8 @@ static void crash_kexec_wait_realmode(int cpu) } mb(); } +#else +static inline void crash_kexec_wait_realmode(int cpu) {} #endif /* @@ -344,9 +346,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs) crash_save_cpu(regs, crashing_cpu); crash_kexec_prepare_cpus(crashing_cpu); cpu_set(crashing_cpu, cpus_in_crash); -#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) crash_kexec_wait_realmode(crashing_cpu); -#endif machine_kexec_mask_interrupts(); diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 5c43063d250..9651acc3504 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -379,7 +379,7 @@ interrupt_end_book3e: mfspr r13,SPRN_SPRG_PACA /* get our PACA */ b system_call_common -/* Auxillary Processor Unavailable Interrupt */ +/* Auxiliary Processor Unavailable Interrupt */ START_EXCEPTION(ap_unavailable); NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index c532cb2c927..aeb739e1876 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -5,7 +5,7 @@ * handling and other fixed offset specific things. * * This file is meant to be #included from head_64.S due to - * position dependant assembly. + * position dependent assembly. * * Most of this originates from head_64.S and thus has the same * copyright history. 
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 98c4b29a56f..c5c24beb838 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -890,6 +890,15 @@ __secondary_start: mtspr SPRN_SRR1,r4 SYNC RFI + +_GLOBAL(start_secondary_resume) + /* Reset stack */ + rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ + addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD + li r3,0 + std r3,0(r1) /* Zero the stack frame pointer */ + bl start_secondary + b . #endif /* CONFIG_SMP */ #ifdef CONFIG_KVM_BOOK3S_HANDLER diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 9dd21a8c4d5..a91626d87fc 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -766,7 +766,7 @@ DataAccess: * miss get to this point to load the TLB. * r10 - TLB_TAG value * r11 - Linux PTE - * r12, r9 - avilable to use + * r12, r9 - available to use * PID - loaded with proper value when we get here * Upon exit, we reload everything and RFI. * Actually, it will fit now, but oh well.....a common place diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index cbb3436b592..5e12b741ba5 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -178,7 +178,7 @@ interrupt_base: NORMAL_EXCEPTION_PROLOG EXC_XFER_EE_LITE(0x0c00, DoSyscall) - /* Auxillary Processor Unavailable Interrupt */ + /* Auxiliary Processor Unavailable Interrupt */ EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) /* Decrementer Interrupt */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 782f23df7c8..3a319f9c9d3 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -40,7 +40,7 @@ #include <asm/kvm_book3s_asm.h> #include <asm/ptrace.h> -/* The physical memory is layed out such that the secondary processor +/* The physical memory is laid out such that the secondary processor * spin code sits at 0x0000...0x00ff. On server, the vectors follow * using the layout described in exceptions-64s.S */ @@ -536,6 +536,13 @@ _GLOBAL(pmac_secondary_start) add r13,r13,r4 /* for this processor. */ mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/ + /* Mark interrupts soft and hard disabled (they might be enabled + * in the PACA when doing hotplug) + */ + li r0,0 + stb r0,PACASOFTIRQEN(r13) + stb r0,PACAHARDIRQEN(r13) + /* Create a temp kernel stack for use before relocation is on. 
*/ ld r1,PACAEMERGSP(r13) subi r1,r1,STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 3e02710d956..5ecf54cfa7d 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -326,7 +326,7 @@ interrupt_base: NORMAL_EXCEPTION_PROLOG EXC_XFER_EE_LITE(0x0c00, DoSyscall) - /* Auxillary Processor Unavailable Interrupt */ + /* Auxiliary Processor Unavailable Interrupt */ EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) /* Decrementer Interrupt */ diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index 5328709eeed..ba319547860 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) isync b 1b -_GLOBAL(power4_cpu_offline_powersave) - /* Go to NAP now */ - mfmsr r7 - rldicl r0,r7,48,1 - rotldi r0,r0,16 - mtmsrd r0,1 /* hard-disable interrupts */ - li r0,1 - li r6,0 - stb r0,PACAHARDIRQEN(r13) /* we'll hard-enable shortly */ - stb r6,PACASOFTIRQEN(r13) /* soft-disable irqs */ -BEGIN_FTR_SECTION - DSSALL - sync -END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - ori r7,r7,MSR_EE - oris r7,r7,MSR_POW@h - sync - isync - mtmsrd r7 - isync - blr diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 63625e0650b..f621b7d2d86 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -246,12 +246,13 @@ u64 arch_irq_stat_cpu(unsigned int cpu) } #ifdef CONFIG_HOTPLUG_CPU -void fixup_irqs(const struct cpumask *map) +void migrate_irqs(void) { struct irq_desc *desc; unsigned int irq; static int warned; cpumask_var_t mask; + const struct cpumask *map = cpu_online_mask; alloc_cpumask_var(&mask, GFP_KERNEL); diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 2a2f3c3f6d8..97ec8557f97 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S @@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /**** Might be a good idea to set L2DO here - to prevent instructions from getting into the cache. But since we invalidate the next time we enable the cache it doesn't really matter. - Don't do this unless you accomodate all processor variations. + Don't do this unless you accommodate all processor variations. The bit moved on the 7450..... 
****/ diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 16468362ad5..301db65f05a 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -262,7 +262,7 @@ static void parse_ppp_data(struct seq_file *m) seq_printf(m, "system_active_processors=%d\n", ppp_data.active_system_procs); - /* pool related entries are apropriate for shared configs */ + /* pool related entries are appropriate for shared configs */ if (lppaca_of(0).shared_proc) { unsigned long pool_idle_time, pool_procs; diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index f4adf89d761..10f0aadee95 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -203,7 +203,7 @@ void __init free_unused_pacas(void) { int new_size; - new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus()); + new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); if (new_size >= paca_size) return; diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 97e0ae41494..c4063b7f49a 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -759,7 +759,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags) /* * If group events scheduling transaction was started, - * skip the schedulability test here, it will be peformed + * skip the schedulability test here, it will be performed * at commit time(->commit_txn) as a whole */ if (cpuhw->group_flag & PERF_EVENT_TXN) diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S index e83ba3f078e..1b1787d5289 100644 --- a/arch/powerpc/kernel/ppc_save_regs.S +++ b/arch/powerpc/kernel/ppc_save_regs.S @@ -15,7 +15,7 @@ /* * Grab the register values as they are now. - * This won't do a particularily good job because we really + * This won't do a particularly good job because we really * want our caller's caller's registers, and our caller has * already executed its prologue. * ToDo: We could reach back into the caller's save area to do diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 05b7139d6a2..e74fa12afc8 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -683,7 +683,7 @@ void __init early_init_devtree(void *params) #endif #ifdef CONFIG_PHYP_DUMP - /* scan tree to see if dump occured during last boot */ + /* scan tree to see if dump occurred during last boot */ of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); #endif @@ -739,7 +739,7 @@ void __init early_init_devtree(void *params) DBG("Scanning CPUs ...\n"); - /* Retreive CPU related informations from the flat tree + /* Retrieve CPU related informations from the flat tree * (altivec support, boot CPU ID, ...) */ of_scan_flat_dt(early_init_dt_scan_cpus, NULL); diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 895b082f1e4..55613e33e26 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -463,7 +463,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, #ifdef CONFIG_VSX /* * Currently to set and and get all the vsx state, you need to call - * the fp and VMX calls aswell. This only get/sets the lower 32 + * the fp and VMX calls as well. This only get/sets the lower 32 * 128bit VSX registers. 
*/ diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 7980ec0e1e1..67f6c3b5135 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c @@ -465,7 +465,7 @@ static void start_event_scan(void) pr_debug("rtasd: will sleep for %d milliseconds\n", (30000 / rtas_event_scan_rate)); - /* Retreive errors from nvram if any */ + /* Retrieve errors from nvram if any */ retreive_nvram_error_log(); schedule_delayed_work_on(cpumask_first(cpu_online_mask), diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 9d4882a4664..21f30cb6807 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void) */ cpu_init_thread_core_maps(nthreads); + /* Now that possible cpus are set, set nr_cpu_ids for later use */ + nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; + free_unused_pacas(); } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 98136050917..cbdbb14be4b 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -57,6 +57,25 @@ #define DBG(fmt...) #endif + +/* Store all idle threads, this can be reused instead of creating +* a new thread. Also avoids complicated thread destroy functionality +* for idle threads. +*/ +#ifdef CONFIG_HOTPLUG_CPU +/* + * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is + * removed after init for !CONFIG_HOTPLUG_CPU. + */ +static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); +#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) +#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) +#else +static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; +#define get_idle_for_cpu(x) (idle_thread_array[(x)]) +#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) +#endif + struct thread_info *secondary_ti; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); @@ -238,23 +257,6 @@ static void __devinit smp_store_cpu_info(int id) per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); } -static void __init smp_create_idle(unsigned int cpu) -{ - struct task_struct *p; - - /* create a process for the processor */ - p = fork_idle(cpu); - if (IS_ERR(p)) - panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); -#ifdef CONFIG_PPC64 - paca[cpu].__current = p; - paca[cpu].kstack = (unsigned long) task_thread_info(p) - + THREAD_SIZE - STACK_FRAME_OVERHEAD; -#endif - current_set[cpu] = task_thread_info(p); - task_thread_info(p)->cpu = cpu; -} - void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned int cpu; @@ -288,10 +290,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) max_cpus = NR_CPUS; else max_cpus = 1; - - for_each_possible_cpu(cpu) - if (cpu != boot_cpuid) - smp_create_idle(cpu); } void __devinit smp_prepare_boot_cpu(void) @@ -305,7 +303,7 @@ void __devinit smp_prepare_boot_cpu(void) #ifdef CONFIG_HOTPLUG_CPU /* State of each CPU during hotplug phases */ -DEFINE_PER_CPU(int, cpu_state) = { 0 }; +static DEFINE_PER_CPU(int, cpu_state) = { 0 }; int generic_cpu_disable(void) { @@ -317,30 +315,8 @@ int generic_cpu_disable(void) set_cpu_online(cpu, false); #ifdef CONFIG_PPC64 vdso_data->processorCount--; - fixup_irqs(cpu_online_mask); -#endif - return 0; -} - -int generic_cpu_enable(unsigned int cpu) -{ - /* Do the normal bootup if we haven't - * already bootstrapped. 
*/ - if (system_state != SYSTEM_RUNNING) - return -ENOSYS; - - /* get the target out of it's holding state */ - per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; - smp_wmb(); - - while (!cpu_online(cpu)) - cpu_relax(); - -#ifdef CONFIG_PPC64 - fixup_irqs(cpu_online_mask); - /* counter the irq disable in fixup_irqs */ - local_irq_enable(); #endif + migrate_irqs(); return 0; } @@ -362,37 +338,89 @@ void generic_mach_cpu_die(void) unsigned int cpu; local_irq_disable(); + idle_task_exit(); cpu = smp_processor_id(); printk(KERN_DEBUG "CPU%d offline\n", cpu); __get_cpu_var(cpu_state) = CPU_DEAD; smp_wmb(); while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) cpu_relax(); - set_cpu_online(cpu, true); - local_irq_enable(); +} + +void generic_set_cpu_dead(unsigned int cpu) +{ + per_cpu(cpu_state, cpu) = CPU_DEAD; } #endif -static int __devinit cpu_enable(unsigned int cpu) +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) { - if (smp_ops && smp_ops->cpu_enable) - return smp_ops->cpu_enable(cpu); + struct create_idle *c_idle = + container_of(work, struct create_idle, work); + + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); +} + +static int __cpuinit create_idle(unsigned int cpu) +{ + struct thread_info *ti; + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); + + c_idle.idle = get_idle_for_cpu(cpu); + + /* We can't use kernel_thread since we must avoid to + * reschedule the child. We use a workqueue because + * we want to fork from a kernel thread, not whatever + * userspace process happens to be trying to online us. + */ + if (!c_idle.idle) { + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + } else + init_idle(c_idle.idle, cpu); + if (IS_ERR(c_idle.idle)) { + pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); + return PTR_ERR(c_idle.idle); + } + ti = task_thread_info(c_idle.idle); + +#ifdef CONFIG_PPC64 + paca[cpu].__current = c_idle.idle; + paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; +#endif + ti->cpu = cpu; + current_set[cpu] = ti; - return -ENOSYS; + return 0; } int __cpuinit __cpu_up(unsigned int cpu) { - int c; + int rc, c; secondary_ti = current_set[cpu]; - if (!cpu_enable(cpu)) - return 0; if (smp_ops == NULL || (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) return -EINVAL; + /* Make sure we have an idle thread */ + rc = create_idle(cpu); + if (rc) + return rc; + /* Make sure callin-map entry is 0 (can be leftover a CPU * hotplug */ @@ -502,7 +530,7 @@ static struct device_node *cpu_to_l2cache(int cpu) } /* Activate a secondary processor. 
*/ -int __devinit start_secondary(void *unused) +void __devinit start_secondary(void *unused) { unsigned int cpu = smp_processor_id(); struct device_node *l2_cache; @@ -523,6 +551,10 @@ int __devinit start_secondary(void *unused) secondary_cpu_time_init(); +#ifdef CONFIG_PPC64 + if (system_state == SYSTEM_RUNNING) + vdso_data->processorCount++; +#endif ipi_call_lock(); notify_cpu_starting(cpu); set_cpu_online(cpu, true); @@ -558,7 +590,8 @@ int __devinit start_secondary(void *unused) local_irq_enable(); cpu_idle(); - return 0; + + BUG(); } int setup_profiling_timer(unsigned int multiplier) @@ -585,7 +618,11 @@ void __init smp_cpus_done(unsigned int max_cpus) free_cpumask_var(old_mask); + if (smp_ops && smp_ops->bringup_done) + smp_ops->bringup_done(); + dump_numa_cpu_topology(); + } int arch_sd_sibling_asym_packing(void) @@ -660,5 +697,9 @@ void cpu_die(void) { if (ppc_md.cpu_die) ppc_md.cpu_die(); + + /* If we return, we re-enter start_secondary */ + start_secondary_resume(); } + #endif diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index b0754e23743..ba4dee3d233 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S @@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* Disable MSR:DR to make sure we don't take a TLB or * hash miss during the copy, as our hash table will - * for a while be unuseable. For .text, we assume we are + * for a while be unusable. For .text, we assume we are * covered by a BAT. This works only for non-G5 at this * point. G5 will need a better approach, possibly using * a small temporary hash table filled with large mappings, diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index aa9269600ca..375480c56eb 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -577,14 +577,21 @@ void timer_interrupt(struct pt_regs * regs) struct clock_event_device *evt = &decrementer->event; u64 now; + /* Ensure a positive value is written to the decrementer, or else + * some CPUs will continue to take decrementer exceptions. + */ + set_dec(DECREMENTER_MAX); + + /* Some implementations of hotplug will get timer interrupts while + * offline, just ignore these + */ + if (!cpu_online(smp_processor_id())) + return; + trace_timer_interrupt_entry(regs); __get_cpu_var(irq_stat).timer_irqs++; - /* Ensure a positive value is written to the decrementer, or else - * some CPUs will continuue to take decrementer exceptions */ - set_dec(DECREMENTER_MAX); - #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) if (atomic_read(&ppc_n_lost_interrupts) != 0) do_IRQ(regs); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index bd74fac169b..5ddb801bc15 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -959,7 +959,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) * ESR_DST (!?) or 0. In the process of chasing this with the * hardware people - not sure if it can happen on any illegal * instruction or only on FP instructions, whether there is a - * pattern to occurences etc. -dgibson 31/Mar/2003 */ + * pattern to occurrences etc. 
-dgibson 31/Mar/2003 */ switch (do_mathemu(regs)) { case 0: emulate_single_step(regs); diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index b4b167b3364..baa33a7517b 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -1,5 +1,5 @@ /* - * udbg for NS16550 compatable serial ports + * udbg for NS16550 compatible serial ports * * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp * diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S index 68d49dd71dc..cf0c9c9c24f 100644 --- a/arch/powerpc/kernel/vdso32/sigtramp.S +++ b/arch/powerpc/kernel/vdso32/sigtramp.S @@ -19,7 +19,7 @@ /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from the return address to get an address in the middle of the presumed - call instruction. Since we don't have a call here, we artifically + call instruction. Since we don't have a call here, we artificially extend the range covered by the unwind info by adding a nop before the real start. */ nop diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S index 59eb59bb408..45ea281e9a2 100644 --- a/arch/powerpc/kernel/vdso64/sigtramp.S +++ b/arch/powerpc/kernel/vdso64/sigtramp.S @@ -20,7 +20,7 @@ /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from the return address to get an address in the middle of the presumed - call instruction. Since we don't have a call here, we artifically + call instruction. Since we don't have a call here, we artificially extend the range covered by the unwind info by padding before the real start. */ nop diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 3079f6b44cf..5b7dd4ea02b 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -192,8 +192,8 @@ htab_insert_pte: rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARM(r9)(r1) /* segment size */ @@ -215,8 +215,8 @@ _GLOBAL(htab_call_hpte_insert1) rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARM(r9)(r1) /* segment size */ @@ -495,8 +495,8 @@ htab_special_pfn: rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARM(r9)(r1) /* segment size */ @@ -522,8 +522,8 @@ _GLOBAL(htab_call_hpte_insert1) rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_4K /* page size */ ld r9,STK_PARM(r9)(r1) /* segment size */ @@ -813,8 +813,8 @@ ht64_insert_pte: rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call 
ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,0 /* !bolted, !secondary */ li r8,MMU_PAGE_64K ld r9,STK_PARM(r9)(r1) /* segment size */ @@ -836,8 +836,8 @@ _GLOBAL(ht64_call_hpte_insert1) rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ /* Call ppc_md.hpte_insert */ - ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ - mr r4,r29 /* Retreive va */ + ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ + mr r4,r29 /* Retrieve va */ li r7,HPTE_V_SECONDARY /* !bolted, secondary */ li r8,MMU_PAGE_64K ld r9,STK_PARM(r9)(r1) /* segment size */ diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index a5991facddc..58a022d0f46 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -753,7 +753,7 @@ void __cpuinit early_init_mmu_secondary(void) mtspr(SPRN_SDR1, _SDR1); /* Initialize STAB/SLB. We use a virtual address as it works - * in real mode on pSeries and we want a virutal address on + * in real mode on pSeries and we want a virtual address on * iSeries anyway */ if (cpu_has_feature(CPU_FTR_SLB)) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index a6649965090..57e545b84bf 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -424,7 +424,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg) clear_page(page); /* - * We shouldnt have to do this, but some versions of glibc + * We shouldn't have to do this, but some versions of glibc * require it (ld.so assumes zero filled pages are icache clean) * - Anton */ diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 0dc95c0aa3b..5ec1dad2a19 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -440,11 +440,11 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) } /* - * Retreive and validate the ibm,dynamic-memory property of the device tree. + * Retrieve and validate the ibm,dynamic-memory property of the device tree. * * The layout of the ibm,dynamic-memory property is a number N of memblock * list entries followed by N memblock list entries. Each memblock list entry - * contains information as layed out in the of_drconf_cell struct above. + * contains information as laid out in the of_drconf_cell struct above. */ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) { @@ -468,7 +468,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) } /* - * Retreive and validate the ibm,lmb-size property for drconf memory + * Retrieve and validate the ibm,lmb-size property for drconf memory * from the device tree. */ static u64 of_get_lmb_size(struct device_node *memory) @@ -490,7 +490,7 @@ struct assoc_arrays { }; /* - * Retreive and validate the list of associativity arrays for drconf + * Retrieve and validate the list of associativity arrays for drconf * memory from the ibm,associativity-lookup-arrays property of the * device tree.. * @@ -604,7 +604,7 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb, * Returns the size the region should have to enforce the memory limit. * This will either be the original value of size, a truncated value, * or zero. If the returned value of size is 0 the region should be - * discarded as it lies wholy above the memory limit. + * discarded as it lies wholly above the memory limit. 
*/ static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsigned long size) diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index 8526bd9d2aa..af089220941 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S @@ -192,7 +192,7 @@ normal_tlb_miss: or r10,r15,r14 BEGIN_MMU_FTR_SECTION - /* Set the TLB reservation and seach for existing entry. Then load + /* Set the TLB reservation and search for existing entry. Then load * the entry. */ PPC_TLBSRX_DOT(0,r16) @@ -425,13 +425,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV) virt_page_table_tlb_miss_fault: /* If we fault here, things are a little bit tricky. We need to call - * either data or instruction store fault, and we need to retreive + * either data or instruction store fault, and we need to retrieve * the original fault address and ESR (for data). * * The thing is, we know that in normal circumstances, this is * always called as a second level tlb miss for SW load or as a first * level TLB miss for HW load, so we should be able to peek at the - * relevant informations in the first exception frame in the PACA. + * relevant information in the first exception frame in the PACA. * * However, we do need to double check that, because we may just hit * a stray kernel pointer or a userland attack trying to hit those diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index c4d2b716756..cb515cff745 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c @@ -67,7 +67,7 @@ #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ -/* Minumum HW interval timer setting to send value to trace buffer is 10 cycle. +/* Minimum HW interval timer setting to send value to trace buffer is 10 cycle. * To configure counter to send value every N cycles set counter to * 2^32 - 1 - N. */ @@ -1470,7 +1470,7 @@ static int cell_global_start(struct op_counter_config *ctr) * trace buffer at the maximum rate possible. The trace buffer is configured * to store the PCs, wrapping when it is full. The performance counter is * initialized to the max hardware count minus the number of events, N, between - * samples. Once the N events have occured, a HW counter overflow occurs + * samples. Once the N events have occurred, a HW counter overflow occurs * causing the generation of a HW counter interrupt which also stops the * writing of the SPU PC values to the trace buffer. Hence the last PC * written to the trace buffer is the SPU PC that we want. Unfortunately, @@ -1656,7 +1656,7 @@ static void cell_handle_interrupt_ppu(struct pt_regs *regs, * The counters were frozen by the interrupt. * Reenable the interrupt and restart the counters. * If there was a race between the interrupt handler and - * the virtual counter routine. The virutal counter + * the virtual counter routine. The virtual counter * routine may have cleared the interrupts. Hence must * use the virt_cntr_inter_mask to re-enable the interrupts. 
*/ diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 80774092db7..8ee51a252cf 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c @@ -207,7 +207,7 @@ static unsigned long get_pc(struct pt_regs *regs) unsigned long mmcra; unsigned long slot; - /* Cant do much about it */ + /* Can't do much about it */ if (!cur_cpu_spec->oprofile_mmcra_sihv) return pc; diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index 6385d883cb8..9940ce8a2d4 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -57,7 +57,7 @@ struct mpc52xx_lpbfifo { static struct mpc52xx_lpbfifo lpbfifo; /** - * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transfered + * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred */ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) { @@ -179,7 +179,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) * * On transmit, the dma completion irq triggers before the fifo completion * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm - * task completion irq becuase everyting is not really done until the LPB FIFO + * task completion irq because everything is not really done until the LPB FIFO * completion irq triggers. * * In other words: @@ -195,7 +195,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) * Exit conditions: * 1) Transfer aborted * 2) FIFO complete without DMA; more data to do - * 3) FIFO complete without DMA; all data transfered + * 3) FIFO complete without DMA; all data transferred * 4) FIFO complete using DMA * * Condition 1 can occur regardless of whether or not DMA is used. diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 3ddea96273c..1dd15400f6f 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -512,7 +512,7 @@ void __init mpc52xx_init_irq(void) /** * mpc52xx_get_irq - Get pending interrupt number hook function * - * Called by the interupt handler to determine what IRQ handler needs to be + * Called by the interrupt handler to determine what IRQ handler needs to be * executed. * * Status of pending interrupts is determined by reading the encoded status diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 20576829eca..f7b07720aa3 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -46,7 +46,7 @@ config PPC_OF_BOOT_TRAMPOLINE help Support from booting from Open Firmware or yaboot using an Open Firmware client interface. This enables the kernel to - communicate with open firmware to retrieve system informations + communicate with open firmware to retrieve system information such as the device tree. In case of doubt, say Y diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c index 3b894f58528..147069938cf 100644 --- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c +++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c @@ -90,7 +90,7 @@ int spu_alloc_lscsa(struct spu_state *csa) */ for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) { /* XXX This is likely to fail, we should use a special pool - * similiar to what hugetlbfs does. + * similar to what hugetlbfs does. 
*/ csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL, SPU_64K_PAGE_ORDER); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 0b046628493..65203857b0c 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -846,7 +846,7 @@ static struct spu_context *grab_runnable_context(int prio, int node) struct list_head *rq = &spu_prio->runq[best]; list_for_each_entry(ctx, rq, rq) { - /* XXX(hch): check for affinity here aswell */ + /* XXX(hch): check for affinity here as well */ if (__node_allowed(ctx, node)) { __spu_del_from_rq(ctx); goto found; diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c index 21a9c952d88..72c905f1ee7 100644 --- a/arch/powerpc/platforms/cell/spufs/spu_restore.c +++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c @@ -284,7 +284,7 @@ static inline void restore_complete(void) exit_instrs[3] = BR_INSTR; break; default: - /* SPU_Status[R]=1. No additonal instructions. */ + /* SPU_Status[R]=1. No additional instructions. */ break; } spu_sync(); diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index b5e026bdca2..62dabe3c2bf 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c @@ -51,7 +51,7 @@ static int mf_initialized; /* - * This is the structure layout for the Machine Facilites LPAR event + * This is the structure layout for the Machine Facilities LPAR event * flows. */ struct vsp_cmd_data { diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index b5f05d943a9..2376069cdc1 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c @@ -396,7 +396,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mTargetInst)) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "int msg rcvd, source inst (%d) doesnt match (%d)\n", + "int msg rcvd, source inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mTargetInst, event->xSourceInstanceId); return; @@ -407,7 +407,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mSourceInst)) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "int msg rcvd, target inst (%d) doesnt match (%d)\n", + "int msg rcvd, target inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mSourceInst, event->xTargetInstanceId); return; @@ -418,7 +418,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mSourceInst) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "ack msg rcvd, source inst (%d) doesnt match (%d)\n", + "ack msg rcvd, source inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mSourceInst, event->xSourceInstanceId); return; @@ -428,7 +428,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mTargetInst) { printk(VIOPATH_KERN_WARN "message from invalid partition. 
" - "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n", + "viopath: ack msg rcvd, target inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mTargetInst, event->xTargetInstanceId); return; diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index 09695ae50f9..321a9b3a2d0 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -379,9 +379,9 @@ void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, } EXPORT_SYMBOL(pasemi_dma_free_buf); -/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel syncronization +/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization * - * Allocates a flag for use with channel syncronization (event descriptors). + * Allocates a flag for use with channel synchronization (event descriptors). * Returns allocated flag (0-63), < 0 on error. */ int pasemi_dma_alloc_flag(void) diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index 50f16939255..ea47df66fee 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile @@ -11,7 +11,7 @@ obj-y += pic.o setup.o time.o feature.o pci.o \ obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o -# CONFIG_NVRAM is an arch. independant tristate symbol, for pmac32 we really +# CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really # need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really # CONFIG_NVRAM=y obj-$(CONFIG_NVRAM:m=y) += nvram.o diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 480567e5fa9..e9c8a607268 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -904,7 +904,7 @@ static void __init smu_i2c_probe(void) printk(KERN_INFO "SMU i2c %s\n", controller->full_name); /* Look for childs, note that they might not be of the right - * type as older device trees mix i2c busses and other thigns + * type as older device trees mix i2c busses and other things * at the same level */ for (busnode = NULL; diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index ab689894270..f33e08d573c 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -299,7 +299,7 @@ static void __init setup_chaos(struct pci_controller *hose, * This function deals with some "special cases" devices. 
* * 0 -> No special case - * 1 -> Skip the device but act as if the access was successfull + * 1 -> Skip the device but act as if the access was successful * (return 0xff's on reads, eventually, cache config space * accesses in a later version) * -1 -> Hide the device (unsuccessful access) diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index f0bc08f6c1f..20468f49aec 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h @@ -33,7 +33,6 @@ extern void pmac_setup_pci_dma(void); extern void pmac_check_ht_link(void); extern void pmac_setup_smp(void); -extern void pmac32_cpu_die(void); extern void low_cpu_die(void) __attribute__((noreturn)); extern int pmac_nvram_init(void); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index d5aceb7fb12..aa45281bd29 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -650,51 +650,6 @@ static int pmac_pci_probe_mode(struct pci_bus *bus) return PCI_PROBE_NORMAL; return PCI_PROBE_DEVTREE; } - -#ifdef CONFIG_HOTPLUG_CPU -/* access per cpu vars from generic smp.c */ -DECLARE_PER_CPU(int, cpu_state); - -static void pmac64_cpu_die(void) -{ - /* - * turn off as much as possible, we'll be - * kicked out as this will only be invoked - * on core99 platforms for now ... - */ - - printk(KERN_INFO "CPU#%d offline\n", smp_processor_id()); - __get_cpu_var(cpu_state) = CPU_DEAD; - smp_wmb(); - - /* - * during the path that leads here preemption is disabled, - * reenable it now so that when coming up preempt count is - * zero correctly - */ - preempt_enable(); - - /* - * hard-disable interrupts for the non-NAP case, the NAP code - * needs to re-enable interrupts (but soft-disables them) - */ - hard_irq_disable(); - - while (1) { - /* let's not take timer interrupts too often ... 
*/ - set_dec(0x7fffffff); - - /* should always be true at this point */ - if (cpu_has_feature(CPU_FTR_CAN_NAP)) - power4_cpu_offline_powersave(); - else { - HMT_low(); - HMT_very_low(); - } - } -} -#endif /* CONFIG_HOTPLUG_CPU */ - #endif /* CONFIG_PPC64 */ define_machine(powermac) { @@ -726,15 +681,4 @@ define_machine(powermac) { .pcibios_after_init = pmac_pcibios_after_init, .phys_mem_access_prot = pci_phys_mem_access_prot, #endif -#ifdef CONFIG_HOTPLUG_CPU -#ifdef CONFIG_PPC64 - .cpu_die = pmac64_cpu_die, -#endif -#ifdef CONFIG_PPC32 - .cpu_die = pmac32_cpu_die, -#endif -#endif -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) - .cpu_die = generic_mach_cpu_die, -#endif }; diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index c95215f4f8b..a830c5e8065 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -840,92 +840,149 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr) /* Setup openpic */ mpic_setup_this_cpu(); +} - if (cpu_nr == 0) { -#ifdef CONFIG_PPC64 - extern void g5_phy_disable_cpu1(void); +#ifdef CONFIG_HOTPLUG_CPU +static int smp_core99_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + int rc; - /* Close i2c bus if it was used for tb sync */ + switch(action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + /* Open i2c bus if it was used for tb sync */ if (pmac_tb_clock_chip_host) { - pmac_i2c_close(pmac_tb_clock_chip_host); - pmac_tb_clock_chip_host = NULL; + rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1); + if (rc) { + pr_err("Failed to open i2c bus for time sync\n"); + return notifier_from_errno(rc); + } } + break; + case CPU_ONLINE: + case CPU_UP_CANCELED: + /* Close i2c bus if it was used for tb sync */ + if (pmac_tb_clock_chip_host) + pmac_i2c_close(pmac_tb_clock_chip_host); + break; + default: + break; + } + return NOTIFY_OK; +} - /* If we didn't start the second CPU, we must take - * it off the bus - */ - if (of_machine_is_compatible("MacRISC4") && - num_online_cpus() < 2) - g5_phy_disable_cpu1(); -#endif /* CONFIG_PPC64 */ +static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { + .notifier_call = smp_core99_cpu_notify, +}; +#endif /* CONFIG_HOTPLUG_CPU */ + +static void __init smp_core99_bringup_done(void) +{ +#ifdef CONFIG_PPC64 + extern void g5_phy_disable_cpu1(void); + + /* Close i2c bus if it was used for tb sync */ + if (pmac_tb_clock_chip_host) + pmac_i2c_close(pmac_tb_clock_chip_host); - if (ppc_md.progress) - ppc_md.progress("core99_setup_cpu 0 done", 0x349); + /* If we didn't start the second CPU, we must take + * it off the bus. 
+ */ + if (of_machine_is_compatible("MacRISC4") && + num_online_cpus() < 2) { + set_cpu_present(1, false); + g5_phy_disable_cpu1(); } -} +#endif /* CONFIG_PPC64 */ +#ifdef CONFIG_HOTPLUG_CPU + register_cpu_notifier(&smp_core99_cpu_nb); +#endif + if (ppc_md.progress) + ppc_md.progress("smp_core99_bringup_done", 0x349); +} -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) +#ifdef CONFIG_HOTPLUG_CPU -int smp_core99_cpu_disable(void) +static int smp_core99_cpu_disable(void) { - set_cpu_online(smp_processor_id(), false); + int rc = generic_cpu_disable(); + if (rc) + return rc; - /* XXX reset cpu affinity here */ mpic_cpu_set_priority(0xf); - asm volatile("mtdec %0" : : "r" (0x7fffffff)); - mb(); - udelay(20); - asm volatile("mtdec %0" : : "r" (0x7fffffff)); + return 0; } -static int cpu_dead[NR_CPUS]; +#ifdef CONFIG_PPC32 -void pmac32_cpu_die(void) +static void pmac_cpu_die(void) { + int cpu = smp_processor_id(); + local_irq_disable(); - cpu_dead[smp_processor_id()] = 1; + idle_task_exit(); + pr_debug("CPU%d offline\n", cpu); + generic_set_cpu_dead(cpu); + smp_wmb(); mb(); low_cpu_die(); } -void smp_core99_cpu_die(unsigned int cpu) +#else /* CONFIG_PPC32 */ + +static void pmac_cpu_die(void) { - int timeout; + int cpu = smp_processor_id(); - timeout = 1000; - while (!cpu_dead[cpu]) { - if (--timeout == 0) { - printk("CPU %u refused to die!\n", cpu); - break; - } - msleep(1); + local_irq_disable(); + idle_task_exit(); + + /* + * turn off as much as possible, we'll be + * kicked out as this will only be invoked + * on core99 platforms for now ... + */ + + printk(KERN_INFO "CPU#%d offline\n", cpu); + generic_set_cpu_dead(cpu); + smp_wmb(); + + /* + * Re-enable interrupts. The NAP code needs to enable them + * anyways, do it now so we deal with the case where one already + * happened while soft-disabled. + * We shouldn't get any external interrupts, only decrementer, and the + * decrementer handler is safe for use on offline CPUs + */ + local_irq_enable(); + + while (1) { + /* let's not take timer interrupts too often ... */ + set_dec(0x7fffffff); + + /* Enter NAP mode */ + power4_idle(); } - cpu_dead[cpu] = 0; } -#endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */ +#endif /* else CONFIG_PPC32 */ +#endif /* CONFIG_HOTPLUG_CPU */ /* Core99 Macs (dual G4s and G5s) */ struct smp_ops_t core99_smp_ops = { .message_pass = smp_mpic_message_pass, .probe = smp_core99_probe, + .bringup_done = smp_core99_bringup_done, .kick_cpu = smp_core99_kick_cpu, .setup_cpu = smp_core99_setup_cpu, .give_timebase = smp_core99_give_timebase, .take_timebase = smp_core99_take_timebase, #if defined(CONFIG_HOTPLUG_CPU) -# if defined(CONFIG_PPC32) .cpu_disable = smp_core99_cpu_disable, - .cpu_die = smp_core99_cpu_die, -# endif -# if defined(CONFIG_PPC64) - .cpu_disable = generic_cpu_disable, .cpu_die = generic_cpu_die, - /* intentionally do *NOT* assign cpu_enable, - * the generic code will use kick_cpu then! 
*/ -# endif #endif }; @@ -957,5 +1014,10 @@ void __init pmac_setup_smp(void) smp_ops = &psurge_smp_ops; } #endif /* CONFIG_PPC32 */ + +#ifdef CONFIG_HOTPLUG_CPU + ppc_md.cpu_die = pmac_cpu_die; +#endif } + diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index b74a9230edc..57ceb92b228 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -74,7 +74,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) return NULL; /* The configure connector reported name does not contain a - * preceeding '/', so we allocate a buffer large enough to + * preceding '/', so we allocate a buffer large enough to * prepend this to the full_name. */ name = (char *)ccwa + ccwa->name_offset; diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 3cc4d102b1f..89649173d3a 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -65,7 +65,7 @@ * with EEH. * * Ideally, a PCI device driver, when suspecting that an isolation - * event has occured (e.g. by reading 0xff's), will then ask EEH + * event has occurred (e.g. by reading 0xff's), will then ask EEH * whether this is the case, and then take appropriate steps to * reset the PCI slot, the PCI device, and then resume operations. * However, until that day, the checking is done here, with the diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fd50ccd4bac..ef8c45489e2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -216,7 +216,7 @@ static void pseries_cpu_die(unsigned int cpu) cpu, pcpu, cpu_status); } - /* Isolation and deallocation are definatly done by + /* Isolation and deallocation are definitely done by * drslot_chrp_cpu. If they were not they would be * done here. Change isolate state to Isolate and * change allocation-state to Unusable. diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 154c464cdca..6d5412a18b2 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -272,7 +272,7 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) return tce_ret; } -/* this is compatable with cells for the device tree property */ +/* this is compatible with cells for the device tree property */ struct dynamic_dma_window_prop { __be32 liobn; /* tce table number */ __be64 dma_base; /* address hi,lo */ @@ -976,7 +976,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); /* dev setup for LPAR is a little tricky, since the device tree might - * contain the dma-window properties per-device and not neccesarily + * contain the dma-window properties per-device and not necessarily * for the bus. So we need to search upwards in the tree until we * either hit a dma-window property, OR find a parent with a table * already allocated. @@ -1033,7 +1033,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) /* * the device tree might contain the dma-window properties - * per-device and not neccesarily for the bus. So we need to + * per-device and not necessarily for the bus. So we need to * search upwards in the tree until we either hit a dma-window * property, OR find a parent with a table already allocated. 
*/ diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h index 75a6f480d93..08672d9136a 100644 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ b/arch/powerpc/platforms/pseries/offline_states.h @@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu) #endif extern enum cpu_state_vals get_preferred_offline_state(int cpu); -extern int start_secondary(void); -extern void start_secondary_resume(void); #endif diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index c319d04aa79..00072414908 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -378,7 +378,7 @@ static int __init pSeries_init_panel(void) return 0; } -arch_initcall(pSeries_init_panel); +machine_arch_initcall(pseries, pSeries_init_panel); static int pseries_set_dabr(unsigned long dabr) { diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index d6479f9738f..a509c5292a6 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -112,10 +112,10 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) /* Fixup atomic count: it exited inside IRQ handler. */ task_thread_info(paca[lcpu].__current)->preempt_count = 0; - +#ifdef CONFIG_HOTPLUG_CPU if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE) goto out; - +#endif /* * If the RTAS start-cpu token does not exist then presume the * cpu is already spinning. @@ -130,7 +130,9 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) return 0; } +#ifdef CONFIG_HOTPLUG_CPU out: +#endif return 1; } @@ -144,16 +146,15 @@ static void __devinit smp_xics_setup_cpu(int cpu) vpa_init(cpu); cpumask_clear_cpu(cpu, of_spin_mask); +#ifdef CONFIG_HOTPLUG_CPU set_cpu_current_state(cpu, CPU_STATE_ONLINE); set_default_offline_state(cpu); - +#endif } #endif /* CONFIG_XICS */ static void __devinit smp_pSeries_kick_cpu(int nr) { - long rc; - unsigned long hcpuid; BUG_ON(nr < 0 || nr >= NR_CPUS); if (!smp_startup_cpu(nr)) @@ -165,16 +166,20 @@ static void __devinit smp_pSeries_kick_cpu(int nr) * the processor will continue on to secondary_start */ paca[nr].cpu_start = 1; - +#ifdef CONFIG_HOTPLUG_CPU set_preferred_offline_state(nr, CPU_STATE_ONLINE); if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) { + long rc; + unsigned long hcpuid; + hcpuid = get_hard_smp_processor_id(nr); rc = plpar_hcall_norets(H_PROD, hcpuid); if (rc != H_SUCCESS) printk(KERN_ERR "Error: Prod to wake up processor %d " "Ret= %ld\n", nr, rc); } +#endif } static int smp_pSeries_cpu_bootable(unsigned int nr) diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index ec8fe22047b..d6901334d66 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -897,7 +897,7 @@ void xics_migrate_irqs_away(void) int status; unsigned long flags; - /* We cant set affinity on ISA interrupts */ + /* We can't set affinity on ISA interrupts */ if (virq < NUM_ISA_INTERRUPTS) continue; if (irq_map[virq].host != xics_host) diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 27402c7d309..1636dd89670 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -95,7 +95,7 @@ axon_ram_irq_handler(int irq, void *dev) BUG_ON(!bank); - dev_err(&device->dev, "Correctable memory error occured\n"); + dev_err(&device->dev, "Correctable memory error occurred\n"); bank->ecc_counter++; return 
IRQ_HANDLED; } diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h index 23a95f80dfd..a0e2e6b19b5 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.h +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h @@ -20,7 +20,7 @@ * struct bcom_bd - Structure describing a generic BestComm buffer descriptor * @status: The current status of this buffer. Exact meaning depends on the * task type - * @data: An array of u32 extra data. Size of array is task dependant. + * @data: An array of u32 extra data. Size of array is task dependent. * * Note: Don't dereference a bcom_bd pointer as an array. The size of the * bcom_bd is variable. Use bcom_get_bd() instead. diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h index eb0d1c883c3..3b52f3ffbdf 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h +++ b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h @@ -97,7 +97,7 @@ struct bcom_task_header { u8 reserved[8]; }; -/* Descriptors stucture & co */ +/* Descriptors structure & co */ #define BCOM_DESC_NOP 0x000001f8 #define BCOM_LCD_MASK 0x80000000 #define BCOM_DRD_EXTENDED 0x40000000 diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index 8b5aba26332..e0bc944eb23 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c @@ -223,7 +223,7 @@ void __init cpm_reset(void) /* Set SDMA Bus Request priority 5. * On 860T, this also enables FEC priority 6. I am not sure - * this is what we realy want for some applications, but the + * this is what we really want for some applications, but the * manual recommends it. * Bit 25, FAM can also be set to use FEC aggressive mode (860T). */ diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c index 7ed80967664..82fdad885d2 100644 --- a/arch/powerpc/sysdev/indirect_pci.c +++ b/arch/powerpc/sysdev/indirect_pci.c @@ -117,7 +117,7 @@ indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset, out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | (devfn << 8) | reg | cfg_type)); - /* surpress setting of PCI_PRIMARY_BUS */ + /* suppress setting of PCI_PRIMARY_BUS */ if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS) if ((offset == PCI_PRIMARY_BUS) && (bus->number == hose->first_busno)) diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h index 56d9e5deccb..c39a134e868 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.h +++ b/arch/powerpc/sysdev/ppc4xx_pci.h @@ -324,7 +324,7 @@ #define PESDR0_460EX_IHS2 0x036D /* - * 460SX addtional DCRs + * 460SX additional DCRs */ #define PESDRn_460SX_RCEI 0x02