author    | Mark Brown <broonie@opensource.wolfsonmicro.com> | 2011-04-18 17:12:14 +0100
committer | Mark Brown <broonie@opensource.wolfsonmicro.com> | 2011-04-18 17:12:14 +0100
commit    | fac56c2df51bc29b07b3c2dcfabf32a015a0522c (patch)
tree      | 1ff5d84ecf4ea0bcbd42e2ef9624b5ade3810890 /arch/powerpc/platforms
parent    | 6caa15d0b84d2ea688fd31f4f172c8353463e109 (diff)
parent    | a6360dd37e1a144ed11e6548371bade559a1e4df (diff)
Merge commit 'v2.6.39-rc3' into for-2.6.39
Diffstat (limited to 'arch/powerpc/platforms')
25 files changed, 208 insertions, 175 deletions
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index 6385d883cb8..9940ce8a2d4 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c @@ -57,7 +57,7 @@ struct mpc52xx_lpbfifo { static struct mpc52xx_lpbfifo lpbfifo; /** - * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transfered + * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred */ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) { @@ -179,7 +179,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) * * On transmit, the dma completion irq triggers before the fifo completion * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm - * task completion irq becuase everyting is not really done until the LPB FIFO + * task completion irq because everything is not really done until the LPB FIFO * completion irq triggers. * * In other words: @@ -195,7 +195,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) * Exit conditions: * 1) Transfer aborted * 2) FIFO complete without DMA; more data to do - * 3) FIFO complete without DMA; all data transfered + * 3) FIFO complete without DMA; all data transferred * 4) FIFO complete using DMA * * Condition 1 can occur regardless of whether or not DMA is used. diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 3ddea96273c..1dd15400f6f 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c @@ -512,7 +512,7 @@ void __init mpc52xx_init_irq(void) /** * mpc52xx_get_irq - Get pending interrupt number hook function * - * Called by the interupt handler to determine what IRQ handler needs to be + * Called by the interrupt handler to determine what IRQ handler needs to be * executed. * * Status of pending interrupts is determined by reading the encoded status diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 20576829eca..f7b07720aa3 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -46,7 +46,7 @@ config PPC_OF_BOOT_TRAMPOLINE help Support from booting from Open Firmware or yaboot using an Open Firmware client interface. This enables the kernel to - communicate with open firmware to retrieve system informations + communicate with open firmware to retrieve system information such as the device tree. In case of doubt, say Y diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index a19bec07870..44cfd1bef89 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -244,7 +244,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq, break; case IIC_IRQ_TYPE_IOEXC: irq_set_chip_and_handler(virq, &iic_ioexc_chip, - handle_iic_irq); + handle_edge_eoi_irq); break; default: irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c index 3b894f58528..147069938cf 100644 --- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c +++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c @@ -90,7 +90,7 @@ int spu_alloc_lscsa(struct spu_state *csa) */ for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) { /* XXX This is likely to fail, we should use a special pool - * similiar to what hugetlbfs does. + * similar to what hugetlbfs does. 
*/ csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL, SPU_64K_PAGE_ORDER); diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 0b046628493..65203857b0c 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -846,7 +846,7 @@ static struct spu_context *grab_runnable_context(int prio, int node) struct list_head *rq = &spu_prio->runq[best]; list_for_each_entry(ctx, rq, rq) { - /* XXX(hch): check for affinity here aswell */ + /* XXX(hch): check for affinity here as well */ if (__node_allowed(ctx, node)) { __spu_del_from_rq(ctx); goto found; diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c index 21a9c952d88..72c905f1ee7 100644 --- a/arch/powerpc/platforms/cell/spufs/spu_restore.c +++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c @@ -284,7 +284,7 @@ static inline void restore_complete(void) exit_instrs[3] = BR_INSTR; break; default: - /* SPU_Status[R]=1. No additonal instructions. */ + /* SPU_Status[R]=1. No additional instructions. */ break; } spu_sync(); diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index b5e026bdca2..62dabe3c2bf 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c @@ -51,7 +51,7 @@ static int mf_initialized; /* - * This is the structure layout for the Machine Facilites LPAR event + * This is the structure layout for the Machine Facilities LPAR event * flows. */ struct vsp_cmd_data { diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index b5f05d943a9..2376069cdc1 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c @@ -396,7 +396,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mTargetInst)) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "int msg rcvd, source inst (%d) doesnt match (%d)\n", + "int msg rcvd, source inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mTargetInst, event->xSourceInstanceId); return; @@ -407,7 +407,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mSourceInst)) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "int msg rcvd, target inst (%d) doesnt match (%d)\n", + "int msg rcvd, target inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mSourceInst, event->xTargetInstanceId); return; @@ -418,7 +418,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mSourceInst) { printk(VIOPATH_KERN_WARN "message from invalid partition. " - "ack msg rcvd, source inst (%d) doesnt match (%d)\n", + "ack msg rcvd, source inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mSourceInst, event->xSourceInstanceId); return; @@ -428,7 +428,7 @@ static void vio_handleEvent(struct HvLpEvent *event) viopathStatus[remoteLp].mTargetInst) { printk(VIOPATH_KERN_WARN "message from invalid partition. 
" - "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n", + "viopath: ack msg rcvd, target inst (%d) doesn't match (%d)\n", viopathStatus[remoteLp].mTargetInst, event->xTargetInstanceId); return; diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index 09695ae50f9..321a9b3a2d0 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -379,9 +379,9 @@ void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, } EXPORT_SYMBOL(pasemi_dma_free_buf); -/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel syncronization +/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization * - * Allocates a flag for use with channel syncronization (event descriptors). + * Allocates a flag for use with channel synchronization (event descriptors). * Returns allocated flag (0-63), < 0 on error. */ int pasemi_dma_alloc_flag(void) diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index 50f16939255..ea47df66fee 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile @@ -11,7 +11,7 @@ obj-y += pic.o setup.o time.o feature.o pci.o \ obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o -# CONFIG_NVRAM is an arch. independant tristate symbol, for pmac32 we really +# CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really # need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really # CONFIG_NVRAM=y obj-$(CONFIG_NVRAM:m=y) += nvram.o diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 480567e5fa9..e9c8a607268 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -904,7 +904,7 @@ static void __init smu_i2c_probe(void) printk(KERN_INFO "SMU i2c %s\n", controller->full_name); /* Look for childs, note that they might not be of the right - * type as older device trees mix i2c busses and other thigns + * type as older device trees mix i2c busses and other things * at the same level */ for (busnode = NULL; diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index ab689894270..f33e08d573c 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -299,7 +299,7 @@ static void __init setup_chaos(struct pci_controller *hose, * This function deals with some "special cases" devices. 
* * 0 -> No special case - * 1 -> Skip the device but act as if the access was successfull + * 1 -> Skip the device but act as if the access was successful * (return 0xff's on reads, eventually, cache config space * accesses in a later version) * -1 -> Hide the device (unsuccessful access) diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index f0bc08f6c1f..20468f49aec 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h @@ -33,7 +33,6 @@ extern void pmac_setup_pci_dma(void); extern void pmac_check_ht_link(void); extern void pmac_setup_smp(void); -extern void pmac32_cpu_die(void); extern void low_cpu_die(void) __attribute__((noreturn)); extern int pmac_nvram_init(void); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index d5aceb7fb12..aa45281bd29 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -650,51 +650,6 @@ static int pmac_pci_probe_mode(struct pci_bus *bus) return PCI_PROBE_NORMAL; return PCI_PROBE_DEVTREE; } - -#ifdef CONFIG_HOTPLUG_CPU -/* access per cpu vars from generic smp.c */ -DECLARE_PER_CPU(int, cpu_state); - -static void pmac64_cpu_die(void) -{ - /* - * turn off as much as possible, we'll be - * kicked out as this will only be invoked - * on core99 platforms for now ... - */ - - printk(KERN_INFO "CPU#%d offline\n", smp_processor_id()); - __get_cpu_var(cpu_state) = CPU_DEAD; - smp_wmb(); - - /* - * during the path that leads here preemption is disabled, - * reenable it now so that when coming up preempt count is - * zero correctly - */ - preempt_enable(); - - /* - * hard-disable interrupts for the non-NAP case, the NAP code - * needs to re-enable interrupts (but soft-disables them) - */ - hard_irq_disable(); - - while (1) { - /* let's not take timer interrupts too often ... 
*/ - set_dec(0x7fffffff); - - /* should always be true at this point */ - if (cpu_has_feature(CPU_FTR_CAN_NAP)) - power4_cpu_offline_powersave(); - else { - HMT_low(); - HMT_very_low(); - } - } -} -#endif /* CONFIG_HOTPLUG_CPU */ - #endif /* CONFIG_PPC64 */ define_machine(powermac) { @@ -726,15 +681,4 @@ define_machine(powermac) { .pcibios_after_init = pmac_pcibios_after_init, .phys_mem_access_prot = pci_phys_mem_access_prot, #endif -#ifdef CONFIG_HOTPLUG_CPU -#ifdef CONFIG_PPC64 - .cpu_die = pmac64_cpu_die, -#endif -#ifdef CONFIG_PPC32 - .cpu_die = pmac32_cpu_die, -#endif -#endif -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) - .cpu_die = generic_mach_cpu_die, -#endif }; diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index c95215f4f8b..a830c5e8065 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -840,92 +840,149 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr) /* Setup openpic */ mpic_setup_this_cpu(); +} - if (cpu_nr == 0) { -#ifdef CONFIG_PPC64 - extern void g5_phy_disable_cpu1(void); +#ifdef CONFIG_HOTPLUG_CPU +static int smp_core99_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + int rc; - /* Close i2c bus if it was used for tb sync */ + switch(action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + /* Open i2c bus if it was used for tb sync */ if (pmac_tb_clock_chip_host) { - pmac_i2c_close(pmac_tb_clock_chip_host); - pmac_tb_clock_chip_host = NULL; + rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1); + if (rc) { + pr_err("Failed to open i2c bus for time sync\n"); + return notifier_from_errno(rc); + } } + break; + case CPU_ONLINE: + case CPU_UP_CANCELED: + /* Close i2c bus if it was used for tb sync */ + if (pmac_tb_clock_chip_host) + pmac_i2c_close(pmac_tb_clock_chip_host); + break; + default: + break; + } + return NOTIFY_OK; +} - /* If we didn't start the second CPU, we must take - * it off the bus - */ - if (of_machine_is_compatible("MacRISC4") && - num_online_cpus() < 2) - g5_phy_disable_cpu1(); -#endif /* CONFIG_PPC64 */ +static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { + .notifier_call = smp_core99_cpu_notify, +}; +#endif /* CONFIG_HOTPLUG_CPU */ + +static void __init smp_core99_bringup_done(void) +{ +#ifdef CONFIG_PPC64 + extern void g5_phy_disable_cpu1(void); + + /* Close i2c bus if it was used for tb sync */ + if (pmac_tb_clock_chip_host) + pmac_i2c_close(pmac_tb_clock_chip_host); - if (ppc_md.progress) - ppc_md.progress("core99_setup_cpu 0 done", 0x349); + /* If we didn't start the second CPU, we must take + * it off the bus. 
+ */ + if (of_machine_is_compatible("MacRISC4") && + num_online_cpus() < 2) { + set_cpu_present(1, false); + g5_phy_disable_cpu1(); } -} +#endif /* CONFIG_PPC64 */ +#ifdef CONFIG_HOTPLUG_CPU + register_cpu_notifier(&smp_core99_cpu_nb); +#endif + if (ppc_md.progress) + ppc_md.progress("smp_core99_bringup_done", 0x349); +} -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) +#ifdef CONFIG_HOTPLUG_CPU -int smp_core99_cpu_disable(void) +static int smp_core99_cpu_disable(void) { - set_cpu_online(smp_processor_id(), false); + int rc = generic_cpu_disable(); + if (rc) + return rc; - /* XXX reset cpu affinity here */ mpic_cpu_set_priority(0xf); - asm volatile("mtdec %0" : : "r" (0x7fffffff)); - mb(); - udelay(20); - asm volatile("mtdec %0" : : "r" (0x7fffffff)); + return 0; } -static int cpu_dead[NR_CPUS]; +#ifdef CONFIG_PPC32 -void pmac32_cpu_die(void) +static void pmac_cpu_die(void) { + int cpu = smp_processor_id(); + local_irq_disable(); - cpu_dead[smp_processor_id()] = 1; + idle_task_exit(); + pr_debug("CPU%d offline\n", cpu); + generic_set_cpu_dead(cpu); + smp_wmb(); mb(); low_cpu_die(); } -void smp_core99_cpu_die(unsigned int cpu) +#else /* CONFIG_PPC32 */ + +static void pmac_cpu_die(void) { - int timeout; + int cpu = smp_processor_id(); - timeout = 1000; - while (!cpu_dead[cpu]) { - if (--timeout == 0) { - printk("CPU %u refused to die!\n", cpu); - break; - } - msleep(1); + local_irq_disable(); + idle_task_exit(); + + /* + * turn off as much as possible, we'll be + * kicked out as this will only be invoked + * on core99 platforms for now ... + */ + + printk(KERN_INFO "CPU#%d offline\n", cpu); + generic_set_cpu_dead(cpu); + smp_wmb(); + + /* + * Re-enable interrupts. The NAP code needs to enable them + * anyways, do it now so we deal with the case where one already + * happened while soft-disabled. + * We shouldn't get any external interrupts, only decrementer, and the + * decrementer handler is safe for use on offline CPUs + */ + local_irq_enable(); + + while (1) { + /* let's not take timer interrupts too often ... */ + set_dec(0x7fffffff); + + /* Enter NAP mode */ + power4_idle(); } - cpu_dead[cpu] = 0; } -#endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */ +#endif /* else CONFIG_PPC32 */ +#endif /* CONFIG_HOTPLUG_CPU */ /* Core99 Macs (dual G4s and G5s) */ struct smp_ops_t core99_smp_ops = { .message_pass = smp_mpic_message_pass, .probe = smp_core99_probe, + .bringup_done = smp_core99_bringup_done, .kick_cpu = smp_core99_kick_cpu, .setup_cpu = smp_core99_setup_cpu, .give_timebase = smp_core99_give_timebase, .take_timebase = smp_core99_take_timebase, #if defined(CONFIG_HOTPLUG_CPU) -# if defined(CONFIG_PPC32) .cpu_disable = smp_core99_cpu_disable, - .cpu_die = smp_core99_cpu_die, -# endif -# if defined(CONFIG_PPC64) - .cpu_disable = generic_cpu_disable, .cpu_die = generic_cpu_die, - /* intentionally do *NOT* assign cpu_enable, - * the generic code will use kick_cpu then! 
*/ -# endif #endif }; @@ -957,5 +1014,10 @@ void __init pmac_setup_smp(void) smp_ops = &psurge_smp_ops; } #endif /* CONFIG_PPC32 */ + +#ifdef CONFIG_HOTPLUG_CPU + ppc_md.cpu_die = pmac_cpu_die; +#endif } + diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index b74a9230edc..57ceb92b228 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -74,7 +74,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) return NULL; /* The configure connector reported name does not contain a - * preceeding '/', so we allocate a buffer large enough to + * preceding '/', so we allocate a buffer large enough to * prepend this to the full_name. */ name = (char *)ccwa + ccwa->name_offset; diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 3cc4d102b1f..89649173d3a 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -65,7 +65,7 @@ * with EEH. * * Ideally, a PCI device driver, when suspecting that an isolation - * event has occured (e.g. by reading 0xff's), will then ask EEH + * event has occurred (e.g. by reading 0xff's), will then ask EEH * whether this is the case, and then take appropriate steps to * reset the PCI slot, the PCI device, and then resume operations. * However, until that day, the checking is done here, with the diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fd50ccd4bac..ef8c45489e2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -216,7 +216,7 @@ static void pseries_cpu_die(unsigned int cpu) cpu, pcpu, cpu_status); } - /* Isolation and deallocation are definatly done by + /* Isolation and deallocation are definitely done by * drslot_chrp_cpu. If they were not they would be * done here. Change isolate state to Isolate and * change allocation-state to Unusable. diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 154c464cdca..6d5412a18b2 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -272,7 +272,7 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) return tce_ret; } -/* this is compatable with cells for the device tree property */ +/* this is compatible with cells for the device tree property */ struct dynamic_dma_window_prop { __be32 liobn; /* tce table number */ __be64 dma_base; /* address hi,lo */ @@ -976,7 +976,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); /* dev setup for LPAR is a little tricky, since the device tree might - * contain the dma-window properties per-device and not neccesarily + * contain the dma-window properties per-device and not necessarily * for the bus. So we need to search upwards in the tree until we * either hit a dma-window property, OR find a parent with a table * already allocated. @@ -1033,7 +1033,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) /* * the device tree might contain the dma-window properties - * per-device and not neccesarily for the bus. So we need to + * per-device and not necessarily for the bus. So we need to * search upwards in the tree until we either hit a dma-window * property, OR find a parent with a table already allocated. 
*/ diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 419707b0724..00cc3a09488 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -480,8 +480,32 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, const char *new_msgs, unsigned long new_len) { static unsigned int oops_count = 0; + static bool panicking = false; size_t text_len; + switch (reason) { + case KMSG_DUMP_RESTART: + case KMSG_DUMP_HALT: + case KMSG_DUMP_POWEROFF: + /* These are almost always orderly shutdowns. */ + return; + case KMSG_DUMP_OOPS: + case KMSG_DUMP_KEXEC: + break; + case KMSG_DUMP_PANIC: + panicking = true; + break; + case KMSG_DUMP_EMERG: + if (panicking) + /* Panic report already captured. */ + return; + break; + default: + pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n", + __FUNCTION__, (int) reason); + return; + } + if (clobbering_unread_rtas_event()) return; diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h index 75a6f480d93..08672d9136a 100644 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ b/arch/powerpc/platforms/pseries/offline_states.h @@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu) #endif extern enum cpu_state_vals get_preferred_offline_state(int cpu); -extern int start_secondary(void); -extern void start_secondary_resume(void); #endif diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index c319d04aa79..00072414908 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -378,7 +378,7 @@ static int __init pSeries_init_panel(void) return 0; } -arch_initcall(pSeries_init_panel); +machine_arch_initcall(pseries, pSeries_init_panel); static int pseries_set_dabr(unsigned long dabr) { diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 0317cce877c..a509c5292a6 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -64,8 +64,8 @@ int smp_query_cpu_stopped(unsigned int pcpu) int qcss_tok = rtas_token("query-cpu-stopped-state"); if (qcss_tok == RTAS_UNKNOWN_SERVICE) { - printk(KERN_INFO "Firmware doesn't support " - "query-cpu-stopped-state\n"); + printk_once(KERN_INFO + "Firmware doesn't support query-cpu-stopped-state\n"); return QCSS_HARDWARE_ERROR; } @@ -112,10 +112,10 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) /* Fixup atomic count: it exited inside IRQ handler. */ task_thread_info(paca[lcpu].__current)->preempt_count = 0; - +#ifdef CONFIG_HOTPLUG_CPU if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE) goto out; - +#endif /* * If the RTAS start-cpu token does not exist then presume the * cpu is already spinning. 
@@ -130,7 +130,9 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) return 0; } +#ifdef CONFIG_HOTPLUG_CPU out: +#endif return 1; } @@ -144,16 +146,15 @@ static void __devinit smp_xics_setup_cpu(int cpu) vpa_init(cpu); cpumask_clear_cpu(cpu, of_spin_mask); +#ifdef CONFIG_HOTPLUG_CPU set_cpu_current_state(cpu, CPU_STATE_ONLINE); set_default_offline_state(cpu); - +#endif } #endif /* CONFIG_XICS */ static void __devinit smp_pSeries_kick_cpu(int nr) { - long rc; - unsigned long hcpuid; BUG_ON(nr < 0 || nr >= NR_CPUS); if (!smp_startup_cpu(nr)) @@ -165,16 +166,20 @@ static void __devinit smp_pSeries_kick_cpu(int nr) * the processor will continue on to secondary_start */ paca[nr].cpu_start = 1; - +#ifdef CONFIG_HOTPLUG_CPU set_preferred_offline_state(nr, CPU_STATE_ONLINE); if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) { + long rc; + unsigned long hcpuid; + hcpuid = get_hard_smp_processor_id(nr); rc = plpar_hcall_norets(H_PROD, hcpuid); if (rc != H_SUCCESS) printk(KERN_ERR "Error: Prod to wake up processor %d " "Ret= %ld\n", nr, rc); } +#endif } static int smp_pSeries_cpu_bootable(unsigned int nr) diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 6c1e638f0ce..d6901334d66 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -204,33 +204,33 @@ static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, static void xics_unmask_irq(struct irq_data *d) { - unsigned int irq; + unsigned int hwirq; int call_status; int server; pr_devel("xics: unmask virq %d\n", d->irq); - irq = (unsigned int)irq_map[d->irq].hwirq; - pr_devel(" -> map to hwirq 0x%x\n", irq); - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + hwirq = (unsigned int)irq_map[d->irq].hwirq; + pr_devel(" -> map to hwirq 0x%x\n", hwirq); + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) return; server = get_irq_server(d->irq, d->affinity, 0); - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, server, DEFAULT_PRIORITY); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive irq %u server %x returned %d\n", - __func__, irq, server, call_status); + __func__, hwirq, server, call_status); return; } /* Now unmask the interrupt (often a no-op) */ - call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq); + call_status = rtas_call(ibm_int_on, 1, 1, NULL, hwirq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", - __func__, irq, call_status); + __func__, hwirq, call_status); return; } } @@ -250,46 +250,46 @@ static unsigned int xics_startup(struct irq_data *d) return 0; } -static void xics_mask_real_irq(struct irq_data *d) +static void xics_mask_real_irq(unsigned int hwirq) { int call_status; - if (d->irq == XICS_IPI) + if (hwirq == XICS_IPI) return; - call_status = rtas_call(ibm_int_off, 1, 1, NULL, d->irq); + call_status = rtas_call(ibm_int_off, 1, 1, NULL, hwirq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", - __func__, d->irq, call_status); + __func__, hwirq, call_status); return; } /* Have to set XIVE to 0xff to be able to remove a slot */ - call_status = rtas_call(ibm_set_xive, 3, 1, NULL, d->irq, + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hwirq, default_server, 0xff); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", - __func__, d->irq, call_status); + __func__, hwirq, call_status); return; } } static void xics_mask_irq(struct 
irq_data *d) { - unsigned int irq; + unsigned int hwirq; pr_devel("xics: mask virq %d\n", d->irq); - irq = (unsigned int)irq_map[d->irq].hwirq; - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + hwirq = (unsigned int)irq_map[d->irq].hwirq; + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) return; - xics_mask_real_irq(d); + xics_mask_real_irq(hwirq); } static void xics_mask_unknown_vec(unsigned int vec) { printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec); - xics_mask_real_irq(irq_get_irq_data(vec)); + xics_mask_real_irq(vec); } static inline unsigned int xics_xirr_vector(unsigned int xirr) @@ -373,37 +373,37 @@ static unsigned char pop_cppr(void) static void xics_eoi_direct(struct irq_data *d) { - unsigned int irq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; iosync(); - direct_xirr_info_set((pop_cppr() << 24) | irq); + direct_xirr_info_set((pop_cppr() << 24) | hwirq); } static void xics_eoi_lpar(struct irq_data *d) { - unsigned int irq = (unsigned int)irq_map[d->irq].hwirq; + unsigned int hwirq = (unsigned int)irq_map[d->irq].hwirq; iosync(); - lpar_xirr_info_set((pop_cppr() << 24) | irq); + lpar_xirr_info_set((pop_cppr() << 24) | hwirq); } static int xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) { - unsigned int irq; + unsigned int hwirq; int status; int xics_status[2]; int irq_server; - irq = (unsigned int)irq_map[d->irq].hwirq; - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + hwirq = (unsigned int)irq_map[d->irq].hwirq; + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) return -1; - status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", - __func__, irq, status); + __func__, hwirq, status); return -1; } @@ -418,11 +418,11 @@ xics_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) } status = rtas_call(ibm_set_xive, 3, 1, NULL, - irq, irq_server, xics_status[1]); + hwirq, irq_server, xics_status[1]); if (status) { printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", - __func__, irq, status); + __func__, hwirq, status); return -1; } @@ -874,7 +874,7 @@ void xics_kexec_teardown_cpu(int secondary) void xics_migrate_irqs_away(void) { int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); - unsigned int irq, virq; + int virq; /* If we used to be the default server, move to the new "boot_cpuid" */ if (hw_cpu == default_server) @@ -892,18 +892,19 @@ void xics_migrate_irqs_away(void) for_each_irq(virq) { struct irq_desc *desc; struct irq_chip *chip; + unsigned int hwirq; int xics_status[2]; int status; unsigned long flags; - /* We cant set affinity on ISA interrupts */ + /* We can't set affinity on ISA interrupts */ if (virq < NUM_ISA_INTERRUPTS) continue; if (irq_map[virq].host != xics_host) continue; - irq = (unsigned int)irq_map[virq].hwirq; + hwirq = (unsigned int)irq_map[virq].hwirq; /* We need to get IPIs still. 
*/ - if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) + if (hwirq == XICS_IPI || hwirq == XICS_IRQ_SPURIOUS) continue; desc = irq_to_desc(virq); @@ -918,10 +919,10 @@ void xics_migrate_irqs_away(void) raw_spin_lock_irqsave(&desc->lock, flags); - status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hwirq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", - __func__, irq, status); + __func__, hwirq, status); goto unlock; }
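
The powermac SMP hunk above adds a CPU hotplug notifier (smp_core99_cpu_notify) that opens the i2c bus used for timebase sync when a CPU is being brought up and closes it again once the bring-up finishes or is cancelled. Below is a minimal, stand-alone sketch of that notifier pattern against the 2.6.39-era API. It is illustrative only and not part of the patch; example_resource_open() and example_resource_close() are hypothetical stand-ins for the pmac_i2c_open()/pmac_i2c_close() calls in the real code.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

/* Hypothetical per-bringup resource; stands in for the tb-sync i2c bus. */
static int example_resource_open(void)
{
	return 0;
}

static void example_resource_close(void)
{
}

static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	int rc;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* Acquire what the incoming CPU needs; veto the bring-up on error. */
		rc = example_resource_open();
		if (rc)
			return notifier_from_errno(rc);
		break;
	case CPU_ONLINE:
	case CPU_UP_CANCELED:
		/* Release it whether the bring-up succeeded or was cancelled. */
		example_resource_close();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_notify,
};

static int __init example_notifier_init(void)
{
	return register_cpu_notifier(&example_cpu_nb);
}

static void __exit example_notifier_exit(void)
{
	unregister_cpu_notifier(&example_cpu_nb);
}

module_init(example_notifier_init);
module_exit(example_notifier_exit);
MODULE_LICENSE("GPL");

In the actual patch the notifier is registered from smp_core99_bringup_done(), after the boot CPUs are already online, so only subsequent hotplug operations take this path.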