Diffstat (limited to 'arch/powerpc/platforms')
34 files changed, 502 insertions, 182 deletions
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 7486bffd3eb..eeba0a70e46 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -1,3 +1,12 @@ +config PPC_47x + bool "Support for 47x variant" + depends on 44x + default n + select MPIC + help + This option enables support for the 47x family of processors and is + not currently compatible with other 44x or 46x varients + config BAMBOO bool "Bamboo" depends on 44x @@ -151,6 +160,17 @@ config YOSEMITE help This option enables support for the AMCC PPC440EP evaluation board. +config ISS4xx + bool "ISS 4xx Simulator" + depends on (44x || 40x) + default n + select 405GP if 40x + select 440GP if 44x && !PPC_47x + select PPC_FPU + select OF_RTC + help + This option enables support for the IBM ISS simulation environment + #config LUAN # bool "Luan" # depends on 44x diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile index ee6185aeaa3..82ff326e079 100644 --- a/arch/powerpc/platforms/44x/Makefile +++ b/arch/powerpc/platforms/44x/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_SAM440EP) += sam440ep.o obj-$(CONFIG_WARP) += warp.o obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o +obj-$(CONFIG_ISS4xx) += iss4xx.o diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c new file mode 100644 index 00000000000..aa46e9d1e77 --- /dev/null +++ b/arch/powerpc/platforms/44x/iss4xx.c @@ -0,0 +1,167 @@ +/* + * PPC476 board specific routines + * + * Copyright 2010 Torez Smith, IBM Corporation. + * + * Based on earlier code: + * Matt Porter <mporter@kernel.crashing.org> + * Copyright 2002-2005 MontaVista Software Inc. + * + * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> + * Copyright (c) 2003-2005 Zultys Technologies + * + * Rewritten and ported to the merged powerpc tree: + * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/init.h> +#include <linux/of_platform.h> +#include <linux/rtc.h> + +#include <asm/machdep.h> +#include <asm/prom.h> +#include <asm/udbg.h> +#include <asm/time.h> +#include <asm/uic.h> +#include <asm/ppc4xx.h> +#include <asm/mpic.h> +#include <asm/mmu.h> + +static __initdata struct of_device_id iss4xx_of_bus[] = { + { .compatible = "ibm,plb4", }, + { .compatible = "ibm,plb6", }, + { .compatible = "ibm,opb", }, + { .compatible = "ibm,ebc", }, + {}, +}; + +static int __init iss4xx_device_probe(void) +{ + of_platform_bus_probe(NULL, iss4xx_of_bus, NULL); + of_instantiate_rtc(); + + return 0; +} +machine_device_initcall(iss4xx, iss4xx_device_probe); + +/* We can have either UICs or MPICs */ +static void __init iss4xx_init_irq(void) +{ + struct device_node *np; + + /* Find top level interrupt controller */ + for_each_node_with_property(np, "interrupt-controller") { + if (of_get_property(np, "interrupts", NULL) == NULL) + break; + } + if (np == NULL) + panic("Can't find top level interrupt controller"); + + /* Check type and do appropriate initialization */ + if (of_device_is_compatible(np, "ibm,uic")) { + uic_init_tree(); + ppc_md.get_irq = uic_get_irq; +#ifdef CONFIG_MPIC + } else if (of_device_is_compatible(np, "chrp,open-pic")) { + /* The MPIC driver will get everything it needs from the + * device-tree, just pass 0 to all arguments + */ + struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0, + " MPIC "); + BUG_ON(mpic == NULL); + mpic_init(mpic); + ppc_md.get_irq = mpic_get_irq; +#endif + } else + panic("Unrecognized top level interrupt controller"); +} + +#ifdef CONFIG_SMP +static void __cpuinit smp_iss4xx_setup_cpu(int cpu) +{ + mpic_setup_this_cpu(); +} + +static void __cpuinit smp_iss4xx_kick_cpu(int cpu) +{ + struct device_node *cpunode = of_get_cpu_node(cpu, NULL); + const u64 *spin_table_addr_prop; + u32 *spin_table; + extern void start_secondary_47x(void); + + BUG_ON(cpunode == NULL); + + /* Assume spin table. We could test for the enable-method in + * the device-tree but currently there's little point as it's + * our only supported method + */ + spin_table_addr_prop = of_get_property(cpunode, "cpu-release-addr", + NULL); + if (spin_table_addr_prop == NULL) { + pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu); + return; + } + + /* Assume it's mapped as part of the linear mapping. 
This is a bit + * fishy but will work fine for now + */ + spin_table = (u32 *)__va(*spin_table_addr_prop); + pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table); + + spin_table[3] = cpu; + smp_wmb(); + spin_table[1] = __pa(start_secondary_47x); + mb(); +} + +static struct smp_ops_t iss_smp_ops = { + .probe = smp_mpic_probe, + .message_pass = smp_mpic_message_pass, + .setup_cpu = smp_iss4xx_setup_cpu, + .kick_cpu = smp_iss4xx_kick_cpu, + .give_timebase = smp_generic_give_timebase, + .take_timebase = smp_generic_take_timebase, +}; + +static void __init iss4xx_smp_init(void) +{ + if (mmu_has_feature(MMU_FTR_TYPE_47x)) + smp_ops = &iss_smp_ops; +} + +#else /* CONFIG_SMP */ +static void __init iss4xx_smp_init(void) { } +#endif /* CONFIG_SMP */ + +static void __init iss4xx_setup_arch(void) +{ + iss4xx_smp_init(); +} + +/* + * Called very early, MMU is off, device-tree isn't unflattened + */ +static int __init iss4xx_probe(void) +{ + unsigned long root = of_get_flat_dt_root(); + + if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx")) + return 0; + + return 1; +} + +define_machine(iss4xx) { + .name = "ISS-4xx", + .probe = iss4xx_probe, + .progress = udbg_progress, + .init_IRQ = iss4xx_init_irq, + .setup_arch = iss4xx_setup_arch, + .restart = ppc4xx_reset_system, + .calibrate_decr = generic_calibrate_decr, +}; diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c index 0b4f883b20e..ae525e4745d 100644 --- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c @@ -74,6 +74,7 @@ static int __init mpc831x_rdb_probe(void) static struct of_device_id __initdata of_bus_ids[] = { { .compatible = "simple-bus" }, { .compatible = "gianfar" }, + { .compatible = "gpio-leds", }, {}, }; diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c index a1908d26124..e00801c4254 100644 --- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c @@ -72,6 +72,7 @@ static struct of_device_id mpc837x_ids[] = { { .compatible = "soc", }, { .compatible = "simple-bus", }, { .compatible = "gianfar", }, + { .compatible = "gpio-leds", }, {}, }; diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index d95121894eb..3a2ade2e443 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -51,7 +51,7 @@ config MPC85xx_DS bool "Freescale MPC85xx DS" select PPC_I8259 select DEFAULT_UIMAGE - select FSL_ULI1575 + select FSL_ULI1575 if PCI select SWIOTLB help This option enables support for the MPC85xx DS (MPC8544 DS) board @@ -60,7 +60,7 @@ config MPC85xx_RDB bool "Freescale MPC85xx RDB" select PPC_I8259 select DEFAULT_UIMAGE - select FSL_ULI1575 + select FSL_ULI1575 if PCI select SWIOTLB help This option enables support for the MPC85xx RDB (P2020 RDB) board diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig index fbe9f362142..a0b5638c5dc 100644 --- a/arch/powerpc/platforms/86xx/Kconfig +++ b/arch/powerpc/platforms/86xx/Kconfig @@ -13,7 +13,7 @@ config MPC8641_HPCN bool "Freescale MPC8641 HPCN" select PPC_I8259 select DEFAULT_UIMAGE - select FSL_ULI1575 + select FSL_ULI1575 if PCI select HAS_RAPIDIO select SWIOTLB help @@ -28,7 +28,7 @@ config SBC8641D config MPC8610_HPCD bool "Freescale MPC8610 HPCD" select DEFAULT_UIMAGE - select FSL_ULI1575 + select FSL_ULI1575 if PCI help This option enables support for the MPC8610 HPCD board. 
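Aside (not part of this commit): the mpc831x_rdb.c and mpc837x_rdb.c hunks above add "gpio-leds" to the boards' of_device_id bus-match tables, and the new iss4xx.c uses the same mechanism for its PLB/OPB/EBC buses. The sketch below only illustrates how such a table is consumed; the board name, function name and table contents are hypothetical, and a real board would register the initcall via machine_device_initcall().

/*
 * Minimal sketch, assuming a hypothetical "example_rdb" board: nodes
 * matching the table (including a "gpio-leds" node, so the leds-gpio
 * driver can bind to it) are turned into platform devices at boot.
 */
#include <linux/init.h>
#include <linux/of_platform.h>

static struct of_device_id __initdata example_rdb_of_bus_ids[] = {
	{ .compatible = "simple-bus", },
	{ .compatible = "gianfar", },
	{ .compatible = "gpio-leds", },	/* expose the LED node as a device */
	{},
};

static int __init example_rdb_declare_of_platform_devices(void)
{
	/* Walk the device tree from the root and create matching devices */
	of_platform_bus_probe(NULL, example_rdb_of_bus_ids, NULL);
	return 0;
}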
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c index 5abe137f630..018cc67be42 100644 --- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c @@ -83,7 +83,8 @@ static struct of_device_id __initdata mpc8610_ids[] = { { .compatible = "fsl,mpc8610-immr", }, { .compatible = "fsl,mpc8610-guts", }, { .compatible = "simple-bus", }, - { .compatible = "gianfar", }, + /* So that the DMA channel nodes can be probed individually: */ + { .compatible = "fsl,eloplus-dma", }, {} }; diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index a8aae0b5457..d361f8119b1 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -43,7 +43,7 @@ config 40x select PPC_PCI_CHOICE config 44x - bool "AMCC 44x" + bool "AMCC 44x, 46x or 47x" select PPC_DCR_NATIVE select PPC_UDBG_16550 select 4xx_SOC @@ -294,7 +294,7 @@ config PPC_PERF_CTRS This enables the powerpc-specific perf_event back-end. config SMP - depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE + depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x bool "Symmetric multi-processing support" ---help--- This enables support for systems with more than one CPU. If you have @@ -322,6 +322,7 @@ config NR_CPUS config NOT_COHERENT_CACHE bool depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON + default n if PPC_47x default y config CHECK_CACHE_COHERENCY diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c index e6506cd0ff9..bfa2c0cb3d1 100644 --- a/arch/powerpc/platforms/cell/cbe_cpufreq.c +++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c @@ -118,7 +118,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->cur = cbe_freqs[cur_pmode].frequency; #ifdef CONFIG_SMP - cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); + cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); #endif cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 57e5b608fa1..174a04ac480 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -69,10 +69,10 @@ void __init wii_memory_fixups(void) /* * This is part of a workaround to allow the use of two - * discontiguous RAM ranges on the Wii, even if this is + * discontinuous RAM ranges on the Wii, even if this is * currently unsupported on 32-bit PowerPC Linux. * - * We coealesce the two memory ranges of the Wii into a + * We coalesce the two memory ranges of the Wii into a * single range, then create a reservation for the "hole" * between both ranges. 
*/ diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S index fba5bf91507..32a56c6dfa7 100644 --- a/arch/powerpc/platforms/iseries/exception.S +++ b/arch/powerpc/platforms/iseries/exception.S @@ -252,8 +252,8 @@ decrementer_iSeries_masked: li r11,1 ld r12,PACALPPACAPTR(r13) stb r11,LPPACADECRINT(r12) - LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy) - lwz r12,0(r12) + li r12,-1 + clrldi r12,r12,33 /* set DEC to 0x7fffffff */ mtspr SPRN_DEC,r12 /* fall through */ diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c index b841c9a9db8..3fc2e6494b8 100644 --- a/arch/powerpc/platforms/iseries/pci.c +++ b/arch/powerpc/platforms/iseries/pci.c @@ -32,6 +32,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/of.h> +#include <linux/ratelimit.h> #include <asm/types.h> #include <asm/io.h> @@ -584,14 +585,9 @@ static inline struct device_node *xlate_iomm_address( orig_addr = (unsigned long __force)addr; if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) { - static unsigned long last_jiffies; - static int num_printed; + static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10); - if (time_after(jiffies, last_jiffies + 60 * HZ)) { - last_jiffies = jiffies; - num_printed = 0; - } - if (num_printed++ < 10) + if (__ratelimit(&ratelimit)) printk(KERN_ERR "iSeries_%s: invalid access at IO address %p\n", func, addr); diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c index 722335e32fd..6590850045a 100644 --- a/arch/powerpc/platforms/iseries/smp.c +++ b/arch/powerpc/platforms/iseries/smp.c @@ -83,7 +83,7 @@ static void smp_iSeries_message_pass(int target, int msg) static int smp_iSeries_probe(void) { - return cpus_weight(cpu_possible_map); + return cpumask_weight(cpu_possible_mask); } static void smp_iSeries_kick_cpu(int nr) diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c index d35e0520abf..c16537bc0c6 100644 --- a/arch/powerpc/platforms/pasemi/cpufreq.c +++ b/arch/powerpc/platforms/pasemi/cpufreq.c @@ -213,7 +213,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) pr_debug("current astate is at %d\n",cur_astate); policy->cur = pas_freqs[cur_astate].frequency; - cpumask_copy(policy->cpus, &cpu_online_map); + cpumask_copy(policy->cpus, cpu_online_mask); ppc_proc_freq = policy->cur * 1000ul; diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c index 3ca09d3ccce..9650c6029c8 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_64.c +++ b/arch/powerpc/platforms/powermac/cpufreq_64.c @@ -362,7 +362,7 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) /* secondary CPUs are tied to the primary one by the * cpufreq core if in the secondary policy we tell it that * it actually must be one policy together with all others. 
*/ - cpumask_copy(policy->cpus, &cpu_online_map); + cpumask_copy(policy->cpus, cpu_online_mask); cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu); return cpufreq_frequency_table_cpuinfo(policy, diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index f45331ab97c..06a137c5b8b 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -592,7 +592,7 @@ static void __init kw_i2c_probe(void) /* Probe keywest-i2c busses */ for_each_compatible_node(np, "i2c","keywest-i2c") { struct pmac_i2c_host_kw *host; - int multibus, chans, i; + int multibus; /* Found one, init a host structure */ host = kw_i2c_host_init(np); @@ -614,6 +614,8 @@ static void __init kw_i2c_probe(void) * parent type */ if (multibus) { + int chans, i; + parent = of_get_parent(np); if (parent == NULL) continue; @@ -1258,8 +1260,7 @@ static void pmac_i2c_do_end(struct pmf_function *func, void *instdata) if (inst == NULL) return; pmac_i2c_close(inst->bus); - if (inst) - kfree(inst); + kfree(inst); } static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len) diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index 3362e781b6a..f0bc08f6c1f 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h @@ -33,6 +33,8 @@ extern void pmac_setup_pci_dma(void); extern void pmac_check_ht_link(void); extern void pmac_setup_smp(void); +extern void pmac32_cpu_die(void); +extern void low_cpu_die(void) __attribute__((noreturn)); extern int pmac_nvram_init(void); extern void pmac_pic_init(void); diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 15c2241f9c7..f1d0132ebcc 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -480,7 +480,7 @@ static void __init pmac_init_early(void) #endif /* SMP Init has to be done early as we need to patch up - * cpu_possible_map before interrupt stacks are allocated + * cpu_possible_mask before interrupt stacks are allocated * or kaboom... */ #ifdef CONFIG_SMP @@ -646,7 +646,7 @@ static int pmac_pci_probe_mode(struct pci_bus *bus) /* access per cpu vars from generic smp.c */ DECLARE_PER_CPU(int, cpu_state); -static void pmac_cpu_die(void) +static void pmac64_cpu_die(void) { /* * turn off as much as possible, we'll be @@ -717,8 +717,13 @@ define_machine(powermac) { .pcibios_after_init = pmac_pcibios_after_init, .phys_mem_access_prot = pci_phys_mem_access_prot, #endif -#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64) - .cpu_die = pmac_cpu_die, +#ifdef CONFIG_HOTPLUG_CPU +#ifdef CONFIG_PPC64 + .cpu_die = pmac64_cpu_die, +#endif +#ifdef CONFIG_PPC32 + .cpu_die = pmac32_cpu_die, +#endif #endif #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) .cpu_die = generic_mach_cpu_die, diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 6898e8241cd..c95215f4f8b 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -53,6 +53,8 @@ #include <asm/pmac_low_i2c.h> #include <asm/pmac_pfunc.h> +#include "pmac.h" + #undef DEBUG #ifdef DEBUG @@ -315,7 +317,7 @@ static int __init smp_psurge_probe(void) /* This is necessary because OF doesn't know about the * secondary cpu(s), and thus there aren't nodes in the * device tree for them, and smp_setup_cpu_maps hasn't - * set their bits in cpu_present_map. + * set their bits in cpu_present_mask. 
*/ if (ncpus > NR_CPUS) ncpus = NR_CPUS; @@ -878,10 +880,9 @@ int smp_core99_cpu_disable(void) return 0; } -extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */ static int cpu_dead[NR_CPUS]; -void cpu_die(void) +void pmac32_cpu_die(void) { local_irq_disable(); cpu_dead[smp_processor_id()] = 1; @@ -944,7 +945,7 @@ void __init pmac_setup_smp(void) } #ifdef CONFIG_PPC32 else { - /* We have to set bits in cpu_possible_map here since the + /* We have to set bits in cpu_possible_mask here since the * secondary CPU(s) aren't in the device tree. Various * things won't be initialized for CPUs not in the possible * map, so we really need to fix it up here. diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 0ff5174ae4f..3dbef309bc8 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -7,7 +7,7 @@ EXTRA_CFLAGS += -DDEBUG endif obj-y := lpar.o hvCall.o nvram.o reconfig.o \ - setup.o iommu.o ras.o \ + setup.o iommu.o event_sources.o ras.o \ firmware.o power.o dlpar.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_XICS) += xics.o diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index e1682bc168a..d71e5858408 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c @@ -79,13 +79,12 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) * prepend this to the full_name. */ name = (char *)ccwa + ccwa->name_offset; - dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL); + dn->full_name = kasprintf(GFP_KERNEL, "/%s", name); if (!dn->full_name) { kfree(dn); return NULL; } - sprintf(dn->full_name, "/%s", name); return dn; } @@ -410,15 +409,13 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) * directory of the device tree. CPUs actually live in the * cpus directory so we need to fixup the full_name. */ - cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1, - GFP_KERNEL); + cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name); if (!cpu_name) { dlpar_free_cc_nodes(dn); rc = -ENOMEM; goto out; } - sprintf(cpu_name, "/cpus%s", dn->full_name); kfree(dn->full_name); dn->full_name = cpu_name; @@ -433,6 +430,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) if (rc) { dlpar_release_drc(drc_index); dlpar_free_cc_nodes(dn); + goto out; } rc = dlpar_online_cpu(dn); diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 7df7fbb7cac..34b7dc12e73 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -749,7 +749,7 @@ static void __rtas_set_slot_reset(struct pci_dn *pdn) /* Determine type of EEH reset required by device, * default hot reset or fundamental reset */ - if (dev->needs_freset) + if (dev && dev->needs_freset) rtas_pci_slot_reset(pdn, 3); else rtas_pci_slot_reset(pdn, 1); diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c new file mode 100644 index 00000000000..e889c9d9586 --- /dev/null +++ b/arch/powerpc/platforms/pseries/event_sources.c @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2001 Dave Engebretsen IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <asm/prom.h> + +#include "pseries.h" + +void request_event_sources_irqs(struct device_node *np, + irq_handler_t handler, + const char *name) +{ + int i, index, count = 0; + struct of_irq oirq; + const u32 *opicprop; + unsigned int opicplen; + unsigned int virqs[16]; + + /* Check for obsolete "open-pic-interrupt" property. If present, then + * map those interrupts using the default interrupt host and default + * trigger + */ + opicprop = of_get_property(np, "open-pic-interrupt", &opicplen); + if (opicprop) { + opicplen /= sizeof(u32); + for (i = 0; i < opicplen; i++) { + if (count > 15) + break; + virqs[count] = irq_create_mapping(NULL, *(opicprop++)); + if (virqs[count] == NO_IRQ) + printk(KERN_ERR "Unable to allocate interrupt " + "number for %s\n", np->full_name); + else + count++; + + } + } + /* Else use normal interrupt tree parsing */ + else { + /* First try to do a proper OF tree parsing */ + for (index = 0; of_irq_map_one(np, index, &oirq) == 0; + index++) { + if (count > 15) + break; + virqs[count] = irq_create_of_mapping(oirq.controller, + oirq.specifier, + oirq.size); + if (virqs[count] == NO_IRQ) + printk(KERN_ERR "Unable to allocate interrupt " + "number for %s\n", np->full_name); + else + count++; + } + } + + /* Now request them */ + for (i = 0; i < count; i++) { + if (request_irq(virqs[i], handler, 0, name, NULL)) { + printk(KERN_ERR "Unable to request interrupt %d for " + "%s\n", virqs[i], np->full_name); + return; + } + } +} + diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index a8e1d5d17a2..8f85f399ab9 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void) for(;;); } -static int qcss_tok; /* query-cpu-stopped-state token */ - -/* Get state of physical CPU. - * Return codes: - * 0 - The processor is in the RTAS stopped state - * 1 - stop-self is in progress - * 2 - The processor is not in the RTAS stopped state - * -1 - Hardware Error - * -2 - Hardware Busy, Try again later. 
- */ -static int query_cpu_stopped(unsigned int pcpu) -{ - int cpu_status, status; - - status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu); - if (status != 0) { - printk(KERN_ERR - "RTAS query-cpu-stopped-state failed: %i\n", status); - return status; - } - - return cpu_status; -} - static int pseries_cpu_disable(void) { int cpu = smp_processor_id(); @@ -187,7 +163,7 @@ static int pseries_cpu_disable(void) /*fix boot_cpuid here*/ if (cpu == boot_cpuid) - boot_cpuid = any_online_cpu(cpu_online_map); + boot_cpuid = cpumask_any(cpu_online_mask); /* FIXME: abstract this to not be platform specific later on */ xics_migrate_irqs_away(); @@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu) } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { for (tries = 0; tries < 25; tries++) { - cpu_status = query_cpu_stopped(pcpu); - if (cpu_status == 0 || cpu_status == -1) + cpu_status = smp_query_cpu_stopped(pcpu); + if (cpu_status == QCSS_STOPPED || + cpu_status == QCSS_HARDWARE_ERROR) break; cpu_relax(); } @@ -245,7 +222,7 @@ static void pseries_cpu_die(unsigned int cpu) } /* - * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle + * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle * here is that a cpu device node may represent up to two logical cpus * in the SMT case. We must honor the assumption in other code that * the logical ids for sibling SMT threads x and y are adjacent, such @@ -254,7 +231,7 @@ static void pseries_cpu_die(unsigned int cpu) static int pseries_add_processor(struct device_node *np) { unsigned int cpu; - cpumask_t candidate_map, tmp = CPU_MASK_NONE; + cpumask_var_t candidate_mask, tmp; int err = -ENOSPC, len, nthreads, i; const u32 *intserv; @@ -262,48 +239,53 @@ static int pseries_add_processor(struct device_node *np) if (!intserv) return 0; + zalloc_cpumask_var(&candidate_mask, GFP_KERNEL); + zalloc_cpumask_var(&tmp, GFP_KERNEL); + nthreads = len / sizeof(u32); for (i = 0; i < nthreads; i++) - cpu_set(i, tmp); + cpumask_set_cpu(i, tmp); cpu_maps_update_begin(); - BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map)); + BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask)); /* Get a bitmap of unoccupied slots. */ - cpus_xor(candidate_map, cpu_possible_map, cpu_present_map); - if (cpus_empty(candidate_map)) { + cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask); + if (cpumask_empty(candidate_mask)) { /* If we get here, it most likely means that NR_CPUS is * less than the partition's max processors setting. 
*/ printk(KERN_ERR "Cannot add cpu %s; this system configuration" " supports %d logical cpus.\n", np->full_name, - cpus_weight(cpu_possible_map)); + cpumask_weight(cpu_possible_mask)); goto out_unlock; } - while (!cpus_empty(tmp)) - if (cpus_subset(tmp, candidate_map)) + while (!cpumask_empty(tmp)) + if (cpumask_subset(tmp, candidate_mask)) /* Found a range where we can insert the new cpu(s) */ break; else - cpus_shift_left(tmp, tmp, nthreads); + cpumask_shift_left(tmp, tmp, nthreads); - if (cpus_empty(tmp)) { - printk(KERN_ERR "Unable to find space in cpu_present_map for" + if (cpumask_empty(tmp)) { + printk(KERN_ERR "Unable to find space in cpu_present_mask for" " processor %s with %d thread(s)\n", np->name, nthreads); goto out_unlock; } - for_each_cpu_mask(cpu, tmp) { - BUG_ON(cpu_isset(cpu, cpu_present_map)); + for_each_cpu(cpu, tmp) { + BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask)); set_cpu_present(cpu, true); set_hard_smp_processor_id(cpu, *intserv++); } err = 0; out_unlock: cpu_maps_update_done(); + free_cpumask_var(candidate_mask); + free_cpumask_var(tmp); return err; } @@ -334,7 +316,7 @@ static void pseries_remove_processor(struct device_node *np) set_hard_smp_processor_id(cpu, -1); break; } - if (cpu == NR_CPUS) + if (cpu >= nr_cpu_ids) printk(KERN_WARNING "Could not find cpu to remove " "with physical id 0x%x\n", intserv[i]); } @@ -388,6 +370,7 @@ static int __init pseries_cpu_hotplug_init(void) struct device_node *np; const char *typep; int cpu; + int qcss_tok; for_each_node_by_name(np, "interrupt-controller") { typep = of_get_property(np, "compatible", NULL); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 9b21ee68ea5..01e7b5bb3c1 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -11,6 +11,7 @@ #include <linux/of.h> #include <linux/lmb.h> +#include <linux/vmalloc.h> #include <asm/firmware.h> #include <asm/machdep.h> #include <asm/pSeries_reconfig.h> @@ -54,6 +55,12 @@ static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size) */ start = (unsigned long)__va(base); ret = remove_section_mapping(start, start + lmb_size); + + /* Ensure all vmalloc mappings are flushed in case they also + * hit that section of memory + */ + vm_unmap_aliases(); + return ret; } diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 383a5d0e981..48d20573e4d 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -228,3 +228,41 @@ _GLOBAL(plpar_hcall9) mtcrf 0xff,r0 blr /* return r3 = status */ + +/* See plpar_hcall_raw to see why this is needed */ +_GLOBAL(plpar_hcall9_raw) + HMT_MEDIUM + + mfcr r0 + stw r0,8(r1) + + std r4,STK_PARM(r4)(r1) /* Save ret buffer */ + + mr r4,r5 + mr r5,r6 + mr r6,r7 + mr r7,r8 + mr r8,r9 + mr r9,r10 + ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ + ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ + ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ + + HVSC /* invoke the hypervisor */ + + mr r0,r12 + ld r12,STK_PARM(r4)(r1) + std r4, 0(r12) + std r5, 8(r12) + std r6, 16(r12) + std r7, 24(r12) + std r8, 32(r12) + std r9, 40(r12) + std r10,48(r12) + std r11,56(r12) + std r0, 64(r12) + + lwz r0,8(r1) + mtcrf 0xff,r0 + + blr /* return r3 = status */ diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 0707653612b..cf79b46d8f8 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ 
b/arch/powerpc/platforms/pseries/lpar.c @@ -367,21 +367,28 @@ static void pSeries_lpar_hptab_clear(void) { unsigned long size_bytes = 1UL << ppc64_pft_size; unsigned long hpte_count = size_bytes >> 4; - unsigned long dummy1, dummy2, dword0; + struct { + unsigned long pteh; + unsigned long ptel; + } ptes[4]; long lpar_rc; - int i; + int i, j; - /* TODO: Use bulk call */ - for (i = 0; i < hpte_count; i++) { - /* dont remove HPTEs with VRMA mappings */ - lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG, - &dummy1, &dummy2); - if (lpar_rc == H_NOT_FOUND) { - lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1); - if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK) - != HPTE_V_VRMA_MASK)) - /* Can be hpte for 1TB Seg. So remove it */ - plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2); + /* Read in batches of 4, + * invalidate only valid entries not in the VRMA + * hpte_count will be a multiple of 4 + */ + for (i = 0; i < hpte_count; i += 4) { + lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes); + if (lpar_rc != H_SUCCESS) + continue; + for (j = 0; j < 4; j++){ + if ((ptes[j].pteh & HPTE_V_VRMA_MASK) == + HPTE_V_VRMA_MASK) + continue; + if (ptes[j].pteh & HPTE_V_VALID) + plpar_pte_remove_raw(0, i + j, 0, + &(ptes[j].pteh), &(ptes[j].ptel)); } } } diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index a05f8d42785..d9801117124 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h @@ -4,6 +4,14 @@ #include <asm/hvcall.h> #include <asm/page.h> +/* Get state of physical CPU from query_cpu_stopped */ +int smp_query_cpu_stopped(unsigned int pcpu); +#define QCSS_STOPPED 0 +#define QCSS_STOPPING 1 +#define QCSS_NOT_STOPPED 2 +#define QCSS_HARDWARE_ERROR -1 +#define QCSS_HARDWARE_BUSY -2 + static inline long poll_pending(void) { return plpar_hcall_norets(H_POLL_PENDING); @@ -183,6 +191,24 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex, return rc; } +/* + * plpar_pte_read_4_raw can be called in real mode. 
+ * ptes must be 8*sizeof(unsigned long) + */ +static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex, + unsigned long *ptes) + +{ + long rc; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + + rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex); + + memcpy(ptes, retbuf, 8*sizeof(unsigned long)); + + return rc; +} + static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex, unsigned long avpn) { diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 9e17c0d2a0c..40c93cad91d 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h @@ -10,6 +10,13 @@ #ifndef _PSERIES_PSERIES_H #define _PSERIES_PSERIES_H +#include <linux/interrupt.h> + +struct device_node; + +extern void request_event_sources_irqs(struct device_node *np, + irq_handler_t handler, const char *name); + extern void __init fw_feature_init(const char *hypertas, unsigned long len); struct pt_regs; diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index db940d2c39a..41a3e9a039e 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -67,63 +67,6 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id); static irqreturn_t ras_error_interrupt(int irq, void *dev_id); -static void request_ras_irqs(struct device_node *np, - irq_handler_t handler, - const char *name) -{ - int i, index, count = 0; - struct of_irq oirq; - const u32 *opicprop; - unsigned int opicplen; - unsigned int virqs[16]; - - /* Check for obsolete "open-pic-interrupt" property. If present, then - * map those interrupts using the default interrupt host and default - * trigger - */ - opicprop = of_get_property(np, "open-pic-interrupt", &opicplen); - if (opicprop) { - opicplen /= sizeof(u32); - for (i = 0; i < opicplen; i++) { - if (count > 15) - break; - virqs[count] = irq_create_mapping(NULL, *(opicprop++)); - if (virqs[count] == NO_IRQ) - printk(KERN_ERR "Unable to allocate interrupt " - "number for %s\n", np->full_name); - else - count++; - - } - } - /* Else use normal interrupt tree parsing */ - else { - /* First try to do a proper OF tree parsing */ - for (index = 0; of_irq_map_one(np, index, &oirq) == 0; - index++) { - if (count > 15) - break; - virqs[count] = irq_create_of_mapping(oirq.controller, - oirq.specifier, - oirq.size); - if (virqs[count] == NO_IRQ) - printk(KERN_ERR "Unable to allocate interrupt " - "number for %s\n", np->full_name); - else - count++; - } - } - - /* Now request them */ - for (i = 0; i < count; i++) { - if (request_irq(virqs[i], handler, 0, name, NULL)) { - printk(KERN_ERR "Unable to request interrupt %d for " - "%s\n", virqs[i], np->full_name); - return; - } - } -} - /* * Initialize handlers for the set of interrupts caused by hardware errors * and power system events. 
@@ -138,14 +81,15 @@ static int __init init_ras_IRQ(void) /* Internal Errors */ np = of_find_node_by_path("/event-sources/internal-errors"); if (np != NULL) { - request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR"); + request_event_sources_irqs(np, ras_error_interrupt, + "RAS_ERROR"); of_node_put(np); } /* EPOW Events */ np = of_find_node_by_path("/event-sources/epow-events"); if (np != NULL) { - request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW"); + request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW"); of_node_put(np); } diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 6710761bf60..a6d19e3a505 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -496,13 +496,14 @@ static int __init pSeries_probe(void) } -DECLARE_PER_CPU(unsigned long, smt_snooze_delay); +DECLARE_PER_CPU(long, smt_snooze_delay); static void pseries_dedicated_idle_sleep(void) { unsigned int cpu = smp_processor_id(); unsigned long start_snooze; unsigned long in_purr, out_purr; + long snooze = __get_cpu_var(smt_snooze_delay); /* * Indicate to the HV that we are idle. Now would be @@ -517,13 +518,12 @@ static void pseries_dedicated_idle_sleep(void) * has been checked recently. If we should poll for a little * while, do so. */ - if (__get_cpu_var(smt_snooze_delay)) { - start_snooze = get_tb() + - __get_cpu_var(smt_snooze_delay) * tb_ticks_per_usec; + if (snooze) { + start_snooze = get_tb() + snooze * tb_ticks_per_usec; local_irq_enable(); set_thread_flag(TIF_POLLING_NRFLAG); - while (get_tb() < start_snooze) { + while ((snooze < 0) || (get_tb() < start_snooze)) { if (need_resched() || cpu_is_offline(cpu)) goto out; ppc64_runlatch_off(); diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 4e7f89a8456..3b1bf61c45b 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -55,7 +55,29 @@ * The Primary thread of each non-boot processor was started from the OF client * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop. */ -static cpumask_t of_spin_map; +static cpumask_var_t of_spin_mask; + +/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */ +int smp_query_cpu_stopped(unsigned int pcpu) +{ + int cpu_status, status; + int qcss_tok = rtas_token("query-cpu-stopped-state"); + + if (qcss_tok == RTAS_UNKNOWN_SERVICE) { + printk(KERN_INFO "Firmware doesn't support " + "query-cpu-stopped-state\n"); + return QCSS_HARDWARE_ERROR; + } + + status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu); + if (status != 0) { + printk(KERN_ERR + "RTAS query-cpu-stopped-state failed: %i\n", status); + return status; + } + + return cpu_status; +} /** * smp_startup_cpu() - start the given cpu @@ -76,12 +98,18 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) unsigned int pcpu; int start_cpu; - if (cpu_isset(lcpu, of_spin_map)) + if (cpumask_test_cpu(lcpu, of_spin_mask)) /* Already started by OF and sitting in spin loop */ return 1; pcpu = get_hard_smp_processor_id(lcpu); + /* Check to see if the CPU out of FW already for kexec */ + if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED){ + cpumask_set_cpu(lcpu, of_spin_mask); + return 1; + } + /* Fixup atomic count: it exited inside IRQ handler. 
*/ task_thread_info(paca[lcpu].__current)->preempt_count = 0; @@ -115,7 +143,7 @@ static void __devinit smp_xics_setup_cpu(int cpu) if (firmware_has_feature(FW_FEATURE_SPLPAR)) vpa_init(cpu); - cpu_clear(cpu, of_spin_map); + cpumask_clear_cpu(cpu, of_spin_mask); set_cpu_current_state(cpu, CPU_STATE_ONLINE); set_default_offline_state(cpu); @@ -186,17 +214,19 @@ static void __init smp_init_pseries(void) pr_debug(" -> smp_init_pSeries()\n"); + alloc_bootmem_cpumask_var(&of_spin_mask); + /* Mark threads which are still spinning in hold loops. */ if (cpu_has_feature(CPU_FTR_SMT)) { for_each_present_cpu(i) { if (cpu_thread_in_core(i) == 0) - cpu_set(i, of_spin_map); + cpumask_set_cpu(i, of_spin_mask); } } else { - of_spin_map = cpu_present_map; + cpumask_copy(of_spin_mask, cpu_present_mask); } - cpu_clear(boot_cpuid, of_spin_map); + cpumask_clear_cpu(boot_cpuid, of_spin_mask); /* Non-lpar has additional take/give timebase */ if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 1bcedd8b461..f19d1946839 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -163,29 +163,37 @@ static inline void lpar_qirr_info(int n_cpu , u8 value) /* Interface to generic irq subsystem */ #ifdef CONFIG_SMP -static int get_irq_server(unsigned int virq, cpumask_t cpumask, +/* + * For the moment we only implement delivery to all cpus or one cpu. + * + * If the requested affinity is cpu_all_mask, we set global affinity. + * If not we set it to the first cpu in the mask, even if multiple cpus + * are set. This is so things like irqbalance (which set core and package + * wide affinities) do the right thing. + */ +static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, unsigned int strict_check) { - int server; - /* For the moment only implement delivery to all cpus or one cpu */ - cpumask_t tmp = CPU_MASK_NONE; if (!distribute_irqs) return default_server; - if (!cpus_equal(cpumask, CPU_MASK_ALL)) { - cpus_and(tmp, cpu_online_map, cpumask); - - server = first_cpu(tmp); + if (!cpumask_equal(cpumask, cpu_all_mask)) { + int server = cpumask_first_and(cpu_online_mask, cpumask); - if (server < NR_CPUS) + if (server < nr_cpu_ids) return get_hard_smp_processor_id(server); if (strict_check) return -1; } - if (cpus_equal(cpu_online_map, cpu_present_map)) + /* + * Workaround issue with some versions of JS20 firmware that + * deliver interrupts to cpus which haven't been started. This + * happens when using the maxcpus= boot option. + */ + if (cpumask_equal(cpu_online_mask, cpu_present_mask)) return default_distrib_server; return default_server; @@ -207,7 +215,7 @@ static void xics_unmask_irq(unsigned int virq) if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) return; - server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0); + server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0); call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, DEFAULT_PRIORITY); @@ -398,11 +406,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) return -1; } - /* - * For the moment only implement delivery to all cpus or one cpu. 
- * Get current irq_server for the given irq */ - irq_server = get_irq_server(virq, *cpumask, 1); + irq_server = get_irq_server(virq, cpumask, 1); if (irq_server == -1) { char cpulist[128]; cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); @@ -611,7 +615,7 @@ int __init smp_xics_probe(void) { xics_request_ipi(); - return cpus_weight(cpu_possible_map); + return cpumask_weight(cpu_possible_mask); } #endif /* CONFIG_SMP */
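Aside (not part of this commit): many of the pseries, powermac and xics hunks above share one cross-cutting conversion, from the old fixed-size cpumask_t helpers (cpu_set, cpus_xor, cpus_subset, cpus_weight, for_each_cpu_mask, and the cpu_*_map variables) to the cpumask_var_t allocation API and the cpumask_*()/cpu_*_mask accessors. The sketch below only condenses that pattern; the function is made up for illustration and is not taken from the patch.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical helper showing the new-style cpumask handling */
static int example_count_free_slots(int nthreads)
{
	cpumask_var_t candidate_mask, tmp;
	int cpu, count = 0;

	/* Heap-allocated, zeroed masks instead of cpumask_t on the stack */
	if (!zalloc_cpumask_var(&candidate_mask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL)) {
		free_cpumask_var(candidate_mask);
		return -ENOMEM;
	}

	for (cpu = 0; cpu < nthreads; cpu++)
		cpumask_set_cpu(cpu, tmp);		/* was cpu_set() */

	/* was cpus_xor(candidate_map, cpu_possible_map, cpu_present_map) */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);

	for_each_cpu(cpu, tmp)				/* was for_each_cpu_mask() */
		if (cpumask_test_cpu(cpu, candidate_mask))
			count++;

	free_cpumask_var(tmp);
	free_cpumask_var(candidate_mask);
	return count;
}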