Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c     |  3
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c             | 21
-rw-r--r--  arch/powerpc/platforms/cell/spufs/coredump.c  |  2
-rw-r--r--  arch/powerpc/platforms/iseries/vio.c          |  2
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c        | 18
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c         | 28
6 files changed, 44 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 21f61b8c445..cc29c0f5300 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -338,7 +338,8 @@ static void __init mpc85xx_mds_pic_init(void)
}
mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+ MPIC_BROKEN_FRR_NIRQS,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
of_node_put(np);
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 04160a4cc69..a15f582300d 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -46,6 +46,7 @@ smp_85xx_kick_cpu(int nr)
__iomem u32 *bptr_vaddr;
struct device_node *np;
int n = 0;
+ int ioremappable;
WARN_ON (nr < 0 || nr >= NR_CPUS);
@@ -59,21 +60,37 @@ smp_85xx_kick_cpu(int nr)
return;
}
+ /*
+ * A secondary core could be in a spinloop in the bootpage
+ * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
+ * The bootpage and highmem can be accessed via ioremap(), but
+ * we need to directly access the spinloop if it's in lowmem.
+ */
+ ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
+
/* Map the spin table */
- bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+ if (ioremappable)
+ bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+ else
+ bptr_vaddr = phys_to_virt(*cpu_rel_addr);
local_irq_save(flags);
out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
+ if (!ioremappable)
+ flush_dcache_range((ulong)bptr_vaddr,
+ (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
+
/* Wait a bit for the CPU to ack. */
while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
mdelay(1);
local_irq_restore(flags);
- iounmap(bptr_vaddr);
+ if (ioremappable)
+ iounmap(bptr_vaddr);
pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
}
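The hunk above hinges on where the spin table lives: addresses above the lowmem direct map have no linear mapping and must be ioremap()ed, while lowmem addresses already have a cacheable kernel mapping and are reached via phys_to_virt(), which is why the writes are flushed with flush_dcache_range() only in that case (the secondary core polls the table with caches disabled). A minimal sketch of that decision, assuming the same SIZE_BOOT_ENTRY size; map_spin_table() is a hypothetical helper, not part of this patch:

	static u32 __iomem *map_spin_table(phys_addr_t table, int *ioremappable)
	{
		/* Bootpage/highmem: no linear mapping, so ioremap() it. */
		*ioremappable = table > virt_to_phys(high_memory);
		if (*ioremappable)
			return ioremap(table, SIZE_BOOT_ENTRY);

		/* Lowmem: reuse the existing (cacheable) linear mapping. */
		return (u32 __iomem *)phys_to_virt(table);
	}

A caller of such a helper would mirror the hunk: write the PIR and entry address, flush_dcache_range() over the table when !ioremappable, and iounmap() only when ioremappable.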
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index c4d4a19235e..eea120229cd 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -54,7 +54,7 @@ static ssize_t do_coredump_read(int num, struct spu_context *ctx, void *buffer,
*/
static int spufs_dump_write(struct file *file, const void *addr, int nr, loff_t *foffset)
{
- unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
+ unsigned long limit = rlimit(RLIMIT_CORE);
ssize_t written;
if (*foffset + nr > limit)
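rlimit() is the generic accessor for the current task's soft limit, so the replacement is behaviour-preserving. Roughly what the helper evaluates to (a sketch, not the exact kernel definition):

	static inline unsigned long rlimit(unsigned int limit)
	{
		/* soft limit of the calling task, e.g. rlimit(RLIMIT_CORE) */
		return current->signal->rlim[limit].rlim_cur;
	}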
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
index 657b72f6849..2aa8b5631be 100644
--- a/arch/powerpc/platforms/iseries/vio.c
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -474,6 +474,8 @@ static void __init get_viotape_info(struct device_node *vio_root)
struct vio_waitevent we;
int ret;
+ init_completion(&we.com);
+
ret = viopath_open(viopath_hostLp, viomajorsubtype_tape, 2);
if (ret) {
printk(KERN_WARNING "get_viotape_info: "
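The fix initializes the on-stack completion before the asynchronous event path can signal it; without init_completion() the wait/complete pair operates on uninitialized memory. The usual pattern looks like this (a sketch with a hypothetical struct mirroring vio_waitevent's shape):

	struct my_waitevent {                 /* hypothetical stand-in for vio_waitevent */
		struct completion com;
		int rc;
	};

	static void on_event_done(struct my_waitevent *we, int rc)
	{
		we->rc = rc;
		complete(&we->com);           /* wake the waiter */
	}

	static int issue_and_wait(struct my_waitevent *we)
	{
		init_completion(&we->com);    /* must precede any complete() */
		/* ... send the request whose handler calls on_event_done() ... */
		wait_for_completion(&we->com);
		return we->rc;
	}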
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 67b7a10f9fc..37bce52526d 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -236,7 +236,9 @@ static struct device_node *derive_parent(const char *path)
int dlpar_attach_node(struct device_node *dn)
{
+#ifdef CONFIG_PROC_DEVICETREE
struct proc_dir_entry *ent;
+#endif
int rc;
of_node_set_flag(dn, OF_DYNAMIC);
@@ -267,10 +269,10 @@ int dlpar_attach_node(struct device_node *dn)
int dlpar_detach_node(struct device_node *dn)
{
+#ifdef CONFIG_PROC_DEVICETREE
struct device_node *parent = dn->parent;
struct property *prop = dn->properties;
-#ifdef CONFIG_PROC_DEVICETREE
while (prop) {
remove_proc_entry(prop->name, dn->pde);
prop = prop->next;
@@ -344,20 +346,6 @@ int dlpar_release_drc(u32 drc_index)
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-static DEFINE_MUTEX(pseries_cpu_hotplug_mutex);
-
-void cpu_hotplug_driver_lock(void)
-__acquires(pseries_cpu_hotplug_mutex)
-{
- mutex_lock(&pseries_cpu_hotplug_mutex);
-}
-
-void cpu_hotplug_driver_unlock(void)
-__releases(pseries_cpu_hotplug_mutex)
-{
- mutex_unlock(&pseries_cpu_hotplug_mutex);
-}
-
static int dlpar_online_cpu(struct device_node *dn)
{
int rc = 0;
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index b9b9e11609e..f5f79196721 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -163,14 +163,13 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
/* Interface to generic irq subsystem */
#ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq, unsigned int strict_check)
+static int get_irq_server(unsigned int virq, cpumask_t cpumask,
+ unsigned int strict_check)
{
int server;
/* For the moment only implement delivery to all cpus or one cpu */
- cpumask_t cpumask;
cpumask_t tmp = CPU_MASK_NONE;
- cpumask_copy(&cpumask, irq_to_desc(virq)->affinity);
if (!distribute_irqs)
return default_server;
@@ -192,10 +191,7 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
return default_server;
}
#else
-static int get_irq_server(unsigned int virq, unsigned int strict_check)
-{
- return default_server;
-}
+#define get_irq_server(virq, cpumask, strict_check) (default_server)
#endif
static void xics_unmask_irq(unsigned int virq)
@@ -211,7 +207,7 @@ static void xics_unmask_irq(unsigned int virq)
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return;
- server = get_irq_server(virq, 0);
+ server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0);
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
DEFAULT_PRIORITY);
@@ -405,7 +401,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
* For the moment only implement delivery to all cpus or one cpu.
* Get current irq_server for the given irq
*/
- irq_server = get_irq_server(virq, 1);
+ irq_server = get_irq_server(virq, *cpumask, 1);
if (irq_server == -1) {
char cpulist[128];
cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
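The signature change lets each caller choose the mask: xics_unmask_irq() passes the descriptor's recorded affinity, while xics_set_affinity() passes the mask it was just handed. On UP builds the replacement is a macro whose body drops its arguments, so the cpumask expression at the call sites is never compiled there. The same idiom in isolation (hypothetical names, a sketch rather than this file's code):

	#ifdef CONFIG_SMP
	static int pick_server(unsigned int virq, cpumask_t mask, unsigned int strict);
	#else
	/* UP: the arguments are textually dropped, so callers need no cpumask support */
	#define pick_server(virq, mask, strict)	(default_server)
	#endif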
@@ -788,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
{
struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
- BUG_ON(os_cppr->index != 0);
+ /*
+ * we only really want to set the priority when there's
+ * just one cppr value on the stack
+ */
+ WARN_ON(os_cppr->index != 0);
- os_cppr->stack[os_cppr->index] = cppr;
+ os_cppr->stack[0] = cppr;
if (firmware_has_feature(FW_FEATURE_LPAR))
lpar_cppr_info(cppr);
@@ -825,8 +825,14 @@ void xics_setup_cpu(void)
void xics_teardown_cpu(void)
{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
int cpu = smp_processor_id();
+ /*
+ * we have to reset the cppr index to 0 because we're
+ * not going to return from the IPI
+ */
+ os_cppr->index = 0;
xics_set_cpu_priority(0);
/* Clear any pending IPI request */
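Both hunks manipulate the per-CPU CPPR (current processor priority register) stack. Its shape, as used above (a sketch; MAX_NUM_PRIORITIES is the stack depth defined elsewhere in the XICS code):

	struct xics_cppr {
		unsigned char stack[MAX_NUM_PRIORITIES]; /* saved priority per nesting level */
		int index;                               /* 0 = base level, no interrupt in flight */
	};
	static DEFINE_PER_CPU(struct xics_cppr, xics_cppr);

Setting the CPU priority is only meaningful at the base level, hence the WARN_ON(index != 0) and the write to stack[0]; xics_teardown_cpu() forces the index back to 0 because it can be reached from an IPI handler that never returns, which would otherwise leave the stack unbalanced.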