Diffstat (limited to 'arch/blackfin/mach-common/smp.c')
-rw-r--r--  arch/blackfin/mach-common/smp.c  46
1 file changed, 34 insertions, 12 deletions
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 93eab614607..349ee3f5466 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -43,8 +43,13 @@
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/cpu.h>
+#include <asm/time.h>
 #include <linux/err.h>
 
+/*
+ * Anomaly notes:
+ * 05000120 - we always define corelock as 32-bit integer in L2
+ */
 struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
 
 void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
@@ -139,7 +144,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
 
 static irqreturn_t ipi_handler(int irq, void *dev_instance)
 {
-	struct ipi_message *msg, *mg;
+	struct ipi_message *msg;
 	struct ipi_message_queue *msg_queue;
 	unsigned int cpu = smp_processor_id();
 
@@ -149,7 +154,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
 	msg_queue->count++;
 
 	spin_lock(&msg_queue->lock);
-	list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
+	while (!list_empty(&msg_queue->head)) {
+		msg = list_entry(msg_queue->head.next, typeof(*msg), list);
 		list_del(&msg->list);
 		switch (msg->type) {
 		case BFIN_IPI_RESCHEDULE:
@@ -205,6 +211,8 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
 		return 0;
 
 	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+	if (!msg)
+		return -ENOMEM;
 	INIT_LIST_HEAD(&msg->list);
 	msg->call_struct.func = func;
 	msg->call_struct.info = info;
@@ -216,7 +224,7 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
 	for_each_cpu_mask(cpu, callmap) {
 		msg_queue = &per_cpu(ipi_msg_queue, cpu);
 		spin_lock_irqsave(&msg_queue->lock, flags);
-		list_add(&msg->list, &msg_queue->head);
+		list_add_tail(&msg->list, &msg_queue->head);
 		spin_unlock_irqrestore(&msg_queue->lock, flags);
 		platform_send_ipi_cpu(cpu);
 	}
@@ -246,6 +254,8 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
 	cpu_set(cpu, callmap);
 
 	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+	if (!msg)
+		return -ENOMEM;
 	INIT_LIST_HEAD(&msg->list);
 	msg->call_struct.func = func;
 	msg->call_struct.info = info;
@@ -256,7 +266,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
 
 	msg_queue = &per_cpu(ipi_msg_queue, cpu);
 	spin_lock_irqsave(&msg_queue->lock, flags);
-	list_add(&msg->list, &msg_queue->head);
+	list_add_tail(&msg->list, &msg_queue->head);
 	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	platform_send_ipi_cpu(cpu);
 
@@ -281,13 +291,15 @@ void smp_send_reschedule(int cpu)
 		return;
 
 	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+	if (!msg)
+		return;
 	memset(msg, 0, sizeof(msg));
 	INIT_LIST_HEAD(&msg->list);
 	msg->type = BFIN_IPI_RESCHEDULE;
 
 	msg_queue = &per_cpu(ipi_msg_queue, cpu);
 	spin_lock_irqsave(&msg_queue->lock, flags);
-	list_add(&msg->list, &msg_queue->head);
+	list_add_tail(&msg->list, &msg_queue->head);
 	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	platform_send_ipi_cpu(cpu);
 
@@ -308,6 +320,8 @@ void smp_send_stop(void)
 		return;
 
 	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+	if (!msg)
+		return;
 	memset(msg, 0, sizeof(msg));
 	INIT_LIST_HEAD(&msg->list);
 	msg->type = BFIN_IPI_CPU_STOP;
@@ -315,7 +329,7 @@ void smp_send_stop(void)
 	for_each_cpu_mask(cpu, callmap) {
 		msg_queue = &per_cpu(ipi_msg_queue, cpu);
 		spin_lock_irqsave(&msg_queue->lock, flags);
-		list_add(&msg->list, &msg_queue->head);
+		list_add_tail(&msg->list, &msg_queue->head);
 		spin_unlock_irqrestore(&msg_queue->lock, flags);
 		platform_send_ipi_cpu(cpu);
 	}
@@ -352,7 +366,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 static void __cpuinit setup_secondary(unsigned int cpu)
 {
-#if !(defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE))
+#if !defined(CONFIG_TICKSOURCE_GPTMR0)
 	struct irq_desc *timer_desc;
 #endif
 	unsigned long ilat;
@@ -364,16 +378,13 @@ static void __cpuinit setup_secondary(unsigned int cpu)
 	bfin_write_ILAT(ilat);
 	CSYNC();
 
-	/* Reserve the PDA space for the secondary CPU. */
-	reserve_pda();
-
 	/* Enable interrupt levels IVG7-15. IARs have been already
 	 * programmed by the boot CPU.  */
 	bfin_irq_flags |= IMASK_IVG15 |
 	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
 	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
 
-#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+#if defined(CONFIG_TICKSOURCE_GPTMR0)
 	/* Power down the core timer, just to play safe. */
 	bfin_write_TCNTL(0);
 
@@ -447,7 +458,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	unsigned int cpu;
 
 	for_each_online_cpu(cpu)
-		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+		bogosum += loops_per_jiffy;
 
 	printk(KERN_INFO "SMP: Total of %d processors activated "
 	       "(%lu.%02lu BogoMIPS).\n",
@@ -466,6 +477,17 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
 }
 EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
 
+#ifdef __ARCH_SYNC_CORE_ICACHE
+void resync_core_icache(void)
+{
+	unsigned int cpu = get_cpu();
+	blackfin_invalidate_entire_icache();
+	++per_cpu(cpu_data, cpu).icache_invld_count;
+	put_cpu();
+}
+EXPORT_SYMBOL(resync_core_icache);
+#endif
+
 #ifdef __ARCH_SYNC_CORE_DCACHE
 unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
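
Note: the recurring list_add() -> list_add_tail() change above is the core of this patch. Producers now enqueue IPI messages at the tail of the per-CPU queue, so the rewritten drain loop in ipi_handler() processes them in the order they were sent (FIFO) rather than reversed (LIFO). The standalone C sketch below reimplements just enough of the semantics of the kernel's include/linux/list.h to demonstrate the difference; the list helpers here are simplified userspace illustrations written for this note, not the kernel's actual implementations.

/* Sketch: why list_add_tail() gives FIFO drain order on an
 * intrusive circular doubly-linked list. Compile with any C99
 * compiler and run; no kernel headers involved. */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void __list_add(struct list_head *item,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = item;
	item->next = next;
	item->prev = prev;
	prev->next = item;
}

/* Insert right after the head -- what the old code did (LIFO). */
static void list_add(struct list_head *item, struct list_head *head)
{
	__list_add(item, head, head->next);
}

/* Insert just before the head, i.e. at the tail -- the fix (FIFO). */
static void list_add_tail(struct list_head *item, struct list_head *head)
{
	__list_add(item, head->prev, head);
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

struct ipi_message {
	struct list_head list;
	int type;
};

/* Same shape as the new ipi_handler() loop: pop entries from the
 * front of the queue until it is empty. */
static void drain(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ipi_message *msg =
			container_of(head->next, struct ipi_message, list);
		list_del(&msg->list);
		printf("%d ", msg->type);
	}
	printf("\n");
}

int main(void)
{
	struct list_head queue = LIST_HEAD_INIT(queue);
	struct ipi_message m[3] = { { .type = 1 }, { .type = 2 }, { .type = 3 } };
	int i;

	for (i = 0; i < 3; i++)			/* old behaviour */
		list_add(&m[i].list, &queue);
	drain(&queue);				/* prints: 3 2 1 */

	for (i = 0; i < 3; i++)			/* patched behaviour */
		list_add_tail(&m[i].list, &queue);
	drain(&queue);				/* prints: 1 2 3 */

	return 0;
}

FIFO ordering is the natural choice for a message queue like this one, presumably so that a message such as BFIN_IPI_CPU_STOP cannot be handled ahead of call-function messages queued to the same CPU earlier. The kmalloc() NULL checks added throughout the patch are the standard failure-handling pattern for GFP_ATOMIC allocations, which may fail and must not be dereferenced unchecked.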