author     Len Brown <len.brown@intel.com>   2011-03-23 02:34:54 -0400
committer  Len Brown <len.brown@intel.com>   2011-03-23 02:34:54 -0400
commit     02e2407858fd62053bf60349c0e72cd1c7a4a60e (patch)
tree       0ebdbddc97d3abbc675916010e7771065b70c137 /arch/blackfin/kernel
parent     96e1c408ea8a556c5b51e0e7d56bd2afbfbf5fe9 (diff)
parent     6447f55da90b77faec1697d499ed7986bb4f6de6 (diff)
Merge branch 'linus' into release
Conflicts:
    arch/x86/kernel/acpi/sleep.c
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c  |  32
-rw-r--r--  arch/blackfin/kernel/ipipe.c         |  84
-rw-r--r--  arch/blackfin/kernel/irqchip.c       |  10
-rw-r--r--  arch/blackfin/kernel/kgdb.c          |   4
-rw-r--r--  arch/blackfin/kernel/setup.c         |  37
-rw-r--r--  arch/blackfin/kernel/time.c          |   6
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S   |   3
7 files changed, 105 insertions, 71 deletions
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 1e485dfdc9f..6ce8dce753c 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -84,6 +84,24 @@ static int __init proc_dma_init(void)
 late_initcall(proc_dma_init);
 #endif
 
+static void set_dma_peripheral_map(unsigned int channel, const char *device_id)
+{
+#ifdef CONFIG_BF54x
+    unsigned int per_map;
+
+    switch (channel) {
+    case CH_UART2_RX: per_map = 0xC << 12; break;
+    case CH_UART2_TX: per_map = 0xD << 12; break;
+    case CH_UART3_RX: per_map = 0xE << 12; break;
+    case CH_UART3_TX: per_map = 0xF << 12; break;
+    default:          return;
+    }
+
+    if (strncmp(device_id, "BFIN_UART", 9) == 0)
+        dma_ch[channel].regs->peripheral_map = per_map;
+#endif
+}
+
 /**
  *  request_dma - request a DMA channel
  *
@@ -111,19 +129,7 @@ int request_dma(unsigned int channel, const char *device_id)
         return -EBUSY;
     }
 
-#ifdef CONFIG_BF54x
-    if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
-        unsigned int per_map;
-        per_map = dma_ch[channel].regs->peripheral_map & 0xFFF;
-        if (strncmp(device_id, "BFIN_UART", 9) == 0)
-            dma_ch[channel].regs->peripheral_map = per_map |
-                ((channel - CH_UART2_RX + 0xC)<<12);
-        else
-            dma_ch[channel].regs->peripheral_map = per_map |
-                ((channel - CH_UART2_RX + 0x6)<<12);
-    }
-#endif
-
+    set_dma_peripheral_map(channel, device_id);
     dma_ch[channel].device_id = device_id;
     dma_ch[channel].irq = 0;
 
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 3b1da4aff2a..f37019c847c 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -154,7 +154,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
      * pending for it.
      */
     if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
-        ipipe_head_cpudom_var(irqpend_himask) == 0)
+        !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
         goto out;
 
     __ipipe_walk_pipeline(head);
@@ -185,25 +185,21 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 }
 EXPORT_SYMBOL(__ipipe_disable_irqdesc);
 
-int __ipipe_syscall_root(struct pt_regs *regs)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
 {
     struct ipipe_percpu_domain_data *p;
-    unsigned long flags;
+    void (*hook)(void);
     int ret;
 
+    WARN_ON_ONCE(irqs_disabled_hw());
+
     /*
-     * We need to run the IRQ tail hook whenever we don't
-     * propagate a syscall to higher domains, because we know that
-     * important operations might be pending there (e.g. Xenomai
-     * deferred rescheduling).
+     * We need to run the IRQ tail hook each time we intercept a
+     * syscall, because we know that important operations might be
+     * pending there (e.g. Xenomai deferred rescheduling).
      */
-
-    if (regs->orig_p0 < NR_syscalls) {
-        void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
-        hook();
-        if ((current->flags & PF_EVNOTIFY) == 0)
-            return 0;
-    }
+    hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+    hook();
 
     /*
      * This routine either returns:
@@ -214,51 +210,47 @@ int __ipipe_syscall_root(struct pt_regs *regs)
      * tail work has to be performed (for handling signals etc).
      */
 
-    if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+    if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+        !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
         return 0;
 
     ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
 
-    flags = hard_local_irq_save();
+    hard_local_irq_disable();
 
-    if (!__ipipe_root_domain_p) {
-        hard_local_irq_restore(flags);
-        return 1;
+    /*
+     * This is the end of the syscall path, so we may
+     * safely assume a valid Linux task stack here.
+     */
+    if (current->ipipe_flags & PF_EVTRET) {
+        current->ipipe_flags &= ~PF_EVTRET;
+        __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
     }
 
-    p = ipipe_root_cpudom_ptr();
-    if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
-        __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+    if (!__ipipe_root_domain_p)
+        ret = -1;
+    else {
+        p = ipipe_root_cpudom_ptr();
+        if (__ipipe_ipending_p(p))
+            __ipipe_sync_pipeline();
+    }
 
-    hard_local_irq_restore(flags);
+    hard_local_irq_enable();
 
     return -ret;
 }
 
-unsigned long ipipe_critical_enter(void (*syncfn) (void))
-{
-    unsigned long flags;
-
-    flags = hard_local_irq_save();
-
-    return flags;
-}
-
-void ipipe_critical_exit(unsigned long flags)
-{
-    hard_local_irq_restore(flags);
-}
-
 static void __ipipe_no_irqtail(void)
 {
 }
 
 int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
 {
-    info->ncpus = num_online_cpus();
-    info->cpufreq = ipipe_cpu_freq();
-    info->archdep.tmirq = IPIPE_TIMER_IRQ;
-    info->archdep.tmfreq = info->cpufreq;
+    info->sys_nr_cpus = num_online_cpus();
+    info->sys_cpu_freq = ipipe_cpu_freq();
+    info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+    info->sys_hrtimer_freq = __ipipe_core_clock;
+    info->sys_hrclock_freq = __ipipe_core_clock;
 
     return 0;
 }
@@ -289,6 +281,7 @@ int ipipe_trigger_irq(unsigned irq)
 asmlinkage void __ipipe_sync_root(void)
 {
     void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+    struct ipipe_percpu_domain_data *p;
     unsigned long flags;
 
     BUG_ON(irqs_disabled());
@@ -300,19 +293,20 @@ asmlinkage void __ipipe_sync_root(void)
 
     clear_thread_flag(TIF_IRQ_SYNC);
 
-    if (ipipe_root_cpudom_var(irqpend_himask) != 0)
-        __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+    p = ipipe_root_cpudom_ptr();
+    if (__ipipe_ipending_p(p))
+        __ipipe_sync_pipeline();
 
     hard_local_irq_restore(flags);
 }
 
-void ___ipipe_sync_pipeline(unsigned long syncmask)
+void ___ipipe_sync_pipeline(void)
 {
     if (__ipipe_root_domain_p &&
         test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
         return;
 
-    __ipipe_sync_stage(syncmask);
+    __ipipe_sync_stage();
 }
 
 void __ipipe_disable_root_irqs_hw(void)
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 64cff54a8a5..8f079392aff 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -39,21 +39,23 @@ int show_interrupts(struct seq_file *p, void *v)
     unsigned long flags;
 
     if (i < NR_IRQS) {
-        raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-        action = irq_desc[i].action;
+        struct irq_desc *desc = irq_to_desc(i);
+
+        raw_spin_lock_irqsave(&desc->lock, flags);
+        action = desc->action;
         if (!action)
             goto skip;
         seq_printf(p, "%3d: ", i);
         for_each_online_cpu(j)
             seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-        seq_printf(p, " %8s", irq_desc[i].chip->name);
+        seq_printf(p, " %8s", get_irq_desc_chip(desc)->name);
         seq_printf(p, " %s", action->name);
         for (action = action->next; action; action = action->next)
             seq_printf(p, " %s", action->name);
         seq_putc(p, '\n');
 skip:
-        raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+        raw_spin_unlock_irqrestore(&desc->lock, flags);
     } else if (i == NR_IRQS) {
         seq_printf(p, "NMI: ");
         for_each_online_cpu(j)
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index eb92592fd80..b8cfe34989e 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -422,11 +422,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
 
 struct kgdb_arch arch_kgdb_ops = {
     .gdb_bpt_instr = {0xa1},
-#ifdef CONFIG_SMP
-    .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
-#else
     .flags = KGDB_HW_BREAKPOINT,
-#endif
     .set_hw_breakpoint = bfin_set_hw_break,
     .remove_hw_breakpoint = bfin_remove_hw_break,
     .disable_hw_break = bfin_disable_hw_debug,
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index ac71dc15cbd..805c6132c77 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -215,11 +215,48 @@ void __init bfin_relocate_l1_mem(void)
 
     early_dma_memcpy_done();
 
+#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
+    blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
+#endif
+
     /* if necessary, copy L2 text/data to L2 SRAM */
     if (L2_LENGTH && l2_len)
         memcpy(_stext_l2, _l2_lma, l2_len);
 }
 
+#ifdef CONFIG_SMP
+void __init bfin_relocate_coreb_l1_mem(void)
+{
+    unsigned long text_l1_len = (unsigned long)_text_l1_len;
+    unsigned long data_l1_len = (unsigned long)_data_l1_len;
+    unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
+
+    blackfin_dma_early_init();
+
+    /* if necessary, copy L1 text to L1 instruction SRAM */
+    if (L1_CODE_LENGTH && text_l1_len)
+        early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
+                         text_l1_len);
+
+    /* if necessary, copy L1 data to L1 data bank A SRAM */
+    if (L1_DATA_A_LENGTH && data_l1_len)
+        early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
+                         data_l1_len);
+
+    /* if necessary, copy L1 data B to L1 data bank B SRAM */
+    if (L1_DATA_B_LENGTH && data_b_l1_len)
+        early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
+                         data_b_l1_len);
+
+    early_dma_memcpy_done();
+
+#ifdef CONFIG_ICACHE_FLUSH_L1
+    blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
+            (unsigned long)_stext_l1 + COREB_L1_CODE_START;
+#endif
+}
+#endif
+
 #ifdef CONFIG_ROMKERNEL
 void __init bfin_relocate_xip_data(void)
 {
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index c9113619029..8d73724c009 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -114,16 +114,14 @@ u32 arch_gettimeoffset(void)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
  */
 #ifdef CONFIG_CORE_TIMER_IRQ_L1
 __attribute__((l1_text))
 #endif
 irqreturn_t timer_interrupt(int irq, void *dummy)
 {
-    write_seqlock(&xtime_lock);
-    do_timer(1);
-    write_sequnlock(&xtime_lock);
+    xtime_update(1);
 
 #ifdef CONFIG_IPIPE
     update_root_process_times(get_irq_regs());
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 4122678529c..854fa49f1c3 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -136,7 +136,7 @@ SECTIONS
 
     . = ALIGN(16);
     INIT_DATA_SECTION(16)
-    PERCPU(4)
+    PERCPU(32, PAGE_SIZE)
 
     .exit.data :
     {
@@ -176,6 +176,7 @@ SECTIONS
     {
         . = ALIGN(4);
         __stext_l1 = .;
+        *(.l1.text.head)
         *(.l1.text)
 #ifdef CONFIG_SCHEDULE_L1
         SCHED_TEXT
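
For context on the bfin_dma_5xx.c hunk above: request_dma() is the entry point a driver uses to claim a Blackfin DMA channel, and the new set_dma_peripheral_map() helper is what now programs the BF54x UART peripheral map when the caller passes the "BFIN_UART" device_id. The sketch below is illustrative only and not part of this commit; the example_* function names and the error-handling flow are assumptions, while request_dma(), set_dma_callback(), free_dma(), CH_UART2_RX and "BFIN_UART" come from the existing Blackfin DMA API and the diff itself.

```c
/*
 * Illustrative sketch (not part of this commit): how a BF54x UART
 * driver might claim its RX DMA channel so that request_dma() ends up
 * invoking the new set_dma_peripheral_map() helper for "BFIN_UART".
 */
#include <linux/interrupt.h>
#include <asm/dma.h>

/* Hypothetical RX completion handler used only for this example. */
static irqreturn_t example_uart_rx_dma_irq(int irq, void *dev_id)
{
	/* ... drain the DMA buffer and push data up to the tty layer ... */
	return IRQ_HANDLED;
}

static int example_claim_uart_rx_dma(void *driver_data)
{
	int ret;

	/* "BFIN_UART" is the device_id the peripheral-map helper keys off. */
	ret = request_dma(CH_UART2_RX, "BFIN_UART");
	if (ret < 0)
		return ret;

	ret = set_dma_callback(CH_UART2_RX, example_uart_rx_dma_irq, driver_data);
	if (ret) {
		free_dma(CH_UART2_RX);
		return ret;
	}

	return 0;
}

static void example_release_uart_rx_dma(void)
{
	free_dma(CH_UART2_RX);
}
```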