Diffstat (limited to 'arch/blackfin/mach-common')
 arch/blackfin/mach-common/Makefile        |   1
 arch/blackfin/mach-common/cache-c.c       |  44
 arch/blackfin/mach-common/entry.S         | 193
 arch/blackfin/mach-common/head.S          |  19
 arch/blackfin/mach-common/interrupt.S     |  78
 arch/blackfin/mach-common/ints-priority.c |  19
 arch/blackfin/mach-common/lock.S          | 223
 arch/blackfin/mach-common/pm.c            |  64
 8 files changed, 216 insertions(+), 425 deletions(-)
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
index dd8b2dc97f5..814cb483853 100644
--- a/arch/blackfin/mach-common/Makefile
+++ b/arch/blackfin/mach-common/Makefile
@@ -6,7 +6,6 @@ obj-y := \
 	cache.o cache-c.o entry.o head.o \
 	interrupt.o arch_checks.o ints-priority.o
 
-obj-$(CONFIG_BFIN_ICACHE_LOCK) += lock.o
 obj-$(CONFIG_PM) += pm.o dpmc_modes.o
 obj-$(CONFIG_CPU_FREQ) += cpufreq.o
 obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
index b59ce3cb380..4ebbd78db3a 100644
--- a/arch/blackfin/mach-common/cache-c.c
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -1,14 +1,16 @@
 /*
  * Blackfin cache control code (simpler control-style functions)
  *
- * Copyright 2004-2008 Analog Devices Inc.
+ * Copyright 2004-2009 Analog Devices Inc.
  *
  * Enter bugs at http://blackfin.uclinux.org/
  *
  * Licensed under the GPL-2 or later.
  */
 
+#include <linux/init.h>
 #include <asm/blackfin.h>
+#include <asm/cplbinit.h>
 
 /* Invalidate the Entire Data cache by
  * clearing DMC[1:0] bits
@@ -34,3 +36,43 @@ void blackfin_invalidate_entire_icache(void)
 	SSYNC();
 }
 
+#if defined(CONFIG_BFIN_ICACHE) || defined(CONFIG_BFIN_DCACHE)
+
+static void
+bfin_cache_init(struct cplb_entry *cplb_tbl, unsigned long cplb_addr,
+		unsigned long cplb_data, unsigned long mem_control,
+		unsigned long mem_mask)
+{
+	int i;
+
+	for (i = 0; i < MAX_CPLBS; i++) {
+		bfin_write32(cplb_addr + i * 4, cplb_tbl[i].addr);
+		bfin_write32(cplb_data + i * 4, cplb_tbl[i].data);
+	}
+
+	_enable_cplb(mem_control, mem_mask);
+}
+
+#ifdef CONFIG_BFIN_ICACHE
+void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
+{
+	bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL,
+		(IMC | ENICPLB));
+}
+#endif
+
+#ifdef CONFIG_BFIN_DCACHE
+void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
+{
+	/*
+	 * Anomaly notes:
+	 * 05000287 - We implement workaround #2 - Change the DMEM_CONTROL
+	 * register, so that the port preferences for DAG0 and DAG1 are set
+	 * to port B
+	 */
+	bfin_cache_init(dcplb_tbl, DCPLB_ADDR0, DCPLB_DATA0, DMEM_CONTROL,
+		(DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0)));
+}
+#endif
+
+#endif
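The new bfin_icache_init()/bfin_dcache_init() entry points take the CPLB table to program as an argument, which lets each core bring up its own caches from C on SMP parts. A hypothetical caller, sketched against the tables that the cplbinit code generates (the per-CPU table names and the __cpuinit annotation here are assumptions, not part of this patch):

```c
#include <asm/cplbinit.h>

/* Hypothetical caller (names assumed): program and enable this CPU's
 * caches from the per-CPU CPLB tables built by the cplbinit code. */
static void __cpuinit bfin_setup_caches_sketch(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);	/* fills ICPLB_ADDRx/DATAx, then sets IMC | ENICPLB */
#endif
#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);	/* same for DCPLBs, plus the 05000287 port preference */
#endif
}
```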
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index fb1795d5be2..1e7cac23e25 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -301,27 +301,31 @@ ENTRY(_ex_replaceable)
 	nop;
 
 ENTRY(_ex_trap_c)
+	/* The only thing that has been saved in this context is
+	 * (R7:6,P5:4), ASTAT & SP - don't use anything else
+	 */
+
+	GET_PDA(p5, r6);
+
 	/* Make sure we are not in a double fault */
 	p4.l = lo(IPEND);
 	p4.h = hi(IPEND);
 	r7 = [p4];
 	CC = BITTST (r7, 5);
 	if CC jump _double_fault;
+	[p5 + PDA_EXIPEND] = r7;
 
 	/* Call C code (trap_c) to handle the exception, which most
 	 * likely involves sending a signal to the current process.
 	 * To avoid double faults, lower our priority to IRQ5 first.
 	 */
-	P5.h = _exception_to_level5;
-	P5.l = _exception_to_level5;
+	r7.h = _exception_to_level5;
+	r7.l = _exception_to_level5;
 	p4.l = lo(EVT5);
 	p4.h = hi(EVT5);
-	[p4] = p5;
+	[p4] = r7;
 	csync;
 
-	GET_PDA(p5, r6);
-#ifndef CONFIG_DEBUG_DOUBLEFAULT
-
 	/*
 	 * Save these registers, as they are only valid in exception context
 	 * (where we are now - as soon as we defer to IRQ5, they can change)
@@ -341,7 +345,10 @@ ENTRY(_ex_trap_c)
 	r6 = retx;
 	[p5 + PDA_RETX] = r6;
-#endif
+
+	r6 = SEQSTAT;
+	[p5 + PDA_SEQSTAT] = r6;
+
 	/* Save the state of single stepping */
 	r6 = SYSCFG;
 	[p5 + PDA_SYSCFG] = r6;
@@ -349,8 +356,7 @@ ENTRY(_ex_trap_c)
 	BITCLR(r6, SYSCFG_SSSTEP_P);
 	SYSCFG = r6;
 
-	/* Disable all interrupts, but make sure level 5 is enabled so
-	 * we can switch to that level.  Save the old mask. */
+	/* Save the current IMASK, since we change it in order to jump to level 5 */
 	cli r6;
 	[p5 + PDA_EXIMASK] = r6;
 
@@ -358,9 +364,21 @@ ENTRY(_ex_trap_c)
 	p4.h = hi(SAFE_USER_INSTRUCTION);
 	retx = p4;
 
+	/* Disable all interrupts, but make sure level 5 is enabled so
+	 * we can switch to that level.
+	 */
 	r6 = 0x3f;
 	sti r6;
 
+	/* In case interrupts are disabled (IPEND[4], the global interrupt
+	 * disable bit, is set), clear it (re-enabling interrupts again) via the
+	 * special sequence of pushing RETI onto the stack.  This way we can
+	 * lower ourselves to IVG5 even if the exception was taken after the
+	 * interrupt handler was called but before it got a chance to enable
+	 * global interrupts itself.
+	 */
+	[--sp] = reti;
+	sp += 4;
+
 	raise 5;
 	jump.s _bfin_return_from_exception;
 ENDPROC(_ex_trap_c)
@@ -379,8 +397,7 @@ ENTRY(_double_fault)
 	R5 = [P4];	/* Control Register */
 	BITCLR(R5,ENICPLB_P);
-	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
-	.align 8;
+	CSYNC;		/* Disabling of CPLBs should be preceded by a CSYNC */
 	[P4] = R5;
 	SSYNC;
 
@@ -388,8 +405,7 @@ ENTRY(_double_fault)
 	P4.H = HI(DMEM_CONTROL);
 	R5 = [P4];
 	BITCLR(R5,ENDCPLB_P);
-	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
-	.align 8;
+	CSYNC;		/* Disabling of CPLBs should be preceded by a CSYNC */
 	[P4] = R5;
 	SSYNC;
 
@@ -420,47 +436,55 @@ ENDPROC(_double_fault)
 
 ENTRY(_exception_to_level5)
 	SAVE_ALL_SYS
 
-	GET_PDA(p4, r7);	/* Fetch current PDA */
-	r6 = [p4 + PDA_RETX];
+	GET_PDA(p5, r7);	/* Fetch current PDA */
+	r6 = [p5 + PDA_RETX];
 	[sp + PT_PC] = r6;
 
-	r6 = [p4 + PDA_SYSCFG];
+	r6 = [p5 + PDA_SYSCFG];
 	[sp + PT_SYSCFG] = r6;
 
-	/* Restore interrupt mask.  We haven't pushed RETI, so this
-	 * doesn't enable interrupts until we return from this handler.  */
-	r6 = [p4 + PDA_EXIMASK];
-	sti r6;
+	r6 = [p5 + PDA_SEQSTAT];	/* Read back seqstat */
+	[sp + PT_SEQSTAT] = r6;
 
 	/* Restore the hardware error vector.  */
-	P5.h = _evt_ivhw;
-	P5.l = _evt_ivhw;
+	r7.h = _evt_ivhw;
+	r7.l = _evt_ivhw;
 	p4.l = lo(EVT5);
 	p4.h = hi(EVT5);
-	[p4] = p5;
+	[p4] = r7;
 	csync;
 
-	p2.l = lo(IPEND);
-	p2.h = hi(IPEND);
-	csync;
-	r0 = [p2];		/* Read current IPEND */
-	[sp + PT_IPEND] = r0;	/* Store IPEND */
+#ifdef CONFIG_DEBUG_DOUBLEFAULT
+	/* Now that we have the hardware error vector programmed properly
+	 * we can re-enable interrupts (IPEND[4]), so if the _trap_c causes
+	 * another hardware error, we can catch it (self-nesting).
+	 */
+	[--sp] = reti;
+	sp += 4;
+#endif
+
+	r7 = [p5 + PDA_EXIPEND];	/* Read the IPEND from the Exception state */
+	[sp + PT_IPEND] = r7;		/* Store IPEND onto the stack */
 
 	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
 	SP += -12;
 	call _trap_c;
 	SP += 12;
 
-#ifdef CONFIG_DEBUG_DOUBLEFAULT
-	/* Grab ILAT */
-	p2.l = lo(ILAT);
-	p2.h = hi(ILAT);
-	r0 = [p2];
-	r1 = 0x20;	/* Did I just cause anther HW error? */
-	r0 = r0 & r1;
-	CC = R0 == R1;
-	if CC JUMP _double_fault;
-#endif
+	/* If interrupts were off during the exception (IPEND[4] = 1), turn them
+	 * off before we return.
+	 */
+	CC = BITTST(r7, EVT_IRPTEN_P);
+	if !CC jump 1f;
+	/* this will load a random value into the reti register - but that is OK,
+	 * since we do restore it to the correct value in the 'RESTORE_ALL_SYS' macro
+	 */
+	sp += -4;
+	reti = [sp++];
+1:
+	/* restore the interrupt mask (IMASK) */
+	r6 = [p5 + PDA_EXIMASK];
+	sti r6;
 
 	call _ret_from_exception;
 	RESTORE_ALL_SYS
@@ -474,7 +498,7 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	 */
 	EX_SCRATCH_REG = sp;
 	GET_PDA_SAFE(sp);
-	sp = [sp + PDA_EXSTACK]
+	sp = [sp + PDA_EXSTACK];
 	/* Try to deal with syscalls quickly.  */
 	[--sp] = ASTAT;
 	[--sp] = (R7:6,P5:4);
@@ -489,14 +513,7 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	ssync;
 #endif
 
-#if ANOMALY_05000283 || ANOMALY_05000315
-	cc = r7 == r7;
-	p5.h = HI(CHIPID);
-	p5.l = LO(CHIPID);
-	if cc jump 1f;
-	r7.l = W[p5];
-1:
-#endif
+	ANOMALY_283_315_WORKAROUND(p5, r7)
 
 #ifdef CONFIG_DEBUG_DOUBLEFAULT
 	/*
@@ -510,18 +527,18 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	p4.l = lo(DCPLB_FAULT_ADDR);
 	p4.h = hi(DCPLB_FAULT_ADDR);
 	r7 = [p4];
-	[p5 + PDA_DCPLB] = r7;
+	[p5 + PDA_DF_DCPLB] = r7;
 
 	p4.l = lo(ICPLB_FAULT_ADDR);
 	p4.h = hi(ICPLB_FAULT_ADDR);
 	r7 = [p4];
-	[p5 + PDA_ICPLB] = r7;
+	[p5 + PDA_DF_ICPLB] = r7;
 
-	r6 = retx;
-	[p5 + PDA_RETX] = r6;
+	r7 = retx;
+	[p5 + PDA_DF_RETX] = r7;
 
 	r7 = SEQSTAT;	/* reason code is in bit 5:0 */
-	[p5 + PDA_SEQSTAT] = r7;
+	[p5 + PDA_DF_SEQSTAT] = r7;
 #else
 	r7 = SEQSTAT;	/* reason code is in bit 5:0 */
 #endif
@@ -686,8 +703,14 @@ ENTRY(_system_call)
 #ifdef CONFIG_IPIPE
 	cc = BITTST(r7, TIF_IRQ_SYNC);
 	if !cc jump .Lsyscall_no_irqsync;
+	/*
+	 * Clear IPEND[4] manually to undo what resume_userspace_1 just did;
+	 * we need this so that high priority domain interrupts may still
+	 * preempt the current domain while the pipeline log is being played
+	 * back.
+	 */
 	[--sp] = reti;
-	r0 = [sp++];
+	SP += 4;	/* don't merge with next insn to keep the pattern obvious */
 	SP += -12;
 	call ___ipipe_sync_root;
 	SP += 12;
@@ -699,7 +722,7 @@ ENTRY(_system_call)
 
 	/* Reenable interrupts.  */
 	[--sp] = reti;
-	r0 = [sp++];
+	sp += 4;
 
 	SP += -12;
 	call _schedule;
@@ -715,7 +738,7 @@ ENTRY(_system_call)
 .Lsyscall_do_signals:
 	/* Reenable interrupts.  */
 	[--sp] = reti;
-	r0 = [sp++];
+	sp += 4;
 
 	r0 = sp;
 	SP += -12;
@@ -725,10 +748,6 @@ ENTRY(_system_call)
 .Lsyscall_really_exit:
 	r5 = [sp + PT_RESERVED];
 	rets = r5;
-#ifdef CONFIG_IPIPE
-	[--sp] = reti;
-	r5 = [sp++];
-#endif /* CONFIG_IPIPE */
 	rts;
 ENDPROC(_system_call)
@@ -816,13 +835,13 @@ ENDPROC(_resume)
 
 ENTRY(_ret_from_exception)
 #ifdef CONFIG_IPIPE
-	[--sp] = rets;
-	SP += -12;
-	call ___ipipe_check_root
-	SP += 12
-	rets = [sp++];
-	cc = r0 == 0;
-	if cc jump 4f;	/* not on behalf of Linux, get out */
+	p2.l = _per_cpu__ipipe_percpu_domain;
+	p2.h = _per_cpu__ipipe_percpu_domain;
+	r0.l = _ipipe_root;
+	r0.h = _ipipe_root;
+	r2 = [p2];
+	cc = r0 == r2;
+	if !cc jump 4f;	/* not on behalf of the root domain, get out */
 #endif /* CONFIG_IPIPE */
 	p2.l = lo(IPEND);
 	p2.h = hi(IPEND);
@@ -882,14 +901,9 @@ ENDPROC(_ret_from_exception)
 
 #ifdef CONFIG_IPIPE
 
-_sync_root_irqs:
-	[--sp] = reti;	/* Reenable interrupts */
-	r0 = [sp++];
-	jump.l ___ipipe_sync_root
-
 _resume_kernel_from_int:
-	r0.l = _sync_root_irqs
-	r0.h = _sync_root_irqs
+	r0.l = ___ipipe_sync_root;
+	r0.h = ___ipipe_sync_root;
 	[--sp] = rets;
 	[--sp] = ( r7:4, p5:3 );
 	SP += -12;
@@ -953,10 +967,10 @@ ENTRY(_lower_to_irq14)
 #endif
 
 #ifdef CONFIG_DEBUG_HWERR
-	/* enable irq14 & hwerr interrupt, until we transition to _evt14_softirq */
+	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
 	r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
 #else
-	/* Only enable irq14 interrupt, until we transition to _evt14_softirq */
+	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
 	r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
 #endif
 	sti r0;
@@ -964,7 +978,7 @@ ENTRY(_lower_to_irq14)
 	rti;
 ENDPROC(_lower_to_irq14)
 
-ENTRY(_evt14_softirq)
+ENTRY(_evt_evt14)
 #ifdef CONFIG_DEBUG_HWERR
 	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
 	sti r0;
@@ -974,7 +988,7 @@ ENTRY(_evt14_softirq)
 	[--sp] = RETI;
 	SP += 4;
 	rts;
-ENDPROC(_evt14_softirq)
+ENDPROC(_evt_evt14)
 
 ENTRY(_schedule_and_signal_from_int)
 	/* To end up here, vector 15 was changed - so we have to change it
@@ -1004,6 +1018,12 @@ ENTRY(_schedule_and_signal_from_int)
 #endif
 	sti r0;
 
+	/* finish the userspace "atomic" functions for it */
+	r1 = FIXED_CODE_END;
+	r2 = [sp + PT_PC];
+	cc = r1 <= r2;
+	if cc jump .Lresume_userspace (bp);
+
 	r0 = sp;
 	sp += -12;
 	call _finish_atomic_sections;
@@ -1107,14 +1127,7 @@ ENTRY(_early_trap)
 	SAVE_ALL_SYS
 	trace_buffer_stop(p0,r0);
 
-#if ANOMALY_05000283 || ANOMALY_05000315
-	cc = r5 == r5;
-	p4.h = HI(CHIPID);
-	p4.l = LO(CHIPID);
-	if cc jump 1f;
-	r5.l = W[p4];
-1:
-#endif
+	ANOMALY_283_315_WORKAROUND(p4, r5)
 
 	/* Turn caches off, to ensure we don't get double exceptions */
 
@@ -1123,9 +1136,7 @@ ENTRY(_early_trap)
 	R5 = [P4];	/* Control Register */
 	BITCLR(R5,ENICPLB_P);
-	CLI R1;
-	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
-	.align 8;
+	CSYNC;		/* Disabling of CPLBs should be preceded by a CSYNC */
 	[P4] = R5;
 	SSYNC;
 
@@ -1133,11 +1144,9 @@ ENTRY(_early_trap)
 	P4.H = HI(DMEM_CONTROL);
 	R5 = [P4];
 	BITCLR(R5,ENDCPLB_P);
-	SSYNC;		/* SSYNC required before writing to DMEM_CONTROL. */
-	.align 8;
+	CSYNC;		/* Disabling of CPLBs should be preceded by a CSYNC */
 	[P4] = R5;
 	SSYNC;
-	STI R1;
 
 	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
 	r1 = RETX;
@@ -1611,7 +1620,7 @@ ENTRY(_sys_call_table)
 	.long _sys_preadv
 	.long _sys_pwritev
 	.long _sys_rt_tgsigqueueinfo
-	.long _sys_perf_counter_open
+	.long _sys_perf_event_open
 
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall
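The FIXED_CODE_END test added to _schedule_and_signal_from_int above is a PC-range guard: the userspace "atomic" stubs only need fixing up when the interrupted PC lies inside the fixed-code region. In C terms it is roughly the following (a sketch; finish_atomic_sections() is the real kernel helper, while the asm jumps straight to .Lresume_userspace on the fast path):

```c
#include <asm/fixed_code.h>
#include <asm/ptrace.h>

/* C-level model of the new asm check: skip the fixup whenever the
 * interrupted PC is already past the fixed-code stubs. */
static void maybe_finish_atomic_sections(struct pt_regs *regs)
{
	if (regs->pc < FIXED_CODE_END)
		finish_atomic_sections(regs);
}
```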
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index f826f6b9f91..9c79dfea2a5 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -124,22 +124,22 @@ ENTRY(__start)
 	 * below
 	 */
 	GET_PDA(p0, r0);
-	r6 = [p0 + PDA_RETX];
+	r6 = [p0 + PDA_DF_RETX];
 	p1.l = _init_saved_retx;
 	p1.h = _init_saved_retx;
 	[p1] = r6;
 
-	r6 = [p0 + PDA_DCPLB];
+	r6 = [p0 + PDA_DF_DCPLB];
 	p1.l = _init_saved_dcplb_fault_addr;
 	p1.h = _init_saved_dcplb_fault_addr;
 	[p1] = r6;
 
-	r6 = [p0 + PDA_ICPLB];
+	r6 = [p0 + PDA_DF_ICPLB];
 	p1.l = _init_saved_icplb_fault_addr;
 	p1.h = _init_saved_icplb_fault_addr;
 	[p1] = r6;
 
-	r6 = [p0 + PDA_SEQSTAT];
+	r6 = [p0 + PDA_DF_SEQSTAT];
 	p1.l = _init_saved_seqstat;
 	p1.h = _init_saved_seqstat;
 	[p1] = r6;
@@ -153,6 +153,8 @@ ENTRY(__start)
 
 #ifdef CONFIG_EARLY_PRINTK
 	call _init_early_exception_vectors;
+	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+	sti r0;
 #endif
 
 	r0 = 0 (x);
@@ -212,12 +214,21 @@ ENTRY(__start)
 	[p0] = p1;
 	csync;
 
+#ifdef CONFIG_EARLY_PRINTK
+	r0 = (EVT_IVG15 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU) (z);
+#else
 	r0 = EVT_IVG15 (z);
+#endif
 	sti r0;
 
 	raise 15;
+#ifdef CONFIG_EARLY_PRINTK
+	p0.l = _early_trap;
+	p0.h = _early_trap;
+#else
 	p0.l = .LWAIT_HERE;
 	p0.h = .LWAIT_HERE;
+#endif
 	reti = p0;
 #if ANOMALY_05000281
 	nop; nop; nop;
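Spelling interrupt masks with EVT_* names, as the head.S hunk above does (and as interrupt.S does below, where the named form replaces the old 0x401f literal), makes them auditable. Assuming the usual CEC bit layout (EMU = bit 0, RST = 1, NMI = 2, EVX = 3, IRPTEN = 4, IVHW = 5, IVG7..IVG15 = bits 7..15), the named constants reduce to the old magic numbers:

```c
#include <stdio.h>

/* Assumed CEC/IMASK bit positions; only the ones used here. */
enum {
	EVT_EMU = 1 << 0, EVT_RST = 1 << 1, EVT_NMI = 1 << 2,
	EVT_EVX = 1 << 3, EVT_IRPTEN = 1 << 4, EVT_IVHW = 1 << 5,
	EVT_IVG14 = 1 << 14, EVT_IVG15 = 1 << 15,
};

int main(void)
{
	/* matches the 0x401f literal deleted from ___ipipe_call_irqtail */
	printf("%#x\n", EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
	/* the DEBUG_HWERR variant keeps hardware errors unmasked: 0x403f */
	printf("%#x\n", EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
	return 0;
}
```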
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 9c46680186e..82d417ef4b5 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -119,14 +119,8 @@ __common_int_entry:
 	fp = 0;
 #endif
 
-#if ANOMALY_05000283 || ANOMALY_05000315
-	cc = r7 == r7;
-	p5.h = HI(CHIPID);
-	p5.l = LO(CHIPID);
-	if cc jump 1f;
-	r7.l = W[p5];
-1:
-#endif
+	ANOMALY_283_315_WORKAROUND(p5, r7)
+
 	r1 = sp;
 	SP += -12;
 #ifdef CONFIG_IPIPE
@@ -158,14 +152,7 @@ ENTRY(_evt_ivhw)
 	fp = 0;
 #endif
 
-#if ANOMALY_05000283 || ANOMALY_05000315
-	cc = r7 == r7;
-	p5.h = HI(CHIPID);
-	p5.l = LO(CHIPID);
-	if cc jump 1f;
-	r7.l = W[p5];
-1:
-#endif
+	ANOMALY_283_315_WORKAROUND(p5, r7)
 
 	/* Handle all stacked hardware errors
 	 * To make sure we don't hang forever, only do it 10 times
@@ -261,6 +248,31 @@ ENTRY(_evt_system_call)
 ENDPROC(_evt_system_call)
 
 #ifdef CONFIG_IPIPE
+/*
+ * __ipipe_call_irqtail: lowers the current priority level to EVT15
+ * before running a user-defined routine, then raises the priority
+ * level to EVT14 to prepare the caller for a normal interrupt
+ * return through RTI.
+ *
+ * We currently use this facility on two occasions:
+ *
+ * - to branch to __ipipe_irq_tail_hook as requested by a high
+ *   priority domain after the pipeline delivered an interrupt,
+ *   e.g. such as Xenomai, in order to start its rescheduling
+ *   procedure, since we may not switch tasks when IRQ levels are
+ *   nested on the Blackfin, so we have to fake an interrupt return
+ *   so that we may reschedule immediately.
+ *
+ * - to branch to sync_root_irqs, in order to play any interrupt
+ *   pending for the root domain (i.e. the Linux kernel).  This lowers
+ *   the core priority level enough so that Linux IRQ handlers may
+ *   never delay interrupts handled by high priority domains; we defer
+ *   those handlers until this point instead.  This is a substitute
+ *   for using a threaded interrupt model for the Linux kernel.
+ *
+ * r0: address of user-defined routine
+ * context: caller must have preempted EVT15, hw interrupts must be off.
+ */
ENTRY(___ipipe_call_irqtail)
 	p0 = r0;
 	r0.l = 1f;
@@ -276,33 +288,19 @@ ENTRY(___ipipe_call_irqtail)
 	( r7:4, p5:3 ) = [sp++];
 	rets = [sp++];
 
-	[--sp] = reti;
-	reti = [sp++];		/* IRQs are off. */
-	r0.h = 3f;
-	r0.l = 3f;
-	p0.l = lo(EVT14);
-	p0.h = hi(EVT14);
-	[p0] = r0;
-	csync;
-	r0 = 0x401f (z);
+#ifdef CONFIG_DEBUG_HWERR
+	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
+	r0 = (EVT_IVG14 | EVT_IVHW | \
+	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#else
+	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
+	r0 = (EVT_IVG14 | \
+	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#endif
 	sti r0;
-	raise 14;
-	[--sp] = reti;		/* IRQs on. */
+	raise 14;		/* Branches to _evt_evt14 */
 2:
 	jump 2b;		/* Likely paranoid. */
-3:
-	sp += 4;		/* Discard saved RETI */
-	r0.h = _evt14_softirq;
-	r0.l = _evt14_softirq;
-	p0.l = lo(EVT14);
-	p0.h = hi(EVT14);
-	[p0] = r0;
-	csync;
-	p0.l = _bfin_irq_flags;
-	p0.h = _bfin_irq_flags;
-	r0 = [p0];
-	sti r0;
-	rts;
 ENDPROC(___ipipe_call_irqtail)
 
 #endif /* CONFIG_IPIPE */
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index b42150190d0..6ffda78aaf9 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -967,7 +967,7 @@ void __cpuinit init_exception_vectors(void)
 	bfin_write_EVT11(evt_evt11);
 	bfin_write_EVT12(evt_evt12);
 	bfin_write_EVT13(evt_evt13);
-	bfin_write_EVT14(evt14_softirq);
+	bfin_write_EVT14(evt_evt14);
 	bfin_write_EVT15(evt_system_call);
 	CSYNC();
 }
@@ -1052,18 +1052,26 @@ int __init init_arch_irq(void)
 			set_irq_chained_handler(irq, bfin_demux_error_irq);
 			break;
 #endif
+
 #ifdef CONFIG_SMP
+#ifdef CONFIG_TICKSOURCE_GPTMR0
+		case IRQ_TIMER0:
+#endif
+#ifdef CONFIG_TICKSOURCE_CORETMR
+		case IRQ_CORETMR:
+#endif
 		case IRQ_SUPPLE_0:
 		case IRQ_SUPPLE_1:
 			set_irq_handler(irq, handle_percpu_irq);
 			break;
 #endif
+
 #ifdef CONFIG_IPIPE
 #ifndef CONFIG_TICKSOURCE_CORETMR
 		case IRQ_TIMER0:
 			set_irq_handler(irq, handle_simple_irq);
 			break;
-#endif /* !CONFIG_TICKSOURCE_CORETMR */
+#endif
+
 		case IRQ_CORETMR:
 			set_irq_handler(irq, handle_simple_irq);
 			break;
@@ -1071,15 +1079,10 @@ int __init init_arch_irq(void)
 			set_irq_handler(irq, handle_level_irq);
 			break;
 #else /* !CONFIG_IPIPE */
-#ifdef CONFIG_TICKSOURCE_GPTMR0
-		case IRQ_TIMER0:
-			set_irq_handler(irq, handle_percpu_irq);
-			break;
-#endif /* CONFIG_TICKSOURCE_GPTMR0 */
 		default:
 			set_irq_handler(irq, handle_simple_irq);
 			break;
-#endif	/* !CONFIG_IPIPE */
+#endif /* !CONFIG_IPIPE */
 		}
 	}
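The SMP cases added to init_arch_irq() route the tick source through the same flow handler as the IPI lines: handle_percpu_irq is the stock genirq handler that skips the descriptor lock and affinity handling, which suits a strictly per-core interrupt. A simplified restatement (illustrative only; the real code keeps these cases inside the big switch):

```c
#include <linux/init.h>
#include <linux/irq.h>

/* Illustrative: per-CPU interrupts (the IPIs, and the tick source under
 * SMP) get the lockless per-CPU flow handler instead of handle_simple_irq. */
static void __init route_percpu_irqs_sketch(void)
{
	set_irq_handler(IRQ_SUPPLE_0, handle_percpu_irq);
	set_irq_handler(IRQ_SUPPLE_1, handle_percpu_irq);
#ifdef CONFIG_TICKSOURCE_CORETMR
	set_irq_handler(IRQ_CORETMR, handle_percpu_irq);
#endif
}
```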
diff --git a/arch/blackfin/mach-common/lock.S b/arch/blackfin/mach-common/lock.S
deleted file mode 100644
index 6c5f5f0ea7f..00000000000
--- a/arch/blackfin/mach-common/lock.S
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * File:         arch/blackfin/mach-common/lock.S
- * Based on:
- * Author:       LG Soft India
- *
- * Created:      ?
- * Description:  kernel locks
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-
-.text
-
-/* When you come here, it is assumed that
- * R0 - Which way to be locked
- */
-
-ENTRY(_cache_grab_lock)
-
-	[--SP]=( R7:0,P5:0 );
-
-	P1.H = HI(IMEM_CONTROL);
-	P1.L = LO(IMEM_CONTROL);
-	P5.H = HI(ICPLB_ADDR0);
-	P5.L = LO(ICPLB_ADDR0);
-	P4.H = HI(ICPLB_DATA0);
-	P4.L = LO(ICPLB_DATA0);
-	R7 = R0;
-
-	/* If the code of interest already resides in the cache
-	 * invalidate the entire cache itself.
-	 * invalidate_entire_icache;
-	 */
-
-	SP += -12;
-	[--SP] = RETS;
-	CALL _invalidate_entire_icache;
-	RETS = [SP++];
-	SP += 12;
-
-	/* Disable the Interrupts */
-
-	CLI R3;
-
-.LLOCK_WAY:
-
-	/* Way0 - 0xFFA133E0
-	 * Way1 - 0xFFA137E0
-	 * Way2 - 0xFFA13BE0	Total Way Size = 4K
-	 * Way3 - 0xFFA13FE0
-	 */
-
-	/* Procedure Ex. -Set the locks for other ways by setting ILOC[3:1]
-	 * Only Way0 of the instruction cache can now be
-	 * replaced by a new code
-	 */
-
-	R5 = R7;
-	CC = BITTST(R7,0);
-	IF CC JUMP .LCLEAR1;
-	R7 = 0;
-	BITSET(R7,0);
-	JUMP .LDONE1;
-
-.LCLEAR1:
-	R7 = 0;
-	BITCLR(R7,0);
-.LDONE1:	R4 = R7 << 3;
-	R7 = [P1];
-	R7 = R7 | R4;
-	SSYNC;		/* SSYNC required writing to IMEM_CONTROL. */
-	.align 8;
-	[P1] = R7;
-	SSYNC;
-
-	R7 = R5;
-	CC = BITTST(R7,1);
-	IF CC JUMP .LCLEAR2;
-	R7 = 0;
-	BITSET(R7,1);
-	JUMP .LDONE2;
-
-.LCLEAR2:
-	R7 = 0;
-	BITCLR(R7,1);
-.LDONE2:	R4 = R7 << 3;
-	R7 = [P1];
-	R7 = R7 | R4;
-	SSYNC;		/* SSYNC required writing to IMEM_CONTROL. */
-	.align 8;
-	[P1] = R7;
-	SSYNC;
-
-	R7 = R5;
-	CC = BITTST(R7,2);
-	IF CC JUMP .LCLEAR3;
-	R7 = 0;
-	BITSET(R7,2);
-	JUMP .LDONE3;
-.LCLEAR3:
-	R7 = 0;
-	BITCLR(R7,2);
-.LDONE3:	R4 = R7 << 3;
-	R7 = [P1];
-	R7 = R7 | R4;
-	SSYNC;		/* SSYNC required writing to IMEM_CONTROL. */
-	.align 8;
-	[P1] = R7;
-	SSYNC;
-
-	R7 = R5;
-	CC = BITTST(R7,3);
-	IF CC JUMP .LCLEAR4;
-	R7 = 0;
-	BITSET(R7,3);
-	JUMP .LDONE4;
-.LCLEAR4:
-	R7 = 0;
-	BITCLR(R7,3);
-.LDONE4:	R4 = R7 << 3;
-	R7 = [P1];
-	R7 = R7 | R4;
-	SSYNC;		/* SSYNC required writing to IMEM_CONTROL. */
-	.align 8;
-	[P1] = R7;
-	SSYNC;
-
-	STI R3;
-
-	( R7:0,P5:0 ) = [SP++];
-
-	RTS;
-ENDPROC(_cache_grab_lock)
-
-/* After the execution of critical code, the code is now locked into
- * the cache way.  Now we need to set ILOC.
- *
- * R0 - Which way to be locked
- */
-
-ENTRY(_bfin_cache_lock)
-
-	[--SP]=( R7:0,P5:0 );
-
-	P1.H = HI(IMEM_CONTROL);
-	P1.L = LO(IMEM_CONTROL);
-
-	/* Disable the Interrupts */
-	CLI R3;
-
-	R7 = [P1];
-	R2 = ~(0x78) (X);	/* mask out ILOC */
-	R7 = R7 & R2;
-	R0 = R0 << 3;
-	R7 = R0 | R7;
-	SSYNC;		/* SSYNC required writing to IMEM_CONTROL. */
-	.align 8;
-	[P1] = R7;
-	SSYNC;
-	/* Renable the Interrupts */
-	STI R3;
-
-	( R7:0,P5:0 ) = [SP++];
-	RTS;
-ENDPROC(_bfin_cache_lock)
-
-/* Invalidate the Entire Instruction cache by
- * disabling IMC bit
- */
-ENTRY(_invalidate_entire_icache)
-	[--SP] = ( R7:5);
-
-	P0.L = LO(IMEM_CONTROL);
-	P0.H = HI(IMEM_CONTROL);
-	R7 = [P0];
-
-	/* Clear the IMC bit , All valid bits in the instruction
-	 * cache are set to the invalid state
-	 */
-	BITCLR(R7,IMC_P);
-	CLI R6;
-	SSYNC;		/* SSYNC required before invalidating cache. */
-	.align 8;
-	[P0] = R7;
-	SSYNC;
-	STI R6;
-
-	/* Configures the instruction cache agian */
-	R6 = (IMC | ENICPLB);
-	R7 = R7 | R6;
-
-	CLI R6;
-	SSYNC;		/* SSYNC required before writing to IMEM_CONTROL. */
-	.align 8;
-	[P0] = R7;
-	SSYNC;
-	STI R6;
-
-	( R7:5) = [SP++];
-	RTS;
-ENDPROC(_invalidate_entire_icache)
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 9e7e27b7fc8..0e3d4ff9d8b 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -38,6 +38,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 
+#include <asm/cplb.h>
 #include <asm/gpio.h>
 #include <asm/dma.h>
 #include <asm/dpmc.h>
@@ -170,58 +171,6 @@ static void flushinv_all_dcache(void)
 }
 #endif
 
-static inline void dcache_disable(void)
-{
-#ifdef CONFIG_BFIN_DCACHE
-	unsigned long ctrl;
-
-#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
-	flushinv_all_dcache();
-#endif
-	SSYNC();
-	ctrl = bfin_read_DMEM_CONTROL();
-	ctrl &= ~ENDCPLB;
-	bfin_write_DMEM_CONTROL(ctrl);
-	SSYNC();
-#endif
-}
-
-static inline void dcache_enable(void)
-{
-#ifdef CONFIG_BFIN_DCACHE
-	unsigned long ctrl;
-	SSYNC();
-	ctrl = bfin_read_DMEM_CONTROL();
-	ctrl |= ENDCPLB;
-	bfin_write_DMEM_CONTROL(ctrl);
-	SSYNC();
-#endif
-}
-
-static inline void icache_disable(void)
-{
-#ifdef CONFIG_BFIN_ICACHE
-	unsigned long ctrl;
-	SSYNC();
-	ctrl = bfin_read_IMEM_CONTROL();
-	ctrl &= ~ENICPLB;
-	bfin_write_IMEM_CONTROL(ctrl);
-	SSYNC();
-#endif
-}
-
-static inline void icache_enable(void)
-{
-#ifdef CONFIG_BFIN_ICACHE
-	unsigned long ctrl;
-	SSYNC();
-	ctrl = bfin_read_IMEM_CONTROL();
-	ctrl |= ENICPLB;
-	bfin_write_IMEM_CONTROL(ctrl);
-	SSYNC();
-#endif
-}
-
 int bfin_pm_suspend_mem_enter(void)
 {
 	unsigned long flags;
@@ -258,16 +207,19 @@ int bfin_pm_suspend_mem_enter(void)
 
 	bfin_gpio_pm_hibernate_suspend();
 
-	dcache_disable();
-	icache_disable();
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
+	flushinv_all_dcache();
+#endif
+	_disable_dcplb();
+	_disable_icplb();
 	bf53x_suspend_l1_mem(memptr);
 
 	do_hibernate(wakeup | vr_wakeup);	/* Goodbye */
 
 	bf53x_resume_l1_mem(memptr);
 
-	icache_enable();
-	dcache_enable();
+	_enable_icplb();
+	_enable_dcplb();
 
 	bfin_gpio_pm_hibernate_restore();
 	blackfin_dma_resume();
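With the open-coded cache helpers gone, pm.c relies on the _disable_dcplb()/_enable_dcplb() family pulled in through the new asm/cplb.h include (the same layer whose _enable_cplb() primitive cache-c.c now calls). A sketch of what one of these plausibly does, modeled directly on the dcache_disable() deleted above; the in-tree asm/cplb.h implementation may differ in detail:

```c
#include <asm/blackfin.h>

/* Sketch only, modeled on the deleted dcache_disable(): drop ENDCPLB in
 * DMEM_CONTROL with SSYNCs fencing the MMR update.  The real
 * _disable_dcplb() lives in asm/cplb.h. */
static inline void disable_dcplb_sketch(void)
{
	unsigned long ctrl;

	SSYNC();
	ctrl = bfin_read_DMEM_CONTROL();
	ctrl &= ~ENDCPLB;
	bfin_write_DMEM_CONTROL(ctrl);
	SSYNC();
}
```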