Diffstat (limited to 'arch/blackfin/mach-common')
-rw-r--r--  arch/blackfin/mach-common/arch_checks.c      9
-rw-r--r--  arch/blackfin/mach-common/cache.S           22
-rw-r--r--  arch/blackfin/mach-common/clocks-init.c      5
-rw-r--r--  arch/blackfin/mach-common/dpmc_modes.S      24
-rw-r--r--  arch/blackfin/mach-common/entry.S           70
-rw-r--r--  arch/blackfin/mach-common/head.S            84
-rw-r--r--  arch/blackfin/mach-common/interrupt.S       14
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c  137
-rw-r--r--  arch/blackfin/mach-common/pm.c              11
-rw-r--r--  arch/blackfin/mach-common/smp.c              6
10 files changed, 228 insertions, 154 deletions
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index 98133b968f7..80d39b2f9db 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -62,3 +62,12 @@
#if (CONFIG_BOOT_LOAD & 0x3)
# error "The kernel load address must be 4 byte aligned"
#endif
+
+/* The entire kernel must be able to make a 24-bit pcrel call to the start of L1 */
+#if ((0xffffffff - L1_CODE_START + 1) + CONFIG_BOOT_LOAD) > 0x1000000
+# error "The kernel load address is too high; keep it below 10meg for safety"
+#endif
+
+#if ANOMALY_05000448
+# error You are using a part with anomaly 05000448, this issue causes random memory read/write failures - that means random crashes.
+#endif
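The new reach check is easier to follow with concrete numbers. A minimal sketch, assuming L1_CODE_START = 0xFFA00000 (a typical value for Blackfin parts) and CONFIG_BOOT_LOAD = 0x1000; neither value comes from this patch:

#include <stdio.h>

int main(void)
{
	/* assumed example values, not taken from this patch */
	unsigned long long l1_code_start = 0xFFA00000ULL;
	unsigned long long boot_load     = 0x1000ULL;       /* CONFIG_BOOT_LOAD */
	unsigned long long reach         = 0x1000000ULL;    /* 24-bit pcrel span: 16 MiB */

	/* distance a call from the kernel must cover, wrapping through 0xffffffff */
	unsigned long long dist = (0xFFFFFFFFULL - l1_code_start + 1) + boot_load;

	printf("distance = 0x%llx, limit = 0x%llx, headroom ~ %llu MiB\n",
	       dist, reach, (reach - dist) >> 20);
	return 0;
}

With those numbers the headroom works out to roughly 10 MiB, which matches the "keep it below 10meg" advice in the error message.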
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index 3c98dacbf28..aa0648c6a9f 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -66,11 +66,33 @@
/* Invalidate all instruction cache lines associated with this memory area */
ENTRY(_blackfin_icache_flush_range)
+/*
+ * Workaround to avoid loading a wrong instruction after invalidating the
+ * icache, which can happen when the following sequence occurs:
+ *
+ * 1) One instruction address is cached in the instruction cache.
+ * 2) This instruction in SDRAM is changed.
+ * 3) IFLUSH[P0] is executed only once in blackfin_icache_flush_range().
+ * 4) This instruction is executed again, but the old one is loaded.
+ */
+ P0 = R0;
+ IFLUSH[P0];
do_flush IFLUSH, , nop
ENDPROC(_blackfin_icache_flush_range)
/* Flush all cache lines associated with this area of memory. */
ENTRY(_blackfin_icache_dcache_flush_range)
+/*
+ * Workaround to avoid loading a wrong instruction after invalidating the
+ * icache, which can happen when the following sequence occurs:
+ *
+ * 1) One instruction address is cached in the instruction cache.
+ * 2) This instruction in SDRAM is changed.
+ * 3) IFLUSH[P0] is executed only once in blackfin_icache_flush_range().
+ * 4) This instruction is executed again, but the old one is loaded.
+ */
+ P0 = R0;
+ IFLUSH[P0];
do_flush FLUSH, IFLUSH
ENDPROC(_blackfin_icache_dcache_flush_range)
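For context, these flush routines are typically reached from C after code in SDRAM has been modified. A hypothetical caller; patch_insn and its opcode argument are illustrative, only the blackfin_icache_flush_range prototype comes from the kernel headers:

extern void blackfin_icache_flush_range(unsigned long start, unsigned long end);

/* hypothetical helper: rewrite one 16-bit opcode in SDRAM, then invalidate it */
static void patch_insn(unsigned short *insn, unsigned short new_opcode)
{
	*insn = new_opcode;	/* step 2 of the sequence described above */
	blackfin_icache_flush_range((unsigned long)insn,
				    (unsigned long)insn + sizeof(*insn));
	/* with the extra leading IFLUSH of the first address, re-executing
	 * *insn now fetches the new opcode rather than the stale cached one */
}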
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
index 5d182abefc7..35393651359 100644
--- a/arch/blackfin/mach-common/clocks-init.c
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -14,9 +14,10 @@
#include <asm/clocks.h>
#include <asm/mem_init.h>
+#define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */
#define PLL_CTL_VAL \
(((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
- (PLL_BYPASS << 8) | (ANOMALY_05000265 ? 0x8000 : 0))
+ (PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000))
__attribute__((l1_text))
static void do_sync(void)
@@ -76,7 +77,7 @@ void init_clocks(void)
bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
#ifdef EBIU_SDGCTL
bfin_write_EBIU_SDRRC(mem_SDRRC);
- bfin_write_EBIU_SDGCTL(mem_SDGCTL);
+ bfin_write_EBIU_SDGCTL((bfin_read_EBIU_SDGCTL() & SDGCTL_WIDTH) | mem_SDGCTL);
#else
bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() & ~(SRREQ));
do_sync();
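The SDGCTL change is a read-preserve-write: bit 31 reflects the external SDRAM data-path width already latched by the boot code and must survive reprogramming. A small sketch of the idiom under that assumption (the helper name is illustrative):

#define SDGCTL_WIDTH (1u << 31)	/* bit preserved across reprogramming */

/* illustrative helper: keep the width bit set up by the boot code and
 * rewrite everything else from the freshly computed value */
static inline unsigned int merge_sdgctl(unsigned int current_sdgctl,
					unsigned int new_sdgctl)
{
	return (current_sdgctl & SDGCTL_WIDTH) | new_sdgctl;
}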
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index 4da50bcd930..8009a512fb1 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -376,10 +376,22 @@ ENTRY(_do_hibernate)
#endif
#ifdef PINT0_ASSIGN
+ PM_SYS_PUSH(PINT0_MASK_SET)
+ PM_SYS_PUSH(PINT1_MASK_SET)
+ PM_SYS_PUSH(PINT2_MASK_SET)
+ PM_SYS_PUSH(PINT3_MASK_SET)
PM_SYS_PUSH(PINT0_ASSIGN)
PM_SYS_PUSH(PINT1_ASSIGN)
PM_SYS_PUSH(PINT2_ASSIGN)
PM_SYS_PUSH(PINT3_ASSIGN)
+ PM_SYS_PUSH(PINT0_INVERT_SET)
+ PM_SYS_PUSH(PINT1_INVERT_SET)
+ PM_SYS_PUSH(PINT2_INVERT_SET)
+ PM_SYS_PUSH(PINT3_INVERT_SET)
+ PM_SYS_PUSH(PINT0_EDGE_SET)
+ PM_SYS_PUSH(PINT1_EDGE_SET)
+ PM_SYS_PUSH(PINT2_EDGE_SET)
+ PM_SYS_PUSH(PINT3_EDGE_SET)
#endif
PM_SYS_PUSH(EBIU_AMBCTL0)
@@ -714,10 +726,22 @@ ENTRY(_do_hibernate)
PM_SYS_POP(EBIU_AMBCTL0)
#ifdef PINT0_ASSIGN
+ PM_SYS_POP(PINT3_EDGE_SET)
+ PM_SYS_POP(PINT2_EDGE_SET)
+ PM_SYS_POP(PINT1_EDGE_SET)
+ PM_SYS_POP(PINT0_EDGE_SET)
+ PM_SYS_POP(PINT3_INVERT_SET)
+ PM_SYS_POP(PINT2_INVERT_SET)
+ PM_SYS_POP(PINT1_INVERT_SET)
+ PM_SYS_POP(PINT0_INVERT_SET)
PM_SYS_POP(PINT3_ASSIGN)
PM_SYS_POP(PINT2_ASSIGN)
PM_SYS_POP(PINT1_ASSIGN)
PM_SYS_POP(PINT0_ASSIGN)
+ PM_SYS_POP(PINT3_MASK_SET)
+ PM_SYS_POP(PINT2_MASK_SET)
+ PM_SYS_POP(PINT1_MASK_SET)
+ PM_SYS_POP(PINT0_MASK_SET)
#endif
#ifdef SICA_IWR1
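PM_SYS_PUSH and PM_SYS_POP behave like a stack, so the restore sequence must be the exact mirror of the save sequence; that is why PINTx_MASK_SET is pushed first and popped last above. A toy model of that ordering constraint (stand-in code, not the real macros):

/* stand-in stack model for the PM_SYS_PUSH/PM_SYS_POP ordering */
#define PM_STACK_DEPTH 64

static unsigned int pm_stack[PM_STACK_DEPTH];
static int pm_sp;

static void pm_sys_push(unsigned int mmr_value)
{
	pm_stack[pm_sp++] = mmr_value;	/* saved before hibernate */
}

static unsigned int pm_sys_pop(void)
{
	return pm_stack[--pm_sp];	/* restored in reverse order on resume */
}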
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index fae77465137..21e65a339a2 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -151,13 +151,6 @@ ENTRY(_ex_syscall)
jump.s _bfin_return_from_exception;
ENDPROC(_ex_syscall)
-ENTRY(_ex_soft_bp)
- r7 = retx;
- r7 += -2;
- retx = r7;
- jump.s _ex_trap_c;
-ENDPROC(_ex_soft_bp)
-
ENTRY(_ex_single_step)
/* If we just returned from an interrupt, the single step event is
for the RTI instruction. */
@@ -607,6 +600,19 @@ ENTRY(_system_call)
p2 = [p2];
[p2+(TASK_THREAD+THREAD_KSP)] = sp;
+#ifdef CONFIG_IPIPE
+ r0 = sp;
+ SP += -12;
+ call ___ipipe_syscall_root;
+ SP += 12;
+ cc = r0 == 1;
+ if cc jump .Lsyscall_really_exit;
+ cc = r0 == -1;
+ if cc jump .Lresume_userspace;
+ r3 = [sp + PT_R3];
+ r4 = [sp + PT_R4];
+ p0 = [sp + PT_ORIG_P0];
+#endif /* CONFIG_IPIPE */
/* Check the System Call */
r7 = __NR_syscall;
@@ -661,6 +667,17 @@ ENTRY(_system_call)
r7 = r7 & r4;
.Lsyscall_resched:
+#ifdef CONFIG_IPIPE
+ cc = BITTST(r7, TIF_IRQ_SYNC);
+ if !cc jump .Lsyscall_no_irqsync;
+ [--sp] = reti;
+ r0 = [sp++];
+ SP += -12;
+ call ___ipipe_sync_root;
+ SP += 12;
+ jump .Lresume_userspace_1;
+.Lsyscall_no_irqsync:
+#endif
cc = BITTST(r7, TIF_NEED_RESCHED);
if !cc jump .Lsyscall_sigpending;
@@ -692,6 +709,10 @@ ENTRY(_system_call)
.Lsyscall_really_exit:
r5 = [sp + PT_RESERVED];
rets = r5;
+#ifdef CONFIG_IPIPE
+ [--sp] = reti;
+ r5 = [sp++];
+#endif /* CONFIG_IPIPE */
rts;
ENDPROC(_system_call)
@@ -778,6 +799,15 @@ _new_old_task:
ENDPROC(_resume)
ENTRY(_ret_from_exception)
+#ifdef CONFIG_IPIPE
+ [--sp] = rets;
+ SP += -12;
+ call ___ipipe_check_root
+ SP += 12
+ rets = [sp++];
+ cc = r0 == 0;
+ if cc jump 4f; /* not on behalf of Linux, get out */
+#endif /* CONFIG_IPIPE */
p2.l = lo(IPEND);
p2.h = hi(IPEND);
@@ -834,6 +864,28 @@ ENTRY(_ret_from_exception)
rts;
ENDPROC(_ret_from_exception)
+#ifdef CONFIG_IPIPE
+
+_sync_root_irqs:
+ [--sp] = reti; /* Reenable interrupts */
+ r0 = [sp++];
+ jump.l ___ipipe_sync_root
+
+_resume_kernel_from_int:
+ r0.l = _sync_root_irqs
+ r0.h = _sync_root_irqs
+ [--sp] = rets;
+ [--sp] = ( r7:4, p5:3 );
+ SP += -12;
+ call ___ipipe_call_irqtail
+ SP += 12;
+ ( r7:4, p5:3 ) = [sp++];
+ rets = [sp++];
+ rts
+#else
+#define _resume_kernel_from_int 2f
+#endif
+
ENTRY(_return_from_int)
/* If someone else already raised IRQ 15, do nothing. */
csync;
@@ -855,7 +907,7 @@ ENTRY(_return_from_int)
r1 = r0 - r1;
r2 = r0 & r1;
cc = r2 == 0;
- if !cc jump 2f;
+ if !cc jump _resume_kernel_from_int;
/* Lower the interrupt level to 15. */
p0.l = lo(EVT15);
@@ -1087,7 +1139,7 @@ ENTRY(_ex_table)
* EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 to be user defined
*/
.long _ex_syscall /* 0x00 - User Defined - Linux Syscall */
- .long _ex_soft_bp /* 0x01 - User Defined - Software breakpoint */
+ .long _ex_trap_c /* 0x01 - User Defined - Software breakpoint */
#ifdef CONFIG_KGDB
.long _ex_trap_c /* 0x02 - User Defined - KGDB initial connection
and break signal trap */
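The new CONFIG_IPIPE hunk in _system_call branches on the return value of __ipipe_syscall_root. Read as C, the assembly implements roughly the following dispatch; this is a simplified reading of the diff, and the enum and helper are illustrative, not kernel API:

struct pt_regs;
extern int __ipipe_syscall_root(struct pt_regs *regs);	/* signature assumed */

enum syscall_gate { RUN_LINUX_SYSCALL, RESUME_USERSPACE, SYSCALL_REALLY_EXIT };

static enum syscall_gate ipipe_syscall_gate(struct pt_regs *regs)
{
	int ret = __ipipe_syscall_root(regs);

	if (ret == 1)
		return SYSCALL_REALLY_EXIT;	/* another domain handled the syscall */
	if (ret == -1)
		return RESUME_USERSPACE;	/* skip the Linux handler, run exit work */
	return RUN_LINUX_SYSCALL;		/* fall through to the sys_call_table path */
}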
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index e1e42c029e1..698d4c05947 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -17,6 +17,19 @@
__INIT
+ENTRY(__init_clear_bss)
+ r2 = r2 - r1;
+ cc = r2 == 0;
+ if cc jump .L_bss_done;
+ r2 >>= 2;
+ p1 = r1;
+ p2 = r2;
+ lsetup (1f, 1f) lc0 = p2;
+1: [p1++] = r0;
+.L_bss_done:
+ rts;
+ENDPROC(__init_clear_bss)
+
#define INITIAL_STACK (L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
ENTRY(__start)
@@ -144,6 +157,35 @@ ENTRY(__start)
call _init_early_exception_vectors;
#endif
+ r0 = 0 (x);
+ /* Zero out all of the fun bss regions */
+#if L1_DATA_A_LENGTH > 0
+ r1.l = __sbss_l1;
+ r1.h = __sbss_l1;
+ r2.l = __ebss_l1;
+ r2.h = __ebss_l1;
+ call __init_clear_bss
+#endif
+#if L1_DATA_B_LENGTH > 0
+ r1.l = __sbss_b_l1;
+ r1.h = __sbss_b_l1;
+ r2.l = __ebss_b_l1;
+ r2.h = __ebss_b_l1;
+ call __init_clear_bss
+#endif
+#if L2_LENGTH > 0
+ r1.l = __sbss_l2;
+ r1.h = __sbss_l2;
+ r2.l = __ebss_l2;
+ r2.h = __ebss_l2;
+ call __init_clear_bss
+#endif
+ r1.l = ___bss_start;
+ r1.h = ___bss_start;
+ r2.l = ___bss_stop;
+ r2.h = ___bss_stop;
+ call __init_clear_bss
+
/* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
call _bfin_relocate_l1_mem;
#ifdef CONFIG_BFIN_KERNEL_CLOCK
@@ -185,19 +227,6 @@ ENDPROC(__start)
# define WDOG_CTL WDOGA_CTL
#endif
-ENTRY(__init_clear_bss)
- r2 = r2 - r1;
- cc = r2 == 0;
- if cc jump .L_bss_done;
- r2 >>= 2;
- p1 = r1;
- p2 = r2;
- lsetup (1f, 1f) lc0 = p2;
-1: [p1++] = r0;
-.L_bss_done:
- rts;
-ENDPROC(__init_clear_bss)
-
ENTRY(_real_start)
/* Enable nested interrupts */
[--sp] = reti;
@@ -209,35 +238,6 @@ ENTRY(_real_start)
w[p0] = r0;
ssync;
- r0 = 0 (x);
- /* Zero out all of the fun bss regions */
-#if L1_DATA_A_LENGTH > 0
- r1.l = __sbss_l1;
- r1.h = __sbss_l1;
- r2.l = __ebss_l1;
- r2.h = __ebss_l1;
- call __init_clear_bss
-#endif
-#if L1_DATA_B_LENGTH > 0
- r1.l = __sbss_b_l1;
- r1.h = __sbss_b_l1;
- r2.l = __ebss_b_l1;
- r2.h = __ebss_b_l1;
- call __init_clear_bss
-#endif
-#if L2_LENGTH > 0
- r1.l = __sbss_l2;
- r1.h = __sbss_l2;
- r2.l = __ebss_l2;
- r2.h = __ebss_l2;
- call __init_clear_bss
-#endif
- r1.l = ___bss_start;
- r1.h = ___bss_start;
- r2.l = ___bss_stop;
- r2.h = ___bss_stop;
- call __init_clear_bss
-
/* Pass the u-boot arguments to the global value command line */
R0 = R7;
call _cmdline_init;
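With this hunk the BSS-clearing calls run before _bfin_relocate_l1_mem, so the L1/L2 bss regions are zeroed before anything is copied into L1. The clearing loop itself is a plain word fill; a rough C equivalent, assuming 4-byte-aligned regions as the assembly does:

#include <stdint.h>

/* rough C equivalent of __init_clear_bss: r0 = fill value, r1 = start, r2 = end */
static void init_clear_bss(uint32_t fill, uint32_t *start, uint32_t *end)
{
	uint32_t bytes = (uint32_t)((uintptr_t)end - (uintptr_t)start);

	if (bytes == 0)
		return;
	for (uint32_t i = 0; i < (bytes >> 2); i++)	/* bytes to 32-bit words */
		*start++ = fill;
}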
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 473df0f7fa7..0069c2dd462 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -195,7 +195,7 @@ ENDPROC(_evt_ivhw)
/* Interrupt routine for evt2 (NMI).
* We don't actually use this, so just return.
* For inner circle type details, please see:
- * http://docs.blackfin.uclinux.org/doku.php?id=linux:nmi
+ * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
*/
ENTRY(_evt_nmi)
.weak _evt_nmi
@@ -235,6 +235,7 @@ ENDPROC(_evt_system_call)
#ifdef CONFIG_IPIPE
ENTRY(___ipipe_call_irqtail)
+ p0 = r0;
r0.l = 1f;
r0.h = 1f;
reti = r0;
@@ -242,9 +243,6 @@ ENTRY(___ipipe_call_irqtail)
1:
[--sp] = rets;
[--sp] = ( r7:4, p5:3 );
- p0.l = ___ipipe_irq_tail_hook;
- p0.h = ___ipipe_irq_tail_hook;
- p0 = [p0];
sp += -12;
call (p0);
sp += 12;
@@ -259,7 +257,7 @@ ENTRY(___ipipe_call_irqtail)
p0.h = hi(EVT14);
[p0] = r0;
csync;
- r0 = 0x401f;
+ r0 = 0x401f (z);
sti r0;
raise 14;
[--sp] = reti; /* IRQs on. */
@@ -277,11 +275,7 @@ ENTRY(___ipipe_call_irqtail)
p0.h = _bfin_irq_flags;
r0 = [p0];
sti r0;
-#if 0 /* FIXME: this actually raises scheduling latencies */
- /* Reenable interrupts */
- [--sp] = reti;
- r0 = [sp++];
-#endif
rts;
ENDPROC(___ipipe_call_irqtail)
+
#endif /* CONFIG_IPIPE */
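With the "p0 = r0" change, ___ipipe_call_irqtail no longer loads ___ipipe_irq_tail_hook itself; the caller passes the routine to run, and the matching caller added in entry.S passes _sync_root_irqs. Seen from C, the implied prototype is along these lines (assumed from the diff, not copied from a header):

void __ipipe_call_irqtail(unsigned long tail_routine);	/* assumed prototype */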
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 1bba6030dce..a7d7b2dd405 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -161,11 +161,15 @@ static void bfin_core_unmask_irq(unsigned int irq)
static void bfin_internal_mask_irq(unsigned int irq)
{
+ unsigned long flags;
+
#ifdef CONFIG_BF53x
+ local_irq_save_hw(flags);
bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
~(1 << SIC_SYSIRQ(irq)));
#else
unsigned mask_bank, mask_bit;
+ local_irq_save_hw(flags);
mask_bank = SIC_SYSIRQ(irq) / 32;
mask_bit = SIC_SYSIRQ(irq) % 32;
bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
@@ -175,15 +179,20 @@ static void bfin_internal_mask_irq(unsigned int irq)
~(1 << mask_bit));
#endif
#endif
+ local_irq_restore_hw(flags);
}
static void bfin_internal_unmask_irq(unsigned int irq)
{
+ unsigned long flags;
+
#ifdef CONFIG_BF53x
+ local_irq_save_hw(flags);
bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
(1 << SIC_SYSIRQ(irq)));
#else
unsigned mask_bank, mask_bit;
+ local_irq_save_hw(flags);
mask_bank = SIC_SYSIRQ(irq) / 32;
mask_bit = SIC_SYSIRQ(irq) % 32;
bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
@@ -193,6 +202,7 @@ static void bfin_internal_unmask_irq(unsigned int irq)
(1 << mask_bit));
#endif
#endif
+ local_irq_restore_hw(flags);
}
#ifdef CONFIG_PM
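The new local_irq_save_hw()/local_irq_restore_hw() pair closes a read-modify-write race on SIC_IMASK: under I-pipe, hard interrupts are not otherwise disabled here, so a head-domain interrupt could update the same bank between the read and the write and its change would be lost. Consolidated, the protected pattern looks like this, using the bank-style accessors from the non-BF53x branch of the diff (not a drop-in for the real functions):

static void sic_imask_mask_bit(unsigned int bank, unsigned int bit)
{
	unsigned long flags;

	local_irq_save_hw(flags);	/* also keeps I-pipe hard irqs out */
	bfin_write_SIC_IMASK(bank, bfin_read_SIC_IMASK(bank) & ~(1 << bit));
	local_irq_restore_hw(flags);
}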
@@ -390,7 +400,7 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
- _set_irq_handler(irq, handle_edge_irq);
+ _set_irq_handler(irq, handle_level_irq);
#else
struct irq_desc *desc = irq_desc + irq;
/* May not call generic set_irq_handler() due to spinlock
@@ -1055,13 +1065,18 @@ int __init init_arch_irq(void)
#endif
default:
#ifdef CONFIG_IPIPE
- /*
- * We want internal interrupt sources to be masked, because
- * ISRs may trigger interrupts recursively (e.g. DMA), but
- * interrupts are _not_ masked at CPU level. So let's handle
- * them as level interrupts.
- */
- set_irq_handler(irq, handle_level_irq);
+ /*
+ * We want internal interrupt sources to be
+ * masked, because ISRs may trigger interrupts
+ * recursively (e.g. DMA), but interrupts are
+ * _not_ masked at CPU level. So let's handle
+ * most of them as level interrupts, except
+ * the timer interrupt which is special.
+ */
+ if (irq == IRQ_SYSTMR || irq == IRQ_CORETMR)
+ set_irq_handler(irq, handle_simple_irq);
+ else
+ set_irq_handler(irq, handle_level_irq);
#else /* !CONFIG_IPIPE */
set_irq_handler(irq, handle_simple_irq);
#endif /* !CONFIG_IPIPE */
@@ -1101,10 +1116,9 @@ int __init init_arch_irq(void)
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
- || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
+#ifdef SIC_IWR0
bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
-#if defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
+# ifdef SIC_IWR1
/* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
* will screw up the bootrom as it relies on MDMA0/1 waking it
* up from IDLE instructions. See this report for more info:
@@ -1114,10 +1128,8 @@ int __init init_arch_irq(void)
bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
else
bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
-#else
- bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
-#endif
-# ifdef CONFIG_BF54x
+# endif
+# ifdef SIC_IWR2
bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
@@ -1126,9 +1138,8 @@ int __init init_arch_irq(void)
#ifdef CONFIG_IPIPE
for (irq = 0; irq < NR_IRQS; irq++) {
- struct irq_desc *desc = irq_desc + irq;
+ struct irq_desc *desc = irq_to_desc(irq);
desc->ic_prio = __ipipe_get_irq_priority(irq);
- desc->thr_prio = __ipipe_get_irqthread_priority(irq);
}
#endif /* CONFIG_IPIPE */
@@ -1211,76 +1222,21 @@ int __ipipe_get_irq_priority(unsigned irq)
return IVG15;
}
-int __ipipe_get_irqthread_priority(unsigned irq)
-{
- int ient, prio;
- int demux_irq;
-
- /* The returned priority value is rescaled to [0..IVG13+1]
- * with 0 being the lowest effective priority level. */
-
- if (irq <= IRQ_CORETMR)
- return IVG13 - irq + 1;
-
- /* GPIO IRQs are given the priority of the demux
- * interrupt. */
- if (IS_GPIOIRQ(irq)) {
-#if defined(CONFIG_BF54x)
- u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
- demux_irq = (bank == 0 ? IRQ_PINT0 :
- bank == 1 ? IRQ_PINT1 :
- bank == 2 ? IRQ_PINT2 :
- IRQ_PINT3);
-#elif defined(CONFIG_BF561)
- demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
- irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
- IRQ_PROG0_INTA);
-#elif defined(CONFIG_BF52x)
- demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
- irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
- IRQ_PORTF_INTA);
-#else
- demux_irq = irq;
-#endif
- return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
- }
-
- /* The GPIO demux interrupt is given a lower priority
- * than the GPIO IRQs, so that its threaded handler
- * unmasks the interrupt line after the decoded IRQs
- * have been processed. */
- prio = PRIO_GPIODEMUX(irq);
- /* demux irq? */
- if (prio != -1)
- return IVG13 - prio;
-
- for (ient = 0; ient < NR_PERI_INTS; ient++) {
- struct ivgx *ivg = ivg_table + ient;
- if (ivg->irqno == irq) {
- for (prio = 0; prio <= IVG13-IVG7; prio++) {
- if (ivg7_13[prio].ifirst <= ivg &&
- ivg7_13[prio].istop > ivg)
- return IVG7 - prio;
- }
- }
- }
-
- return 0;
-}
-
/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
+ struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
+ struct ipipe_domain *this_domain = ipipe_current_domain;
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
- int irq;
+ int irq, s;
if (likely(vec == EVT_IVTMR_P)) {
irq = IRQ_CORETMR;
- goto handle_irq;
+ goto core_tick;
}
SSYNC();
@@ -1322,24 +1278,39 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
irq = ivg->irqno;
if (irq == IRQ_SYSTMR) {
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+core_tick:
+#else
bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
+#endif
/* This is basically what we need from the register frame. */
__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
- if (!ipipe_root_domain_p)
- __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
- else
+ if (this_domain != ipipe_root_domain)
__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
+ else
+ __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
}
-handle_irq:
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
+core_tick:
+#endif
+ if (this_domain == ipipe_root_domain) {
+ s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
+ barrier();
+ }
ipipe_trace_irq_entry(irq);
__ipipe_handle_irq(irq, regs);
- ipipe_trace_irq_exit(irq);
+ ipipe_trace_irq_exit(irq);
- if (ipipe_root_domain_p)
- return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+ if (this_domain == ipipe_root_domain) {
+ set_thread_flag(TIF_IRQ_SYNC);
+ if (!s) {
+ __clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
+ return !test_bit(IPIPE_STALL_FLAG, &p->status);
+ }
+ }
return 0;
}
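Pieced together, the reworked tail of __ipipe_grab_irq() defers root-log synchronization while the handler runs and lets the exit path (the TIF_IRQ_SYNC check added in entry.S) do the sync later. A consolidated C reading of the diff's control flow, not a drop-in body:

static int grab_irq_tail(struct ipipe_domain *this_domain,
			 struct ipipe_percpu_domain_data *p,
			 struct pt_regs *regs, int irq)
{
	int s = 0;

	if (this_domain == ipipe_root_domain) {
		/* keep the root log from being synced while we dispatch */
		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
		barrier();
	}

	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, regs);
	ipipe_trace_irq_exit(irq);

	if (this_domain == ipipe_root_domain) {
		set_thread_flag(TIF_IRQ_SYNC);	/* entry.S syncs on the way out */
		if (!s) {
			__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
			return !test_bit(IPIPE_STALL_FLAG, &p->status);
		}
	}
	return 0;	/* nested entry or non-root domain: defer */
}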
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index d3d70fd67c1..f48a6aebb49 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -82,10 +82,9 @@ void bfin_pm_suspend_standby_enter(void)
bfin_pm_standby_restore();
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) || \
- defined(CONFIG_BF538) || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
+#ifdef SIC_IWR0
bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
-#if defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
+# ifdef SIC_IWR1
/* BF52x system reset does not properly reset SIC_IWR1 which
* will screw up the bootrom as it relies on MDMA0/1 waking it
* up from IDLE instructions. See this report for more info:
@@ -95,10 +94,8 @@ void bfin_pm_suspend_standby_enter(void)
bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
else
bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
-#else
- bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
-#endif
-# ifdef CONFIG_BF54x
+# endif
+# ifdef SIC_IWR2
bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
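Here and in ints-priority.c the same cleanup replaces CPU-family lists with tests on the wakeup-register macros themselves, so a part whose headers define SIC_IWR1 or SIC_IWR2 is handled without touching this code again. The resulting shape of the cascade, with the condition not shown by the hunk replaced by a clearly hypothetical placeholder:

#ifdef SIC_IWR0
	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
# ifdef SIC_IWR1
	if (bootrom_needs_mdma_wakeup)	/* placeholder for the condition elided by the hunk */
		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
	else
		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
	bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
	/* single-register parts: handled by the context this hunk does not show */
#endif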
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 77c99284709..93eab614607 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -158,10 +158,14 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
kfree(msg);
break;
case BFIN_IPI_CALL_FUNC:
+ spin_unlock(&msg_queue->lock);
ipi_call_function(cpu, msg);
+ spin_lock(&msg_queue->lock);
break;
case BFIN_IPI_CPU_STOP:
+ spin_unlock(&msg_queue->lock);
ipi_cpu_stop(cpu);
+ spin_lock(&msg_queue->lock);
kfree(msg);
break;
default:
@@ -457,7 +461,7 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
smp_flush_data.start = start;
smp_flush_data.end = end;
- if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
+ if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
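In ipi_handler(), the call-function and cpu-stop actions now run with the per-queue spinlock dropped, presumably because those callbacks can run for a long time or interact with other CPUs that are trying to queue further messages. A minimal sketch of that pattern; the queue and message types are illustrative stand-ins, not the kernel's structures:

struct ipi_msg { int type; void (*handler)(void *arg); void *arg; struct ipi_msg *next; };
struct ipi_queue { spinlock_t lock; struct ipi_msg *head; };

static struct ipi_msg *ipi_dequeue(struct ipi_queue *q)
{
	struct ipi_msg *msg = q->head;

	if (msg)
		q->head = msg->next;
	return msg;
}

static void drain_ipi_queue(struct ipi_queue *q)
{
	struct ipi_msg *msg;

	spin_lock(&q->lock);
	while ((msg = ipi_dequeue(q)) != NULL) {
		/* run the callback outside the per-queue lock, as the patch
		 * now does for BFIN_IPI_CALL_FUNC and BFIN_IPI_CPU_STOP */
		spin_unlock(&q->lock);
		msg->handler(msg->arg);
		spin_lock(&q->lock);
	}
	spin_unlock(&q->lock);
}

The smp_call_function() change in smp_icache_flush_range_others() flips the wait argument from 1 to 0, so the initiating CPU no longer blocks until the remote flush has completed.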