Diffstat (limited to 'arch/blackfin/mach-common')
-rw-r--r--  arch/blackfin/mach-common/arch_checks.c     7
-rw-r--r--  arch/blackfin/mach-common/cache-c.c        14
-rw-r--r--  arch/blackfin/mach-common/cache.S          46
-rw-r--r--  arch/blackfin/mach-common/clocks-init.c     1
-rw-r--r--  arch/blackfin/mach-common/cpufreq.c         3
-rw-r--r--  arch/blackfin/mach-common/entry.S          90
-rw-r--r--  arch/blackfin/mach-common/head.S           34
-rw-r--r--  arch/blackfin/mach-common/interrupt.S      27
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c  50
-rw-r--r--  arch/blackfin/mach-common/smp.c            36
10 files changed, 177 insertions, 131 deletions
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index 80d39b2f9db..da93d920716 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -71,3 +71,10 @@
#if ANOMALY_05000448
# error You are using a part with anomaly 05000448, this issue causes random memory read/write failures - that means random crashes.
#endif
+
+/* if anomaly 05000220 is present, we can not set External Memory WB while L2 is not cached, nor External Memory not cached while L2 is WB */
+#if ANOMALY_05000220 && \
+ ((defined(CONFIG_BFIN_WB) && defined(CONFIG_BFIN_L2_NOT_CACHED)) || \
+ (!defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_L2_WB)))
+# error You are exposing Anomaly 220 in this config: either configure L2 as Write Through, or make External Memory WB.
+#endif
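This extends the file's existing idiom: impossible Kconfig combinations are rejected at build time with #error instead of producing a kernel that fails at runtime. A generic sketch of the idiom (the option names below are illustrative, not real Kconfig symbols):

	/* build-time rejection of a mutually exclusive option pair */
	#if defined(CONFIG_OPTION_FOO) && defined(CONFIG_OPTION_BAR)
	# error CONFIG_OPTION_FOO and CONFIG_OPTION_BAR cannot both be enabled
	#endif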
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
index e6ab1f81512..b59ce3cb380 100644
--- a/arch/blackfin/mach-common/cache-c.c
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -16,9 +16,21 @@
void blackfin_invalidate_entire_dcache(void)
{
u32 dmem = bfin_read_DMEM_CONTROL();
- SSYNC();
bfin_write_DMEM_CONTROL(dmem & ~0xc);
SSYNC();
bfin_write_DMEM_CONTROL(dmem);
SSYNC();
}
+
+/* Invalidate the entire instruction cache by
+ * clearing the IMC bit
+ */
+void blackfin_invalidate_entire_icache(void)
+{
+ u32 imem = bfin_read_IMEM_CONTROL();
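+	/* clearing bit 2 (IMC) disables the I-cache, which invalidates
+	 * every line; restoring the saved value re-enables it */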
+ bfin_write_IMEM_CONTROL(imem & ~0x4);
+ SSYNC();
+ bfin_write_IMEM_CONTROL(imem);
+ SSYNC();
+}
+
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index aa0648c6a9f..d9666fe6c3d 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -15,6 +15,13 @@
.text
+/* 05000443 - IFLUSH cannot be last instruction in hardware loop */
+#if ANOMALY_05000443
+# define BROK_FLUSH_INST "IFLUSH"
+#else
+# define BROK_FLUSH_INST "no anomaly! yeah!"
+#endif
+
/* Since all L1 caches work the same way, we use the same method for flushing
* them. Only the actual flush instruction differs. We write this in asm as
* GCC can be hard to coax into writing nice hardware loops.
@@ -23,7 +30,7 @@
* R0 = start address
* R1 = end address
*/
-.macro do_flush flushins:req optflushins optnopins label
+.macro do_flush flushins:req label
R2 = -L1_CACHE_BYTES;
@@ -44,22 +51,15 @@
\label :
.endif
P0 = R0;
+
LSETUP (1f, 2f) LC1 = P1;
1:
-.ifnb \optflushins
- \optflushins [P0];
-.endif
-#if ANOMALY_05000443
-.ifb \optnopins
-2:
-.endif
+.ifeqs "\flushins", BROK_FLUSH_INST
\flushins [P0++];
-.ifnb \optnopins
-2: \optnopins;
-.endif
-#else
+2: nop;
+.else
2: \flushins [P0++];
-#endif
+.endif
RTS;
.endm
@@ -77,25 +77,9 @@ ENTRY(_blackfin_icache_flush_range)
*/
P0 = R0;
IFLUSH[P0];
- do_flush IFLUSH, , nop
+ do_flush IFLUSH
ENDPROC(_blackfin_icache_flush_range)
-/* Flush all cache lines assocoiated with this area of memory. */
-ENTRY(_blackfin_icache_dcache_flush_range)
-/*
- * Walkaround to avoid loading wrong instruction after invalidating icache
- * and following sequence is met.
- *
- * 1) One instruction address is cached in the instruction cache.
- * 2) This instruction in SDRAM is changed.
- * 3) IFLASH[P0] is executed only once in blackfin_icache_flush_range().
- * 4) This instruction is executed again, but the old one is loaded.
- */
- P0 = R0;
- IFLUSH[P0];
- do_flush FLUSH, IFLUSH
-ENDPROC(_blackfin_icache_dcache_flush_range)
-
/* Throw away all D-cached data in specified region without any obligation to
* write them back. Since the Blackfin ISA does not have an "invalidate"
* instruction, we use flush/invalidate. Perhaps as a speed optimization we
@@ -107,7 +91,7 @@ ENDPROC(_blackfin_dcache_invalidate_range)
/* Flush all data cache lines associated with this memory area */
ENTRY(_blackfin_dcache_flush_range)
- do_flush FLUSH, , , .Ldfr
+ do_flush FLUSH, .Ldfr
ENDPROC(_blackfin_dcache_flush_range)
/* Our headers convert the page structure to an address, so just need to flush
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
index 35393651359..ef6870e9eea 100644
--- a/arch/blackfin/mach-common/clocks-init.c
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -72,6 +72,7 @@ void init_clocks(void)
#endif
bfin_write_PLL_LOCKCNT(0x300);
do_sync();
+ /* We always write PLL_CTL thus avoiding Anomaly 05000242 */
bfin_write16(PLL_CTL, PLL_CTL_VAL);
__asm__ __volatile__("IDLE;");
bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 72e16605ca0..70e3411f558 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -140,7 +140,8 @@ static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
cclk = get_cclk() / 1000;
sclk = get_sclk() / 1000;
-#if ANOMALY_05000273 || (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE))
+#if ANOMALY_05000273 || ANOMALY_05000274 || \
+ (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE))
min_cclk = sclk * 2;
#else
min_cclk = sclk;
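Adding ANOMALY_05000274 to the test widens the set of parts whose lowest core clock is pinned at twice the system clock. An illustrative helper (not part of the patch) showing the resulting clamp:

	/* sketch: lowest legal CCLK on parts where the anomalies require
	 * CCLK >= 2 * SCLK */
	static unsigned long min_cclk_khz(unsigned long sclk_khz, int affected)
	{
		return affected ? sclk_khz * 2 : sclk_khz;
	}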
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 21e65a339a2..31fa313e81c 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -36,13 +36,13 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/unistd.h>
-#include <linux/threads.h>
#include <asm/blackfin.h>
#include <asm/errno.h>
#include <asm/fixed_code.h>
#include <asm/thread_info.h> /* TIF_NEED_RESCHED */
#include <asm/asm-offsets.h>
#include <asm/trace.h>
+#include <asm/traps.h>
#include <asm/context.S>
@@ -85,13 +85,15 @@ ENTRY(_ex_workaround_261)
if !cc jump _bfin_return_from_exception;
/* fall through */
R7 = P4;
- R6 = 0x26; /* Data CPLB Miss */
+ R6 = VEC_CPLB_M; /* Data CPLB Miss */
cc = R6 == R7;
if cc jump _ex_dcplb_miss (BP);
- R6 = 0x23; /* Data CPLB Miss */
+#ifdef CONFIG_MPU
+ R6 = VEC_CPLB_VL; /* Data CPLB Violation */
cc = R6 == R7;
if cc jump _ex_dcplb_viol (BP);
- /* Handle 0x23 Data CPLB Protection Violation
+#endif
+ /* Handle Data CPLB Protection Violation
* and Data CPLB Multiple Hits - Linux Trap Zero
*/
jump _ex_trap_c;
@@ -201,7 +203,18 @@ ENTRY(_ex_single_step)
cc = r7 == 0;
if !cc jump 1f;
#endif
-
+#ifdef CONFIG_EXACT_HWERR
+	/* Read the ILAT to check whether the process we are
+	 * single stepping caused a previous hardware error.
+	 * If so, do not single step (single stepping lowers to IRQ5
+	 * and would make us miss the error).
+ */
+ p5.l = lo(ILAT);
+ p5.h = hi(ILAT);
+ r7 = [p5];
+ cc = bittst(r7, EVT_IVHW_P);
+ if cc jump 1f;
+#endif
/* Single stepping only a single instruction, so clear the trace
* bit here. */
r7 = syscfg;
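A C-level restatement of the new check may help. This is only a sketch, using the generic MMR accessor bfin_read32() and the standard mask for the hardware-error event (EVT_IVHW, bit 5 of ILAT); the label is hypothetical:

	/* sketch: if a hardware error is already latched in ILAT, skip
	 * single stepping so we don't lower to IRQ5 and lose the error */
	if (bfin_read32(ILAT) & EVT_IVHW)
		goto skip_single_step;	/* hypothetical label */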
@@ -260,16 +273,7 @@ ENTRY(_bfin_return_from_exception)
r6.l = lo(SEQSTAT_EXCAUSE);
r6.h = hi(SEQSTAT_EXCAUSE);
r7 = r7 & r6;
- r6 = 0x25;
- CC = R7 == R6;
- if CC JUMP _double_fault;
-
- /* Did we cause a HW error? */
- p5.l = lo(ILAT);
- p5.h = hi(ILAT);
- r6 = [p5];
- r7 = 0x20; /* Did I just cause anther HW error? */
- r6 = r7 & r6;
+ r6 = VEC_UNCOV;
CC = R7 == R6;
if CC JUMP _double_fault;
#endif
@@ -473,6 +477,16 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
[--sp] = ASTAT;
[--sp] = (R7:6,P5:4);
+#ifdef CONFIG_EXACT_HWERR
+	/* Make sure all pending reads/writes complete. This ensures any
+	 * accesses which could cause hardware errors complete, and the
+	 * hardware can signal them, before we do something silly, like
+	 * crashing the kernel. We don't need to work around anomaly
+	 * 05000312, since we are already atomic.
+ */
+ ssync;
+#endif
+
#if ANOMALY_05000283 || ANOMALY_05000315
cc = r7 == r7;
p5.h = HI(CHIPID);
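The reasoning behind the bare ssync: it stalls until every pending system read and write completes, so an access that is going to fault latches its hardware error before the handler proceeds. A minimal hedged wrapper in C, assuming the kernel's existing SSYNC() macro:

	/* sketch: drain outstanding accesses so any hardware error they
	 * raise is visible before we touch more state */
	static inline void drain_outstanding_accesses(void)
	{
		SSYNC();
	}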
@@ -855,7 +869,7 @@ ENTRY(_ret_from_exception)
p1.h = _schedule_and_signal;
[p0] = p1;
csync;
- raise 15; /* raise evt14 to do signal or reschedule */
+ raise 15; /* raise evt15 to do signal or reschedule */
4:
r0 = syscfg;
bitclr(r0, 0);
@@ -916,7 +930,7 @@ ENTRY(_return_from_int)
p1.h = _schedule_and_signal_from_int;
[p0] = p1;
csync;
-#if ANOMALY_05000281
+#if ANOMALY_05000281 || ANOMALY_05000461
r0.l = lo(SAFE_USER_INSTRUCTION);
r0.h = hi(SAFE_USER_INSTRUCTION);
reti = r0;
@@ -930,18 +944,27 @@ ENTRY(_return_from_int)
ENDPROC(_return_from_int)
ENTRY(_lower_to_irq14)
-#if ANOMALY_05000281
+#if ANOMALY_05000281 || ANOMALY_05000461
r0.l = lo(SAFE_USER_INSTRUCTION);
r0.h = hi(SAFE_USER_INSTRUCTION);
reti = r0;
#endif
- r0 = 0x401f;
+
+#ifdef CONFIG_DEBUG_HWERR
+ /* enable irq14 & hwerr interrupt, until we transition to _evt14_softirq */
+ r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#else
+ /* Only enable irq14 interrupt, until we transition to _evt14_softirq */
+ r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#endif
sti r0;
raise 14;
rti;
+ENDPROC(_lower_to_irq14)
+
ENTRY(_evt14_softirq)
#ifdef CONFIG_DEBUG_HWERR
- r0 = 0x3f;
+ r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
sti r0;
#else
cli r0;
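The named constants spell out what the old magic numbers meant. On Blackfin the event mask bits are EVT_EMU=0x1, EVT_RST=0x2, EVT_NMI=0x4, EVT_EVX=0x8, EVT_IRPTEN=0x10, EVT_IVHW=0x20, and EVT_IVG14=0x4000, so the ORs reconstruct the literals (the macro names below are illustrative):

	/* the CONFIG_DEBUG_HWERR variant also keeps hardware errors enabled */
	#define STI_IRQ14_HWERR	(EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | \
				 EVT_NMI | EVT_RST | EVT_EMU)	/* == 0x403f */
	#define STI_IRQ14	(EVT_IVG14 | EVT_IRPTEN | EVT_EVX | \
				 EVT_NMI | EVT_RST | EVT_EMU)	/* == 0x401f, the old literal */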
@@ -949,8 +972,9 @@ ENTRY(_evt14_softirq)
[--sp] = RETI;
SP += 4;
rts;
+ENDPROC(_evt14_softirq)
-_schedule_and_signal_from_int:
+ENTRY(_schedule_and_signal_from_int)
/* To end up here, vector 15 was changed - so we have to change it
* back.
*/
@@ -983,8 +1007,9 @@ _schedule_and_signal_from_int:
call _finish_atomic_sections;
sp += 12;
jump.s .Lresume_userspace;
+ENDPROC(_schedule_and_signal_from_int)
-_schedule_and_signal:
+ENTRY(_schedule_and_signal)
SAVE_CONTEXT_SYSCALL
/* To end up here, vector 15 was changed - so we have to change it
* back.
@@ -1002,7 +1027,7 @@ _schedule_and_signal:
1:
RESTORE_CONTEXT
rti;
-ENDPROC(_lower_to_irq14)
+ENDPROC(_schedule_and_signal)
/* We handle this 100% in exception space - to reduce overhead
 * Only potential problem is if the software buffer gets swapped out of the
@@ -1581,24 +1606,11 @@ ENTRY(_sys_call_table)
.long _sys_dup3
.long _sys_pipe2
.long _sys_inotify_init1 /* 365 */
+ .long _sys_preadv
+ .long _sys_pwritev
+ .long _sys_rt_tgsigqueueinfo
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
.endr
END(_sys_call_table)
-
-#ifdef CONFIG_EXCEPTION_L1_SCRATCH
-/* .section .l1.bss.scratch */
-.set _exception_stack_top, L1_SCRATCH_START + L1_SCRATCH_LENGTH
-#else
-#ifdef CONFIG_SYSCALL_TAB_L1
-.section .l1.bss
-#else
-.bss
-#endif
-ENTRY(_exception_stack)
- .rept 1024 * NR_CPUS
- .long 0
- .endr
-_exception_stack_top:
-#endif
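With the three new entries wired into the table, userspace reaches them through the usual C library wrappers once the libc knows the syscall numbers. A minimal hedged test for preadv (assumes a libc that exposes the wrapper):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/uio.h>

	int main(void)
	{
		char a[8], b[8];
		struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
		int fd = open("/proc/version", O_RDONLY);

		if (fd < 0)
			return 1;
		/* scatter-read 16 bytes from offset 0 without moving the
		 * file position */
		printf("preadv returned %zd\n", preadv(fd, iov, 2, 0));
		return 0;
	}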
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index 698d4c05947..f826f6b9f91 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -30,8 +30,6 @@ ENTRY(__init_clear_bss)
rts;
ENDPROC(__init_clear_bss)
-#define INITIAL_STACK (L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
-
ENTRY(__start)
/* R0: argument of command line string, passed from uboot, save it */
R7 = R0;
@@ -126,30 +124,30 @@ ENTRY(__start)
* below
*/
GET_PDA(p0, r0);
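+	/* use R6 as scratch below: R7 still holds the u-boot command
+	 * line pointer saved at __start */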
- r7 = [p0 + PDA_RETX];
+ r6 = [p0 + PDA_RETX];
p1.l = _init_saved_retx;
p1.h = _init_saved_retx;
- [p1] = r7;
+ [p1] = r6;
- r7 = [p0 + PDA_DCPLB];
+ r6 = [p0 + PDA_DCPLB];
p1.l = _init_saved_dcplb_fault_addr;
p1.h = _init_saved_dcplb_fault_addr;
- [p1] = r7;
+ [p1] = r6;
- r7 = [p0 + PDA_ICPLB];
+ r6 = [p0 + PDA_ICPLB];
p1.l = _init_saved_icplb_fault_addr;
p1.h = _init_saved_icplb_fault_addr;
- [p1] = r7;
+ [p1] = r6;
- r7 = [p0 + PDA_SEQSTAT];
+ r6 = [p0 + PDA_SEQSTAT];
p1.l = _init_saved_seqstat;
p1.h = _init_saved_seqstat;
- [p1] = r7;
+ [p1] = r6;
#endif
/* Initialize stack pointer */
- sp.l = lo(INITIAL_STACK);
- sp.h = hi(INITIAL_STACK);
+ sp.l = _init_thread_union;
+ sp.h = _init_thread_union;
fp = sp;
usp = sp;
@@ -189,7 +187,15 @@ ENTRY(__start)
/* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
call _bfin_relocate_l1_mem;
#ifdef CONFIG_BFIN_KERNEL_CLOCK
+ /* Only use on-chip scratch space for stack when absolutely required
+ * to avoid Anomaly 05000227 ... we know the init_clocks() func only
+ * uses L1 text and stack space and no other memory region.
+ */
+# define KERNEL_CLOCK_STACK (L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
+ sp.l = lo(KERNEL_CLOCK_STACK);
+ sp.h = hi(KERNEL_CLOCK_STACK);
call _init_clocks;
+	sp = usp;	/* usp hasn't been touched, so restore from there */
#endif
/* This section keeps the processor in supervisor mode
@@ -243,9 +249,7 @@ ENTRY(_real_start)
call _cmdline_init;
/* Load the current thread pointer and stack */
- sp.l = _init_thread_union;
- sp.h = _init_thread_union;
- p1 = THREAD_SIZE (z);
+ p1 = THREAD_SIZE + 4 (z); /* +4 is for reti loading */
sp = sp + p1;
usp = sp;
fp = sp;
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 0069c2dd462..9c46680186e 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -145,6 +145,14 @@ __common_int_entry:
/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
+	/* A single action can kick off multiple memory transactions (like
+	 * a cache line fetch), which can cause multiple hardware errors;
+	 * let's catch them all. First make sure all the actions are
+	 * complete, and the core sees the hardware errors.
+ */
+ SSYNC;
+ SSYNC;
+
SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
fp = 0;
@@ -159,6 +167,25 @@ ENTRY(_evt_ivhw)
1:
#endif
+	/* Handle all stacked hardware errors.
+	 * To make sure we don't hang forever, only do it 10 times.
+ */
+ R0 = 0;
+ R2 = 10;
+1:
+ P0.L = LO(ILAT);
+ P0.H = HI(ILAT);
+ R1 = [P0];
+ CC = BITTST(R1, EVT_IVHW_P);
+ IF ! CC JUMP 2f;
+ /* OK a hardware error is pending - clear it */
+	R1 = EVT_IVHW;	/* write the event mask (bit 5), not the bit position */
+	[P0] = R1;
+	R0 += 1;
+	CC = R0 == R2;	/* give up after 10 passes */
+ if CC JUMP 2f;
+ JUMP 1b;
+2:
# We are going to dump something out, so make sure we print IPEND properly
p2.l = lo(IPEND);
p2.h = hi(IPEND);
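Restated in C for readability, a sketch using the generic MMR accessors (the write-one-to-clear behavior of ILAT is assumed):

	/* sketch: drain up to 10 stacked hardware errors from ILAT */
	int tries;
	for (tries = 0; tries < 10; tries++) {
		if (!(bfin_read32(ILAT) & EVT_IVHW))
			break;				/* nothing left pending */
		bfin_write32(ILAT, EVT_IVHW);		/* ack this occurrence */
	}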
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index a7d7b2dd405..351afd0e36d 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1052,7 +1052,7 @@ int __init init_arch_irq(void)
set_irq_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
-#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+#if defined(CONFIG_TICKSOURCE_GPTMR0)
case IRQ_TIMER0:
set_irq_handler(irq, handle_percpu_irq);
break;
@@ -1116,6 +1116,9 @@ int __init init_arch_irq(void)
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
+ /* This implicitly covers ANOMALY_05000171
+ * Boot-ROM code modifies SICA_IWRx wakeup registers
+ */
#ifdef SIC_IWR0
bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
# ifdef SIC_IWR1
@@ -1136,13 +1139,6 @@ int __init init_arch_irq(void)
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
-#ifdef CONFIG_IPIPE
- for (irq = 0; irq < NR_IRQS; irq++) {
- struct irq_desc *desc = irq_to_desc(irq);
- desc->ic_prio = __ipipe_get_irq_priority(irq);
- }
-#endif /* CONFIG_IPIPE */
-
return 0;
}
@@ -1156,23 +1152,22 @@ void do_irq(int vec, struct pt_regs *fp)
} else {
struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
- || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
+#if defined(SIC_ISR0) || defined(SICA_ISR0)
unsigned long sic_status[3];
if (smp_processor_id()) {
-#ifdef CONFIG_SMP
+# ifdef SICB_ISR0
/* This will be optimized out in UP mode. */
sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
-#endif
+# endif
} else {
sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
}
-#ifdef CONFIG_BF54x
+# ifdef SIC_ISR2
sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
-#endif
+# endif
for (;; ivg++) {
if (ivg >= ivg_stop) {
atomic_inc(&num_spurious);
@@ -1236,20 +1231,16 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
if (likely(vec == EVT_IVTMR_P)) {
irq = IRQ_CORETMR;
- goto core_tick;
- }
- SSYNC();
-
-#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
- {
+ } else {
+#if defined(SIC_ISR0) || defined(SICA_ISR0)
unsigned long sic_status[3];
sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
-#ifdef CONFIG_BF54x
+# ifdef SIC_ISR2
sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
-#endif
+# endif
for (;; ivg++) {
if (ivg >= ivg_stop) {
atomic_inc(&num_spurious);
@@ -1258,9 +1249,7 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
break;
}
- }
#else
- {
unsigned long sic_status;
sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
@@ -1272,15 +1261,13 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
} else if (sic_status & ivg->isrflag)
break;
}
- }
#endif
- irq = ivg->irqno;
+ irq = ivg->irqno;
+ }
if (irq == IRQ_SYSTMR) {
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
-core_tick:
-#else
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
/* This is basically what we need from the register frame. */
@@ -1292,9 +1279,6 @@ core_tick:
__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
}
-#ifndef CONFIG_GENERIC_CLOCKEVENTS
-core_tick:
-#endif
if (this_domain == ipipe_root_domain) {
s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
barrier();
@@ -1312,7 +1296,7 @@ core_tick:
}
}
- return 0;
+ return 0;
}
#endif /* CONFIG_IPIPE */
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 93eab614607..61840059dfa 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -43,8 +43,13 @@
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
+#include <asm/time.h>
#include <linux/err.h>
+/*
+ * Anomaly notes:
+ * 05000120 - we always define corelock as 32-bit integer in L2
+ */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
@@ -139,7 +144,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
- struct ipi_message *msg, *mg;
+ struct ipi_message *msg;
struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id();
@@ -149,7 +154,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
msg_queue->count++;
spin_lock(&msg_queue->lock);
- list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
+ while (!list_empty(&msg_queue->head)) {
+ msg = list_entry(msg_queue->head.next, typeof(*msg), list);
list_del(&msg->list);
switch (msg->type) {
case BFIN_IPI_RESCHEDULE:
@@ -216,7 +222,7 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
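The switch from list_add() to list_add_tail() (here and in the three other senders below) is an ordering fix: list_add() inserts at the head, and since ipi_handler() always pops from the front, back-to-back messages would be serviced in reverse. A small sketch of the difference, starting from an empty queue each time:

	/* list_add(): head insertion, LIFO from the consumer's view */
	list_add(&m1->list, &q);	/* q: m1 */
	list_add(&m2->list, &q);	/* q: m2, m1 -> m2 serviced first */

	/* list_add_tail(): tail insertion, send order preserved */
	list_add_tail(&m1->list, &q);	/* q: m1 */
	list_add_tail(&m2->list, &q);	/* q: m1, m2 -> m1 serviced first */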
@@ -256,7 +262,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
@@ -287,7 +293,7 @@ void smp_send_reschedule(int cpu)
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
@@ -315,7 +321,7 @@ void smp_send_stop(void)
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
@@ -352,7 +358,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
static void __cpuinit setup_secondary(unsigned int cpu)
{
-#if !(defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE))
+#if !defined(CONFIG_TICKSOURCE_GPTMR0)
struct irq_desc *timer_desc;
#endif
unsigned long ilat;
@@ -364,16 +370,13 @@ static void __cpuinit setup_secondary(unsigned int cpu)
bfin_write_ILAT(ilat);
CSYNC();
- /* Reserve the PDA space for the secondary CPU. */
- reserve_pda();
-
/* Enable interrupt levels IVG7-15. IARs have been already
* programmed by the boot CPU. */
bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
-#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+#if defined(CONFIG_TICKSOURCE_GPTMR0)
/* Power down the core timer, just to play safe. */
bfin_write_TCNTL(0);
@@ -466,6 +469,17 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
+#ifdef __ARCH_SYNC_CORE_ICACHE
+void resync_core_icache(void)
+{
+ unsigned int cpu = get_cpu();
+ blackfin_invalidate_entire_icache();
+ ++per_cpu(cpu_data, cpu).icache_invld_count;
+ put_cpu();
+}
+EXPORT_SYMBOL(resync_core_icache);
+#endif
+
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
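resync_core_icache() is the I-cache counterpart of the D-cache resync machinery that follows: the core drops its entire instruction cache and bumps a per-cpu statistics counter. A hedged sketch of a call site (the staleness predicate is hypothetical):

	/* sketch: a core noticing cross-CPU code modification drops its
	 * stale instruction cache before running the new code */
	if (icache_is_stale())		/* hypothetical predicate */
		resync_core_icache();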