Diffstat (limited to 'arch/blackfin/mach-common')
 arch/blackfin/mach-common/cpufreq.c       | 168
 arch/blackfin/mach-common/entry.S         |  85
 arch/blackfin/mach-common/head.S          |  16
 arch/blackfin/mach-common/interrupt.S     |  35
 arch/blackfin/mach-common/ints-priority.c | 250
 arch/blackfin/mach-common/pm.c            |  25
 arch/blackfin/mach-common/smp.c           |  92
 7 files changed, 470 insertions(+), 201 deletions(-)
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 77758289725..4391d03dc84 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -11,10 +11,13 @@
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/fs.h>
+#include <linux/delay.h>
#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/dpmc.h>
+#define CPUFREQ_CPU 0
+
/* this is the table of CCLK frequencies, in Hz */
/* .index is the entry in the auxiliary dpm_state_table[] */
static struct cpufreq_frequency_table bfin_freq_table[] = {
@@ -41,64 +44,124 @@ static struct bfin_dpm_state {
unsigned int tscale; /* change the divider on the core timer interrupt */
} dpm_state_table[3];
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
/*
- normalized to maximum frequncy offset for CYCLES,
- used in time-ts cycles clock source, but could be used
- somewhere also.
+ * normalized to maximum frequency offset for CYCLES,
+ * used by the time-ts cycles clock source, but could be
+ * used elsewhere as well.
*/
unsigned long long __bfin_cycles_off;
unsigned int __bfin_cycles_mod;
+#endif
/**************************************************************************/
+static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
+{
-static unsigned int bfin_getfreq_khz(unsigned int cpu)
+ unsigned long csel, min_cclk;
+ int index;
+
+ /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
+#if ANOMALY_05000273 || ANOMALY_05000274 || \
+ (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
+ min_cclk = sclk * 2;
+#else
+ min_cclk = sclk;
+#endif
+ csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
+
+ for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
+ bfin_freq_table[index].frequency = cclk >> index;
+ dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
+ dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
+
+ pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
+ bfin_freq_table[index].frequency,
+ dpm_state_table[index].csel,
+ dpm_state_table[index].tscale);
+ }
+ return;
+}
+
+static void bfin_adjust_core_timer(void *info)
{
- /* The driver only support single cpu */
- if (cpu != 0)
- return -1;
+ unsigned int tscale;
+ unsigned int index = *(unsigned int *)info;
- return get_cclk() / 1000;
+ /* we have to adjust the core timer, because it is using cclk */
+ tscale = dpm_state_table[index].tscale;
+ bfin_write_TSCALE(tscale);
+ return;
}
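
For reference, a minimal userspace sketch of what bfin_init_tables() and bfin_adjust_core_timer() compute together: each table entry halves CCLK via the CSEL divider, and tscale is chosen so the core timer (which is clocked from CCLK) keeps a constant tick rate. TIME_SCALE and the clock values below are stand-ins, not real register contents:

#include <stdio.h>

#define TIME_SCALE 16            /* stand-in for the kernel constant */

int main(void)
{
        unsigned long cclk = 500000, min_cclk = 125000;  /* kHz, example values */
        unsigned long csel;
        int index;

        for (index = 0, csel = 0; (cclk >> index) >= min_cclk && csel <= 3;
             index++, csel++) {
                unsigned int tscale = (TIME_SCALE / (1 << csel)) - 1;
                /* effective cclk = cclk >> csel and tscale + 1 = TIME_SCALE >> csel,
                 * so the core timer period (tscale + 1) / cclk_eff is unchanged */
                printf("freq:%lu kHz  csel:0x%x  tscale:%u\n",
                       cclk >> index, (unsigned)csel << 4, tscale);
        }
        return 0;
}
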
+static unsigned int bfin_getfreq_khz(unsigned int cpu)
+{
+ /* Both CoreA/B have the same core clock */
+ return get_cclk() / 1000;
+}
-static int bfin_target(struct cpufreq_policy *policy,
+static int bfin_target(struct cpufreq_policy *poli,
unsigned int target_freq, unsigned int relation)
{
- unsigned int index, plldiv, tscale;
+ unsigned int index, plldiv, cpu;
unsigned long flags, cclk_hz;
struct cpufreq_freqs freqs;
+ static unsigned long lpj_ref;
+ static unsigned int lpj_ref_freq;
+
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
cycles_t cycles;
+#endif
- if (cpufreq_frequency_table_target(policy, bfin_freq_table,
- target_freq, relation, &index))
- return -EINVAL;
-
- cclk_hz = bfin_freq_table[index].frequency;
-
- freqs.old = bfin_getfreq_khz(0);
- freqs.new = cclk_hz;
- freqs.cpu = 0;
-
- pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
- cclk_hz, target_freq, freqs.old);
-
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- local_irq_save_hw(flags);
- plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
- tscale = dpm_state_table[index].tscale;
- bfin_write_PLL_DIV(plldiv);
- /* we have to adjust the core timer, because it is using cclk */
- bfin_write_TSCALE(tscale);
- cycles = get_cycles();
- SSYNC();
- cycles += 10; /* ~10 cycles we lose after get_cycles() */
- __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
- __bfin_cycles_mod = index;
- local_irq_restore_hw(flags);
- /* TODO: just test case for cycles clock source, remove later */
- pr_debug("cpufreq: done\n");
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ continue;
+
+ if (cpufreq_frequency_table_target(policy, bfin_freq_table,
+ target_freq, relation, &index))
+ return -EINVAL;
+
+ cclk_hz = bfin_freq_table[index].frequency;
+
+ freqs.old = bfin_getfreq_khz(0);
+ freqs.new = cclk_hz;
+ freqs.cpu = cpu;
+
+ pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
+ cclk_hz, target_freq, freqs.old);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ if (cpu == CPUFREQ_CPU) {
+ local_irq_save_hw(flags);
+ plldiv = (bfin_read_PLL_DIV() & SSEL) |
+ dpm_state_table[index].csel;
+ bfin_write_PLL_DIV(plldiv);
+ on_each_cpu(bfin_adjust_core_timer, &index, 1);
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+ cycles = get_cycles();
+ SSYNC();
+ cycles += 10; /* ~10 cycles we lose after get_cycles() */
+ __bfin_cycles_off +=
+ (cycles << __bfin_cycles_mod) - (cycles << index);
+ __bfin_cycles_mod = index;
+#endif
+ if (!lpj_ref_freq) {
+ lpj_ref = loops_per_jiffy;
+ lpj_ref_freq = freqs.old;
+ }
+ if (freqs.new != freqs.old) {
+ loops_per_jiffy = cpufreq_scale(lpj_ref,
+ lpj_ref_freq, freqs.new);
+ }
+ local_irq_restore_hw(flags);
+ }
+ /* TODO: just test case for cycles clock source, remove later */
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ pr_debug("cpufreq: done\n");
return 0;
}
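
The CYCLES bookkeeping above can be checked in isolation. Assuming the clocksource derives time as (CYCLES << __bfin_cycles_mod) + __bfin_cycles_off, as the time-ts code does, the offset update keeps the reading continuous across a divider switch -- a standalone sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t cycles_off;   /* mirrors __bfin_cycles_off */
static unsigned cycles_mod;   /* mirrors __bfin_cycles_mod */

/* Scaled clocksource reading. */
static uint64_t read_scaled(uint64_t raw)
{
        return (raw << cycles_mod) + cycles_off;
}

/* The update performed in bfin_target() when the divider changes. */
static void switch_divider(uint64_t raw_now, unsigned new_index)
{
        cycles_off += (raw_now << cycles_mod) - (raw_now << new_index);
        cycles_mod = new_index;
}

int main(void)
{
        uint64_t before = read_scaled(1000);
        switch_divider(1000, 1);               /* CCLK halved at raw count 1000 */
        uint64_t after = read_scaled(1000);    /* same instant, same timestamp  */
        printf("%llu == %llu\n", (unsigned long long)before,
               (unsigned long long)after);
        return 0;
}
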
@@ -110,37 +173,16 @@ static int bfin_verify_speed(struct cpufreq_policy *policy)
static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
{
- unsigned long cclk, sclk, csel, min_cclk;
- int index;
-
- if (policy->cpu != 0)
- return -EINVAL;
+ unsigned long cclk, sclk;
cclk = get_cclk() / 1000;
sclk = get_sclk() / 1000;
-#if ANOMALY_05000273 || ANOMALY_05000274 || \
- (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
- min_cclk = sclk * 2;
-#else
- min_cclk = sclk;
-#endif
- csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
-
- for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
- bfin_freq_table[index].frequency = cclk >> index;
- dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
- dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
-
- pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
- bfin_freq_table[index].frequency,
- dpm_state_table[index].csel,
- dpm_state_table[index].tscale);
- }
+ if (policy->cpu == CPUFREQ_CPU)
+ bfin_init_tables(cclk, sclk);
policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
- /*Now ,only support one cpu */
policy->cur = cclk;
cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
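
The loops_per_jiffy handling added to bfin_target() keeps udelay() calibrated across frequency changes; cpufreq_scale() is, to a first approximation, a linear rescale against the reference captured on the first transition (the real kernel helper also carries rounding terms). A rough stand-in:

#include <stdio.h>

/* Approximation of cpufreq_scale(): lpj scales linearly with CCLK. */
static unsigned long scale_lpj(unsigned long lpj_ref,
                               unsigned int ref_khz, unsigned int new_khz)
{
        return (unsigned long)((unsigned long long)lpj_ref * new_khz / ref_khz);
}

int main(void)
{
        unsigned long lpj_ref = 2000000;          /* captured at first change */
        printf("%lu\n", scale_lpj(lpj_ref, 500000, 250000));  /* -> 1000000 */
        return 0;
}
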
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index b0ed0b487ff..a5847f5d67c 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -405,7 +405,7 @@ ENTRY(_double_fault)
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
SP += -12;
- call _double_fault_c;
+ pseudo_long_call _double_fault_c, p5;
SP += 12;
.L_double_fault_panic:
JUMP .L_double_fault_panic
@@ -447,7 +447,7 @@ ENTRY(_exception_to_level5)
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
SP += -12;
- call _trap_c;
+ pseudo_long_call _trap_c, p4;
SP += 12;
/* If interrupts were off during the exception (IPEND[4] = 1), turn them off
@@ -482,6 +482,8 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
[--sp] = ASTAT;
[--sp] = (R7:6,P5:4);
+ ANOMALY_283_315_WORKAROUND(p5, r7)
+
#ifdef CONFIG_EXACT_HWERR
/* Make sure all pending read/writes complete. This will ensure any
* accesses which could cause hardware errors completes, and signal
@@ -492,8 +494,6 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
ssync;
#endif
- ANOMALY_283_315_WORKAROUND(p5, r7)
-
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/*
* Save these registers, as they are only valid in exception context
@@ -551,7 +551,7 @@ ENTRY(_kernel_execve)
p0 = sp;
sp += -16;
[sp + 12] = p0;
- call _do_execve;
+ pseudo_long_call _do_execve, p5;
SP += 16;
cc = r0 == 0;
if ! cc jump .Lexecve_failed;
@@ -626,13 +626,6 @@ ENTRY(_system_call)
p0 = [sp + PT_ORIG_P0];
#endif /* CONFIG_IPIPE */
- /* Check the System Call */
- r7 = __NR_syscall;
- /* System call number is passed in P0 */
- r6 = p0;
- cc = r6 < r7;
- if ! cc jump .Lbadsys;
-
/* are we tracing syscalls?*/
r7 = sp;
r6.l = lo(ALIGN_PAGE_MASK);
@@ -642,6 +635,14 @@ ENTRY(_system_call)
r7 = [p2+TI_FLAGS];
CC = BITTST(r7,TIF_SYSCALL_TRACE);
if CC JUMP _sys_trace;
+ CC = BITTST(r7,TIF_SINGLESTEP);
+ if CC JUMP _sys_trace;
+
+ /* Make sure the system call # is valid */
+ p4 = __NR_syscall;
+ /* System call number is passed in P0 */
+ cc = p4 <= p0;
+ if cc jump .Lbadsys;
/* Execute the appropriate system call */
@@ -704,7 +705,7 @@ ENTRY(_system_call)
sp += 4;
SP += -12;
- call _schedule;
+ pseudo_long_call _schedule, p4;
SP += 12;
jump .Lresume_userspace_1;
@@ -723,7 +724,7 @@ ENTRY(_system_call)
r0 = sp;
SP += -12;
- call _do_notify_resume;
+ pseudo_long_call _do_notify_resume, p5;
SP += 12;
.Lsyscall_really_exit:
@@ -736,11 +737,17 @@ ENDPROC(_system_call)
* this symbol need not be global anyways, so ...
*/
_sys_trace:
- call _syscall_trace;
-
- /* Execute the appropriate system call */
+ r0 = sp;
+ pseudo_long_call _syscall_trace_enter, p5;
+ /* Make sure the system call # is valid */
p4 = [SP + PT_P0];
+ p3 = __NR_syscall;
+ cc = p3 <= p4;
+ r0 = -ENOSYS;
+ if cc jump .Lsys_trace_badsys;
+
+ /* Execute the appropriate system call */
p5.l = _sys_call_table;
p5.h = _sys_call_table;
p5 = p5 + (p4 << 2);
@@ -758,9 +765,11 @@ _sys_trace:
SP += -12;
call (p5);
SP += 24;
+.Lsys_trace_badsys:
[sp + PT_R0] = r0;
- call _syscall_trace;
+ r0 = sp;
+ pseudo_long_call _syscall_trace_leave, p5;
jump .Lresume_userspace;
ENDPROC(_sys_trace)
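
In C terms, both the fast path and the trace path above now perform the same guard before indexing _sys_call_table -- a sketch of that logic, with a placeholder table and syscall count:

#include <errno.h>
#include <stdio.h>

#define NR_SYSCALL 3                       /* placeholder for __NR_syscall */

static long sys_a(void) { return 1; }
static long sys_b(void) { return 2; }
static long sys_c(void) { return 3; }
static long (*const sys_call_table[NR_SYSCALL])(void) = { sys_a, sys_b, sys_c };

static long dispatch(unsigned long nr)
{
        if (nr >= NR_SYSCALL)      /* cc = p4 <= p0; if cc jump .Lbadsys; */
                return -ENOSYS;
        return sys_call_table[nr]();
}

int main(void)
{
        printf("%ld %ld\n", dispatch(1), dispatch(99));   /* 2, then -ENOSYS */
        return 0;
}
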
@@ -816,8 +825,8 @@ ENDPROC(_resume)
ENTRY(_ret_from_exception)
#ifdef CONFIG_IPIPE
- p2.l = _per_cpu__ipipe_percpu_domain;
- p2.h = _per_cpu__ipipe_percpu_domain;
+ p2.l = _ipipe_percpu_domain;
+ p2.h = _ipipe_percpu_domain;
r0.l = _ipipe_root;
r0.h = _ipipe_root;
r2 = [p2];
@@ -966,6 +975,13 @@ ENTRY(_evt_evt14)
#else
cli r0;
#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ [--sp] = rets;
+ sp += -12;
+ call _trace_hardirqs_off;
+ sp += 12;
+ rets = [sp++];
+#endif
[--sp] = RETI;
SP += 4;
rts;
@@ -989,6 +1005,14 @@ ENTRY(_schedule_and_signal_from_int)
p1 = rets;
[sp + PT_RESERVED] = p1;
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15
+ * is turned on, so disable all irqs. */
+ cli r0;
+ sp += -12;
+ call _trace_hardirqs_on;
+ sp += 12;
+#endif
#ifdef CONFIG_SMP
GET_PDA(p0, r0); /* Fetch current PDA (can't migrate to other CPU here) */
r0 = [p0 + PDA_IRQFLAGS];
@@ -1007,7 +1031,8 @@ ENTRY(_schedule_and_signal_from_int)
r0 = sp;
sp += -12;
- call _finish_atomic_sections;
+
+ pseudo_long_call _finish_atomic_sections, p5;
sp += 12;
jump.s .Lresume_userspace;
ENDPROC(_schedule_and_signal_from_int)
@@ -1357,7 +1382,7 @@ ENTRY(_sys_call_table)
.long _sys_newuname
.long _sys_ni_syscall /* old sys_modify_ldt */
.long _sys_adjtimex
- .long _sys_ni_syscall /* 125 */ /* sys_mprotect */
+ .long _sys_mprotect /* 125 */
.long _sys_ni_syscall /* old sys_sigprocmask */
.long _sys_ni_syscall /* old "creat_module" */
.long _sys_init_module
@@ -1376,16 +1401,16 @@ ENTRY(_sys_call_table)
.long _sys_getdents
.long _sys_ni_syscall /* sys_select */
.long _sys_flock
- .long _sys_ni_syscall /* sys_msync */
+ .long _sys_msync
.long _sys_readv /* 145 */
.long _sys_writev
.long _sys_getsid
.long _sys_fdatasync
.long _sys_sysctl
- .long _sys_ni_syscall /* 150 */ /* sys_mlock */
- .long _sys_ni_syscall /* sys_munlock */
- .long _sys_ni_syscall /* sys_mlockall */
- .long _sys_ni_syscall /* sys_munlockall */
+ .long _sys_mlock /* 150 */
+ .long _sys_munlock
+ .long _sys_mlockall
+ .long _sys_munlockall
.long _sys_sched_setparam
.long _sys_sched_getparam /* 155 */
.long _sys_sched_setscheduler
@@ -1450,8 +1475,8 @@ ENTRY(_sys_call_table)
.long _sys_setfsuid /* 215 */
.long _sys_setfsgid
.long _sys_pivot_root
- .long _sys_ni_syscall /* sys_mincore */
- .long _sys_ni_syscall /* sys_madvise */
+ .long _sys_mincore
+ .long _sys_madvise
.long _sys_getdents64 /* 220 */
.long _sys_fcntl64
.long _sys_ni_syscall /* reserved for TUX */
@@ -1507,7 +1532,7 @@ ENTRY(_sys_call_table)
.long _sys_utimes
.long _sys_fadvise64_64
.long _sys_ni_syscall /* vserver */
- .long _sys_ni_syscall /* 275, mbind */
+ .long _sys_mbind /* 275 */
.long _sys_ni_syscall /* get_mempolicy */
.long _sys_ni_syscall /* set_mempolicy */
.long _sys_mq_open
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index cab0a0031ee..4391621d904 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -144,8 +144,8 @@ ENTRY(__start)
#endif
/* Initialize stack pointer */
- sp.l = _init_thread_union;
- sp.h = _init_thread_union;
+ sp.l = _init_thread_union + THREAD_SIZE;
+ sp.h = _init_thread_union + THREAD_SIZE;
fp = sp;
usp = sp;
@@ -186,6 +186,11 @@ ENTRY(__start)
/* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
call _bfin_relocate_l1_mem;
+
+#ifdef CONFIG_ROMKERNEL
+ call _bfin_relocate_xip_data;
+#endif
+
#ifdef CONFIG_BFIN_KERNEL_CLOCK
/* Only use on-chip scratch space for stack when absolutely required
* to avoid Anomaly 05000227 ... we know the init_clocks() func only
@@ -257,12 +262,7 @@ ENTRY(_real_start)
R0 = R7;
call _cmdline_init;
- /* Load the current thread pointer and stack */
- p1 = THREAD_SIZE + 4 (z); /* +4 is for reti loading */
- sp = sp + p1;
- usp = sp;
- fp = sp;
- sp += -12;
+ sp += -12 + 4; /* +4 is for reti loading above */
call _init_pda
sp += 12;
jump.l _start_kernel;
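
The stack fix above matters because Blackfin stacks grow downward: pointing SP at the base of _init_thread_union would immediately clobber the thread_info stored there. A tiny illustration of the layout, with a placeholder size:

#include <stdio.h>

#define THREAD_SIZE 8192                   /* placeholder size */

static unsigned char init_thread_union[THREAD_SIZE];

int main(void)
{
        /* thread_info lives at the bottom; the stack starts at the top
         * and grows down toward it. */
        void *thread_info = init_thread_union;
        void *initial_sp  = init_thread_union + THREAD_SIZE;
        printf("thread_info %p, initial sp %p\n", thread_info, initial_sp);
        return 0;
}
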
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 8085ff1cce0..cee62cf4acd 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -88,6 +88,13 @@ __common_int_entry:
#else
cli r1;
#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ [--sp] = r0;
+ sp += -12;
+ call _trace_hardirqs_off;
+ sp += 12;
+ r0 = [sp++];
+#endif
[--sp] = RETI; /* orig_pc */
/* Clear all L registers. */
r1 = 0 (x);
@@ -109,10 +116,10 @@ __common_int_entry:
cc = r0 == 0;
if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */
- call _do_irq;
+ pseudo_long_call _do_irq, p2;
SP += 12;
#endif /* CONFIG_IPIPE */
- call _return_from_int;
+ pseudo_long_call _return_from_int, p2;
.Lcommon_restore_context:
RESTORE_CONTEXT
rti;
@@ -168,7 +175,7 @@ ENTRY(_evt_ivhw)
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
SP += -12;
- call _trap_c;
+ pseudo_long_call _trap_c, p5;
SP += 12;
#ifdef EBIU_ERRMST
@@ -179,7 +186,7 @@ ENTRY(_evt_ivhw)
w[p0] = r0.l;
#endif
- call _ret_from_exception;
+ pseudo_long_call _ret_from_exception, p2;
.Lcommon_restore_all_sys:
RESTORE_ALL_SYS
@@ -187,12 +194,28 @@ ENTRY(_evt_ivhw)
ENDPROC(_evt_ivhw)
/* Interrupt routine for evt2 (NMI).
- * We don't actually use this, so just return.
* For inner circle type details, please see:
* http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
*/
ENTRY(_evt_nmi)
+#ifndef CONFIG_NMI_WATCHDOG
.weak _evt_nmi
+#else
+ /* CPLBs are not taken into account here; this handler will not return */
+ SAVE_ALL_SYS
+ r0 = sp;
+ r1 = retn;
+ [sp + PT_PC] = r1;
+ trace_buffer_save(p4,r5);
+
+ ANOMALY_283_315_WORKAROUND(p4, r5)
+
+ SP += -12;
+ call _do_nmi;
+ SP += 12;
+1:
+ jump 1b;
+#endif
rtn;
ENDPROC(_evt_nmi)
@@ -223,7 +246,7 @@ ENTRY(_evt_system_call)
#ifdef CONFIG_FRAME_POINTER
fp = 0;
#endif
- call _system_call;
+ pseudo_long_call _system_call, p2;
jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 1873b2c1fed..1c8c4c7245c 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -28,6 +28,7 @@
#include <asm/dpmc.h>
#include <asm/bfin5xx_spi.h>
#include <asm/bfin_sport.h>
+#include <asm/bfin_can.h>
#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
@@ -91,26 +92,29 @@ static void __init search_IAR(void)
{
unsigned ivg, irq_pos = 0;
for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
- int irqn;
+ int irqN;
ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
- for (irqn = 0; irqn < NR_PERI_INTS; irqn++) {
- int iar_shift = (irqn & 7) * 4;
- if (ivg == (0xf &
-#if defined(CONFIG_BF52x) || defined(CONFIG_BF538) \
- || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
- bfin_read32((unsigned long *)SIC_IAR0 +
- ((irqn % 32) >> 3) + ((irqn / 32) *
- ((SIC_IAR4 - SIC_IAR0) / 4))) >> iar_shift)) {
+ for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
+ int irqn;
+ u32 iar = bfin_read32((unsigned long *)SIC_IAR0 +
+#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
+ defined(CONFIG_BF538) || defined(CONFIG_BF539)
+ ((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
#else
- bfin_read32((unsigned long *)SIC_IAR0 +
- (irqn >> 3)) >> iar_shift)) {
+ (irqN >> 3)
#endif
- ivg_table[irq_pos].irqno = IVG7 + irqn;
- ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
- ivg7_13[ivg].istop++;
- irq_pos++;
+ );
+
+ for (irqn = irqN; irqn < irqN + 4; ++irqn) {
+ int iar_shift = (irqn & 7) * 4;
+ if (ivg == (0xf & (iar >> iar_shift))) {
+ ivg_table[irq_pos].irqno = IVG7 + irqn;
+ ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
+ ivg7_13[ivg].istop++;
+ irq_pos++;
+ }
}
}
}
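
Each 32-bit SIC_IAR register packs eight 4-bit fields, one per peripheral interrupt, naming the IVG level that interrupt is routed to; the rewrite above hoists the register read out of the per-interrupt test. A standalone sketch of the decode, with a made-up register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t iar = 0x76543210;        /* hypothetical SIC_IAR contents */
        int irqn;

        /* Eight interrupts per register, 4 bits each: field value n
         * means the interrupt is assigned to priority level IVG7 + n. */
        for (irqn = 0; irqn < 8; irqn++) {
                int iar_shift = (irqn & 7) * 4;
                printf("irq %d -> IVG%u\n", irqn,
                       7 + ((iar >> iar_shift) & 0xf));
        }
        return 0;
}
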
@@ -172,7 +176,12 @@ static void bfin_internal_mask_irq(unsigned int irq)
local_irq_restore_hw(flags);
}
+#ifdef CONFIG_SMP
+static void bfin_internal_unmask_irq_affinity(unsigned int irq,
+ const struct cpumask *affinity)
+#else
static void bfin_internal_unmask_irq(unsigned int irq)
+#endif
{
unsigned long flags;
@@ -185,16 +194,38 @@ static void bfin_internal_unmask_irq(unsigned int irq)
local_irq_save_hw(flags);
mask_bank = SIC_SYSIRQ(irq) / 32;
mask_bit = SIC_SYSIRQ(irq) % 32;
- bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
- (1 << mask_bit));
#ifdef CONFIG_SMP
- bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) |
- (1 << mask_bit));
+ if (cpumask_test_cpu(0, affinity))
+#endif
+ bfin_write_SIC_IMASK(mask_bank,
+ bfin_read_SIC_IMASK(mask_bank) |
+ (1 << mask_bit));
+#ifdef CONFIG_SMP
+ if (cpumask_test_cpu(1, affinity))
+ bfin_write_SICB_IMASK(mask_bank,
+ bfin_read_SICB_IMASK(mask_bank) |
+ (1 << mask_bit));
#endif
#endif
local_irq_restore_hw(flags);
}
+#ifdef CONFIG_SMP
+static void bfin_internal_unmask_irq(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ bfin_internal_unmask_irq_affinity(irq, desc->affinity);
+}
+
+static int bfin_internal_set_affinity(unsigned int irq, const struct cpumask *mask)
+{
+ bfin_internal_mask_irq(irq);
+ bfin_internal_unmask_irq_affinity(irq, mask);
+
+ return 0;
+}
+#endif
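
A userspace sketch of the affinity-aware unmask above: the interrupt's bit is set only in the system-interrupt mask register of the core(s) present in the affinity mask (SIC_IMASK for core A, SICB_IMASK for core B), and set_affinity re-masks on both cores before applying the new mask:

#include <stdint.h>
#include <stdio.h>

static uint32_t sic_imask[2];   /* [0] = core A, [1] = core B (SICB) */

static void unmask_irq_affinity(unsigned mask_bit, unsigned affinity)
{
        if (affinity & (1u << 0))
                sic_imask[0] |= 1u << mask_bit;
        if (affinity & (1u << 1))
                sic_imask[1] |= 1u << mask_bit;
}

static void set_affinity(unsigned mask_bit, unsigned affinity)
{
        sic_imask[0] &= ~(1u << mask_bit);   /* mask on both cores first */
        sic_imask[1] &= ~(1u << mask_bit);
        unmask_irq_affinity(mask_bit, affinity);
}

int main(void)
{
        set_affinity(5, 1u << 1);            /* route IRQ bit 5 to core B only */
        printf("A:%08x B:%08x\n", sic_imask[0], sic_imask[1]);
        return 0;
}
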
+
#ifdef CONFIG_PM
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
@@ -224,11 +255,6 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
wakeup |= USBWE;
break;
#endif
-#ifdef IRQ_KEY
- case IRQ_KEY:
- wakeup |= KPADWE;
- break;
-#endif
#ifdef CONFIG_BF54x
case IRQ_CNT:
wakeup |= ROTWE;
@@ -270,6 +296,9 @@ static struct irq_chip bfin_internal_irqchip = {
.mask_ack = bfin_internal_mask_irq,
.disable = bfin_internal_mask_irq,
.enable = bfin_internal_unmask_irq,
+#ifdef CONFIG_SMP
+ .set_affinity = bfin_internal_set_affinity,
+#endif
#ifdef CONFIG_PM
.set_wake = bfin_internal_set_wake,
#endif
@@ -294,7 +323,6 @@ static int error_int_mask;
static void bfin_generic_error_mask_irq(unsigned int irq)
{
error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
-
if (!error_int_mask)
bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
}
@@ -385,6 +413,127 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
}
#endif /* BF537_GENERIC_ERROR_INT_DEMUX */
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+static int mac_stat_int_mask;
+
+static void bfin_mac_status_ack_irq(unsigned int irq)
+{
+ switch (irq) {
+ case IRQ_MAC_MMCINT:
+ bfin_write_EMAC_MMC_TIRQS(
+ bfin_read_EMAC_MMC_TIRQE() &
+ bfin_read_EMAC_MMC_TIRQS());
+ bfin_write_EMAC_MMC_RIRQS(
+ bfin_read_EMAC_MMC_RIRQE() &
+ bfin_read_EMAC_MMC_RIRQS());
+ break;
+ case IRQ_MAC_RXFSINT:
+ bfin_write_EMAC_RX_STKY(
+ bfin_read_EMAC_RX_IRQE() &
+ bfin_read_EMAC_RX_STKY());
+ break;
+ case IRQ_MAC_TXFSINT:
+ bfin_write_EMAC_TX_STKY(
+ bfin_read_EMAC_TX_IRQE() &
+ bfin_read_EMAC_TX_STKY());
+ break;
+ case IRQ_MAC_WAKEDET:
+ bfin_write_EMAC_WKUP_CTL(
+ bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
+ break;
+ default:
+ /* These bits are W1C */
+ bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
+ break;
+ }
+}
+
+static void bfin_mac_status_mask_irq(unsigned int irq)
+{
+ mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
+#ifdef BF537_GENERIC_ERROR_INT_DEMUX
+ switch (irq) {
+ case IRQ_MAC_PHYINT:
+ bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
+ break;
+ default:
+ break;
+ }
+#else
+ if (!mac_stat_int_mask)
+ bfin_internal_mask_irq(IRQ_MAC_ERROR);
+#endif
+ bfin_mac_status_ack_irq(irq);
+}
+
+static void bfin_mac_status_unmask_irq(unsigned int irq)
+{
+#ifdef BF537_GENERIC_ERROR_INT_DEMUX
+ switch (irq) {
+ case IRQ_MAC_PHYINT:
+ bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
+ break;
+ default:
+ break;
+ }
+#else
+ if (!mac_stat_int_mask)
+ bfin_internal_unmask_irq(IRQ_MAC_ERROR);
+#endif
+ mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
+}
+
+#ifdef CONFIG_PM
+int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
+{
+#ifdef BF537_GENERIC_ERROR_INT_DEMUX
+ return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
+#else
+ return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
+#endif
+}
+#endif
+
+static struct irq_chip bfin_mac_status_irqchip = {
+ .name = "MACST",
+ .ack = bfin_ack_noop,
+ .mask_ack = bfin_mac_status_mask_irq,
+ .mask = bfin_mac_status_mask_irq,
+ .unmask = bfin_mac_status_unmask_irq,
+#ifdef CONFIG_PM
+ .set_wake = bfin_mac_status_set_wake,
+#endif
+};
+
+static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
+ struct irq_desc *inta_desc)
+{
+ int i, irq = 0;
+ u32 status = bfin_read_EMAC_SYSTAT();
+
+ for (i = 0; i < (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
+ if (status & (1L << i)) {
+ irq = IRQ_MAC_PHYINT + i;
+ break;
+ }
+
+ if (irq) {
+ if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
+ bfin_handle_irq(irq);
+ } else {
+ bfin_mac_status_ack_irq(irq);
+ pr_debug("IRQ %d:"
+ " MASKED MAC ERROR INTERRUPT ASSERTED\n",
+ irq);
+ }
+ } else
+ printk(KERN_ERR
+ "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
+ " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
+ __func__, __FILE__, __LINE__);
+}
+#endif
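
Most of the EMAC status bits acked above follow one pattern: the sticky status register is write-1-to-clear, and masking the raw status with the enable register ensures only currently-enabled, currently-pending sources get cleared. A stand-alone model of that pattern, with stand-in register values:

#include <stdint.h>
#include <stdio.h>

static uint32_t irqe = 0x05;   /* enable register (stand-in)             */
static uint32_t stky = 0x07;   /* sticky W1C status register (stand-in)  */

/* Writing a 1 to a sticky bit clears it; 0 bits are left alone. */
static void write_stky(uint32_t val)
{
        stky &= ~val;
}

int main(void)
{
        write_stky(irqe & stky);             /* ack only enabled, pending bits */
        printf("stky now 0x%02x\n", stky);   /* 0x02 survives (not enabled)    */
        return 0;
}
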
+
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
@@ -516,14 +665,7 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
#ifdef CONFIG_PM
int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
{
- unsigned gpio = irq_to_gpio(irq);
-
- if (state)
- gpio_pm_wakeup_request(gpio, PM_WAKE_IGNORE);
- else
- gpio_pm_wakeup_free(gpio);
-
- return 0;
+ return gpio_pm_wakeup_ctrl(irq_to_gpio(irq), state);
}
#endif
@@ -1031,7 +1173,6 @@ int __init init_arch_irq(void)
#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
case IRQ_PORTF_INTA:
#endif
-
set_irq_chained_handler(irq,
bfin_demux_gpio_irq);
break;
@@ -1040,29 +1181,36 @@ int __init init_arch_irq(void)
set_irq_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
-
-#ifdef CONFIG_SMP
-#ifdef CONFIG_TICKSOURCE_GPTMR0
- case IRQ_TIMER0:
-#endif
-#ifdef CONFIG_TICKSOURCE_CORETMR
- case IRQ_CORETMR:
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+ case IRQ_MAC_ERROR:
+ set_irq_chained_handler(irq, bfin_demux_mac_status_irq);
+ break;
#endif
+#ifdef CONFIG_SMP
case IRQ_SUPPLE_0:
case IRQ_SUPPLE_1:
set_irq_handler(irq, handle_percpu_irq);
break;
#endif
-#ifdef CONFIG_IPIPE
-#ifndef CONFIG_TICKSOURCE_CORETMR
- case IRQ_TIMER0:
+#ifdef CONFIG_TICKSOURCE_CORETMR
+ case IRQ_CORETMR:
+# ifdef CONFIG_SMP
+ set_irq_handler(irq, handle_percpu_irq);
+ break;
+# else
set_irq_handler(irq, handle_simple_irq);
break;
+# endif
#endif
- case IRQ_CORETMR:
+
+#ifdef CONFIG_TICKSOURCE_GPTMR0
+ case IRQ_TIMER0:
set_irq_handler(irq, handle_simple_irq);
break;
+#endif
+
+#ifdef CONFIG_IPIPE
default:
set_irq_handler(irq, handle_level_irq);
break;
@@ -1078,14 +1226,22 @@ int __init init_arch_irq(void)
for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
handle_level_irq);
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+ set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
+#endif
#endif
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+ for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
+ set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip,
+ handle_level_irq);
+#endif
/* if configured as edge, then will be changed to do_edge_IRQ */
- for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++)
+ for (irq = GPIO_IRQ_BASE;
+ irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
handle_level_irq);
-
bfin_write_IMASK(0);
CSYNC();
ilat = bfin_read_ILAT();
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 8837be4edb4..ea7f95f6bb4 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -11,6 +11,7 @@
#include <linux/suspend.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
+#include <linux/slab.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -19,35 +20,11 @@
#include <asm/dma.h>
#include <asm/dpmc.h>
-#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_H
-#define WAKEUP_TYPE PM_WAKE_HIGH
-#endif
-
-#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_L
-#define WAKEUP_TYPE PM_WAKE_LOW
-#endif
-
-#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_F
-#define WAKEUP_TYPE PM_WAKE_FALLING
-#endif
-
-#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_R
-#define WAKEUP_TYPE PM_WAKE_RISING
-#endif
-
-#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_B
-#define WAKEUP_TYPE PM_WAKE_BOTH_EDGES
-#endif
-
void bfin_pm_suspend_standby_enter(void)
{
unsigned long flags;
-#ifdef CONFIG_PM_WAKEUP_BY_GPIO
- gpio_pm_wakeup_request(CONFIG_PM_WAKEUP_GPIO_NUMBER, WAKEUP_TYPE);
-#endif
-
local_irq_save_hw(flags);
bfin_pm_standby_setup();
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 369e687582b..a17107a700d 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -21,6 +21,7 @@
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
+#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
@@ -122,9 +123,17 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
wait = msg->call_struct.wait;
cpu_clear(cpu, msg->call_struct.pending);
func(info);
- if (wait)
+ if (wait) {
+#ifdef __ARCH_SYNC_CORE_DCACHE
+ /*
+ * 'wait' usually means synchronization between CPUs.
+ * Invalidate D cache in case shared data was changed
+ * by func() to ensure cache coherence.
+ */
+ resync_core_dcache();
+#endif
cpu_clear(cpu, msg->call_struct.waitmask);
- else
+ } else
kfree(msg);
}
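
A single-threaded model of the 'wait' handshake above: Blackfin cores lack hardware dcache coherence, so the receiver resyncs its dcache after func() (in case func() touched shared data) before clearing its bit in waitmask, while the sender invalidates the waitmask line before each poll. Cache operations are stubbed out here:

#include <stdio.h>

static unsigned waitmask;

static void resync_core_dcache_stub(void) { /* invalidate entire dcache */ }

static void receiver(unsigned cpu, void (*func)(void *), void *info)
{
        func(info);
        resync_core_dcache_stub();
        waitmask &= ~(1u << cpu);          /* cpu_clear(cpu, waitmask) */
}

static void print_info(void *info) { printf("%s\n", (const char *)info); }

int main(void)
{
        waitmask = 1u << 1;                /* sender targets CPU 1 and waits */
        receiver(1, print_info, "hello from IPI");
        /* sender side: while (waitmask) { invalidate waitmask line; } */
        printf("waitmask now %u\n", waitmask);
        return 0;
}
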
@@ -161,8 +170,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
kfree(msg);
break;
default:
- printk(KERN_CRIT "CPU%u: Unknown IPI message \
- 0x%lx\n", cpu, msg->type);
+ printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
+ cpu, msg->type);
kfree(msg);
break;
}
@@ -219,6 +228,13 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
blackfin_dcache_invalidate_range(
(unsigned long)(&msg->call_struct.waitmask),
(unsigned long)(&msg->call_struct.waitmask));
+#ifdef __ARCH_SYNC_CORE_DCACHE
+ /*
+ * Invalidate D cache in case shared data was changed by
+ * other processors to ensure cache coherence.
+ */
+ resync_core_dcache();
+#endif
kfree(msg);
}
return 0;
@@ -261,6 +277,13 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
blackfin_dcache_invalidate_range(
(unsigned long)(&msg->call_struct.waitmask),
(unsigned long)(&msg->call_struct.waitmask));
+#ifdef __ARCH_SYNC_CORE_DCACHE
+ /*
+ * Invalidate D cache in case shared data was changed by
+ * other processors to ensure cache coherence.
+ */
+ resync_core_dcache();
+#endif
kfree(msg);
}
return 0;
@@ -322,8 +345,11 @@ void smp_send_stop(void)
int __cpuinit __cpu_up(unsigned int cpu)
{
- struct task_struct *idle;
int ret;
+ static struct task_struct *idle;
+
+ if (idle)
+ free_task(idle);
idle = fork_idle(cpu);
if (IS_ERR(idle)) {
@@ -332,7 +358,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
}
secondary_stack = task_stack_page(idle) + THREAD_SIZE;
- smp_wmb();
ret = platform_boot_secondary(cpu, idle);
@@ -343,9 +368,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
static void __cpuinit setup_secondary(unsigned int cpu)
{
-#if !defined(CONFIG_TICKSOURCE_GPTMR0)
- struct irq_desc *timer_desc;
-#endif
unsigned long ilat;
bfin_write_IMASK(0);
@@ -360,17 +382,6 @@ static void __cpuinit setup_secondary(unsigned int cpu)
bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
-
-#if defined(CONFIG_TICKSOURCE_GPTMR0)
- /* Power down the core timer, just to play safe. */
- bfin_write_TCNTL(0);
-
- /* system timer0 has been setup by CoreA. */
-#else
- timer_desc = irq_desc + IRQ_CORETMR;
- setup_core_timer();
- timer_desc->chip->enable(IRQ_CORETMR);
-#endif
}
void __cpuinit secondary_start_kernel(void)
@@ -405,7 +416,6 @@ void __cpuinit secondary_start_kernel(void)
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- BUG_ON(current->mm); /* Can't be, but better be safe than sorry. */
preempt_disable();
@@ -413,6 +423,9 @@ void __cpuinit secondary_start_kernel(void)
platform_secondary_init(cpu);
+ /* setup local core timer */
+ bfin_local_timer_setup();
+
local_irq_enable();
/*
@@ -462,25 +475,58 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
#ifdef __ARCH_SYNC_CORE_ICACHE
+unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
unsigned int cpu = get_cpu();
blackfin_invalidate_entire_icache();
- ++per_cpu(cpu_data, cpu).icache_invld_count;
+ icache_invld_count[cpu]++;
put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif
#ifdef __ARCH_SYNC_CORE_DCACHE
+unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
void resync_core_dcache(void)
{
unsigned int cpu = get_cpu();
blackfin_invalidate_entire_dcache();
- ++per_cpu(cpu_data, cpu).dcache_invld_count;
+ dcache_invld_count[cpu]++;
put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+int __cpuexit __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (cpu == 0)
+ return -EPERM;
+
+ set_cpu_online(cpu, false);
+ return 0;
+}
+
+static DECLARE_COMPLETION(cpu_killed);
+
+int __cpuexit __cpu_die(unsigned int cpu)
+{
+ return wait_for_completion_timeout(&cpu_killed, 5000);
+}
+
+void cpu_die(void)
+{
+ complete(&cpu_killed);
+
+ atomic_dec(&init_mm.mm_users);
+ atomic_dec(&init_mm.mm_count);
+
+ local_irq_disable();
+ platform_cpu_die();
+}
+#endif