Diffstat (limited to 'arch/blackfin/mach-common')
-rw-r--r--   arch/blackfin/mach-common/cpufreq.c        |   4
-rw-r--r--   arch/blackfin/mach-common/interrupt.S      |  60
-rw-r--r--   arch/blackfin/mach-common/ints-priority.c  | 235
-rw-r--r--   arch/blackfin/mach-common/pm.c             |  10
-rw-r--r--   arch/blackfin/mach-common/smp.c            |   4
5 files changed, 281 insertions, 32 deletions
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 56f68d879b3..72e16605ca0 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -104,7 +104,7 @@ static int bfin_target(struct cpufreq_policy *policy,
cclk_hz, target_freq, freqs.old);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
tscale = dpm_state_table[index].tscale;
bfin_write_PLL_DIV(plldiv);
@@ -115,7 +115,7 @@ static int bfin_target(struct cpufreq_policy *policy,
cycles += 10; /* ~10 cycles we lose after get_cycles() */
__bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
__bfin_cycles_mod = index;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
/* TODO: just test case for cycles clock source, remove later */
pr_debug("cpufreq: done\n");
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
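
The cpufreq change swaps the plain local_irq_save()/local_irq_restore() pair for the _hw variants: with CONFIG_IPIPE the plain macros only stall the Linux (root) domain virtually, while the _hw forms really toggle the core interrupt mask, which is what the PLL divider reprogramming needs. A minimal usage sketch follows; it assumes the _hw helpers come from this tree's asm/system.h, and the wrapper function is purely illustrative (without CONFIG_IPIPE the _hw forms behave like the plain ones).

/* Hedged sketch: a timing-critical section that must keep hardware
 * interrupts masked even when the interrupt pipeline is enabled.
 * example_set_cclk_divider() is a made-up name; bfin_write_PLL_DIV()
 * and the _hw irqflags helpers are the ones used by the patch above.
 */
#include <asm/blackfin.h>	/* bfin_read_PLL_DIV(), bfin_write_PLL_DIV() */
#include <asm/system.h>		/* local_irq_save_hw() & co. (assumed location) */

static void example_set_cclk_divider(unsigned int csel)
{
	unsigned long flags, plldiv;

	local_irq_save_hw(flags);	/* masks IRQs at the core, not just virtually */
	plldiv = (bfin_read_PLL_DIV() & SSEL) | csel;
	bfin_write_PLL_DIV(plldiv);
	local_irq_restore_hw(flags);
}
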
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 2604b532897..c1bdd1edc8e 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -129,8 +129,15 @@ __common_int_entry:
#endif
r1 = sp;
SP += -12;
+#ifdef CONFIG_IPIPE
+ call ___ipipe_grab_irq
+ SP += 12;
+ cc = r0 == 0;
+ if cc jump .Lcommon_restore_context;
+#else /* CONFIG_IPIPE */
call _do_irq;
SP += 12;
+#endif /* CONFIG_IPIPE */
call _return_from_int;
.Lcommon_restore_context:
RESTORE_CONTEXT
@@ -247,3 +254,56 @@ ENTRY(_evt_system_call)
call _system_call;
jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)
+
+#ifdef CONFIG_IPIPE
+ENTRY(___ipipe_call_irqtail)
+ r0.l = 1f;
+ r0.h = 1f;
+ reti = r0;
+ rti;
+1:
+ [--sp] = rets;
+ [--sp] = ( r7:4, p5:3 );
+ p0.l = ___ipipe_irq_tail_hook;
+ p0.h = ___ipipe_irq_tail_hook;
+ p0 = [p0];
+ sp += -12;
+ call (p0);
+ sp += 12;
+ ( r7:4, p5:3 ) = [sp++];
+ rets = [sp++];
+
+ [--sp] = reti;
+ reti = [sp++]; /* IRQs are off. */
+ r0.h = 3f;
+ r0.l = 3f;
+ p0.l = lo(EVT14);
+ p0.h = hi(EVT14);
+ [p0] = r0;
+ csync;
+ r0 = 0x401f;
+ sti r0;
+ raise 14;
+ [--sp] = reti; /* IRQs on. */
+2:
+ jump 2b; /* Likely paranoid. */
+3:
+ sp += 4; /* Discard saved RETI */
+ r0.h = _evt14_softirq;
+ r0.l = _evt14_softirq;
+ p0.l = lo(EVT14);
+ p0.h = hi(EVT14);
+ [p0] = r0;
+ csync;
+ p0.l = _bfin_irq_flags;
+ p0.h = _bfin_irq_flags;
+ r0 = [p0];
+ sti r0;
+#if 0 /* FIXME: this actually raises scheduling latencies */
+ /* Reenable interrupts */
+ [--sp] = reti;
+ r0 = [sp++];
+#endif
+ rts;
+ENDPROC(___ipipe_call_irqtail)
+#endif /* CONFIG_IPIPE */
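
The new ___ipipe_call_irqtail stub synthesizes a clean return frame, calls through the __ipipe_irq_tail_hook symbol, then raises EVT14 so the remaining tail work runs at the lowest hardware interrupt priority before control drops back to the caller. On the C side the hook is just a symbol holding a function address; a hedged installation sketch follows (the exact declaration and any synchronization requirements are assumptions about the I-pipe tree, not guarantees).

/* Hedged sketch: pointing the irqtail trampoline at a deferred-work
 * routine.  my_irq_tail_work() is a hypothetical handler; the type of
 * __ipipe_irq_tail_hook (an unsigned long holding a code address) is
 * an assumption.
 */
extern unsigned long __ipipe_irq_tail_hook;

static void my_irq_tail_work(void)
{
	/* work to run once the outer interrupt frame has unwound */
}

static void install_irq_tail_hook(void)
{
	__ipipe_irq_tail_hook = (unsigned long)my_irq_tail_work;
}
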
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 7c1db775751..1bba6030dce 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -34,6 +34,9 @@
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
+#ifdef CONFIG_IPIPE
+#include <linux/ipipe.h>
+#endif
#ifdef CONFIG_KGDB
#include <linux/kgdb.h>
#endif
@@ -135,8 +138,8 @@ static void bfin_ack_noop(unsigned int irq)
static void bfin_core_mask_irq(unsigned int irq)
{
bfin_irq_flags &= ~(1 << irq);
- if (!irqs_disabled())
- local_irq_enable();
+ if (!irqs_disabled_hw())
+ local_irq_enable_hw();
}
static void bfin_core_unmask_irq(unsigned int irq)
@@ -151,8 +154,8 @@ static void bfin_core_unmask_irq(unsigned int irq)
* local_irq_enable just does "STI bfin_irq_flags", so it's exactly
* what we need.
*/
- if (!irqs_disabled())
- local_irq_enable();
+ if (!irqs_disabled_hw())
+ local_irq_enable_hw();
return;
}
@@ -235,7 +238,7 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
break;
}
- local_irq_save(flags);
+ local_irq_save_hw(flags);
if (state) {
bfin_sic_iwr[bank] |= (1 << bit);
@@ -246,7 +249,7 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
vr_wakeup &= ~wakeup;
}
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return 0;
}
@@ -272,6 +275,19 @@ static struct irq_chip bfin_internal_irqchip = {
#endif
};
+static void bfin_handle_irq(unsigned irq)
+{
+#ifdef CONFIG_IPIPE
+ struct pt_regs regs; /* Contents not used. */
+ ipipe_trace_irq_entry(irq);
+ __ipipe_handle_irq(irq, &regs);
+ ipipe_trace_irq_exit(irq);
+#else /* !CONFIG_IPIPE */
+ struct irq_desc *desc = irq_desc + irq;
+ desc->handle_irq(irq, desc);
+#endif /* !CONFIG_IPIPE */
+}
+
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
static int error_int_mask;
@@ -325,10 +341,9 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
irq = IRQ_UART1_ERROR;
if (irq) {
- if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR))) {
- struct irq_desc *desc = irq_desc + irq;
- desc->handle_irq(irq, desc);
- } else {
+ if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
+ bfin_handle_irq(irq);
+ else {
switch (irq) {
case IRQ_PPI_ERROR:
@@ -374,10 +389,14 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
+#ifdef CONFIG_IPIPE
+ _set_irq_handler(irq, handle_edge_irq);
+#else
struct irq_desc *desc = irq_desc + irq;
/* May not call generic set_irq_handler() due to spinlock
recursion. */
desc->handle_irq = handle;
+#endif
}
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
@@ -563,10 +582,8 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
mask = get_gpiop_data(i) & get_gpiop_maska(i);
while (mask) {
- if (mask & 1) {
- desc = irq_desc + irq;
- desc->handle_irq(irq, desc);
- }
+ if (mask & 1)
+ bfin_handle_irq(irq);
irq++;
mask >>= 1;
}
@@ -576,10 +593,8 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
do {
- if (mask & 1) {
- desc = irq_desc + irq;
- desc->handle_irq(irq, desc);
- }
+ if (mask & 1)
+ bfin_handle_irq(irq);
irq++;
mask >>= 1;
} while (mask);
@@ -900,8 +915,7 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
while (request) {
if (request & 1) {
irq = pint2irq_lut[pint_val] + SYS_IRQS;
- desc = irq_desc + irq;
- desc->handle_irq(irq, desc);
+ bfin_handle_irq(irq);
}
pint_val++;
request >>= 1;
@@ -1025,11 +1039,10 @@ int __init init_arch_irq(void)
break;
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
case IRQ_GENERIC_ERROR:
- set_irq_handler(irq, bfin_demux_error_irq);
-
+ set_irq_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
case IRQ_TIMER0:
set_irq_handler(irq, handle_percpu_irq);
break;
@@ -1041,7 +1054,17 @@ int __init init_arch_irq(void)
break;
#endif
default:
+#ifdef CONFIG_IPIPE
+ /*
+ * We want internal interrupt sources to be masked, because
+ * ISRs may trigger interrupts recursively (e.g. DMA), but
+ * interrupts are _not_ masked at CPU level. So let's handle
+ * them as level interrupts.
+ */
+ set_irq_handler(irq, handle_level_irq);
+#else /* !CONFIG_IPIPE */
set_irq_handler(irq, handle_simple_irq);
+#endif /* !CONFIG_IPIPE */
break;
}
}
@@ -1101,6 +1124,14 @@ int __init init_arch_irq(void)
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
+#ifdef CONFIG_IPIPE
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ struct irq_desc *desc = irq_desc + irq;
+ desc->ic_prio = __ipipe_get_irq_priority(irq);
+ desc->thr_prio = __ipipe_get_irqthread_priority(irq);
+ }
+#endif /* CONFIG_IPIPE */
+
return 0;
}
@@ -1156,3 +1187,161 @@ void do_irq(int vec, struct pt_regs *fp)
}
asm_do_IRQ(vec, fp);
}
+
+#ifdef CONFIG_IPIPE
+
+int __ipipe_get_irq_priority(unsigned irq)
+{
+ int ient, prio;
+
+ if (irq <= IRQ_CORETMR)
+ return irq;
+
+ for (ient = 0; ient < NR_PERI_INTS; ient++) {
+ struct ivgx *ivg = ivg_table + ient;
+ if (ivg->irqno == irq) {
+ for (prio = 0; prio <= IVG13-IVG7; prio++) {
+ if (ivg7_13[prio].ifirst <= ivg &&
+ ivg7_13[prio].istop > ivg)
+ return IVG7 + prio;
+ }
+ }
+ }
+
+ return IVG15;
+}
+
+int __ipipe_get_irqthread_priority(unsigned irq)
+{
+ int ient, prio;
+ int demux_irq;
+
+ /* The returned priority value is rescaled to [0..IVG13+1]
+ * with 0 being the lowest effective priority level. */
+
+ if (irq <= IRQ_CORETMR)
+ return IVG13 - irq + 1;
+
+ /* GPIO IRQs are given the priority of the demux
+ * interrupt. */
+ if (IS_GPIOIRQ(irq)) {
+#if defined(CONFIG_BF54x)
+ u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
+ demux_irq = (bank == 0 ? IRQ_PINT0 :
+ bank == 1 ? IRQ_PINT1 :
+ bank == 2 ? IRQ_PINT2 :
+ IRQ_PINT3);
+#elif defined(CONFIG_BF561)
+ demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
+ irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
+ IRQ_PROG0_INTA);
+#elif defined(CONFIG_BF52x)
+ demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
+ irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
+ IRQ_PORTF_INTA);
+#else
+ demux_irq = irq;
+#endif
+ return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
+ }
+
+ /* The GPIO demux interrupt is given a lower priority
+ * than the GPIO IRQs, so that its threaded handler
+ * unmasks the interrupt line after the decoded IRQs
+ * have been processed. */
+ prio = PRIO_GPIODEMUX(irq);
+ /* demux irq? */
+ if (prio != -1)
+ return IVG13 - prio;
+
+ for (ient = 0; ient < NR_PERI_INTS; ient++) {
+ struct ivgx *ivg = ivg_table + ient;
+ if (ivg->irqno == irq) {
+ for (prio = 0; prio <= IVG13-IVG7; prio++) {
+ if (ivg7_13[prio].ifirst <= ivg &&
+ ivg7_13[prio].istop > ivg)
+ return IVG7 - prio;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
+{
+ struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
+ struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
+ int irq;
+
+ if (likely(vec == EVT_IVTMR_P)) {
+ irq = IRQ_CORETMR;
+ goto handle_irq;
+ }
+
+ SSYNC();
+
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
+ {
+ unsigned long sic_status[3];
+
+ sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
+ sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
+#ifdef CONFIG_BF54x
+ sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
+#endif
+ for (;; ivg++) {
+ if (ivg >= ivg_stop) {
+ atomic_inc(&num_spurious);
+ return 0;
+ }
+ if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
+ break;
+ }
+ }
+#else
+ {
+ unsigned long sic_status;
+
+ sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
+
+ for (;; ivg++) {
+ if (ivg >= ivg_stop) {
+ atomic_inc(&num_spurious);
+ return 0;
+ } else if (sic_status & ivg->isrflag)
+ break;
+ }
+ }
+#endif
+
+ irq = ivg->irqno;
+
+ if (irq == IRQ_SYSTMR) {
+ bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
+ /* This is basically what we need from the register frame. */
+ __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
+ __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
+ if (!ipipe_root_domain_p)
+ __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
+ else
+ __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
+ }
+
+handle_irq:
+
+ ipipe_trace_irq_entry(irq);
+ __ipipe_handle_irq(irq, regs);
+ ipipe_trace_irq_exit(irq);
+
+ if (ipipe_root_domain_p)
+ return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+
+ return 0;
+}
+
+#endif /* CONFIG_IPIPE */
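
With CONFIG_IPIPE the demux handlers stop calling desc->handle_irq() directly: bfin_handle_irq() and __ipipe_grab_irq() hand the decoded interrupt to the pipeline through __ipipe_handle_irq(), and __ipipe_grab_irq()'s return value tells the assembly entry stub whether the root domain may run its interrupt tail (_return_from_int) or should merely restore context. Ordinary driver code is unaffected; a handler registered with request_irq() is still delivered through the flow handler once the root domain accepts the IRQ. A minimal, hedged registration sketch (IRQ number, device name and handler are hypothetical):

/* Hedged sketch: driver-side IRQ registration is unchanged; only the
 * low-level delivery path above differs under CONFIG_IPIPE.
 */
#include <linux/init.h>
#include <linux/interrupt.h>

#define MY_DEVICE_IRQ	42	/* hypothetical demuxed GPIO/peripheral IRQ number */

static irqreturn_t my_device_isr(int irq, void *dev_id)
{
	/* runs from the root (Linux) domain, exactly as without I-pipe */
	return IRQ_HANDLED;
}

static int __init my_device_init(void)
{
	/* same request_irq() call with or without CONFIG_IPIPE */
	return request_irq(MY_DEVICE_IRQ, my_device_isr, 0, "my_device", NULL);
}
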
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 96600b8cb6a..d3d70fd67c1 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -71,7 +71,7 @@ void bfin_pm_suspend_standby_enter(void)
gpio_pm_wakeup_request(CONFIG_PM_WAKEUP_GPIO_NUMBER, WAKEUP_TYPE);
#endif
- local_irq_save(flags);
+ local_irq_save_hw(flags);
bfin_pm_standby_setup();
#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
@@ -105,7 +105,7 @@ void bfin_pm_suspend_standby_enter(void)
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
int bf53x_suspend_l1_mem(unsigned char *memptr)
@@ -249,12 +249,12 @@ int bfin_pm_suspend_mem_enter(void)
wakeup |= GPWE;
#endif
- local_irq_save(flags);
+ local_irq_save_hw(flags);
ret = blackfin_dma_suspend();
if (ret) {
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
kfree(memptr);
return ret;
}
@@ -275,7 +275,7 @@ int bfin_pm_suspend_mem_enter(void)
bfin_gpio_pm_hibernate_restore();
blackfin_dma_resume();
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
kfree(memptr);
return 0;
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 66c47a702da..77c99284709 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -348,7 +348,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
static void __cpuinit setup_secondary(unsigned int cpu)
{
-#ifndef CONFIG_TICK_SOURCE_SYSTMR0
+#if !(defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE))
struct irq_desc *timer_desc;
#endif
unsigned long ilat;
@@ -369,7 +369,7 @@ static void __cpuinit setup_secondary(unsigned int cpu)
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
/* Power down the core timer, just to play safe. */
bfin_write_TCNTL(0);