Diffstat (limited to 'arch/powerpc/kernel/irq.c')
 -rw-r--r--  arch/powerpc/kernel/irq.c | 123
 1 file changed, 66 insertions(+), 57 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 844d3f882a1..f7f376ea7b1 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -118,6 +118,7 @@ notrace void raw_local_irq_restore(unsigned long en)
 	if (!en)
 		return;
 
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
 		/*
 		 * Do we need to disable preemption here? Not really: in the
@@ -135,6 +136,7 @@ notrace void raw_local_irq_restore(unsigned long en)
 		if (local_paca->lppaca_ptr->int_dword.any_int)
 			iseries_handle_interrupts();
 	}
+#endif /* CONFIG_PPC_STD_MMU_64 */
 
 	if (test_perf_counter_pending()) {
 		clear_perf_counter_pending();
@@ -254,77 +256,84 @@ void fixup_irqs(cpumask_t map)
 }
 #endif
 
-void do_IRQ(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-	unsigned int irq;
 #ifdef CONFIG_IRQSTACKS
+static inline void handle_one_irq(unsigned int irq)
+{
 	struct thread_info *curtp, *irqtp;
-#endif
+	unsigned long saved_sp_limit;
+	struct irq_desc *desc;
 
-	irq_enter();
+	/* Switch to the irq stack to handle this */
+	curtp = current_thread_info();
+	irqtp = hardirq_ctx[smp_processor_id()];
+
+	if (curtp == irqtp) {
+		/* We're already on the irq stack, just handle it */
+		generic_handle_irq(irq);
+		return;
+	}
+
+	desc = irq_desc + irq;
+	saved_sp_limit = current->thread.ksp_limit;
+
+	irqtp->task = curtp->task;
+	irqtp->flags = 0;
+
+	/* Copy the softirq bits in preempt_count so that the
+	 * softirq checks work in the hardirq context. */
+	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
+			       (curtp->preempt_count & SOFTIRQ_MASK);
+
+	current->thread.ksp_limit = (unsigned long)irqtp +
+		_ALIGN_UP(sizeof(struct thread_info), 16);
+
+	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
+	current->thread.ksp_limit = saved_sp_limit;
+	irqtp->task = NULL;
+
+	/* Set any flag that may have been set on the
+	 * alternate stack
+	 */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+	generic_handle_irq(irq);
+}
+#endif
 
+static inline void check_stack_overflow(void)
+{
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
-	/* Debugging check for stack overflow: is there less than 2KB free? */
-	{
-		long sp;
+	long sp;
 
-		sp = __get_SP() & (THREAD_SIZE-1);
+	sp = __get_SP() & (THREAD_SIZE-1);
 
-		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
-			printk("do_IRQ: stack overflow: %ld\n",
-				sp - sizeof(struct thread_info));
-			dump_stack();
-		}
+	/* check for stack overflow: is there less than 2KB free? */
+	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
+		printk("do_IRQ: stack overflow: %ld\n",
+			sp - sizeof(struct thread_info));
+		dump_stack();
 	}
 #endif
+}
 
-	/*
-	 * Every platform is required to implement ppc_md.get_irq.
-	 * This function will either return an irq number or NO_IRQ to
-	 * indicate there are no more pending.
-	 * The value NO_IRQ_IGNORE is for buggy hardware and means that this
-	 * IRQ has already been handled. -- Tom
-	 */
-	irq = ppc_md.get_irq();
+void do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	unsigned int irq;
 
-	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
-#ifdef CONFIG_IRQSTACKS
-		/* Switch to the irq stack to handle this */
-		curtp = current_thread_info();
-		irqtp = hardirq_ctx[smp_processor_id()];
-		if (curtp != irqtp) {
-			struct irq_desc *desc = irq_desc + irq;
-			void *handler = desc->handle_irq;
-			unsigned long saved_sp_limit = current->thread.ksp_limit;
-			if (handler == NULL)
-				handler = &__do_IRQ;
-			irqtp->task = curtp->task;
-			irqtp->flags = 0;
-
-			/* Copy the softirq bits in preempt_count so that the
-			 * softirq checks work in the hardirq context.
-			 */
-			irqtp->preempt_count =
-				(irqtp->preempt_count & ~SOFTIRQ_MASK) |
-				(curtp->preempt_count & SOFTIRQ_MASK);
+	irq_enter();
 
-			current->thread.ksp_limit = (unsigned long)irqtp +
-				_ALIGN_UP(sizeof(struct thread_info), 16);
-			call_handle_irq(irq, desc, irqtp, handler);
-			current->thread.ksp_limit = saved_sp_limit;
-			irqtp->task = NULL;
+	check_stack_overflow();
+
+	irq = ppc_md.get_irq();
 
-			/* Set any flag that may have been set on the
-			 * alternate stack
-			 */
-			if (irqtp->flags)
-				set_bits(irqtp->flags, &curtp->flags);
-		} else
-#endif
-			generic_handle_irq(irq);
-	} else if (irq != NO_IRQ_IGNORE)
+	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
+		handle_one_irq(irq);
+	else if (irq != NO_IRQ_IGNORE)
 		/* That's not SMP safe ... but who cares ? */
 		ppc_spurious_interrupts++;
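
The heart of this patch is the stack switch that handle_one_irq() now factors out of do_IRQ(): the handler runs on the dedicated per-CPU interrupt stack (hardirq_ctx[smp_processor_id()]) rather than on the interrupted task's kernel stack. For readers unfamiliar with that idiom, here is a minimal userspace analogue using the POSIX ucontext API. This is an illustrative sketch only, not kernel code; alt_stack, handler and the context names are hypothetical.

	#include <stdio.h>
	#include <stdlib.h>
	#include <ucontext.h>

	#define ALT_STACK_SIZE	(64 * 1024)

	static ucontext_t main_ctx;	/* resume point, like the task stack */
	static ucontext_t alt_ctx;	/* runs on the alternate stack, like irqtp */

	/* Stand-in for the interrupt handler: its locals live on alt_stack,
	 * just as handlers invoked via call_handle_irq() use hardirq_ctx[cpu]. */
	static void handler(void)
	{
		int local;

		printf("handler: local variable at %p\n", (void *)&local);
		/* Returning resumes main_ctx via uc_link, like falling back to
		 * the task stack after call_handle_irq() returns. */
	}

	int main(void)
	{
		char *alt_stack = malloc(ALT_STACK_SIZE);
		int local;

		if (alt_stack == NULL)
			return 1;

		printf("main: local variable at %p\n", (void *)&local);

		/* Point a context at the alternate stack, as handle_one_irq()
		 * redirects the stack pointer at irqtp before calling the handler. */
		getcontext(&alt_ctx);
		alt_ctx.uc_stack.ss_sp = alt_stack;
		alt_ctx.uc_stack.ss_size = ALT_STACK_SIZE;
		alt_ctx.uc_link = &main_ctx;
		makecontext(&alt_ctx, handler, 0);

		/* Switch stacks, run the handler, switch back. */
		swapcontext(&main_ctx, &alt_ctx);

		printf("main: back on the original stack\n");
		free(alt_stack);
		return 0;
	}

What the sketch cannot show is the kernel-specific bookkeeping visible in the diff: copying the SOFTIRQ bits of preempt_count onto the irq stack's thread_info so softirq checks still work in hardirq context, temporarily raising current->thread.ksp_limit so the stack-limit check accepts the new stack, and propagating any thread_info flags set on the alternate stack back to the task with set_bits().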