author     Ingo Molnar <mingo@kernel.org>   2012-04-14 13:18:27 +0200
committer  Ingo Molnar <mingo@kernel.org>   2012-04-14 13:19:04 +0200
commit     6ac1ef482d7ae0c690f1640bf6eb818ff9a2d91e (patch)
tree       021cc9f6b477146fcebe6f3be4752abfa2ba18a9 /kernel/irq/manage.c
parent     682968e0c425c60f0dde37977e5beb2b12ddc4cc (diff)
parent     a385ec4f11bdcf81af094c03e2444ee9b7fad2e5 (diff)
Merge branch 'perf/core' into perf/uprobes
Merge in the latest upstream (and the latest perf development tree) to
prepare for tooling changes, and also to pick up v3.4 MM changes that
the uprobes code needs to take care of.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--   kernel/irq/manage.c   147
1 file changed, 88 insertions(+), 59 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 32313c08444..89a3ea82569 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -282,7 +282,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cpumask *set = irq_default_affinity;
-       int ret;
+       int ret, node = desc->irq_data.node;

        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
@@ -301,6 +301,13 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
        }

        cpumask_and(mask, cpu_online_mask, set);
+       if (node != NUMA_NO_NODE) {
+               const struct cpumask *nodemask = cpumask_of_node(node);
+
+               /* make sure at least one of the cpus in nodemask is online */
+               if (cpumask_intersects(mask, nodemask))
+                       cpumask_and(mask, mask, nodemask);
+       }
        ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
        switch (ret) {
        case IRQ_SET_MASK_OK:
@@ -645,7 +652,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  * is marked MASKED.
  */
 static void irq_finalize_oneshot(struct irq_desc *desc,
-                                struct irqaction *action, bool force)
+                                struct irqaction *action)
 {
        if (!(desc->istate & IRQS_ONESHOT))
                return;
@@ -679,7 +686,7 @@ again:
         * we would clear the threads_oneshot bit of this thread which
         * was just set.
         */
-       if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+       if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
                goto out_unlock;

        desc->threads_oneshot &= ~action->thread_mask;
@@ -739,7 +746,7 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)

        local_bh_disable();
        ret = action->thread_fn(action->irq, action->dev_id);
-       irq_finalize_oneshot(desc, action, false);
+       irq_finalize_oneshot(desc, action);
        local_bh_enable();
        return ret;
 }
@@ -755,10 +762,17 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
        irqreturn_t ret;

        ret = action->thread_fn(action->irq, action->dev_id);
-       irq_finalize_oneshot(desc, action, false);
+       irq_finalize_oneshot(desc, action);
        return ret;
 }

+static void wake_threads_waitq(struct irq_desc *desc)
+{
+       if (atomic_dec_and_test(&desc->threads_active) &&
+           waitqueue_active(&desc->wait_for_threads))
+               wake_up(&desc->wait_for_threads);
+}
+
 /*
  * Interrupt handler thread
  */
@@ -771,57 +785,41 @@ static int irq_thread(void *data)
        struct irq_desc *desc = irq_to_desc(action->irq);
        irqreturn_t (*handler_fn)(struct irq_desc *desc,
                        struct irqaction *action);
-       int wake;

-       if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
+       if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
                                        &action->thread_flags))
                handler_fn = irq_forced_thread_fn;
        else
                handler_fn = irq_thread_fn;

        sched_setscheduler(current, SCHED_FIFO, &param);
-       current->irqaction = action;
+       current->irq_thread = 1;

        while (!irq_wait_for_interrupt(action)) {
+               irqreturn_t action_ret;

                irq_thread_check_affinity(desc, action);

-               atomic_inc(&desc->threads_active);
+               action_ret = handler_fn(desc, action);
+               if (!noirqdebug)
+                       note_interrupt(action->irq, desc, action_ret);

-               raw_spin_lock_irq(&desc->lock);
-               if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
-                       /*
-                        * CHECKME: We might need a dedicated
-                        * IRQ_THREAD_PENDING flag here, which
-                        * retriggers the thread in check_irq_resend()
-                        * but AFAICT IRQS_PENDING should be fine as it
-                        * retriggers the interrupt itself --- tglx
-                        */
-                       desc->istate |= IRQS_PENDING;
-                       raw_spin_unlock_irq(&desc->lock);
-               } else {
-                       irqreturn_t action_ret;
-
-                       raw_spin_unlock_irq(&desc->lock);
-                       action_ret = handler_fn(desc, action);
-                       if (!noirqdebug)
-                               note_interrupt(action->irq, desc, action_ret);
-               }
-
-               wake = atomic_dec_and_test(&desc->threads_active);
-
-               if (wake && waitqueue_active(&desc->wait_for_threads))
-                       wake_up(&desc->wait_for_threads);
+               wake_threads_waitq(desc);
        }

-       /* Prevent a stale desc->threads_oneshot */
-       irq_finalize_oneshot(desc, action, true);
-
        /*
-        * Clear irqaction. Otherwise exit_irq_thread() would make
+        * This is the regular exit path. __free_irq() is stopping the
+        * thread via kthread_stop() after calling
+        * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
+        * oneshot mask bit can be set. We cannot verify that as we
+        * cannot touch the oneshot mask at this point anymore as
+        * __setup_irq() might have given out currents thread_mask
+        * again.
+        *
+        * Clear irq_thread. Otherwise exit_irq_thread() would make
         * fuzz about an active irq thread going into nirvana.
         */
-       current->irqaction = NULL;
+       current->irq_thread = 0;
        return 0;
 }

@@ -832,27 +830,28 @@ void exit_irq_thread(void)
 {
        struct task_struct *tsk = current;
        struct irq_desc *desc;
+       struct irqaction *action;

-       if (!tsk->irqaction)
+       if (!tsk->irq_thread)
                return;

+       action = kthread_data(tsk);
+
        printk(KERN_ERR
               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-              tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+              tsk->comm ? tsk->comm : "", tsk->pid, action->irq);

-       desc = irq_to_desc(tsk->irqaction->irq);
+       desc = irq_to_desc(action->irq);

        /*
-        * Prevent a stale desc->threads_oneshot. Must be called
-        * before setting the IRQTF_DIED flag.
+        * If IRQTF_RUNTHREAD is set, we need to decrement
+        * desc->threads_active and wake possible waiters.
         */
-       irq_finalize_oneshot(desc, tsk->irqaction, true);
+       if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+               wake_threads_waitq(desc);

-       /*
-        * Set the THREAD DIED flag to prevent further wakeups of the
-        * soon to be gone threaded handler.
-        */
-       set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+       /* Prevent a stale desc->threads_oneshot */
+       irq_finalize_oneshot(desc, action);
 }

 static void irq_setup_forced_threading(struct irqaction *new)
@@ -985,6 +984,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)

                /* add new interrupt at end of irq queue */
                do {
+                       /*
+                        * Or all existing action->thread_mask bits,
+                        * so we can find the next zero bit for this
+                        * new action.
+                        */
                        thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
@@ -993,14 +997,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        }

        /*
-        * Setup the thread mask for this irqaction. Unlikely to have
-        * 32 resp 64 irqs sharing one line, but who knows.
+        * Setup the thread mask for this irqaction for ONESHOT. For
+        * !ONESHOT irqs the thread mask is 0 so we can avoid a
+        * conditional in irq_wake_thread().
         */
-       if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
-               ret = -EBUSY;
-               goto out_mask;
+       if (new->flags & IRQF_ONESHOT) {
+               /*
+                * Unlikely to have 32 resp 64 irqs sharing one line,
+                * but who knows.
+                */
+               if (thread_mask == ~0UL) {
+                       ret = -EBUSY;
+                       goto out_mask;
+               }
+               /*
+                * The thread_mask for the action is or'ed to
+                * desc->thread_active to indicate that the
+                * IRQF_ONESHOT thread handler has been woken, but not
+                * yet finished. The bit is cleared when a thread
+                * completes. When all threads of a shared interrupt
+                * line have completed desc->threads_active becomes
+                * zero and the interrupt line is unmasked. See
+                * handle.c:irq_wake_thread() for further information.
+                *
+                * If no thread is woken by primary (hard irq context)
+                * interrupt handlers, then desc->threads_active is
+                * also checked for zero to unmask the irq line in the
+                * affected hard irq flow handlers
+                * (handle_[fasteoi|level]_irq).
+                *
+                * The new action gets the first zero bit of
+                * thread_mask assigned. See the loop above which or's
+                * all existing action->thread_mask bits.
+                */
+               new->thread_mask = 1 << ffz(thread_mask);
        }
-       new->thread_mask = 1 << ffz(thread_mask);

        if (!shared) {
                init_waitqueue_head(&desc->wait_for_threads);
@@ -1103,8 +1134,7 @@ out_thread:
                struct task_struct *t = new->thread;

                new->thread = NULL;
-               if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
-                       kthread_stop(t);
+               kthread_stop(t);
                put_task_struct(t);
        }
 out_mput:
@@ -1214,8 +1244,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 #endif

        if (action->thread) {
-               if (!test_bit(IRQTF_DIED, &action->thread_flags))
-                       kthread_stop(action->thread);
+               kthread_stop(action->thread);
                put_task_struct(action->thread);
        }
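The setup_affinity() hunk above is the NUMA-aware part of this merge: the
default affinity is first cut down to online CPUs, then narrowed to the
interrupt's home node, but only if at least one CPU of that node is
actually online, so the resulting mask can never end up empty. A minimal
userspace sketch of that decision logic, with plain unsigned long
bitmasks standing in for struct cpumask (the function name
setup_affinity_mask and the sample masks are illustrative only, not
kernel API):

#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Model of the new setup_affinity() flow; one bit per CPU. */
static unsigned long setup_affinity_mask(unsigned long online,
                                         unsigned long def_set,
                                         unsigned long nodemask, int node)
{
        unsigned long mask = online & def_set;  /* cpumask_and() */

        if (node != NUMA_NO_NODE) {
                /* make sure at least one cpu of the node is online */
                if (mask & nodemask)            /* cpumask_intersects() */
                        mask &= nodemask;       /* narrow to the node */
        }
        return mask;
}

int main(void)
{
        /* CPUs 0-3 online, node 1 owns CPUs 2-3, no default restriction */
        unsigned long mask = setup_affinity_mask(0xfUL, ~0UL, 0xcUL, 1);

        printf("affinity mask: %#lx\n", mask);  /* prints 0xc */
        return 0;
}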
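wake_threads_waitq() centralizes the dec-and-test pattern that was
previously open-coded in irq_thread(): desc->threads_active counts
woken-but-unfinished handler threads, and whichever thread drops the
count to zero wakes any waiter sleeping in synchronize_irq(). A rough
userspace model of the same pattern, assuming C11 atomics and a pthread
condition variable in place of the kernel waitqueue (the
waitqueue_active() short-circuit is replaced by an unconditional
broadcast here; compile with -pthread):

#include <stdatomic.h>
#include <pthread.h>

static atomic_int threads_active;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_for_threads = PTHREAD_COND_INITIALIZER;

/* Mirrors wake_threads_waitq(): the last finishing thread wakes waiters. */
static void wake_threads_waitq(void)
{
        /* fetch_sub returns the old value; old == 1 means we hit zero */
        if (atomic_fetch_sub(&threads_active, 1) == 1) {
                pthread_mutex_lock(&lock);
                pthread_cond_broadcast(&wait_for_threads);
                pthread_mutex_unlock(&lock);
        }
}

/* Roughly what synchronize_irq() does: sleep until the count drains. */
static void wait_until_idle(void)
{
        pthread_mutex_lock(&lock);
        while (atomic_load(&threads_active) != 0)
                pthread_cond_wait(&wait_for_threads, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        atomic_store(&threads_active, 2);       /* two handler runs pending */
        wake_threads_waitq();                   /* 2 -> 1: no wakeup yet */
        wake_threads_waitq();                   /* 1 -> 0: waiters woken */
        wait_until_idle();                      /* returns immediately */
        return 0;
}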
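The IRQF_ONESHOT branch in __setup_irq() hands every shared action a
private bit: the loop above OR's together the thread_mask of each action
already on the line, -EBUSY is returned once all bits are taken, and
otherwise ffz() yields the first free bit for the new action. A compact
userspace sketch of that allocation, assuming ffz() can be expressed via
the GCC/Clang builtin __builtin_ctzl on the complement (sample masks
made up):

#include <stdio.h>

/* Kernel ffz(): index of the first zero bit (undefined for ~0UL). */
static unsigned long ffz(unsigned long word)
{
        return __builtin_ctzl(~word);
}

int main(void)
{
        /* thread_mask bits of actions already sharing the line */
        unsigned long existing[] = { 1UL << 0, 1UL << 1, 1UL << 2 };
        unsigned long thread_mask = 0;
        unsigned long new_mask;

        /* Or all existing thread_mask bits, as the __setup_irq() loop does */
        for (int i = 0; i < 3; i++)
                thread_mask |= existing[i];

        if (thread_mask == ~0UL)
                return 1;       /* all bits taken: kernel returns -EBUSY */

        /* the new action gets the first zero bit */
        new_mask = 1UL << ffz(thread_mask);
        printf("new thread_mask: %#lx\n", new_mask);    /* prints 0x8 */
        return 0;
}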