Diffstat (limited to 'include/linux/interrupt.h')
-rw-r--r--  include/linux/interrupt.h | 72
1 file changed, 58 insertions(+), 14 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c7bfac1c4a7..698ad053d06 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -199,16 +199,6 @@ extern int check_wakeup_irqs(void);
static inline int check_wakeup_irqs(void) { return 0; }
#endif
-#if defined(CONFIG_SMP)
-
-extern cpumask_var_t irq_default_affinity;
-
-extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
-extern int irq_can_set_affinity(unsigned int irq);
-extern int irq_select_affinity(unsigned int irq);
-
-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
-
/**
* struct irq_affinity_notify - context for notification of IRQ affinity changes
* @irq: Interrupt to which notification applies
@@ -229,6 +219,49 @@ struct irq_affinity_notify {
void (*release)(struct kref *ref);
};
+#if defined(CONFIG_SMP)
+
+extern cpumask_var_t irq_default_affinity;
+
+/* Internal implementation. Use the helpers below */
+extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
+ bool force);
+
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq: Interrupt to set affinity
+ * @cpumask: cpumask
+ *
+ * Fails if cpumask does not contain an online CPU
+ */
+static inline int
+irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return __irq_set_affinity(irq, cpumask, false);
+}
+
+/**
+ * irq_force_affinity - Force the irq affinity of a given irq
+ * @irq: Interrupt to set affinity
+ * @cpumask: cpumask
+ *
+ * Same as irq_set_affinity, but without checking the mask against
+ * online cpus.
+ *
+ * Solely for low level cpu hotplug code, where we need to make per
+ * cpu interrupts affine before the cpu becomes online.
+ */
+static inline int
+irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return __irq_set_affinity(irq, cpumask, true);
+}
+
+extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
+
+extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
@@ -239,6 +272,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
return -EINVAL;
}
+static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return 0;
+}
+
static inline int irq_can_set_affinity(unsigned int irq)
{
return 0;
@@ -251,6 +289,12 @@ static inline int irq_set_affinity_hint(unsigned int irq,
{
return -EINVAL;
}
+
+static inline int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+ return 0;
+}
#endif /* CONFIG_SMP */
/*
@@ -453,7 +497,7 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
static inline void tasklet_unlock(struct tasklet_struct *t)
{
- smp_mb__before_clear_bit();
+ smp_mb__before_atomic();
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
@@ -501,7 +545,7 @@ static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
atomic_inc(&t->count);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
}
static inline void tasklet_disable(struct tasklet_struct *t)
@@ -513,13 +557,13 @@ static inline void tasklet_disable(struct tasklet_struct *t)
static inline void tasklet_enable(struct tasklet_struct *t)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&t->count);
}
static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
- smp_mb__before_atomic_dec();
+ smp_mb__before_atomic();
atomic_dec(&t->count);
}
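
A minimal usage sketch of the affinity helpers added above (the example_bind_irq() wrapper, its IRQ number and CPU argument are hypothetical, not part of the patch): irq_set_affinity() is the normal interface and fails if the mask contains no online CPU, while irq_force_affinity() skips that check and is intended only for low level CPU hotplug code that must bind a per-CPU interrupt before the CPU is marked online.

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Hypothetical helper: steer @irq to @cpu, optionally before the CPU is online. */
static int example_bind_irq(unsigned int irq, unsigned int cpu, bool cpu_still_offline)
{
	const struct cpumask *mask = cpumask_of(cpu);

	/* Hotplug bring-up path: the target CPU is not yet marked online. */
	if (cpu_still_offline)
		return irq_force_affinity(irq, mask);

	/* Normal path: rejected if the mask contains no online CPU. */
	return irq_set_affinity(irq, mask);
}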
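
The tasklet hunks convert the old operation-specific barriers (smp_mb__before_clear_bit(), smp_mb__after_atomic_inc(), smp_mb__before_atomic_dec()) to the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair. A sketch of the same pattern with a hypothetical counter, assuming the generic barriers order memory accesses around non-value-returning atomics just as the old variants did:

#include <linux/atomic.h>

static atomic_t example_disable_count = ATOMIC_INIT(0);

static inline void example_disable(void)
{
	atomic_inc(&example_disable_count);
	/* Order the increment before any later loads/stores. */
	smp_mb__after_atomic();
}

static inline void example_enable(void)
{
	/* Order all earlier accesses before the decrement. */
	smp_mb__before_atomic();
	atomic_dec(&example_disable_count);
}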