Diffstat (limited to 'include/linux/smp.h')
-rw-r--r--  include/linux/smp.h | 52
1 file changed, 51 insertions(+), 1 deletion(-)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 8cc38d3bab0..717fb746c9a 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -61,7 +61,7 @@ extern void smp_prepare_cpus(unsigned int max_cpus);
/*
* Bring a CPU up
*/
-extern int __cpu_up(unsigned int cpunum);
+extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);
/*
* Final polishing of CPUs
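
The new second argument hands the architecture the pre-allocated idle task for the CPU being started, so arch code no longer has to create that thread itself. Below is a minimal arch-side sketch, assuming hypothetical helpers (secondary_idle_task, my_arch_boot_secondary) that are not part of this patch:

#include <linux/smp.h>
#include <linux/sched.h>

static struct task_struct *secondary_idle_task[NR_CPUS];	/* hypothetical per-arch table */

int __cpu_up(unsigned int cpunum, struct task_struct *tidle)
{
	/* Record the idle task supplied by the generic hotplug code ... */
	secondary_idle_task[cpunum] = tidle;

	/* ... then start the secondary CPU via a hypothetical boot helper. */
	return my_arch_boot_secondary(cpunum);
}
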
@@ -81,6 +81,8 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data,
int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait);
+void kick_all_cpus_sync(void);
+
/*
* Generic and arch helpers
*/
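
kick_all_cpus_sync() is declared here and implemented by the generic SMP code; it sends an IPI to every other online CPU and waits for each to handle it, so when it returns every CPU has passed a synchronization point. A hedged usage sketch, where all my_* names are hypothetical and only show the calling pattern:

#include <linux/smp.h>

static void my_retire_old_mapping(void)	/* hypothetical caller */
{
	my_unmap_region();		/* hypothetical: make the change visible */

	/*
	 * Wait until every online CPU has taken an IPI; after this point no
	 * CPU can still be inside a code path entered before the unmap.
	 */
	kick_all_cpus_sync();

	my_free_region_backing();	/* hypothetical: now safe to reclaim */
}
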
@@ -102,6 +104,22 @@ static inline void call_function_init(void) { }
int on_each_cpu(smp_call_func_t func, void *info, int wait);
/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+ void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags);
+
+/*
* Mark the boot cpu "online" so that it can call console drivers in
* printk() and can access its per-cpu storage.
*/
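
A hedged usage sketch of the two new helpers, assuming a hypothetical per-CPU dirty flag and my_* callbacks; only the on_each_cpu_mask()/on_each_cpu_cond() signatures come from this patch. With wait set, both calls return only after func has finished on every selected CPU; gfp_flags lets the SMP implementation allocate a temporary cpumask for the CPUs whose cond_func returned true.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

static DEFINE_PER_CPU(bool, my_cache_dirty);	/* hypothetical per-CPU state */

static void my_flush_local_cache(void *info)	/* runs on each selected CPU */
{
	this_cpu_write(my_cache_dirty, false);
}

static bool my_cpu_needs_flush(int cpu, void *info)	/* hypothetical predicate */
{
	return per_cpu(my_cache_dirty, cpu);
}

static void my_flush(const struct cpumask *cpus)
{
	/* Run the callback on an explicit set of CPUs (may include this one). */
	on_each_cpu_mask(cpus, my_flush_local_cache, NULL, true);

	/*
	 * Run the callback only where the predicate is true; GFP_ATOMIC lets
	 * the SMP implementation build its temporary cpumask atomically.
	 */
	on_each_cpu_cond(my_cpu_needs_flush, my_flush_local_cache, NULL,
			 true, GFP_ATOMIC);
}
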
@@ -132,6 +150,36 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
local_irq_enable(); \
0; \
})
+/*
+ * Note we still need to test the mask even for UP
+ * because we actually can get an empty mask from
+ * code that on SMP might call us without the local
+ * CPU in the mask.
+ */
+#define on_each_cpu_mask(mask, func, info, wait) \
+ do { \
+ if (cpumask_test_cpu(0, (mask))) { \
+ local_irq_disable(); \
+ (func)(info); \
+ local_irq_enable(); \
+ } \
+ } while (0)
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+#define on_each_cpu_cond(cond_func, func, info, wait, gfp_flags)\
+ do { \
+ void *__info = (info); \
+ preempt_disable(); \
+ if ((cond_func)(0, __info)) { \
+ local_irq_disable(); \
+ (func)(__info); \
+ local_irq_enable(); \
+ } \
+ preempt_enable(); \
+ } while (0)
+
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
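
The mask check in the UP version of on_each_cpu_mask() matters because SMP-oriented callers can legitimately pass a mask that excludes (or is empty of) the only CPU; a hedged sketch with hypothetical my_* names:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

static void my_callback(void *info) { }		/* hypothetical no-op callback */

static void my_example(void)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	/*
	 * The mask is empty, so even on UP the cpumask_test_cpu(0, mask)
	 * check above turns this into a no-op instead of unconditionally
	 * calling my_callback() on the local CPU.
	 */
	on_each_cpu_mask(mask, my_callback, NULL, true);

	free_cpumask_var(mask);
}
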
@@ -146,6 +194,8 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
return smp_call_function_single(0, func, info, wait);
}
+static inline void kick_all_cpus_sync(void) { }
+
#endif /* !SMP */
/*