#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE
/* "Bogolock": stop the entire machine, disable interrupts.  This is a
   very heavy lock, which is equivalent to grabbing every spinlock
   (and more).  So the "read" side to such a lock is anything which
   disables preempt. */
#include <linux/cpu.h>
#include <asm/system.h>

#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)

#define ALL_CPUS ~0U

/**
 * stop_machine_run: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpu: if @cpu == n, run @fn() on cpu n
 *       if @cpu == NR_CPUS, run @fn() on any cpu
 *       if @cpu == ALL_CPUS, run @fn() first on the calling cpu, and then
 *       concurrently on all the other cpus
 *
 * Description: This causes a thread to be scheduled on every other cpu,
 * each of which disables interrupts, and finally interrupts are disabled
 * on the current CPU.  The result is that no one is holding a spinlock
 * or inside any other preempt-disabled region when @fn() runs.
 *
 * This can be thought of as a very heavy write lock, equivalent to
 * grabbing every spinlock in the kernel.
 */
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);

/**
 * __stop_machine_run: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS)
 *
 * Description: This is a special version of the above, which returns the
 * thread which has run @fn(): kthread_stop will return the return value
 * of @fn().  Used by hotplug cpu.
 */
struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu);

#else

static inline int stop_machine_run(int (*fn)(void *), void *data,
				   unsigned int cpu)
{
	int ret;

	local_irq_disable();
	ret = fn(data);
	local_irq_enable();
	return ret;
}

#endif /* CONFIG_SMP */
#endif /* _LINUX_STOP_MACHINE */
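
/*
 * Example usage (an illustrative sketch only, not part of this header):
 * the callback name, struct layout and variable names below are
 * hypothetical.  A caller typically packages its state in a struct,
 * passes it via @data, and lets stop_machine_run() pick any online cpu
 * by passing NR_CPUS:
 *
 *	struct my_patch {
 *		void *addr;
 *		const void *insn;
 *		size_t len;
 *	};
 *
 *	static int apply_patch(void *data)
 *	{
 *		struct my_patch *p = data;
 *
 *		memcpy(p->addr, p->insn, p->len);
 *		return 0;
 *	}
 *
 *	err = stop_machine_run(apply_patch, &patch, NR_CPUS);
 *
 * While apply_patch() runs, every other cpu is spinning with interrupts
 * disabled, so nothing can be executing inside a spinlock or any other
 * preempt-disabled region.  The __stop_machine_run() variant instead
 * hands back the task_struct of the thread that ran @fn(); the caller
 * later collects @fn()'s return value with kthread_stop(), as the
 * cpu-hotplug code does.
 */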