#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE

/* "Bogolock": stop the entire machine, disable interrupts.  This is a
   very heavy lock, which is equivalent to grabbing every spinlock
   (and more).  So the "read" side to such a lock is anything which
   disables preempt. */
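/*
 * Illustrative sketch (not part of the original header): any region that
 * disables preemption acts as the "read" side of this lock.  A hypothetical
 * reader that must not race with a stop_machine callback needs nothing more
 * than:
 *
 *	preempt_disable();
 *	examine_shared_state();
 *	preempt_enable();
 *
 * where examine_shared_state() stands in for whatever the reader does with
 * the data that the callback rewrites.
 */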
#include <linux/cpu.h>
#include <asm/system.h>

#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)

#define ALL_CPUS ~0U

/**
 * stop_machine_run: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpu: if @cpu == n, run @fn() on cpu n
 *       if @cpu == NR_CPUS, run @fn() on any cpu
 *       if @cpu == ALL_CPUS, run @fn() first on the calling cpu, and then
 *       concurrently on all the other cpus
 *
 * Description: This causes a thread to be scheduled on every other cpu,
 * each of which disables interrupts, and finally interrupts are disabled
 * on the current CPU.  The result is that no one is holding a spinlock
 * or inside any other preempt-disabled region when @fn() runs.
 *
 * This can be thought of as a very heavy write lock, equivalent to
 * grabbing every spinlock in the kernel. */
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);
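
/*
 * Usage sketch (illustrative, not part of the original header; my_update()
 * and my_flag are hypothetical).  The callback runs while every other cpu
 * spins with interrupts disabled, so it may safely rewrite state that is
 * otherwise only touched from preempt-disabled regions:
 *
 *	static int my_update(void *data)
 *	{
 *		*(int *)data = 1;
 *		return 0;
 *	}
 *
 *	int err = stop_machine_run(my_update, &my_flag, NR_CPUS);
 *
 * Passing ALL_CPUS instead of NR_CPUS would run my_update() first on the
 * calling cpu and then concurrently on all the others.
 */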

/**
 * __stop_machine_run: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn
 * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS).
 *
 * Description: This is a special version of the above, which returns the
 * thread which has run @fn(): kthread_stop will return the return value
 * of @fn().  Used by hotplug cpu.
 */
struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu);
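
/*
 * Usage sketch (illustrative, not part of the original header; my_takedown()
 * and the cpu variable are hypothetical).  The caller reaps the returned
 * thread with kthread_stop(), which yields the callback's return value
 * (see <linux/kthread.h> and <linux/err.h>):
 *
 *	struct task_struct *p;
 *
 *	p = __stop_machine_run(my_takedown, NULL, cpu);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	return kthread_stop(p);
 */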

#else

static inline int stop_machine_run(int (*fn)(void *), void *data,
				   unsigned int cpu)
{
	/* Fallback when stop_machine is not available (e.g. UP): there are
	   no other cpus to stop, so just run @fn() with interrupts disabled
	   on this cpu. */
	int ret;

	local_irq_disable();
	ret = fn(data);
	local_irq_enable();
	return ret;
}
#endif /* CONFIG_SMP */
#endif /* _LINUX_STOP_MACHINE */