[POWERPC] Add smp_call_function_map and smp_call_function_single

Add a new function named smp_call_function_single().  This matches a generic
prototype from include/linux/smp.h.

Add a function smp_call_function_map().  This is, for the most part, a rename
of smp_call_function, with some added cpumask support.  smp_call_function and
smp_call_function_single call into smp_call_function_map.

Lightly tested on 970mp (blade), power4 and power5.

Signed-off-by: Will Schmidt <will_schmidt@vnet.ibm.com>
cc: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
will schmidt 2007-05-03 03:12:34 +10:00 committed by Paul Mackerras
Parent e9e77ce871
Commit 44755d11a3
1 changed file with 52 additions and 21 deletions

@@ -176,10 +176,10 @@ static struct call_data_struct {
 #define SMP_CALL_TIMEOUT 8
 
 /*
- * This function sends a 'generic call function' IPI to all other CPUs
- * in the system.
+ * These functions send a 'generic call function' IPI to other online
+ * CPUS in the system.
  *
- * [SUMMARY] Run a function on all other CPUs.
+ * [SUMMARY] Run a function on other CPUs.
  * <func> The function to run. This must be fast and non-blocking.
  * <info> An arbitrary pointer to pass to the function.
  * <nonatomic> currently unused.
@@ -190,18 +190,26 @@ static struct call_data_struct {
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-			int wait)
+int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
+			int wait, cpumask_t map)
 {
 	struct call_data_struct data;
-	int ret = -1, cpus;
+	int ret = -1, num_cpus;
+	int cpu;
 	u64 timeout;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
 
+	/* remove 'self' from the map */
+	if (cpu_isset(smp_processor_id(), map))
+		cpu_clear(smp_processor_id(), map);
+
+	/* sanity check the map, remove any non-online processors. */
+	cpus_and(map, map, cpu_online_map);
+
 	if (unlikely(smp_ops == NULL))
-		return -1;
+		return ret;
 
 	data.func = func;
 	data.info = info;
@@ -213,40 +221,42 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	spin_lock(&call_lock);
 	/* Must grab online cpu count with preempt disabled, otherwise
 	 * it can change. */
-	cpus = num_online_cpus() - 1;
-	if (!cpus) {
+	num_cpus = num_online_cpus() - 1;
+	if (!num_cpus || cpus_empty(map)) {
 		ret = 0;
 		goto out;
 	}
 
 	call_data = &data;
 	smp_wmb();
-	/* Send a message to all other CPUs and wait for them to respond */
-	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
+	/* Send a message to all CPUs in the map */
+	for_each_cpu_mask(cpu, map)
+		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 
 	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
 
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus) {
+	/* Wait for indication that they have received the message */
+	while (atomic_read(&data.started) != num_cpus) {
 		HMT_low();
 		if (get_tb() >= timeout) {
 			printk("smp_call_function on cpu %d: other cpus not "
 			       "responding (%d)\n", smp_processor_id(),
 			       atomic_read(&data.started));
 			debugger(NULL);
 			goto out;
 		}
 	}
 
+	/* optionally wait for the CPUs to complete */
 	if (wait) {
-		while (atomic_read(&data.finished) != cpus) {
+		while (atomic_read(&data.finished) != num_cpus) {
 			HMT_low();
 			if (get_tb() >= timeout) {
 				printk("smp_call_function on cpu %d: other "
 				       "cpus not finishing (%d/%d)\n",
 				       smp_processor_id(),
 				       atomic_read(&data.finished),
 				       atomic_read(&data.started));
 				debugger(NULL);
 				goto out;
 			}
@@ -262,8 +272,29 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	return ret;
 }
 
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+			int wait)
+{
+	return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map);
+}
 EXPORT_SYMBOL(smp_call_function);
 
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int nonatomic,
+			int wait)
+{
+	cpumask_t map=CPU_MASK_NONE;
+
+	if (!cpu_online(cpu))
+		return -EINVAL;
+
+	if (cpu == smp_processor_id())
+		return -EBUSY;
+
+	cpu_set(cpu, map);
+	return smp_call_function_map(func,info,nonatomic,wait,map);
+}
+EXPORT_SYMBOL(smp_call_function_single);
+
 void smp_call_function_interrupt(void)
 {
 	void (*func) (void *info);