From 6e2756376c706e4da3454a272947983f92e80a7e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 25 Feb 2009 13:59:48 +0100
Subject: [PATCH] generic-ipi: remove CSD_FLAG_WAIT

Oleg noticed that we don't strictly need CSD_FLAG_WAIT; rework the code
so that we can use CSD_FLAG_LOCK for both purposes.

Signed-off-by: Peter Zijlstra
Cc: Oleg Nesterov
Cc: Linus Torvalds
Cc: Nick Piggin
Cc: Jens Axboe
Cc: "Paul E. McKenney"
Cc: Rusty Russell
Signed-off-by: Ingo Molnar
---
 block/blk-softirq.c |  2 +-
 include/linux/smp.h |  3 +-
 kernel/sched.c      |  2 +-
 kernel/smp.c        | 92 ++++++++++++---------------------------------
 kernel/softirq.c    |  2 +-
 5 files changed, 29 insertions(+), 72 deletions(-)

diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index ce0efc6b26dc..ee9c21602228 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -64,7 +64,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
 		data->info = rq;
 		data->flags = 0;
 
-		__smp_call_function_single(cpu, data);
+		__smp_call_function_single(cpu, data, 0);
 		return 0;
 	}
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 715196b09d67..00866d7fdf34 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
 	return 0;
 }
 
-void __smp_call_function_single(int cpuid, struct call_single_data *data);
+void __smp_call_function_single(int cpuid, struct call_single_data *data,
+				int wait);
 
 /*
  * Generic and arch helpers
diff --git a/kernel/sched.c b/kernel/sched.c
index 410eec404133..d4c2749a2998 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1093,7 +1093,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
 	if (rq == this_rq()) {
 		hrtimer_restart(timer);
 	} else if (!rq->hrtick_csd_pending) {
-		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
+		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
 		rq->hrtick_csd_pending = 1;
 	}
 }
diff --git a/kernel/smp.c b/kernel/smp.c
index 7a0ce25829dc..f5308258891a 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -23,8 +23,7 @@ static struct {
 };
 
 enum {
-	CSD_FLAG_WAIT	= 0x01,
-	CSD_FLAG_LOCK	= 0x02,
+	CSD_FLAG_LOCK	= 0x01,
 };
 
 struct call_function_data {
@@ -94,31 +93,6 @@ static int __cpuinit init_call_single_data(void)
 }
 early_initcall(init_call_single_data);
 
-/*
- * csd_wait/csd_complete are used for synchronous ipi calls
- */
-static void csd_wait_prepare(struct call_single_data *data)
-{
-	data->flags |= CSD_FLAG_WAIT;
-}
-
-static void csd_complete(struct call_single_data *data)
-{
-	if (data->flags & CSD_FLAG_WAIT) {
-		/*
-		 * ensure we're all done before saying we are
-		 */
-		smp_mb();
-		data->flags &= ~CSD_FLAG_WAIT;
-	}
-}
-
-static void csd_wait(struct call_single_data *data)
-{
-	while (data->flags & CSD_FLAG_WAIT)
-		cpu_relax();
-}
-
 /*
  * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
  *
@@ -126,10 +100,15 @@ static void csd_wait(struct call_single_data *data)
  * function call. For multi-cpu calls its even more interesting as we'll have
  * to ensure no other cpu is observing our csd.
  */
-static void csd_lock(struct call_single_data *data)
+static void csd_lock_wait(struct call_single_data *data)
 {
 	while (data->flags & CSD_FLAG_LOCK)
 		cpu_relax();
+}
+
+static void csd_lock(struct call_single_data *data)
+{
+	csd_lock_wait(data);
 	data->flags = CSD_FLAG_LOCK;
 
 	/*
@@ -155,11 +134,12 @@ static void csd_unlock(struct call_single_data *data)
  * Insert a previously allocated call_single_data element for execution
  * on the given CPU. data must already have ->func, ->info, and ->flags set.
  */
-static void generic_exec_single(int cpu, struct call_single_data *data)
+static
+void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 {
 	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-	int wait = data->flags & CSD_FLAG_WAIT, ipi;
 	unsigned long flags;
+	int ipi;
 
 	spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
@@ -182,7 +162,7 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
-		csd_wait(data);
+		csd_lock_wait(data);
 }
 
 /*
@@ -232,7 +212,6 @@ void generic_smp_call_function_interrupt(void)
 		if (refs)
 			continue;
 
-		csd_complete(&data->csd);
 		csd_unlock(&data->csd);
 	}
 
@@ -270,9 +249,6 @@ void generic_smp_call_function_single_interrupt(void)
 
 		data->func(data->info);
 
-		if (data_flags & CSD_FLAG_WAIT)
-			csd_complete(data);
-
 		/*
 		 * Unlocked CSDs are valid through generic_exec_single()
 		 */
@@ -313,36 +289,16 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		func(info);
 		local_irq_restore(flags);
 	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-		struct call_single_data *data;
+		struct call_single_data *data = &d;
 
-		if (!wait) {
-			/*
-			 * We are calling a function on a single CPU
-			 * and we are not going to wait for it to finish.
-			 * We use a per cpu data to pass the information to
-			 * that CPU. Since all callers of this code will
-			 * use the same data, we must synchronize the
-			 * callers to prevent a new caller from corrupting
-			 * the data before the callee can access it.
-			 *
-			 * The CSD_FLAG_LOCK is used to let us know when
-			 * the IPI handler is done with the data.
-			 * The first caller will set it, and the callee
-			 * will clear it. The next caller must wait for
-			 * it to clear before we set it again. This
-			 * will make sure the callee is done with the
-			 * data before a new caller will use it.
-			 */
+		if (!wait)
 			data = &__get_cpu_var(csd_data);
-			csd_lock(data);
-		} else {
-			data = &d;
-			csd_wait_prepare(data);
-		}
+
+		csd_lock(data);
 
 		data->func = func;
 		data->info = info;
-		generic_exec_single(cpu, data);
+		generic_exec_single(cpu, data, wait);
 	} else {
 		err = -ENXIO;	/* CPU not online */
 	}
@@ -362,12 +318,15 @@ EXPORT_SYMBOL(smp_call_function_single);
  * instance.
  *
  */
-void __smp_call_function_single(int cpu, struct call_single_data *data)
+void __smp_call_function_single(int cpu, struct call_single_data *data,
+				int wait)
 {
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());
+	csd_lock(data);
 
-	generic_exec_single(cpu, data);
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(wait && irqs_disabled());
+
+	generic_exec_single(cpu, data, wait);
 }
 
 /* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
@@ -425,9 +384,6 @@ void smp_call_function_many(const struct cpumask *mask,
 	csd_lock(&data->csd);
 
 	spin_lock_irqsave(&data->lock, flags);
-	if (wait)
-		csd_wait_prepare(&data->csd);
-
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
@@ -456,7 +412,7 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	/* optionally wait for the CPUs to complete */
 	if (wait)
-		csd_wait(&data->csd);
+		csd_lock_wait(&data->csd);
 }
 EXPORT_SYMBOL(smp_call_function_many);
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index bdbe9de9cd8d..48c3d5d627a8 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -496,7 +496,7 @@ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softir
 		cp->flags = 0;
 		cp->priv = softirq;
 
-		__smp_call_function_single(cpu, cp);
+		__smp_call_function_single(cpu, cp, 0);
 		return 0;
 	}
 	return 1;
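
For readers following the locking protocol, below is a minimal userspace
sketch of the scheme this patch converges on. It is an illustration, not
kernel code: a pthread stands in for the remote CPU, a one-slot atomic
mailbox stands in for call_single_queue, C11 atomics stand in for the
kernel's smp_mb() ordering, and exec_single() is a hypothetical analogue
of generic_exec_single(). The point it demonstrates: the caller takes
CSD_FLAG_LOCK before filling the csd, and the handler clears it only
after running ->func, so a synchronous caller simply spins in
csd_lock_wait(). Once "unlocked" also means "completed", a separate
CSD_FLAG_WAIT is redundant.

/*
 * Illustrative userspace model of the CSD_FLAG_LOCK protocol.
 * Build with: cc -pthread csd-sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define CSD_FLAG_LOCK	0x01

struct call_single_data {
	void (*func)(void *info);
	void *info;
	atomic_uint flags;
};

/* One-slot "call_single_queue", polled instead of IPI-driven. */
static _Atomic(struct call_single_data *) mailbox;
static atomic_int stop;

/* Spin until the current owner (the "IPI handler") drops the lock. */
static void csd_lock_wait(struct call_single_data *data)
{
	while (atomic_load_explicit(&data->flags, memory_order_acquire) &
	       CSD_FLAG_LOCK)
		;	/* cpu_relax() in the kernel */
}

/* Serialize reuse: wait for any previous user, then take ownership. */
static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	atomic_store_explicit(&data->flags, CSD_FLAG_LOCK,
			      memory_order_relaxed);
}

/* The "remote CPU": runs queued functions, then unlocks the csd. */
static void *remote_cpu(void *unused)
{
	(void)unused;
	while (!atomic_load(&stop)) {
		struct call_single_data *data =
			atomic_exchange_explicit(&mailbox, NULL,
						 memory_order_acquire);
		if (!data)
			continue;
		data->func(data->info);
		/*
		 * csd_unlock(): a single release store both frees the csd
		 * for reuse and doubles as the completion signal that
		 * CSD_FLAG_WAIT used to provide.
		 */
		atomic_fetch_and_explicit(&data->flags, ~CSD_FLAG_LOCK,
					  memory_order_release);
	}
	return NULL;
}

/* Rough analogue of generic_exec_single(cpu, data, wait). */
static void exec_single(struct call_single_data *data, int wait)
{
	struct call_single_data *expected = NULL;

	/* Publish; the release pairs with the worker's acquire above. */
	while (!atomic_compare_exchange_weak_explicit(&mailbox, &expected,
			data, memory_order_release, memory_order_relaxed))
		expected = NULL;

	if (wait)	/* synchronous: "unlocked" now means "done" */
		csd_lock_wait(data);
}

static void hello(void *info)
{
	printf("remote call: %s\n", (char *)info);
}

int main(void)
{
	static struct call_single_data csd;	/* flags start at 0 */
	pthread_t tid;

	pthread_create(&tid, NULL, remote_cpu, NULL);

	csd_lock(&csd);		/* as in smp_call_function_single() */
	csd.func = hello;
	csd.info = (void *)"synchronous";
	exec_single(&csd, 1);	/* returns once the flag is clear */

	atomic_store(&stop, 1);
	pthread_join(tid, NULL);
	return 0;
}

A second csd_lock() on the same csd would likewise spin until the
handler's unlock; that is the reuse-serialization half of the protocol,
carried by the very same bit that synchronous callers wait on.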