smp: Remove allocation mask from on_each_cpu_cond.*()
The allocation mask is no longer used by on_each_cpu_cond() and
on_each_cpu_cond_mask() and can be removed.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200117090137.1205765-4-bigeasy@linutronix.de
Parent: 67719ef25e
Commit: cb923159bb
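For illustration only (not part of this patch): a minimal caller sketch showing the on_each_cpu_cond() signature after the gfp_t argument is dropped. The module and the example_cond()/example_func() names are hypothetical; only on_each_cpu_cond() and its new argument list come from the patch below.

/* Hypothetical example module, not part of this commit: demonstrates
 * calling on_each_cpu_cond() once the allocation mask is gone. */
#include <linux/module.h>
#include <linux/smp.h>

/* Condition callback: run the function only on even-numbered CPUs. */
static bool example_cond(int cpu, void *info)
{
	return (cpu % 2) == 0;
}

/* Called on every CPU for which example_cond() returned true. */
static void example_func(void *info)
{
	pr_info("running on CPU %d\n", smp_processor_id());
}

static int __init example_init(void)
{
	/* Before this patch: on_each_cpu_cond(example_cond, example_func, NULL, 1, GFP_KERNEL); */
	on_each_cpu_cond(example_cond, example_func, NULL, 1);
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");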
arch/x86/mm/tlb.c
@@ -708,7 +708,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 			       (void *)info, 1);
 	else
 		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
-				(void *)info, 1, GFP_ATOMIC, cpumask);
+				(void *)info, 1, cpumask);
 }
 
 /*
fs/buffer.c
@@ -1433,7 +1433,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
 
 void invalidate_bh_lrus(void)
 {
-	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
+	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
include/linux/smp.h
@@ -51,11 +51,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * processor.
  */
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags);
+		      void *info, bool wait);
 
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask);
+			   void *info, bool wait, const struct cpumask *mask);
 
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
kernel/smp.c
@@ -679,11 +679,6 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * @info:	An arbitrary pointer to pass to both functions.
  * @wait:	If true, wait (atomically) until function has
  *		completed on other CPUs.
- * @gfp_flags:	GFP flags to use when allocating the cpumask
- *		used internally by the function.
- *
- * The function might sleep if the GFP flags indicates a non
- * atomic allocation is allowed.
  *
  * Preemption is disabled to protect against CPUs going offline but not online.
  * CPUs going online during the call will not be seen or sent an IPI.
@@ -692,8 +687,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * from a hardware interrupt handler or from a bottom half handler.
  */
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask)
+			   void *info, bool wait, const struct cpumask *mask)
 {
 	int cpu = get_cpu();
 
@@ -710,10 +704,9 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags)
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
-			      cpu_online_mask);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 
kernel/up.c
@@ -69,8 +69,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * same condtions in UP and SMP.
  */
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
-			   void *info, bool wait, gfp_t gfp_flags,
-			   const struct cpumask *mask)
+			   void *info, bool wait, const struct cpumask *mask)
 {
 	unsigned long flags;
 
@@ -85,9 +84,9 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
-		      void *info, bool wait, gfp_t gfp_flags)
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 
mm/slub.c
@@ -2341,7 +2341,7 @@ static bool has_cpu_slab(int cpu, void *info)
 
 static void flush_all(struct kmem_cache *s)
 {
-	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
 }
 
 /*