workqueue: Convert to state machine callbacks
Get rid of the prio ordering of the separate notifiers and use a proper
state callback pair.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153335.197083890@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: c6a84daa34
Commit: 7ee681b252
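The conversion follows the standard pattern of the hotplug rework: a single
notifier that demultiplexes hotplug action codes in a switch becomes one plain
function per hotplug state. A minimal sketch of the two shapes, where the
foo_* names are hypothetical placeholders and not part of this commit:

/* Hedged sketch only; foo_* are hypothetical hooks, not from this commit. */
static int foo_prepare(unsigned int cpu);	/* hypothetical */
static void foo_online_work(unsigned int cpu);	/* hypothetical */

/* Old style: one notifier demultiplexes every hotplug event. */
static int foo_cpu_callback(struct notifier_block *nfb,
			    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		return foo_prepare(cpu) ? NOTIFY_BAD : NOTIFY_OK;
	case CPU_ONLINE:
		foo_online_work(cpu);
		break;
	}
	return NOTIFY_OK;
}

/* New style: one plain callback per state, ordinary error codes. */
static int foo_prepare_cpu(unsigned int cpu)
{
	return foo_prepare(cpu);	/* 0 on success, -errno on failure */
}

The state machine supplies the CPU number directly and understands ordinary
error codes, so the hcpu cast, the action switch and the NOTIFY_* translation
all disappear, as the hunks below show for the workqueue callbacks.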
include/linux/cpu.h
@@ -55,15 +55,6 @@ extern ssize_t arch_cpu_release(const char *, size_t);
 #endif
 struct notifier_block;
 
-/*
- * CPU notifier priorities.
- */
-enum {
-	/* bring up workqueues before normal notifiers and down after */
-	CPU_PRI_WORKQUEUE_UP	= 5,
-	CPU_PRI_WORKQUEUE_DOWN	= -5,
-};
-
 #define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
 #define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
 #define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
include/linux/cpuhotplug.h
@@ -12,6 +12,7 @@ enum cpuhp_state {
 	CPUHP_PERF_BFIN,
 	CPUHP_PERF_POWER,
 	CPUHP_PERF_SUPERH,
+	CPUHP_WORKQUEUE_PREP,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
@@ -49,6 +50,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_S390_SF_ONLINE,
 	CPUHP_AP_PERF_ARM_CCI_ONLINE,
 	CPUHP_AP_PERF_ARM_CCN_ONLINE,
+	CPUHP_AP_WORKQUEUE_ONLINE,
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
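With states, ordering no longer needs priorities: the hotplug core invokes
states in ascending enum order on bringup and in descending order on teardown.
Placing CPUHP_WORKQUEUE_PREP just before CPUHP_NOTIFY_PREPARE, and
CPUHP_AP_WORKQUEUE_ONLINE just before CPUHP_AP_NOTIFY_ONLINE, therefore
reproduces what CPU_PRI_WORKQUEUE_UP = 5 and CPU_PRI_WORKQUEUE_DOWN = -5 used
to buy. A simplified sketch of the bringup walk, not the exact kernel loop:

/* Simplified: walk the BP state table up to the target state. */
static int bringup_to(unsigned int cpu, enum cpuhp_state target)
{
	enum cpuhp_state st;
	int ret;

	for (st = 0; st <= target; st++) {
		if (!cpuhp_bp_states[st].startup)
			continue;
		ret = cpuhp_bp_states[st].startup(cpu);
		if (ret)
			return ret;	/* caller unwinds completed states */
	}
	return 0;
}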
include/linux/workqueue.h
@@ -625,4 +625,10 @@ void wq_watchdog_touch(int cpu);
 static inline void wq_watchdog_touch(int cpu) { }
 #endif	/* CONFIG_WQ_WATCHDOG */
 
+#ifdef CONFIG_SMP
+int workqueue_prepare_cpu(unsigned int cpu);
+int workqueue_online_cpu(unsigned int cpu);
+int workqueue_offline_cpu(unsigned int cpu);
+#endif
+
 #endif
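The new entry points use the uniform state callback prototype,
int (*)(unsigned int cpu), returning 0 or -errno. This commit wires them
statically into the cpuhp state tables (kernel/cpu.c below); for comparison,
a hypothetical driver doing the same conversion at runtime would use the
dynamic registration API that already existed at this point, roughly:

#include <linux/cpuhotplug.h>

static int foo_online(unsigned int cpu)  { return 0; }	/* hypothetical */
static int foo_offline(unsigned int cpu) { return 0; }	/* hypothetical */

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_online, foo_offline);
	/* dynamic states return the allocated slot number on success */
	return ret < 0 ? ret : 0;
}

Core subsystems like the workqueue code get fixed enum slots instead, so
their ordering relative to other states is explicit in cpuhotplug.h.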
kernel/cpu.c
@@ -1185,6 +1185,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.startup		= perf_event_init_cpu,
 		.teardown		= perf_event_exit_cpu,
 	},
+	[CPUHP_WORKQUEUE_PREP] = {
+		.name			= "workqueue prepare",
+		.startup		= workqueue_prepare_cpu,
+		.teardown		= NULL,
+	},
 	/*
 	 * Preparatory and dead notifiers. Will be replaced once the notifiers
 	 * are converted to states.
@@ -1267,6 +1272,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 		.startup		= perf_event_init_cpu,
 		.teardown		= perf_event_exit_cpu,
 	},
+	[CPUHP_AP_WORKQUEUE_ONLINE] = {
+		.name			= "workqueue online",
+		.startup		= workqueue_online_cpu,
+		.teardown		= workqueue_offline_cpu,
+	},
 
 	/*
 	 * Online/down_prepare notifiers. Will be removed once the notifiers
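Failure handling also becomes structured: when a startup callback fails, the
hotplug core tears the already-completed states down again in reverse order,
instead of relying on NOTIFY_BAD semantics. A simplified rollback sketch,
again not the exact kernel code:

/* Simplified: undo every state completed before the failing one. */
static void rollback(unsigned int cpu, enum cpuhp_state failed)
{
	enum cpuhp_state st;

	for (st = failed - 1; st > 0; st--) {
		if (cpuhp_bp_states[st].teardown)
			cpuhp_bp_states[st].teardown(cpu);
	}
}

Note the .teardown = NULL for CPUHP_WORKQUEUE_PREP: workers created for a CPU
that then fails to come up are kept and reused on the next bringup attempt,
matching the old notifier, which had no CPU_UP_CANCELED handling either.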
kernel/workqueue.c
@@ -4611,84 +4611,65 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 						  pool->attrs->cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
+{
+	struct worker_pool *pool;
+
+	for_each_cpu_worker_pool(pool, cpu) {
+		if (pool->nr_workers)
+			continue;
+		if (!create_worker(pool))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct worker_pool *pool;
 	struct workqueue_struct *wq;
 	int pi;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for_each_cpu_worker_pool(pool, cpu) {
-			if (pool->nr_workers)
-				continue;
-			if (!create_worker(pool))
-				return NOTIFY_BAD;
-		}
-		break;
-
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		mutex_lock(&wq_pool_mutex);
+	mutex_lock(&wq_pool_mutex);
 
-		for_each_pool(pool, pi) {
-			mutex_lock(&pool->attach_mutex);
+	for_each_pool(pool, pi) {
+		mutex_lock(&pool->attach_mutex);
 
-			if (pool->cpu == cpu)
-				rebind_workers(pool);
-			else if (pool->cpu < 0)
-				restore_unbound_workers_cpumask(pool, cpu);
+		if (pool->cpu == cpu)
+			rebind_workers(pool);
+		else if (pool->cpu < 0)
+			restore_unbound_workers_cpumask(pool, cpu);
 
-			mutex_unlock(&pool->attach_mutex);
-		}
+		mutex_unlock(&pool->attach_mutex);
+	}
 
-		/* update NUMA affinity of unbound workqueues */
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, true);
+	/* update NUMA affinity of unbound workqueues */
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, true);
 
-		mutex_unlock(&wq_pool_mutex);
-		break;
-	}
-	return NOTIFY_OK;
+	mutex_unlock(&wq_pool_mutex);
+	return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-						 unsigned long action,
-						 void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding per-cpu workers should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-		queue_work_on(cpu, system_highpri_wq, &unbind_work);
-
-		/* update NUMA affinity of unbound workqueues */
-		mutex_lock(&wq_pool_mutex);
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, false);
-		mutex_unlock(&wq_pool_mutex);
-
-		/* wait for per-cpu unbinding to finish */
-		flush_work(&unbind_work);
-		destroy_work_on_stack(&unbind_work);
-		break;
-	}
-	return NOTIFY_OK;
+	/* unbinding per-cpu workers should happen on the local CPU */
+	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
+	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+
+	/* update NUMA affinity of unbound workqueues */
+	mutex_lock(&wq_pool_mutex);
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, false);
+	mutex_unlock(&wq_pool_mutex);
+
+	/* wait for per-cpu unbinding to finish */
+	flush_work(&unbind_work);
+	destroy_work_on_stack(&unbind_work);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
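The function bodies are otherwise unchanged; what disappears is the switch on
the action code, the (unsigned long)hcpu cast and the NOTIFY_OK/NOTIFY_BAD
translation. One behavioural nicety: workqueue_prepare_cpu() now returns
-ENOMEM directly, so a failed worker creation propagates as an ordinary error
code to whoever initiated the hotplug operation. An illustrative caller, with
the CPU number purely an example:

/* Illustrative only: an -ENOMEM from workqueue_prepare_cpu() now
 * surfaces as the return value of the bringup operation itself. */
static int example_bringup(unsigned int cpu)
{
	int ret = cpu_up(cpu);

	if (ret)
		pr_err("CPU%u bringup failed: %d\n", cpu, ret);
	return ret;
}

The final hunk, still in kernel/workqueue.c, drops the now-unneeded runtime
registration from init_workqueues().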
@@ -5490,9 +5471,6 @@ static int __init init_workqueues(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
 	wq_numa_init();
 
 	/* initialize CPU pools */
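With the states compiled into the cpuhp tables, init_workqueues() no longer
registers anything at runtime. For reference, the removed helpers registered
priority-ordered notifier blocks along these lines (paraphrased sketch, not
the exact macro expansion of cpu_notifier()):

/* Sketch of what the removed cpu_notifier() call amounted to. */
static struct notifier_block wq_up_nb = {
	.notifier_call	= workqueue_cpu_up_callback,
	.priority	= CPU_PRI_WORKQUEUE_UP,	/* +5: run early on bringup */
};

static int __init wq_register_notifiers(void)	/* illustrative */
{
	return register_cpu_notifier(&wq_up_nb);
}

The explicit priorities, and the registration boilerplate they required, are
exactly what the positional enum ordering in cpuhotplug.h replaces.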