Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: use zalloc_cpumask_var() for gcwq->mayday_mask
  workqueue: fix GCWQ_DISASSOCIATED initialization
  workqueue: Add a workqueue chapter to the tracepoint docbook
  workqueue: fix cwq->nr_active underflow
  workqueue: improve destroy_workqueue() debuggability
  workqueue: mark lock acquisition on worker_maybe_bind_and_lock()
  workqueue: annotate lock context change
  workqueue: free rescuer on destroy_workqueue
commit cd4d4fc413

--- a/Documentation/DocBook/tracepoint.tmpl
+++ b/Documentation/DocBook/tracepoint.tmpl
@@ -104,4 +104,9 @@
      <title>Block IO</title>
 !Iinclude/trace/events/block.h
   </chapter>
+
+  <chapter id="workqueue">
+     <title>Workqueue</title>
+!Iinclude/trace/events/workqueue.h
+  </chapter>
 </book>

--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work);
 
 enum {
         WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
-        WORK_STRUCT_CWQ_BIT     = 1,    /* data points to cwq */
-        WORK_STRUCT_LINKED_BIT  = 2,    /* next work is linked to this one */
+        WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
+        WORK_STRUCT_CWQ_BIT     = 2,    /* data points to cwq */
+        WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
-        WORK_STRUCT_STATIC_BIT  = 3,    /* static initializer (debugobjects) */
-        WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
+        WORK_STRUCT_STATIC_BIT  = 4,    /* static initializer (debugobjects) */
+        WORK_STRUCT_COLOR_SHIFT = 5,    /* color for workqueue flushing */
 #else
-        WORK_STRUCT_COLOR_SHIFT = 3,    /* color for workqueue flushing */
+        WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
 #endif
 
         WORK_STRUCT_COLOR_BITS  = 4,
 
         WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
+        WORK_STRUCT_DELAYED     = 1 << WORK_STRUCT_DELAYED_BIT,
         WORK_STRUCT_CWQ         = 1 << WORK_STRUCT_CWQ_BIT,
         WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
@@ -59,8 +61,8 @@ enum {
 
         /*
          * Reserve 7 bits off of cwq pointer w/ debugobjects turned
-         * off. This makes cwqs aligned to 128 bytes which isn't too
-         * excessive while allowing 15 workqueue flush colors.
+         * off. This makes cwqs aligned to 256 bytes and allows 15
+         * workqueue flush colors.
          */
         WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                   WORK_STRUCT_COLOR_BITS,
@@ -241,6 +243,8 @@ enum {
         WQ_HIGHPRI              = 1 << 4, /* high priority */
         WQ_CPU_INTENSIVE        = 1 << 5, /* cpu instensive workqueue */
 
+        WQ_DYING                = 1 << 6, /* internal: workqueue is dying */
+
         WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
         WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
         WQ_DFL_ACTIVE           = WQ_MAX_ACTIVE / 2,
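
For reference, the workqueue.h hunks above renumber the work_struct flag bits to make room for WORK_STRUCT_DELAYED_BIT. The following is a minimal standalone sketch of the resulting arithmetic (enum values copied from the patch, !CONFIG_DEBUG_OBJECTS_WORK case; the surrounding program, main() and printf() output are illustrative only, not kernel code), showing why the rewritten comment says 256-byte alignment and 15 flush colors:

#include <stdio.h>

/* Flag-bit layout after the patch, !CONFIG_DEBUG_OBJECTS_WORK case. */
enum {
        WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
        WORK_STRUCT_DELAYED_BIT = 1,    /* work item is delayed */
        WORK_STRUCT_CWQ_BIT     = 2,    /* data points to cwq */
        WORK_STRUCT_LINKED_BIT  = 3,    /* next work is linked to this one */
        WORK_STRUCT_COLOR_SHIFT = 4,    /* color for workqueue flushing */
        WORK_STRUCT_COLOR_BITS  = 4,

        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,
};

int main(void)
{
        /* The low FLAG_BITS bits of work->data hold flags and color, so the
         * cwq pointer stored above them must be aligned to 1 << FLAG_BITS. */
        printf("cwq alignment: %d bytes\n", 1 << WORK_STRUCT_FLAG_BITS);

        /* One color value is reserved (WORK_NO_COLOR in the kernel), which
         * leaves 2^COLOR_BITS - 1 usable flush colors. */
        printf("flush colors:  %d\n", (1 << WORK_STRUCT_COLOR_BITS) - 1);
        return 0;
}

Compiled with any C compiler this prints 256 and 15, matching the comment updated in the second hunk above.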

--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -90,7 +90,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected. Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -198,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
         cpumask_test_and_set_cpu((cpu), (mask))
 #define mayday_clear_cpu(cpu, mask)     cpumask_clear_cpu((cpu), (mask))
 #define for_each_mayday_cpu(cpu, mask)  for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)   alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp)   zalloc_cpumask_var((maskp), (gfp))
 #define free_mayday_mask(mask)          free_cpumask_var((mask))
 #else
 typedef unsigned long mayday_mask_t;
@@ -943,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
         struct global_cwq *gcwq;
         struct cpu_workqueue_struct *cwq;
         struct list_head *worklist;
+        unsigned int work_flags;
         unsigned long flags;
 
         debug_work_activate(work);
 
+        if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+                return;
+
         /* determine gcwq to use */
         if (!(wq->flags & WQ_UNBOUND)) {
                 struct global_cwq *last_gcwq;
@@ -989,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
         BUG_ON(!list_empty(&work->entry));
 
         cwq->nr_in_flight[cwq->work_color]++;
+        work_flags = work_color_to_flags(cwq->work_color);
 
         if (likely(cwq->nr_active < cwq->max_active)) {
                 cwq->nr_active++;
                 worklist = gcwq_determine_ins_pos(gcwq, cwq);
-        } else
+        } else {
+                work_flags |= WORK_STRUCT_DELAYED;
                 worklist = &cwq->delayed_works;
+        }
 
-        insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+        insert_work(cwq, work, worklist, work_flags);
 
         spin_unlock_irqrestore(&gcwq->lock, flags);
 }
@@ -1215,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
         struct global_cwq *gcwq = worker->gcwq;
         struct task_struct *task = worker->task;
@@ -1488,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
         if (!need_to_create_worker(gcwq))
                 return false;
@@ -1662,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
         struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
         move_linked_works(work, pos, NULL);
+        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
         cwq->nr_active++;
 }
 
@@ -1669,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+                                 bool delayed)
 {
         /* ignore uncolored works */
         if (color == WORK_NO_COLOR)
                 return;
 
         cwq->nr_in_flight[color]--;
-        cwq->nr_active--;
 
-        if (!list_empty(&cwq->delayed_works)) {
-                /* one down, submit a delayed one */
-                if (cwq->nr_active < cwq->max_active)
-                        cwq_activate_first_delayed(cwq);
+        if (!delayed) {
+                cwq->nr_active--;
+                if (!list_empty(&cwq->delayed_works)) {
+                        /* one down, submit a delayed one */
+                        if (cwq->nr_active < cwq->max_active)
+                                cwq_activate_first_delayed(cwq);
+                }
         }
 
         /* is flush in progress and are we at the flushing tip? */
@@ -1725,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
         struct cpu_workqueue_struct *cwq = get_work_cwq(work);
         struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
         hlist_del_init(&worker->hentry);
         worker->current_work = NULL;
         worker->current_cwq = NULL;
-        cwq_dec_nr_in_flight(cwq, work_color);
+        cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -2388,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
                         debug_work_deactivate(work);
                         list_del_init(&work->entry);
                         cwq_dec_nr_in_flight(get_work_cwq(work),
-                                get_work_color(work));
+                                get_work_color(work),
+                                *work_data_bits(work) & WORK_STRUCT_DELAYED);
                         ret = 1;
                 }
         }
@@ -2791,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                 if (IS_ERR(rescuer->task))
                         goto err;
 
-                wq->rescuer = rescuer;
                 rescuer->task->flags |= PF_THREAD_BOUND;
                 wake_up_process(rescuer->task);
         }
@@ -2833,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
         unsigned int cpu;
 
+        wq->flags |= WQ_DYING;
         flush_workqueue(wq);
 
         /*
@@ -2857,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
         if (wq->flags & WQ_RESCUER) {
                 kthread_stop(wq->rescuer->task);
                 free_mayday_mask(wq->mayday_mask);
+                kfree(wq->rescuer);
         }
 
         free_cwqs(wq);
@@ -3239,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
  * multiple times. To be used by cpu_callback.
  */
 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
         if (!(gcwq->trustee_state == state ||
               gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3567,7 @@ static int __init init_workqueues(void)
                 spin_lock_init(&gcwq->lock);
                 INIT_LIST_HEAD(&gcwq->worklist);
                 gcwq->cpu = cpu;
-                if (cpu == WORK_CPU_UNBOUND)
-                        gcwq->flags |= GCWQ_DISASSOCIATED;
+                gcwq->flags |= GCWQ_DISASSOCIATED;
 
                 INIT_LIST_HEAD(&gcwq->idle_list);
                 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3591,8 @@ static int __init init_workqueues(void)
                 struct global_cwq *gcwq = get_gcwq(cpu);
                 struct worker *worker;
 
+                if (cpu != WORK_CPU_UNBOUND)
+                        gcwq->flags &= ~GCWQ_DISASSOCIATED;
                 worker = create_worker(gcwq, true);
                 BUG_ON(!worker);
                 spin_lock_irq(&gcwq->lock);