flush_cpu_workqueue: don't flush an empty ->worklist

Now that we have ->current_work we can avoid adding a barrier and waiting
for its completion when the cwq's queue is empty.

Note: this change is also useful if we change flush_workqueue() to also
check the dead CPUs.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Gautham Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Oleg Nesterov, 2007-05-09 02:33:54 -07:00, committed by Linus Torvalds
Parent: edab2516a6
Commit: 83c22520c5
1 changed file: 17 additions and 8 deletions

View file

@@ -404,12 +404,15 @@ static void wq_barrier_func(struct work_struct *work)
 	complete(&barr->done);
 }
 
-static inline void init_wq_barrier(struct wq_barrier *barr)
+static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
+					struct wq_barrier *barr, int tail)
 {
 	INIT_WORK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
 	init_completion(&barr->done);
+
+	insert_work(cwq, &barr->work, tail);
 }
 
 static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -428,15 +431,22 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 		preempt_disable();
 	} else {
 		struct wq_barrier barr;
+		int active = 0;
 
-		init_wq_barrier(&barr);
-		__queue_work(cwq, &barr.work);
+		spin_lock_irq(&cwq->lock);
+		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+			insert_wq_barrier(cwq, &barr, 1);
+			active = 1;
+		}
+		spin_unlock_irq(&cwq->lock);
 
-		preempt_enable();	/* Can no longer touch *cwq */
-		wait_for_completion(&barr.done);
-		preempt_disable();
+		if (active) {
+			preempt_enable();
+			wait_for_completion(&barr.done);
+			preempt_disable();
+		}
 	}
 }
 
 /**
  * flush_workqueue - ensure that any scheduled work has run to completion.
@@ -475,8 +485,7 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,
 
 	spin_lock_irq(&cwq->lock);
 	if (unlikely(cwq->current_work == work)) {
-		init_wq_barrier(&barr);
-		insert_work(cwq, &barr.work, 0);
+		insert_wq_barrier(cwq, &barr, 0);
 		running = 1;
 	}
 	spin_unlock_irq(&cwq->lock);