workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
cancel_delayed_work() can't be called from IRQ handlers due to its use
of del_timer_sync() and can't cancel work items which are already
transferred from timer to worklist.

Also, unlike other flush and cancel functions, a canceled delayed_work
would still point to the last associated cpu_workqueue. If the
workqueue is destroyed afterwards and the work item is re-used on a
different workqueue, the queueing code can oops trying to dereference
already freed cpu_workqueue.

This patch reimplements cancel_delayed_work() using
try_to_grab_pending() and set_work_cpu_and_clear_pending(). This
allows the function to be called from IRQ handlers and makes its
behavior consistent with other flush / cancel functions.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Parent: e7c2f96744
Commit: 57b30ae77b
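Before the diff, a minimal driver sketch of the call pattern this patch makes legal: canceling a pending delayed work directly from an IRQ handler, which the old del_timer_sync()-based implementation forbade. All names here (my_dev, timeout_work, my_irq_handler) are hypothetical, not part of the patch.

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	struct delayed_work timeout_work;	/* INIT_DELAYED_WORK() at probe */
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	/*
	 * The hardware answered, so kill the pending timeout work.
	 * With this patch the call is IRQ-safe and also cancels work
	 * already transferred from the timer to the worklist.
	 */
	cancel_delayed_work(&dev->timeout_work);
	return IRQ_HANDLED;
}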
include/linux/workqueue.h

@@ -420,6 +420,7 @@ extern bool flush_work(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool cancel_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
@@ -428,22 +429,6 @@ extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
 extern unsigned int work_cpu(struct work_struct *work);
 extern unsigned int work_busy(struct work_struct *work);
 
-/*
- * Kill off a pending schedule_delayed_work(). Note that the work callback
- * function may still be running on return from cancel_delayed_work(), unless
- * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
- * cancel_work_sync() to wait on it.
- */
-static inline bool cancel_delayed_work(struct delayed_work *work)
-{
-	bool ret;
-
-	ret = del_timer_sync(&work->timer);
-	if (ret)
-		work_clear_pending(&work->work);
-	return ret;
-}
-
 /*
  * Like above, but uses del_timer() instead of del_timer_sync(). This means,
  * if it returns 0 the timer function may be running and the queueing is in
kernel/workqueue.c

@@ -2948,6 +2948,36 @@ bool flush_delayed_work(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(flush_delayed_work);
 
+/**
+ * cancel_delayed_work - cancel a delayed work
+ * @dwork: delayed_work to cancel
+ *
+ * Kill off a pending delayed_work. Returns %true if @dwork was pending
+ * and canceled; %false if wasn't pending. Note that the work callback
+ * function may still be running on return, unless it returns %true and the
+ * work doesn't re-arm itself. Explicitly flush or use
+ * cancel_delayed_work_sync() to wait on it.
+ *
+ * This function is safe to call from any context including IRQ handler.
+ */
+bool cancel_delayed_work(struct delayed_work *dwork)
+{
+	unsigned long flags;
+	int ret;
+
+	do {
+		ret = try_to_grab_pending(&dwork->work, true, &flags);
+	} while (unlikely(ret == -EAGAIN));
+
+	if (unlikely(ret < 0))
+		return false;
+
+	set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
+	local_irq_restore(flags);
+	return true;
+}
+EXPORT_SYMBOL(cancel_delayed_work);
+
 /**
  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
  * @dwork: the delayed work cancel
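The docstring above warns that the work callback may still be running on return, and that a work item which re-arms itself can slip past a plain cancel_delayed_work(). A minimal sketch of that caveat, with illustrative names (poll_work, poll_fn, my_teardown) that are not part of the patch:

#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... poll the hardware ... */
	schedule_delayed_work(&poll_work, HZ);	/* re-arms itself */
}

static void my_teardown(void)
{
	/*
	 * A plain cancel_delayed_work() can return while poll_fn() is
	 * still running and about to re-arm; cancel_delayed_work_sync()
	 * both cancels and waits, so nothing is left queued afterwards.
	 */
	cancel_delayed_work_sync(&poll_work);
}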