irq/work: Use llist_for_each_entry_safe
The llist_for_each_entry() loop in irq_work_run_list() is unsafe because
once the works PENDING bit is cleared it can be requeued on another CPU.
Use llist_for_each_entry_safe() instead.
Fixes: 16c0890dc6 ("irq/work: Don't reinvent the wheel but use existing llist API")
Reported-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Frederic Weisbecker <frederic@kernel.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Petri Latvala <petri.latvala@intel.com>
Link: http://lkml.kernel.org/r/151027307351.14762.4611888896020658384@mail.alporthouse.com
This commit is contained in:
Parent
9dc505d6df
Commit
d00a08cf9e
|
@ -128,9 +128,9 @@ bool irq_work_needs_cpu(void)
|
|||
|
||||
static void irq_work_run_list(struct llist_head *list)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_work *work;
|
||||
struct irq_work *work, *tmp;
|
||||
struct llist_node *llnode;
|
||||
unsigned long flags;
|
||||
|
||||
BUG_ON(!irqs_disabled());
|
||||
|
||||
|
@ -138,7 +138,7 @@ static void irq_work_run_list(struct llist_head *list)
|
|||
return;
|
||||
|
||||
llnode = llist_del_all(list);
|
||||
llist_for_each_entry(work, llnode, llnode) {
|
||||
llist_for_each_entry_safe(work, tmp, llnode, llnode) {
|
||||
/*
|
||||
* Clear the PENDING bit, after this point the @work
|
||||
* can be re-used.
|
||||
|
|
Loading…
Link in new issue