sched/core: Fix update_rq_clock() splat on hotplug (and suspend/resume)

The hotplug code still triggers the warning about using a stale
rq->clock value.

Fix things up to actually run update_rq_clock() in a place where we
record the 'UPDATED' flag, and then modify the annotation to retain
this flag over the rq->lock fiddling that happens as a result of
actually migrating all the tasks elsewhere.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Tested-by: Mike Galbraith <efault@gmx.de>
Tested-by: Sachin Sant <sachinp@linux.vnet.ibm.com>
Tested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ross Zwisler <zwisler@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 4d25b35ea3 ("sched/fair: Restore previous rq_flags when migrating tasks in hotplug")
Link: http://lkml.kernel.org/r/20170202155506.GX6515@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author:    Peter Zijlstra, 2017-02-02 16:55:06 +01:00
Committer: Ingo Molnar
Parent:    2bfe01eff4
Commit:    8cb68b343a
1 changed file with 4 additions and 10 deletions


@@ -5557,7 +5557,7 @@ static void migrate_tasks(struct rq *dead_rq)
 {
 	struct rq *rq = dead_rq;
 	struct task_struct *next, *stop = rq->stop;
-	struct rq_flags rf, old_rf;
+	struct rq_flags rf;
 	int dest_cpu;
 
 	/*
@@ -5576,7 +5576,9 @@ static void migrate_tasks(struct rq *dead_rq)
 	 * class method both need to have an up-to-date
 	 * value of rq->clock[_task]
 	 */
+	rq_pin_lock(rq, &rf);
 	update_rq_clock(rq);
+	rq_unpin_lock(rq, &rf);
 
 	for (;;) {
 		/*
@@ -5589,7 +5591,7 @@ static void migrate_tasks(struct rq *dead_rq)
 		/*
 		 * pick_next_task() assumes pinned rq->lock:
 		 */
-		rq_pin_lock(rq, &rf);
+		rq_repin_lock(rq, &rf);
 		next = pick_next_task(rq, &fake_task, &rf);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
@@ -5618,13 +5620,6 @@ static void migrate_tasks(struct rq *dead_rq)
 			continue;
 		}
 
-		/*
-		 * __migrate_task() may return with a different
-		 * rq->lock held and a new cookie in 'rf', but we need
-		 * to preserve rf::clock_update_flags for 'dead_rq'.
-		 */
-		old_rf = rf;
-
 		/* Find suitable destination for @next, with force if needed. */
 		dest_cpu = select_fallback_rq(dead_rq->cpu, next);
 
@@ -5633,7 +5628,6 @@ static void migrate_tasks(struct rq *dead_rq)
 			raw_spin_unlock(&rq->lock);
 			rq = dead_rq;
 			raw_spin_lock(&rq->lock);
-			rf = old_rf;
 		}
 		raw_spin_unlock(&next->pi_lock);
 	}
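
For reference, here is a minimal userspace model of the clock-flag bookkeeping the patch relies on. The struct layouts, constants and helper bodies below are reduced sketches, not the kernel's definitions (the real helpers live in kernel/sched/sched.h, are active under CONFIG_SCHED_DEBUG, and also manage lockdep pin cookies); the model only illustrates why wrapping update_rq_clock() in rq_pin_lock()/rq_unpin_lock() records RQCF_UPDATED in the rq_flags cookie, so that rq_repin_lock() can restore it after rq->lock has been dropped and retaken in the migration loop.

/*
 * Simplified model of the rq_pin_lock()/rq_unpin_lock()/rq_repin_lock()
 * clock-flag handling. Helper bodies are reduced sketches of the
 * CONFIG_SCHED_DEBUG versions, with the lockdep cookie handling omitted.
 */
#include <assert.h>
#include <stdio.h>

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02
#define RQCF_UPDATED	0x04

struct rq { unsigned int clock_update_flags; };
struct rq_flags { unsigned int clock_update_flags; };

static void update_rq_clock(struct rq *rq)
{
	rq->clock_update_flags |= RQCF_UPDATED;	/* clock is now current */
}

static void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
	/* A fresh pin starts with no record of a clock update. */
	rq->clock_update_flags &= (RQCF_REQ_SKIP | RQCF_ACT_SKIP);
	rf->clock_update_flags = 0;
}

static void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
{
	/* Stash "clock was updated" in the cookie before dropping the pin. */
	if (rq->clock_update_flags > RQCF_ACT_SKIP)
		rf->clock_update_flags = RQCF_UPDATED;
}

static void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
	/* Restore what the earlier pin context had established. */
	rq->clock_update_flags |= rf->clock_update_flags;
}

static void assert_clock_updated(struct rq *rq)
{
	/* Rough stand-in for the SCHED_WARN_ON() behind the splat. */
	assert(rq->clock_update_flags & RQCF_UPDATED);
}

int main(void)
{
	struct rq dead_rq = { 0 }, *rq = &dead_rq;
	struct rq_flags rf;

	/* The ordering the patch establishes in migrate_tasks(): */
	rq_pin_lock(rq, &rf);
	update_rq_clock(rq);
	rq_unpin_lock(rq, &rf);		/* RQCF_UPDATED saved into rf */

	/* ... rq->lock dropped and retaken while tasks are migrated ... */

	rq_repin_lock(rq, &rf);		/* RQCF_UPDATED restored to rq */
	assert_clock_updated(rq);	/* the check that used to fire */

	printf("clock_update_flags = %#x\n", rq->clock_update_flags);
	return 0;
}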