sched: fix load average accounting vs. cpu hotplug
The new load average code clears rq->calc_load_active on CPU_ONLINE. That is
wrong, because the newly onlined CPU might already have taken a scheduler tick
and accounted the delta against the stale value left over from when the CPU
was offlined. Clear the value when we clean up the dead CPU instead.

Also move the update of the calc_load_update time for the newly onlined CPU
to CPU_UP_PREPARE, so the CPU does not have to play catch-up with the stale
update time value.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent: 54fdc58166
Commit: a468d38934
@@ -7289,6 +7289,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 static void calc_global_load_remove(struct rq *rq)
 {
 	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+	rq->calc_load_active = 0;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -7515,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		task_rq_unlock(rq, &flags);
 		get_task_struct(p);
 		cpu_rq(cpu)->migration_thread = p;
+		rq->calc_load_update = calc_load_update;
 		break;
 
 	case CPU_ONLINE:
@@ -7525,8 +7527,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
-		rq->calc_load_update = calc_load_update;
-		rq->calc_load_active = 0;
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
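For illustration only, here is a minimal user-space sketch of the accounting
scheme the patch adjusts; it is not the kernel implementation. The names
calc_load_tasks, calc_load_active and calc_load_update mirror the fields
touched by the diff above, while everything else (the LOAD_FREQ stand-in of
5, the cpu_up_prepare() helper, the main() driver) is invented scaffolding
for the sketch.

/*
 * Simplified sketch of per-CPU load-average folding; assumed
 * scaffolding, not the actual kernel code.
 */
#include <stdio.h>

static long calc_load_tasks;            /* global task count for loadavg */
static unsigned long calc_load_update;  /* global next sample time */

struct rq {
        long calc_load_active;          /* tasks this CPU last reported */
        unsigned long calc_load_update; /* this CPU's next sample time */
        long nr_running;
};

/* Per-tick folding of this CPU's delta into the global count. */
static void calc_load_account_active(struct rq *rq, unsigned long now)
{
        long delta;

        if (now < rq->calc_load_update)
                return;

        delta = rq->nr_running - rq->calc_load_active;
        rq->calc_load_active = rq->nr_running;
        calc_load_tasks += delta;
        rq->calc_load_update += 5;      /* LOAD_FREQ stand-in */
}

/* Dead-CPU cleanup: fold the remainder out and clear the stale value. */
static void calc_global_load_remove(struct rq *rq)
{
        calc_load_tasks -= rq->calc_load_active;
        rq->calc_load_active = 0;       /* cleared here, not on CPU_ONLINE */
}

/* CPU_UP_PREPARE: resync the per-CPU sample time before the first tick. */
static void cpu_up_prepare(struct rq *rq)
{
        rq->calc_load_update = calc_load_update;
}

int main(void)
{
        struct rq cpu1 = { .nr_running = 3 };

        calc_load_update = 5;
        cpu_up_prepare(&cpu1);                  /* CPU comes up */

        calc_load_account_active(&cpu1, 5);     /* ticks while online */
        printf("global tasks after tick:    %ld\n", calc_load_tasks);

        calc_global_load_remove(&cpu1);         /* CPU goes offline */
        printf("global tasks after offline: %ld\n", calc_load_tasks);

        cpu_up_prepare(&cpu1);                  /* and can come back clean */
        return 0;
}

The sketch prints 3 and then 0. The ordering mirrors the patch: the per-CPU
state is sanitized before the CPU can take a tick (sample time at
CPU_UP_PREPARE, active count at dead-CPU cleanup) rather than in CPU_ONLINE,
which may run only after the first tick has already folded a delta against
stale values.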