sched: Fix rq->nr_uninterruptible update race
KOSAKI Motohiro noticed the following race:

 > CPU0                    CPU1
 > --------------------------------------------------------
 > deactivate_task()
 >                         task->state = TASK_UNINTERRUPTIBLE;
 > activate_task()
 >     rq->nr_uninterruptible--;
 >
 >                         schedule()
 >                             deactivate_task()
 >                                 rq->nr_uninterruptible++;

Kosaki-San's scenario is possible when CPU0 runs __sched_setscheduler()
against CPU1's current @task.

__sched_setscheduler() does a dequeue/enqueue in order to move the task
to its new queue (position) to reflect the newly provided scheduling
parameters. However, it should be completely invariant with respect to
nr_uninterruptible accounting: sched_setscheduler() doesn't affect
readiness to run, merely the policy on when to run.

So convert the inappropriate activate/deactivate_task usage to
enqueue/dequeue_task, which avoids the nr_uninterruptible accounting.

Also convert the two other sites that still use activate/deactivate_task:
__migrate_task() and normalize_task(). These sites aren't really a problem,
since __migrate_task() is only ever called on a non-running task (and is
therefore immune to the described problem) and normalize_task() isn't ever
used on regular systems.

Also remove the comments from activate/deactivate_task, since they're
misleading at best.

Reported-by: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1327486224.2614.45.camel@laptop
Signed-off-by: Ingo Molnar <mingo@elte.hu>
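The skew arises because CPU0's dequeue runs before the ->state store (it
sees TASK_RUNNING and skips the nr_uninterruptible increment) while its
enqueue runs after it (and so performs the decrement): the counter ends up
one short of the number of uninterruptible sleepers. The following is a
minimal, self-contained user-space C sketch of that interleaving; the
struct layouts and the task_contributes_to_load() check are deliberately
simplified stand-ins for the kernel's definitions, not the real ones.

/* Simplified user-space model of the race; the names mirror the kernel
 * but the definitions are illustrative stand-ins, not kernel code.    */
#include <stdio.h>

#define TASK_RUNNING		0
#define TASK_UNINTERRUPTIBLE	2

struct task { int state; };
struct rq { long nr_uninterruptible; };

static int task_contributes_to_load(struct task *p)
{
	return p->state == TASK_UNINTERRUPTIBLE;
}

/* enqueue/dequeue: pure queue manipulation, no load accounting */
static void enqueue_task(struct rq *rq, struct task *p) { (void)rq; (void)p; }
static void dequeue_task(struct rq *rq, struct task *p) { (void)rq; (void)p; }

/* activate/deactivate: additionally maintain nr_uninterruptible */
static void activate_task(struct rq *rq, struct task *p)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;
	enqueue_task(rq, p);
}

static void deactivate_task(struct rq *rq, struct task *p)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;
	dequeue_task(rq, p);
}

int main(void)
{
	struct rq rq = { 0 };
	struct task p = { TASK_RUNNING };

	/* CPU0: old __sched_setscheduler() starts moving the task;
	 * p.state is still TASK_RUNNING, so no increment happens here. */
	deactivate_task(&rq, &p);

	/* CPU1: the task marks itself uninterruptible (a plain store,
	 * done without holding the runqueue lock).                     */
	p.state = TASK_UNINTERRUPTIBLE;

	/* CPU0: re-queues the task; the state changed in between, so
	 * this is an unmatched decrement.                              */
	activate_task(&rq, &p);

	/* CPU1: schedule() now genuinely deactivates the sleeper.      */
	deactivate_task(&rq, &p);

	/* One task sleeps uninterruptibly, yet the counter reads 0.    */
	printf("nr_uninterruptible = %ld (expected 1)\n",
	       rq.nr_uninterruptible);
	return 0;
}

With the fixed path, CPU0 calls the raw dequeue_task()/enqueue_task()
pair instead, never touches nr_uninterruptible, and the sketch prints
the expected value of 1.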
Parent: 87f71ae2dd
Commit: 4ca9b72b71

@@ -723,9 +723,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	p->sched_class->dequeue_task(rq, p, flags);
 }
 
-/*
- * activate_task - move a task to the runqueue.
- */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -734,9 +731,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 	enqueue_task(rq, p, flags);
 }
 
-/*
- * deactivate_task - remove a task from the runqueue.
- */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
@@ -4134,7 +4128,7 @@ recheck:
 	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
 
@@ -4147,7 +4141,7 @@ recheck:
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq)
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 
 	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, p, &flags);
@@ -4998,9 +4992,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * placed properly.
 	 */
 	if (p->on_rq) {
-		deactivate_task(rq_src, p, 0);
+		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
-		activate_task(rq_dest, p, 0);
+		enqueue_task(rq_dest, p, 0);
 		check_preempt_curr(rq_dest, p, 0);
 	}
 done:
@@ -7032,10 +7026,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 
 	on_rq = p->on_rq;
 	if (on_rq)
-		deactivate_task(rq, p, 0);
+		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
 	if (on_rq) {
-		activate_task(rq, p, 0);
+		enqueue_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
 
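For reference, this is what the two helpers look like after the patch,
reconstructed from the context lines in the hunks above (a sketch of the
resulting code, not a verbatim copy of the kernel source): activate_task()
and deactivate_task() remain the load-accounting wrappers, while
enqueue_task()/dequeue_task() stay pure queue operations that the
converted call sites can use safely.

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}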