sched/fair: Fix reschedule which is generated on throttled cfs_rq
(sched_entity::on_rq == 1) does not guarantee the task is pickable;
changes on throttled cfs_rq must not lead to reschedule.

Check for task_struct::on_rq instead.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1407312361.8424.35.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: 8b06c55bdb
Commit: f36c019c79
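The distinction the patch relies on: sched_entity::on_rq records whether the entity is queued on its own cfs_rq, while task_struct::on_rq records whether the task is queued at the core-scheduler level, and per the message above only the latter is a safe guard for generating a reschedule once throttled cfs_rq's are involved. The sketch below is illustrative only and not part of the patch; the helper name fair_change_may_resched() is hypothetical.

#include <linux/sched.h>

/*
 * Hypothetical helper, illustration only: the guard the patch installs in
 * prio_changed_fair()/switched_to_fair(). It tests task_struct::on_rq
 * (core-scheduler view) rather than sched_entity::on_rq (per-cfs_rq view),
 * since the per-entity flag can read 1 for a task sitting on a throttled
 * cfs_rq even though that task cannot currently be picked.
 */
static inline bool fair_change_may_resched(struct task_struct *p)
{
	return p->on_rq;		/* post-patch check */
	/* pre-patch check was: return p->se.on_rq; */
}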
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7494,7 +7494,7 @@ static void task_fork_fair(struct task_struct *p)
 static void
 prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (!p->se.on_rq)
+	if (!p->on_rq)
 		return;
 
 	/*
@@ -7550,15 +7550,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
-	struct sched_entity *se = &p->se;
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_entity *se = &p->se;
 	/*
 	 * Since the real-depth could have been changed (only FAIR
 	 * class maintain depth value), reset depth properly.
 	 */
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
-	if (!se->on_rq)
+	if (!p->on_rq)
 		return;
 
 	/*