sched: Implement hierarchical task accounting for SCHED_OTHER
Introduce hierarchical task accounting for the group scheduling case in CFS, and promote the responsibility for maintaining rq->nr_running into the scheduling classes.

The primary motivation is that with scheduling classes supporting bandwidth throttling, it is possible for entities participating in throttled sub-trees to have no root-visible change in rq->nr_running across activate and de-activate operations. This in turn leads to incorrect idle and weight-per-task load-balance decisions.

This also allows a small fixlet to the fastpath in pick_next_task() under group scheduling.

Note: this issue also exists with the existing sched_rt throttling mechanism; this patch does not address that.

Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184756.878333391@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: 5710f15b52
Commit: 953bfcd10e
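To make the new accounting concrete, the following is a small userspace C model, not kernel code: the cfs_rq layout is reduced to the two counters, and the "throttled" flag is a hypothetical stand-in for the CFS bandwidth-throttling series this patch prepares for. It shows how per-cfs_rq nr_running counts only directly enqueued entities, while h_nr_running aggregates tasks across the whole subtree, and why a throttled subtree produces no root-visible change in the hierarchical count:

#include <stdio.h>
#include <stdbool.h>

struct cfs_rq {
	struct cfs_rq *parent;		/* NULL for the root cfs_rq */
	unsigned long nr_running;	/* directly enqueued entities only */
	unsigned long h_nr_running;	/* tasks anywhere in this subtree */
	bool throttled;			/* toy stand-in for bandwidth throttling */
};

/* Mirrors the shape of the enqueue_task_fair() walk: the task count is
 * propagated up the hierarchy, but stops at a throttled group. */
static void toy_enqueue_task(struct cfs_rq *cfs_rq)
{
	cfs_rq->nr_running++;		/* only the owning cfs_rq's entity count */
	for (; cfs_rq; cfs_rq = cfs_rq->parent) {
		cfs_rq->h_nr_running++;
		if (cfs_rq->throttled)	/* a throttled group is invisible above */
			break;
	}
}

int main(void)
{
	struct cfs_rq root = { 0 }, group = { .parent = &root };

	toy_enqueue_task(&root);	/* a task directly on the root cfs_rq */
	group.throttled = true;
	toy_enqueue_task(&group);	/* a task inside a throttled group */

	/* The root sees one runnable task although two exist in the tree. */
	printf("root:  nr_running=%lu h_nr_running=%lu\n",
	       root.nr_running, root.h_nr_running);
	printf("group: nr_running=%lu h_nr_running=%lu\n",
	       group.nr_running, group.h_nr_running);
	return 0;
}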
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -311,7 +311,7 @@ struct task_group root_task_group;
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight load;
-	unsigned long nr_running;
+	unsigned long nr_running, h_nr_running;
 
 	u64 exec_clock;
 	u64 min_vruntime;
@@ -1802,7 +1802,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int flags)
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, flags);
-	inc_nr_running(rq);
 }
 
 /*
@@ -1814,7 +1813,6 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, flags);
-	dec_nr_running(rq);
 }
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -4258,7 +4256,7 @@ pick_next_task(struct rq *rq)
 	 * Optimization: we know that if all tasks are in
 	 * the fair class we can call that function directly:
 	 */
-	if (likely(rq->nr_running == rq->cfs.nr_running)) {
+	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
 		p = fair_sched_class.pick_next_task(rq);
 		if (likely(p))
 			return p;
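The pick_next_task() fixlet above deserves a note: under group scheduling, rq->cfs.nr_running counts entities on the root cfs_rq (tasks and group entities alike), so comparing it against the rq-wide task count made the fastpath fail whenever fair tasks sat inside groups. A minimal sketch with hypothetical counts (two fair tasks, both in one group):

#include <stdio.h>

int main(void)
{
	/* Two SCHED_OTHER tasks runnable, both inside one task group: */
	unsigned long rq_nr_running    = 2;	/* tasks accounted at the rq */
	unsigned long cfs_nr_running   = 1;	/* root cfs_rq holds one group entity */
	unsigned long cfs_h_nr_running = 2;	/* hierarchical task count */

	/* Old test (rq->nr_running == rq->cfs.nr_running): false here, so
	 * the direct call into fair_sched_class was skipped needlessly. */
	printf("old fastpath taken: %d\n", rq_nr_running == cfs_nr_running);

	/* New test (rq->nr_running == rq->cfs.h_nr_running): true whenever
	 * every runnable task on this rq is a fair task. */
	printf("new fastpath taken: %d\n", rq_nr_running == cfs_h_nr_running);
	return 0;
}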
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1310,16 +1310,19 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 			break;
 		cfs_rq = cfs_rq_of(se);
 		enqueue_entity(cfs_rq, se, flags);
+		cfs_rq->h_nr_running++;
 		flags = ENQUEUE_WAKEUP;
 	}
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
+		cfs_rq->h_nr_running++;
 
 		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
 	}
 
+	inc_nr_running(rq);
 	hrtick_update(rq);
 }
 
@@ -1339,6 +1342,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
+		cfs_rq->h_nr_running--;
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
@@ -1358,11 +1362,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
+		cfs_rq->h_nr_running--;
 
 		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
 	}
 
+	dec_nr_running(rq);
 	hrtick_update(rq);
 }
 
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -936,6 +936,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
+
+	inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -946,6 +948,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
+
+	dec_nr_running(rq);
 }
 
 /*
@@ -1841,4 +1845,3 @@ static void print_rt_stats(struct seq_file *m, int cpu)
 	rcu_read_unlock();
 }
 #endif /* CONFIG_SCHED_DEBUG */
-
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -34,11 +34,13 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
 static void
 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
+	inc_nr_running(rq);
 }
 
 static void
 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
+	dec_nr_running(rq);
 }
 
 static void yield_task_stop(struct rq *rq)