sched/fair: Fix the wrong throttled clock time for cfs_rq_clock_task()
Two minor fixes for cfs_rq_clock_task():

 1) If the cfs_rq is currently being throttled, we need to subtract the
    accumulated cfs throttled clock time.

 2) Make the "throttled_clock_task_time" update independent of
    CONFIG_SMP; UP configurations need it as well.

Signed-off-by: Xunlei Pang <xlpang@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1462885398-14724-1-git-send-email-xlpang@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: 719af93ab7
Commit: 1a99ae3f00
@@ -3688,7 +3688,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 {
 	if (unlikely(cfs_rq->throttle_count))
-		return cfs_rq->throttled_clock_task;
+		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
 
 	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
 }
@@ -3826,13 +3826,11 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
 	cfs_rq->throttle_count--;
-#ifdef CONFIG_SMP
 	if (!cfs_rq->throttle_count) {
 		/* adjust cfs_rq_clock_task() */
 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
 					     cfs_rq->throttled_clock_task;
 	}
-#endif
 
 	return 0;
 }
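For illustration only, here is a minimal userspace sketch of the accounting after these two fixes. It is not kernel code: the struct and helper names (toy_cfs_rq, toy_clock_task, toy_throttle, toy_unthrottle) are made up to mirror the kernel fields, and rq_clock_task() is replaced by a plain parameter. It shows the intended behavior: the task clock freezes while the cfs_rq is throttled, and the throttled span is folded into throttled_clock_task_time on unthrottle, unconditionally rather than only under CONFIG_SMP.

/*
 * Toy model of cfs_rq "task clock" accounting (hypothetical names,
 * userspace only -- not kernel code).
 */
#include <stdio.h>

struct toy_cfs_rq {
	unsigned long long throttled_clock_task;      /* rq clock when throttling began */
	unsigned long long throttled_clock_task_time; /* total time spent throttled */
	int throttle_count;                           /* > 0 while throttled */
};

/* Mirrors the fixed cfs_rq_clock_task(): while throttled, report the
 * frozen clock; in both cases subtract the accumulated throttled time. */
static unsigned long long toy_clock_task(const struct toy_cfs_rq *cfs_rq,
					 unsigned long long rq_clock)
{
	if (cfs_rq->throttle_count)
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock - cfs_rq->throttled_clock_task_time;
}

static void toy_throttle(struct toy_cfs_rq *cfs_rq, unsigned long long rq_clock)
{
	cfs_rq->throttle_count++;
	cfs_rq->throttled_clock_task = rq_clock;
}

/* Mirrors tg_unthrottle_up() after the fix: the accumulated throttled
 * time is updated unconditionally, no longer only under CONFIG_SMP. */
static void toy_unthrottle(struct toy_cfs_rq *cfs_rq, unsigned long long rq_clock)
{
	cfs_rq->throttle_count--;
	if (!cfs_rq->throttle_count)
		cfs_rq->throttled_clock_task_time += rq_clock - cfs_rq->throttled_clock_task;
}

int main(void)
{
	struct toy_cfs_rq cfs_rq = { 0, 0, 0 };

	printf("t=100 running:   clock_task=%llu\n", toy_clock_task(&cfs_rq, 100));
	toy_throttle(&cfs_rq, 100);
	printf("t=150 throttled: clock_task=%llu\n", toy_clock_task(&cfs_rq, 150));
	toy_unthrottle(&cfs_rq, 150);
	printf("t=200 running:   clock_task=%llu\n", toy_clock_task(&cfs_rq, 200));
	return 0;
}

Running this sketch prints 100, 100 and 150: the clock does not advance during the 50 units of throttling, and after unthrottling it continues from where it left off with the throttled time excluded.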