sched/fair: Update task group's load_avg after task migration

When cfs_rq has cfs_rq->removed_load_avg set (when a task migrates from
this cfs_rq), we need to update its contribution to the group's load_avg.
This should not increase tg's update too much, because in most cases, the
cfs_rq has already decayed its load_avg.

Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1444699103-20272-2-git-send-email-yuyang.du@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: fde7d22e01
Commit: 3e386d56ba
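Why the return value matters: in this kernel the caller, update_load_avg(), only refreshes the task group's view of the cfs_rq when update_cfs_rq_load_avg() reports a change, so subtracting removed load without reporting it could leave the group's load_avg stale. Below is a minimal, self-contained sketch of that gating; the *_model names and simplified fields are hypothetical stand-ins that only mirror the shape of the logic, not the kernel's actual interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel's cfs_rq and group state. */
struct cfs_rq_model {
	long load_avg;		/* the cfs_rq's own load average */
	long removed_load_avg;	/* load parked here by tasks that migrated away */
	long tg_contrib;	/* what the task group currently sees from this cfs_rq */
};

/* Model of update_cfs_rq_load_avg(): fold removed load and report any change. */
static bool cfs_rq_load_changed(struct cfs_rq_model *cfs_rq)
{
	bool removed = false;

	if (cfs_rq->removed_load_avg) {
		long r = cfs_rq->removed_load_avg;

		cfs_rq->removed_load_avg = 0;
		cfs_rq->load_avg = cfs_rq->load_avg > r ? cfs_rq->load_avg - r : 0;
		removed = true;	/* before this patch, the change was not reported */
	}
	return removed;		/* the kernel code returns decayed || removed */
}

/* Model of the caller: the group contribution is refreshed only on a reported change. */
static void update_load_avg_model(struct cfs_rq_model *cfs_rq)
{
	if (cfs_rq_load_changed(cfs_rq))
		cfs_rq->tg_contrib = cfs_rq->load_avg;	/* stands in for update_tg_load_avg() */
}

int main(void)
{
	struct cfs_rq_model rq = { .load_avg = 1024, .removed_load_avg = 512, .tg_contrib = 1024 };

	update_load_avg_model(&rq);
	printf("load_avg=%ld tg_contrib=%ld\n", rq.load_avg, rq.tg_contrib);
	return 0;
}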
kernel/sched/fair.c

@@ -2664,13 +2664,14 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 {
-	int decayed;
 	struct sched_avg *sa = &cfs_rq->avg;
+	int decayed, removed = 0;
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
 		long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
 		sa->load_avg = max_t(long, sa->load_avg - r, 0);
 		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+		removed = 1;
 	}
 
 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
@@ -2688,7 +2689,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	return decayed;
+	return decayed || removed;
 }
 
 /* Update task and its cfs_rq load average */
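For completeness, the counter folded in this hunk is filled on the other side of a migration: when a task leaves a CPU, its load contribution is added to cfs_rq->removed_load_avg with an atomic operation, since the remote runqueue's lock is not held at that point, and the owning CPU later subtracts it as the hunk above shows. The sketch below models that producer/consumer pairing with C11 atomics; it is an illustrative approximation, and the *_model names are hypothetical rather than kernel symbols.

#include <stdatomic.h>
#include <stdio.h>

#define LOAD_AVG_MAX	47742	/* maximum load_sum value in this PELT generation */

struct cfs_rq_model {
	long load_avg;
	long long load_sum;
	atomic_long removed_load_avg;	/* written lockless by the migration path */
};

/* Producer: a task migrates away; park its load_avg for the owner CPU to subtract. */
static void remove_entity_load_model(struct cfs_rq_model *cfs_rq, long se_load_avg)
{
	atomic_fetch_add(&cfs_rq->removed_load_avg, se_load_avg);
}

/* Consumer: mirrors the hunk above; exchange the counter to zero, then subtract it. */
static int fold_removed_load_model(struct cfs_rq_model *cfs_rq)
{
	int removed = 0;

	if (atomic_load(&cfs_rq->removed_load_avg)) {
		long r = atomic_exchange(&cfs_rq->removed_load_avg, 0);

		cfs_rq->load_avg = cfs_rq->load_avg > r ? cfs_rq->load_avg - r : 0;
		cfs_rq->load_sum = cfs_rq->load_sum > (long long)r * LOAD_AVG_MAX ?
				   cfs_rq->load_sum - (long long)r * LOAD_AVG_MAX : 0;
		removed = 1;	/* report the change so the group contribution gets refreshed */
	}
	return removed;
}

int main(void)
{
	struct cfs_rq_model rq = { .load_avg = 2048, .load_sum = 2048LL * LOAD_AVG_MAX };

	atomic_init(&rq.removed_load_avg, 0);
	remove_entity_load_model(&rq, 1024);	/* a task with load_avg 1024 migrated away */
	printf("removed=%d load_avg=%ld\n", fold_removed_load_model(&rq), rq.load_avg);
	return 0;
}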