sched/pelt: Relax the sync of runnable_sum with runnable_avg
Similarly to util_avg and util_sum, don't sync runnable_sum with the low
bound of runnable_avg but only ensure that runnable_sum stays in the
correct range.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Sachin Sant <sachinp@linux.ibm.com>
Link: https://lkml.kernel.org/r/20220111134659.24961-4-vincent.guittot@linaro.org
Parent: 7ceb771030
Commit: 95246d1ec8
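To illustrate the idea outside the kernel: the sketch below is a simplified, self-contained model, not kernel code; the struct, the sub_positive() helper, and the PELT_MIN_DIVIDER value are stand-ins for illustration only. It contrasts the old behaviour, which hard-synced runnable_sum to the low bound runnable_avg * divider, with the new behaviour, which subtracts the removed contribution from the sum directly and only clamps it so it never falls below runnable_avg * PELT_MIN_DIVIDER.

/* Simplified model of the pattern applied by this patch; not kernel code. */
#include <stdio.h>

#define PELT_MIN_DIVIDER 1024          /* stand-in for the smallest PELT divider */

struct sched_avg {
        unsigned long runnable_avg;
        unsigned long runnable_sum;
};

/* Subtract without underflowing below zero, like the kernel's sub_positive(). */
static void sub_positive(unsigned long *v, unsigned long d)
{
        *v = (*v > d) ? *v - d : 0;
}

/* Old behaviour: after removing 'r', hard-sync the sum to avg's low bound. */
static void remove_runnable_old(struct sched_avg *sa, unsigned long r,
                                unsigned long divider)
{
        sub_positive(&sa->runnable_avg, r);
        sa->runnable_sum = sa->runnable_avg * divider;
}

/* New behaviour: remove the contribution from the sum too, then only clamp. */
static void remove_runnable_new(struct sched_avg *sa, unsigned long r,
                                unsigned long divider)
{
        sub_positive(&sa->runnable_avg, r);
        sub_positive(&sa->runnable_sum, r * divider);
        /* Keep the sum in range: never below avg * PELT_MIN_DIVIDER. */
        if (sa->runnable_sum < sa->runnable_avg * PELT_MIN_DIVIDER)
                sa->runnable_sum = sa->runnable_avg * PELT_MIN_DIVIDER;
}

int main(void)
{
        struct sched_avg a = { .runnable_avg = 300, .runnable_sum = 350000 };
        struct sched_avg b = a;
        unsigned long divider = 1100;   /* example current divider */

        remove_runnable_old(&a, 100, divider);
        remove_runnable_new(&b, 100, divider);

        printf("old: avg=%lu sum=%lu\n", a.runnable_avg, a.runnable_sum);
        printf("new: avg=%lu sum=%lu\n", b.runnable_avg, b.runnable_sum);
        return 0;
}

With these example values the old path collapses the sum to 220000 (avg * divider) while the new path keeps the accumulated 240000, only clamped from below, which is the relaxation described in the commit message.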
kernel/sched/fair.c:

@@ -3483,11 +3483,11 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
 static inline void
 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
 {
-	long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
-	u32 divider;
+	long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
+	u32 new_sum, divider;
 
 	/* Nothing to update */
-	if (!delta)
+	if (!delta_avg)
 		return;
 
 	/*
@@ -3498,11 +3498,16 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 
 	/* Set new sched_entity's runnable */
 	se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
-	se->avg.runnable_sum = se->avg.runnable_avg * divider;
+	new_sum = se->avg.runnable_avg * divider;
+	delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
+	se->avg.runnable_sum = new_sum;
 
 	/* Update parent cfs_rq runnable */
-	add_positive(&cfs_rq->avg.runnable_avg, delta);
-	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+	add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
+	add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
+	/* See update_cfs_rq_load_avg() */
+	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+					      cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
 }
 
 static inline void
@@ -3702,7 +3707,10 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 		r = removed_runnable;
 		sub_positive(&sa->runnable_avg, r);
-		sa->runnable_sum = sa->runnable_avg * divider;
+		sub_positive(&sa->runnable_sum, r * divider);
+		/* See sa->util_sum above */
+		sa->runnable_sum = max_t(u32, sa->runnable_sum,
+					      sa->runnable_avg * PELT_MIN_DIVIDER);
 
 		/*
 		 * removed_runnable is the unweighted version of removed_load so we
@@ -3789,12 +3797,6 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
  */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	/*
-	 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
-	 * See ___update_load_avg() for details.
-	 */
-	u32 divider = get_pelt_divider(&cfs_rq->avg);
-
 	dequeue_load_avg(cfs_rq, se);
 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
@@ -3803,7 +3805,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 			  cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
 
 	sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
-	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+	sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+	/* See update_cfs_rq_load_avg() */
+	cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+					  cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
 
 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
 