sched/fair: Remove redundant call to cpufreq_update_util()
Since commit bef69dd878 ("sched/cpufreq: Move the cfs_rq_util_change() call
to cpufreq_update_util()"), update_load_avg() has been the central point for
calling cpufreq (updates of blocked load aside). This makes it possible to
further reduce the number of calls to cpufreq_update_util() and to remove the
last redundant ones: with update_load_avg(), we are now sure that
cpufreq_update_util() is called after every task attachment to a cfs_rq, and
in particular after that event has been propagated down to the util_avg of
the root cfs_rq, which is the level used by cpufreq governors such as
schedutil to set the frequency of a CPU.
The SCHED_CPUFREQ_MIGRATION flag forces an early call to cpufreq when a
migration happens inside a cgroup, at a point where the util_avg of the root
cfs_rq has not yet been updated; that early call merely duplicates the one
made immediately afterwards, once the migration event reaches the root
cfs_rq. The dedicated SCHED_CPUFREQ_MIGRATION flag is therefore useless and
can be removed, and the interface of attach_entity_load_avg() can be
simplified accordingly.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lkml.kernel.org/r/1579083620-24943-1-git-send-email-vincent.guittot@linaro.org
Parent: 3d817689a6
Commit: a4f9a0e51b
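To make the retained flow concrete, below is a small standalone C sketch
(illustrative only: the types and helpers are simplified stand-ins, not the
kernel's). It models the behaviour the patch relies on: an attach is
propagated up the cfs_rq hierarchy by the update path, and the governor is
notified only once, from the root cfs_rq, after the root util_avg already
reflects the new task.

/*
 * Standalone sketch, not kernel code: all types and helpers below are
 * simplified stand-ins used to illustrate the call flow.
 */
#include <stdio.h>

struct cfs_rq {
	struct cfs_rq *parent;		/* NULL for the root cfs_rq */
	long util_avg;
};

/* Stand-in for cpufreq_update_util(): what the governor sees. */
static void cpufreq_update_util(struct cfs_rq *root)
{
	printf("governor reads root util_avg = %ld\n", root->util_avg);
}

/* Stand-in for cfs_rq_util_change(): only the root drives cpufreq. */
static void cfs_rq_util_change(struct cfs_rq *cfs_rq, struct cfs_rq *root)
{
	if (cfs_rq == root)
		cpufreq_update_util(root);
}

/*
 * Stand-in for the attach/propagation done by update_load_avg(): the
 * attached utilization climbs level by level; each level calls
 * cfs_rq_util_change(), but only the root-level call notifies the
 * governor, and by then root->util_avg already includes the task.
 */
static void attach_and_propagate(struct cfs_rq *cfs_rq,
				 struct cfs_rq *root, long util)
{
	for (; cfs_rq; cfs_rq = cfs_rq->parent) {
		cfs_rq->util_avg += util;
		cfs_rq_util_change(cfs_rq, root);
	}
}

int main(void)
{
	struct cfs_rq root  = { .parent = NULL,  .util_avg = 100 };
	struct cfs_rq group = { .parent = &root, .util_avg =  40 };

	/* A task with util 25 migrates into the cgroup's cfs_rq. */
	attach_and_propagate(&group, &root, 25);
	return 0;
}

Under the old SCHED_CPUFREQ_MIGRATION behaviour, the group-level step would
have notified the governor too, while the root still showed the stale value
(100 here); that is exactly the duplicated early call this patch removes.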
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -9,7 +9,6 @@
  */
 
 #define SCHED_CPUFREQ_IOWAIT	(1U << 0)
-#define SCHED_CPUFREQ_MIGRATION	(1U << 1)
 
 #ifdef CONFIG_CPU_FREQ
 struct cpufreq_policy;
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -801,7 +801,7 @@ void post_init_entity_util_avg(struct task_struct *p)
		 * For !fair tasks do:
		 *
		update_cfs_rq_load_avg(now, cfs_rq);
-		attach_entity_load_avg(cfs_rq, se, 0);
+		attach_entity_load_avg(cfs_rq, se);
		switched_from_fair(rq, p);
		 *
		 * such that the next switched_to_fair() has the
@@ -3114,7 +3114,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 {
 	struct rq *rq = rq_of(cfs_rq);
 
-	if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
+	if (&rq->cfs == cfs_rq) {
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
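The hunk above is the core of the change. Condensed (a sketch of
cfs_rq_util_change() with its body elided down to the governor call), the old
condition let a cgroup-level update notify cpufreq early whenever the
migration flag was set, even though governors read utilization only from the
root cfs_rq; the new condition waits until the update has reached the root:

/* Before (sketch): a group-level update could notify the governor early. */
if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION))
	cpufreq_update_util(rq, flags);

/* After (sketch): only the root cfs_rq, whose util_avg governors
 * actually read, triggers the notification.
 */
if (&rq->cfs == cfs_rq)
	cpufreq_update_util(rq, flags);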
@@ -3521,7 +3521,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
  */
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
 
@@ -3557,7 +3557,7 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
 
-	cfs_rq_util_change(cfs_rq, flags);
+	cfs_rq_util_change(cfs_rq, 0);
 
 	trace_pelt_cfs_tp(cfs_rq);
 }
@@ -3615,7 +3615,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 		 *
 		 * IOW we're enqueueing a task on a new CPU.
 		 */
-		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
+		attach_entity_load_avg(cfs_rq, se);
 		update_tg_load_avg(cfs_rq, 0);
 
 	} else if (decayed) {
@@ -3872,7 +3872,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline void
-attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
+attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
@@ -10436,7 +10436,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 
 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
-	attach_entity_load_avg(cfs_rq, se, 0);
+	attach_entity_load_avg(cfs_rq, se);
 	update_tg_load_avg(cfs_rq, false);
 	propagate_entity_cfs_rq(se);
 }
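Taken together, the interface simplification leaves every caller on the same
two-argument form of attach_entity_load_avg(). Consolidated from the hunks
above (surrounding context elided), the three call sites after the patch are:

/* post_init_entity_util_avg() */
update_cfs_rq_load_avg(now, cfs_rq);
attach_entity_load_avg(cfs_rq, se);

/* update_load_avg(): attach on migration to a new CPU */
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, 0);

/* attach_entity_cfs_rq() */
update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);

attach_entity_load_avg() itself now always passes 0 to cfs_rq_util_change(),
as there is no per-call-site flag left to forward.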