sched/fair: Fixes for capacity inversion detection
Traversing the Perf Domains requires rcu_read_lock() to be held and is
conditional on sched_energy_enabled(). Ensure the right protections are
applied.

Also skip capacity inversion detection for our own pd, which was an
error.
Fixes: 44c7b80bff ("sched/fair: Detect capacity inversion")
Reported-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Qais Yousef (Google) <qyousef@layalina.io>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20230112122708.330667-3-qyousef@layalina.io
Parent: e26fd28db8
Commit: da07d2f9c1
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8868,16 +8868,23 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
          *   * Thermal pressure will impact all cpus in this perf domain
          *     equally.
          */
-        if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+        if (sched_energy_enabled()) {
                 unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-                struct perf_domain *pd = rcu_dereference(rq->rd->pd);
+                struct perf_domain *pd;
+
+                rcu_read_lock();
 
+                pd = rcu_dereference(rq->rd->pd);
                 rq->cpu_capacity_inverted = 0;
 
                 for (; pd; pd = pd->next) {
                         struct cpumask *pd_span = perf_domain_span(pd);
                         unsigned long pd_cap_orig, pd_cap;
 
+                        /* We can't be inverted against our own pd */
+                        if (cpumask_test_cpu(cpu_of(rq), pd_span))
+                                continue;
+
                         cpu = cpumask_any(pd_span);
                         pd_cap_orig = arch_scale_cpu_capacity(cpu);
 
@@ -8902,6 +8909,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
                                 break;
                         }
                 }
+
+                rcu_read_unlock();
         }
 
         trace_sched_cpu_capacity_tp(rq);
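For readability, the traversal as it reads after this patch, condensed from the
hunks above (a sketch rather than a verbatim excerpt; the per-domain capacity
comparison inside the loop is elided):

if (sched_energy_enabled()) {
        unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
        struct perf_domain *pd;

        rcu_read_lock();

        pd = rcu_dereference(rq->rd->pd);
        rq->cpu_capacity_inverted = 0;

        for (; pd; pd = pd->next) {
                struct cpumask *pd_span = perf_domain_span(pd);

                /* We can't be inverted against our own pd */
                if (cpumask_test_cpu(cpu_of(rq), pd_span))
                        continue;

                /* compare inv_cap with this pd's capacity (elided) */
        }

        rcu_read_unlock();
}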