sched/numa: Don't scale the imbalance
It's far too easy to get ridiculously large imbalance pct when you scale it like that. Use a fixed 125% for now.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-zsriaft1dv7hhboyrpvqjy6s@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: 04f733b4af
Commit: 870a0bb5d6
@@ -6261,11 +6261,6 @@ static int *sched_domains_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 
-static inline unsigned long numa_scale(unsigned long x, int level)
-{
-	return x * sched_domains_numa_distance[level] / sched_domains_numa_scale;
-}
-
 static inline int sd_local_flags(int level)
 {
 	if (sched_domains_numa_distance[level] > REMOTE_DISTANCE)
@@ -6286,7 +6281,7 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
 		.min_interval		= sd_weight,
 		.max_interval		= 2*sd_weight,
 		.busy_factor		= 32,
-		.imbalance_pct		= 100 + numa_scale(25, level),
+		.imbalance_pct		= 125,
 		.cache_nice_tries	= 2,
 		.busy_idx		= 3,
 		.idle_idx		= 2,
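For illustration, here is a small userspace sketch (not kernel code) of why the removed scaling misbehaves. It assumes the divisor sched_domains_numa_scale held the local node distance (conventionally 10 in ACPI SLIT tables), and the sample distances are made up. With the old formula, imbalance_pct grows with the NUMA distance, so a far node can end up requiring a several-hundred-percent imbalance before load balancing acts; the patch pins it at 125%.

#include <stdio.h>

/*
 * Sketch of the removed helper, with the array lookups replaced by
 * plain parameters so it can run standalone.  The "scale" of 10 and
 * the distance values below are assumptions for illustration only.
 */
static unsigned long numa_scale(unsigned long x, int distance, int scale)
{
	return x * distance / scale;
}

int main(void)
{
	int scale = 10;                          /* assumed local node distance */
	int distances[] = { 10, 20, 40, 160, 255 };

	for (int i = 0; i < 5; i++) {
		int d = distances[i];
		/* old behaviour: imbalance_pct grows with NUMA distance */
		printf("distance %3d: old imbalance_pct = %lu, new = 125\n",
		       d, 100 + numa_scale(25, d, scale));
	}
	return 0;
}

Under these assumptions the old formula already gives 200 at distance 40 and over 700 at distance 255, i.e. balancing would only trigger at a multi-fold imbalance, which matches the "ridiculously large" problem the commit message describes.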