Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar.

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix OOPS when build_sched_domains() percpu allocation fails
  sched: Fix more load-balancing fallout
commit daae677f56
kernel/sched/core.c:

@@ -6405,16 +6405,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-			if (sd && (sd->flags & SD_OVERLAP))
-				free_sched_groups(sd->groups, 0);
-			kfree(*per_cpu_ptr(sdd->sd, j));
-			kfree(*per_cpu_ptr(sdd->sg, j));
-			kfree(*per_cpu_ptr(sdd->sgp, j));
+			struct sched_domain *sd;
+
+			if (sdd->sd) {
+				sd = *per_cpu_ptr(sdd->sd, j);
+				if (sd && (sd->flags & SD_OVERLAP))
+					free_sched_groups(sd->groups, 0);
+				kfree(*per_cpu_ptr(sdd->sd, j));
+			}
+
+			if (sdd->sg)
+				kfree(*per_cpu_ptr(sdd->sg, j));
+			if (sdd->sgp)
+				kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
+		sdd->sd = NULL;
 		free_percpu(sdd->sg);
+		sdd->sg = NULL;
 		free_percpu(sdd->sgp);
+		sdd->sgp = NULL;
 	}
 }
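The fix above makes __sdt_free() tolerate a partially constructed sd_data, so the error path taken when build_sched_domains()'s percpu allocation fails no longer dereferences a NULL percpu pointer; clearing the pointers after free_percpu() additionally makes a repeated teardown pass harmless. A minimal user-space sketch of that guard-and-NULL cleanup pattern, with plain heap allocations standing in for the percpu ones and all names invented:

#include <stdio.h>
#include <stdlib.h>

struct sd_data_like {
	int *sd;
	int *sg;
	int *sgp;
};

/* Teardown is safe on any state: free(NULL) is a no-op, and NULLing
 * each pointer after freeing makes a second call harmless. */
static void sdt_free_like(struct sd_data_like *d)
{
	free(d->sd);  d->sd  = NULL;
	free(d->sg);  d->sg  = NULL;
	free(d->sgp); d->sgp = NULL;
}

static int sdt_alloc_like(struct sd_data_like *d)
{
	d->sd  = calloc(1, sizeof(int));
	d->sg  = calloc(1, sizeof(int));
	d->sgp = calloc(1, sizeof(int));
	if (!d->sd || !d->sg || !d->sgp) {
		sdt_free_like(d);	/* safe even when half-initialized */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct sd_data_like d = { 0 };

	if (sdt_alloc_like(&d) == 0)
		puts("alloc ok");
	sdt_free_like(&d);		/* double teardown is also safe */
	return 0;
}

The same shape lets a single cleanup routine serve both the success and the failure path, which is exactly what the kernel fix relies on.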
kernel/sched/fair.c:

@@ -784,7 +784,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se))
-		list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
+		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
 #endif
 	cfs_rq->nr_running++;
 }
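This one-liner restores head insertion on the cfs_tasks list: the kernel's list_add() links the new node right after the head, so iteration visits the most recently enqueued task first, whereas list_add_tail() links it just before the head, giving enqueue order. A self-contained sketch of the difference, using a hand-rolled stand-in for the kernel's list primitives:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

/* insert n between two known-adjacent nodes, as the kernel does */
static void __list_add(struct list_head *n,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = n;
	n->next = next;
	n->prev = prev;
	prev->next = n;
}

/* list_add(): right after the head -> iteration is LIFO */
static void list_add(struct list_head *n, struct list_head *h)
{
	__list_add(n, h, h->next);
}

/* list_add_tail(): right before the head -> iteration is FIFO */
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	__list_add(n, h->prev, h);
}

struct task { struct list_head node; int id; };	/* node first: a cast suffices */

int main(void)
{
	struct task t[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
	struct list_head q, *p;
	int i;

	list_init(&q);
	for (i = 0; i < 3; i++)
		list_add(&t[i].node, &q);
	for (p = q.next; p != &q; p = p->next)
		printf("%d ", ((struct task *)p)->id);
	printf("  <- list_add: newest first\n");	/* 3 2 1 */

	list_init(&q);
	for (i = 0; i < 3; i++)
		list_add_tail(&t[i].node, &q);
	for (p = q.next; p != &q; p = p->next)
		printf("%d ", ((struct task *)p)->id);
	printf("  <- list_add_tail: enqueue order\n");	/* 1 2 3 */
	return 0;
}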
@@ -3215,6 +3215,8 @@ static int move_one_task(struct lb_env *env)
 
 static unsigned long task_h_load(struct task_struct *p);
 
+static const unsigned int sched_nr_migrate_break = 32;
+
 /*
  * move_tasks tries to move up to load_move weighted load from busiest to
  * this_rq, as part of a balancing operation within domain "sd".
@@ -3242,7 +3244,7 @@ static int move_tasks(struct lb_env *env)
 
 		/* take a breather every nr_migrate tasks */
 		if (env->loop > env->loop_break) {
-			env->loop_break += sysctl_sched_nr_migrate;
+			env->loop_break += sched_nr_migrate_break;
 			env->flags |= LBF_NEED_BREAK;
 			break;
 		}
@@ -3252,7 +3254,7 @@ static int move_tasks(struct lb_env *env)
 
 		load = task_h_load(p);
 
-		if (load < 16 && !env->sd->nr_balance_failed)
+		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
 		if ((load / 2) > env->load_move)
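With this change the "skip tiny loads" shortcut is gated behind the new LB_MIN scheduler feature (off by default, see the features.h hunk below), so loads under 16 are only passed over when the feature is enabled and the domain is not already failing to balance. A hedged user-space sketch of the gated filter, with invented names:

#include <stdbool.h>
#include <stdio.h>

static bool lb_min_enabled;	/* stands in for sched_feat(LB_MIN) */

/* true when a candidate should be passed over, mirroring the gated
 * threshold in move_tasks(): small loads are only skipped when the
 * feature is on and balancing has not already been failing */
static bool skip_small_load(unsigned long load, int nr_balance_failed)
{
	return lb_min_enabled && load < 16 && !nr_balance_failed;
}

int main(void)
{
	printf("%d\n", skip_small_load(8, 0));	/* 0: feature off, never skip */
	lb_min_enabled = true;
	printf("%d\n", skip_small_load(8, 0));	/* 1: small load skipped */
	printf("%d\n", skip_small_load(8, 3));	/* 0: balancing failing, move it */
	return 0;
}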
@@ -4407,7 +4409,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_cpu	= this_cpu,
 		.dst_rq		= this_rq,
 		.idle		= idle,
-		.loop_break	= sysctl_sched_nr_migrate,
+		.loop_break	= sched_nr_migrate_break,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4445,10 +4447,10 @@ redo:
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
 		env.load_move = imbalance;
 		env.src_cpu = busiest->cpu;
 		env.src_rq = busiest;
-		env.loop_max = busiest->nr_running;
+		env.loop_max = min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
 		local_irq_save(flags);
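The move_tasks()/load_balance() hunks work together: the breather interval becomes a fixed stride of 32 (sched_nr_migrate_break) rather than the runtime-tunable sysctl, and loop_max is clamped with min_t() so a very busy runqueue can no longer push the scan past sysctl_sched_nr_migrate. A standalone sketch of that loop shape, with invented names:

#include <stdio.h>

#define NR_MIGRATE_BREAK 32

static unsigned int sysctl_nr_migrate = 32;	/* runtime-tunable upper bound */

/* scan up to loop_max candidates, pausing every fixed-size stride so
 * the lock is not held for too long -- the shape of move_tasks() */
static void scan(unsigned int nr_running)
{
	unsigned int loop = 0;
	unsigned int loop_break = NR_MIGRATE_BREAK;
	unsigned int loop_max = sysctl_nr_migrate < nr_running ?
				sysctl_nr_migrate : nr_running;

	while (loop < loop_max) {
		loop++;
		if (loop > loop_break) {
			loop_break += NR_MIGRATE_BREAK;
			printf("breather after %u tasks\n", loop - 1);
			/* the real code drops the lock and retries here */
		}
		/* ... examine one task ... */
	}
	printf("scanned %u of %u tasks\n", loop, nr_running);
}

int main(void)
{
	scan(10);	/* clamped well under the breather stride */
	sysctl_nr_migrate = 128;
	scan(200);	/* loop_max = 128, breathers every 32 */
	return 0;
}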
kernel/sched/features.h:

@@ -68,3 +68,4 @@ SCHED_FEAT(TTWU_QUEUE, true)
 
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
+SCHED_FEAT(LB_MIN, false)
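LB_MIN therefore ships disabled. In the kernel, these SCHED_FEAT() lines are consumed as X-macros to build both an enum of feature bits and the sched_features mask that sched_feat() tests; with CONFIG_SCHED_DEBUG the bits can be flipped at runtime by writing the feature name (or its NO_ prefix) to /sys/kernel/debug/sched_features. A rough user-space sketch of the bitmask pattern (simplified; the real macros also emit the name table for that debugfs file):

#include <stdio.h>

/* X-macro listing, mirroring how features.h is included twice */
#define SCHED_FEATURES(F)		\
	F(FORCE_SD_OVERLAP, 0)		\
	F(RT_RUNTIME_SHARE, 1)		\
	F(LB_MIN, 0)

/* first expansion: one enum constant (bit index) per feature */
#define F_ENUM(name, enabled)	__FEAT_##name,
enum { SCHED_FEATURES(F_ENUM) __FEAT_NR };

/* second expansion: fold the default values into the feature mask */
#define F_BIT(name, enabled)	((unsigned long)(enabled) << __FEAT_##name) |
static unsigned long sched_features = SCHED_FEATURES(F_BIT) 0UL;

#define sched_feat(name)	(!!(sched_features & (1UL << __FEAT_##name)))

int main(void)
{
	printf("LB_MIN=%d RT_RUNTIME_SHARE=%d\n",
	       sched_feat(LB_MIN), sched_feat(RT_RUNTIME_SHARE));
	sched_features |= 1UL << __FEAT_LB_MIN;	/* runtime toggle */
	printf("LB_MIN=%d\n", sched_feat(LB_MIN));
	return 0;
}

On a kernel built with CONFIG_SCHED_DEBUG, the equivalent toggle is echo LB_MIN > /sys/kernel/debug/sched_features, with no reboot needed.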