sched: Remove rq_iterator usage from load_balance_fair
Since we only ever iterate the fair class, do away with this abstraction.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Parent: 3d45fd804a
Commit: ee00e66fff
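For context (not part of the commit itself): the rq_iterator callback structure that this change removes is not shown in the diff below, only its uses are. The sketch here is a paraphrase; the field names arg/start/next match the assignments visible in the removed __load_balance_fair(), while the exact definition lived in kernel/sched.c of this kernel generation.

/*
 * Paraphrased sketch of the abstraction being removed; see kernel/sched.c
 * of this era for the authoritative definition.
 */
struct task_struct;				/* forward declaration */

struct rq_iterator {
	void *arg;				/* cfs_rq being walked        */
	struct task_struct *(*start)(void *);	/* first candidate task       */
	struct task_struct *(*next)(void *);	/* next candidate task, or 0  */
};

After the change, balance_tasks() takes the struct cfs_rq * directly and walks its ->tasks list itself, so both the start/next indirection and the __load_balance_fair() wrapper go away.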
@@ -1866,26 +1866,9 @@ static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      unsigned long max_load_move, struct sched_domain *sd,
 	      enum cpu_idle_type idle, int *all_pinned,
-	      int *this_best_prio, struct rq_iterator *iterator);
-
-
-static unsigned long
-__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_load_move, struct sched_domain *sd,
-		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
-		struct cfs_rq *cfs_rq)
-{
-	struct rq_iterator cfs_rq_iterator;
-
-	cfs_rq_iterator.start = load_balance_start_fair;
-	cfs_rq_iterator.next = load_balance_next_fair;
-	cfs_rq_iterator.arg = cfs_rq;
-
-	return balance_tasks(this_rq, this_cpu, busiest,
-			max_load_move, sd, idle, all_pinned,
-			this_best_prio, &cfs_rq_iterator);
-}
+	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static unsigned long
 load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
@@ -1915,9 +1898,9 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		rem_load = (u64)rem_load_move * busiest_weight;
 		rem_load = div_u64(rem_load, busiest_h_load + 1);
 
-		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
+		moved_load = balance_tasks(this_rq, this_cpu, busiest,
 				rem_load, sd, idle, all_pinned, this_best_prio,
-				tg->cfs_rq[busiest_cpu]);
+				busiest_cfs_rq);
 
 		if (!moved_load)
 			continue;
@@ -1940,7 +1923,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		  struct sched_domain *sd, enum cpu_idle_type idle,
 		  int *all_pinned, int *this_best_prio)
 {
-	return __load_balance_fair(this_rq, this_cpu, busiest,
+	return balance_tasks(this_rq, this_cpu, busiest,
 			max_load_move, sd, idle, all_pinned,
 			this_best_prio, &busiest->cfs);
 }
@@ -2050,53 +2033,48 @@ static unsigned long
 balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      unsigned long max_load_move, struct sched_domain *sd,
 	      enum cpu_idle_type idle, int *all_pinned,
-	      int *this_best_prio, struct rq_iterator *iterator)
+	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
 	int loops = 0, pulled = 0, pinned = 0;
-	struct task_struct *p;
 	long rem_load_move = max_load_move;
+	struct task_struct *p, *n;
 
 	if (max_load_move == 0)
 		goto out;
 
 	pinned = 1;
 
-	/*
-	 * Start the load-balancing iterator:
-	 */
-	p = iterator->start(iterator->arg);
-next:
-	if (!p || loops++ > sysctl_sched_nr_migrate)
-		goto out;
-
-	if ((p->se.load.weight >> 1) > rem_load_move ||
-	    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
-		p = iterator->next(iterator->arg);
-		goto next;
-	}
-
-	pull_task(busiest, p, this_rq, this_cpu);
-	pulled++;
-	rem_load_move -= p->se.load.weight;
+	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
+		if (loops++ > sysctl_sched_nr_migrate)
+			break;
+
+		if ((p->se.load.weight >> 1) > rem_load_move ||
+		    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+			continue;
+
+		pull_task(busiest, p, this_rq, this_cpu);
+		pulled++;
+		rem_load_move -= p->se.load.weight;
 
 #ifdef CONFIG_PREEMPT
-	/*
-	 * NEWIDLE balancing is a source of latency, so preemptible kernels
-	 * will stop after the first task is pulled to minimize the critical
-	 * section.
-	 */
-	if (idle == CPU_NEWLY_IDLE)
-		goto out;
+		/*
+		 * NEWIDLE balancing is a source of latency, so preemptible
+		 * kernels will stop after the first task is pulled to minimize
+		 * the critical section.
+		 */
+		if (idle == CPU_NEWLY_IDLE)
+			break;
 #endif
 
-	/*
-	 * We only want to steal up to the prescribed amount of weighted load.
-	 */
-	if (rem_load_move > 0) {
-		if (p->prio < *this_best_prio)
-			*this_best_prio = p->prio;
-		p = iterator->next(iterator->arg);
-		goto next;
-	}
+		/*
+		 * We only want to steal up to the prescribed amount of
+		 * weighted load.
+		 */
+		if (rem_load_move <= 0)
+			break;
+
+		if (p->prio < *this_best_prio)
+			*this_best_prio = p->prio;
+	}
 out:
 	/*
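A note on the new iteration in balance_tasks(): pull_task() unlinks the migrated task from the busiest cfs_rq's ->tasks list while the walk is in progress, which is why the loop uses the "_safe" list walker that caches the next node before the body runs. The userspace sketch below is illustrative only; the simplified list helpers and the fake_task type are not from the kernel tree, they just reproduce the iterate-and-unlink pattern in isolation.

/* Minimal userspace sketch of the list_for_each_entry_safe pattern (gcc/clang). */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Walk 'head'; 'n' caches the next node so 'pos' may be unlinked mid-walk. */
#define list_for_each_entry_safe(pos, n, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member),		\
	     n = list_entry(pos->member.next, typeof(*pos), member);		\
	     &pos->member != (head);						\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/* Stand-in for a task queued on a cfs_rq's ->tasks list (illustrative). */
struct fake_task {
	int weight;
	struct list_head group_node;
};

int main(void)
{
	struct list_head tasks = LIST_HEAD_INIT(tasks);
	struct fake_task t[3] = { { .weight = 1024 }, { .weight = 512 }, { .weight = 2048 } };
	struct fake_task *p, *n;
	int i;

	for (i = 0; i < 3; i++)
		list_add_tail(&t[i].group_node, &tasks);

	/* "Pull" every task whose weight fits, unlinking it as we go. */
	list_for_each_entry_safe(p, n, &tasks, group_node) {
		if (p->weight <= 1024) {
			list_del(&p->group_node);	/* safe: 'n' was cached */
			printf("pulled task with weight %d\n", p->weight);
		}
	}
	return 0;
}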