sched: Rename select_task_rq() argument
In order to be able to rename the sync argument, we need to rename the current flag argument.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: 8e6598af3f
Commit: 0763a660a8
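The renamed argument carries one of the SD_BALANCE_* sched-domain flags (SD_BALANCE_WAKE in the wake-up path), which is what the name sd_flag is meant to convey. A minimal caller-side sketch follows, assuming a try_to_wake_up()-style call site that is not part of this diff; the helper name is hypothetical.

/*
 * Sketch only, not part of this commit: illustrates how the renamed
 * hook is invoked, passing an SD_BALANCE_* value as sd_flag together
 * with the (still unchanged) sync argument.
 */
static int sketch_pick_wake_cpu(struct task_struct *p, int sync)
{
	/* sd_flag selects the sched-domain balance path (here: wake-up) */
	return p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync);
}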
@@ -1037,7 +1037,7 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct task_struct *p, int flag, int sync);
+	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int sync);
 
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 			struct rq *busiest, unsigned long max_load_move,

@@ -1331,7 +1331,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
+static int select_task_rq_fair(struct task_struct *p, int sd_flag, int sync)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	int cpu = smp_processor_id();
@@ -1339,7 +1339,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 	int new_cpu = cpu;
 	int want_affine = 0;
 
-	if (flag & SD_BALANCE_WAKE) {
+	if (sd_flag & SD_BALANCE_WAKE) {
 		if (sched_feat(AFFINE_WAKEUPS))
 			want_affine = 1;
 		new_cpu = prev_cpu;
@@ -1368,7 +1368,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 			break;
 		}
 
-		switch (flag) {
+		switch (sd_flag) {
 		case SD_BALANCE_WAKE:
 			if (!sched_feat(LB_WAKEUP_UPDATE))
 				break;
@@ -1392,7 +1392,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 			want_affine = 0;
 		}
 
-		if (!(tmp->flags & flag))
+		if (!(tmp->flags & sd_flag))
 			continue;
 
 		sd = tmp;
@@ -1402,12 +1402,12 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 		struct sched_group *group;
 		int weight;
 
-		if (!(sd->flags & flag)) {
+		if (!(sd->flags & sd_flag)) {
 			sd = sd->child;
 			continue;
 		}
 
-		group = find_idlest_group(sd, p, cpu, flag);
+		group = find_idlest_group(sd, p, cpu, sd_flag);
 		if (!group) {
 			sd = sd->child;
 			continue;
@@ -1427,7 +1427,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 		for_each_domain(cpu, tmp) {
 			if (weight <= cpumask_weight(sched_domain_span(tmp)))
 				break;
-			if (tmp->flags & flag)
+			if (tmp->flags & sd_flag)
 				sd = tmp;
 		}
 		/* while loop will break here if sd == NULL */

@@ -6,7 +6,7 @@
  */
 
 #ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int flag, int sync)
+static int select_task_rq_idle(struct task_struct *p, int sd_flag, int sync)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }

@@ -938,11 +938,11 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int flag, int sync)
+static int select_task_rq_rt(struct task_struct *p, int sd_flag, int sync)
 {
 	struct rq *rq = task_rq(p);
 
-	if (flag != SD_BALANCE_WAKE)
+	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
 
 	/*