sched/core: Replace sd_busy/nr_busy_cpus with sched_domain_shared
Move the nr_busy_cpus thing from its hacky sd->parent->groups->sgc
location into the much more natural sched_domain_shared location.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: 24fc7edb92
Commit: 0e369d7575
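In short, a sketch assembled from the hunks below (not itself part of the patch): the busy-CPU count for a cache domain used to be reached through the LLC domain's parent and that domain's first group's capacity structure; it now hangs directly off the LLC domain's shared state.

	/* Before this patch: reached via the parent domain's group capacity. */
	sd = rcu_dereference(per_cpu(sd_busy, cpu));	/* sd_busy == sd_llc->parent */
	atomic_inc(&sd->groups->sgc->nr_busy_cpus);

	/* After this patch: one sched_domain_shared object per LLC span. */
	sd = rcu_dereference(per_cpu(sd_llc, cpu));
	atomic_inc(&sd->shared->nr_busy_cpus);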
include/linux/sched.h
@@ -1069,6 +1069,7 @@ struct sched_group;
 
 struct sched_domain_shared {
 	atomic_t	ref;
+	atomic_t	nr_busy_cpus;
 };
 
 struct sched_domain {
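For reference, the resulting structure with the field meanings spelled out (the comments are editorial, inferred from the hunks below):

	struct sched_domain_shared {
		atomic_t	ref;		/* domains sharing this object; taken in sd_init() */
		atomic_t	nr_busy_cpus;	/* CPUs in this LLC span not in nohz idle */
	};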
kernel/sched/core.c
@@ -5981,14 +5981,14 @@ static void destroy_sched_domains(struct sched_domain *sd)
 DEFINE_PER_CPU(struct sched_domain *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
-DEFINE_PER_CPU(struct sched_domain *, sd_busy);
 DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 
 static void update_top_cache_domain(int cpu)
 {
+	struct sched_domain_shared *sds = NULL;
 	struct sched_domain *sd;
-	struct sched_domain *busy_sd = NULL;
 	int id = cpu;
 	int size = 1;
 
@@ -5996,13 +5996,13 @@ static void update_top_cache_domain(int cpu)
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
-		busy_sd = sd->parent; /* sd_busy */
+		sds = sd->shared;
 	}
-	rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_size, cpu) = size;
 	per_cpu(sd_llc_id, cpu) = id;
+	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
 
 	sd = lowest_flag_domain(cpu, SD_NUMA);
 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
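update_top_cache_domain() now caches the LLC domain's shared object in the new per-CPU sd_llc_shared pointer. It is published with rcu_assign_pointer(), so readers must pair it with rcu_dereference() under rcu_read_lock(); a minimal reader sketch (mirroring what nohz_kick_needed() does further down):

	rcu_read_lock();
	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds)
		nr_busy = atomic_read(&sds->nr_busy_cpus);
	rcu_read_unlock();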
@@ -6299,7 +6299,6 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 		return;
 
 	update_group_capacity(sd, cpu);
-	atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 
 /*
@@ -6546,6 +6545,7 @@ sd_init(struct sched_domain_topology_level *tl,
 	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
 		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
 		atomic_inc(&sd->shared->ref);
+		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
 	}
 
 	sd->private = sdd;
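Note the seeding change: init_sched_groups_capacity() no longer initializes the counter from the group weight; sd_init() now seeds the shared object with sd_weight, so every CPU in the LLC span starts out counted as busy and removes itself when it enters nohz idle. A hypothetical trace for a 4-CPU LLC:

	/* sd_init():              nr_busy_cpus == 4 (all CPUs presumed busy)  */
	/* CPU2 enters nohz idle:  set_cpu_sd_state_idle() -> nr_busy_cpus 3  */
	/* CPU2 resumes work:      set_cpu_sd_state_busy() -> nr_busy_cpus 4  */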
kernel/sched/fair.c
@@ -8008,13 +8008,13 @@ static inline void set_cpu_sd_state_busy(void)
 	int cpu = smp_processor_id();
 
 	rcu_read_lock();
-	sd = rcu_dereference(per_cpu(sd_busy, cpu));
+	sd = rcu_dereference(per_cpu(sd_llc, cpu));
 
 	if (!sd || !sd->nohz_idle)
 		goto unlock;
 	sd->nohz_idle = 0;
 
-	atomic_inc(&sd->groups->sgc->nr_busy_cpus);
+	atomic_inc(&sd->shared->nr_busy_cpus);
 unlock:
 	rcu_read_unlock();
 }
@@ -8025,13 +8025,13 @@ void set_cpu_sd_state_idle(void)
 	int cpu = smp_processor_id();
 
 	rcu_read_lock();
-	sd = rcu_dereference(per_cpu(sd_busy, cpu));
+	sd = rcu_dereference(per_cpu(sd_llc, cpu));
 
 	if (!sd || sd->nohz_idle)
 		goto unlock;
 	sd->nohz_idle = 1;
 
-	atomic_dec(&sd->groups->sgc->nr_busy_cpus);
+	atomic_dec(&sd->shared->nr_busy_cpus);
 unlock:
 	rcu_read_unlock();
 }
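Apart from the new counter location, the two helpers are structurally unchanged: sd->nohz_idle gates the update, so each CPU contributes exactly 0 or 1 to nr_busy_cpus no matter how often the tick path calls them. A hypothetical condensation of both helpers (not in the tree, for illustration only):

	static void __set_cpu_busy_state(struct sched_domain *sd, int busy)
	{
		if (!sd || sd->nohz_idle == !busy)
			return;			/* already in the requested state */
		sd->nohz_idle = !busy;
		if (busy)
			atomic_inc(&sd->shared->nr_busy_cpus);
		else
			atomic_dec(&sd->shared->nr_busy_cpus);
	}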
@@ -8258,8 +8258,8 @@ end:
 static inline bool nohz_kick_needed(struct rq *rq)
 {
 	unsigned long now = jiffies;
+	struct sched_domain_shared *sds;
 	struct sched_domain *sd;
-	struct sched_group_capacity *sgc;
 	int nr_busy, cpu = rq->cpu;
 	bool kick = false;
 
@@ -8287,11 +8287,13 @@ static inline bool nohz_kick_needed(struct rq *rq)
 		return true;
 
 	rcu_read_lock();
-	sd = rcu_dereference(per_cpu(sd_busy, cpu));
-	if (sd) {
-		sgc = sd->groups->sgc;
-		nr_busy = atomic_read(&sgc->nr_busy_cpus);
-
+	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
+	if (sds) {
+		/*
+		 * XXX: write a coherent comment on why we do this.
+		 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
+		 */
+		nr_busy = atomic_read(&sds->nr_busy_cpus);
 		if (nr_busy > 1) {
 			kick = true;
 			goto unlock;
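The kick policy itself is untouched: a nohz balance kick is requested when more than one CPU in the LLC domain is busy. Only the lookup changed, from three pointer hops (sd->groups->sgc) on the sd_busy domain to a single dereference of the cached shared object. Condensed, the decision reads:

	if (sds && atomic_read(&sds->nr_busy_cpus) > 1)
		kick = true;	/* a busy sibling shares this cache domain */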
kernel/sched/sched.h
@@ -858,8 +858,8 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-DECLARE_PER_CPU(struct sched_domain *, sd_busy);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 
 struct sched_group_capacity {
@@ -871,10 +871,6 @@ struct sched_group_capacity {
 	unsigned int capacity;
 	unsigned long next_update;
 	int imbalance; /* XXX unrelated to capacity but shared group state */
-	/*
-	 * Number of busy cpus in this group.
-	 */
-	atomic_t nr_busy_cpus;
 
 	unsigned long cpumask[0]; /* iteration mask */
 };