#ifndef _ASM_SPARC64_TOPOLOGY_H
#define _ASM_SPARC64_TOPOLOGY_H

#ifdef CONFIG_NUMA

#include <asm/mmzone.h>

static inline int cpu_to_node(int cpu)
{
        return numa_cpu_lookup_table[cpu];
}

#define parent_node(node)       (node)

#define cpumask_of_node(node) ((node) == -1 ?           \
                               cpu_all_mask :           \
                               &numa_cpumask_lookup_table[node])
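/*
 * Illustrative sketch only, not part of this header: cpu_to_node() and
 * cpumask_of_node() are what NUMA-aware callers use to keep memory and
 * work close to a CPU, e.g.
 *
 *      int nid = cpu_to_node(cpu);
 *      struct foo *p = kmalloc_node(sizeof(*p), GFP_KERNEL, nid);
 *
 * "struct foo" is a made-up example type; kmalloc_node() is the generic
 * node-aware allocator.
 */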

struct pci_bus;
#ifdef CONFIG_PCI
extern int pcibus_to_node(struct pci_bus *pbus);
#else
static inline int pcibus_to_node(struct pci_bus *pbus)
{
        return -1;
}
#endif

#define cpumask_of_pcibus(bus)                          \
        (pcibus_to_node(bus) == -1 ?                    \
         cpu_all_mask :                                 \
         cpumask_of_node(pcibus_to_node(bus)))

/*
 * sched_domain initializer for the NUMA (node) level.  Only load, fork and
 * exec balancing plus SD_SERIALIZE are enabled here; the old SD_WAKE_* flags
 * were dropped when wakeup balancing was merged into select_task_rq_fair().
 */
#define SD_NODE_INIT (struct sched_domain) {            \
        .min_interval           = 8,                    \
        .max_interval           = 32,                   \
        .busy_factor            = 32,                   \
        .imbalance_pct          = 125,                  \
        .cache_nice_tries       = 2,                    \
        .busy_idx               = 3,                    \
        .idle_idx               = 2,                    \
        .newidle_idx            = 0,                    \
        .wake_idx               = 0,                    \
        .forkexec_idx           = 0,                    \
        .flags                  = SD_LOAD_BALANCE       \
                                | SD_BALANCE_FORK       \
                                | SD_BALANCE_EXEC       \
                                | SD_SERIALIZE,         \
        .last_balance           = jiffies,              \
        .balance_interval       = 1,                    \
}

#else /* CONFIG_NUMA */

#include <asm-generic/topology.h>

#endif /* !(CONFIG_NUMA) */

#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu)       (cpu_data(cpu).proc_id)
#define topology_core_id(cpu)                   (cpu_data(cpu).core_id)
#define topology_core_cpumask(cpu)              (&cpu_core_map[cpu])
#define topology_thread_cpumask(cpu)            (&per_cpu(cpu_sibling_map, cpu))
#define mc_capable()                            (sparc64_multi_core)
#define smt_capable()                           (sparc64_multi_core)
#endif /* CONFIG_SMP */
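/*
 * Illustrative sketch only, not part of this header: the SMP topology
 * macros above might be consulted from a debug path like
 *
 *      pr_info("cpu%d: pkg %d core %d\n", cpu,
 *              topology_physical_package_id(cpu), topology_core_id(cpu));
 */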

extern cpumask_t cpu_core_map[NR_CPUS];

static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_core_map[cpu];
}
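/*
 * Illustrative sketch only, not part of this header: cpu_coregroup_mask()
 * returns an ordinary cpumask and can be walked with for_each_cpu(), e.g.
 *
 *      int sibling;
 *      for_each_cpu(sibling, cpu_coregroup_mask(cpu))
 *              pr_debug("cpu%d shares a core group with cpu%d\n", cpu, sibling);
 */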
#endif /* _ASM_SPARC64_TOPOLOGY_H */