Merge branch 'x86-mm' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into x86/mm

This commit is contained in:
Ingo Molnar 2011-03-05 07:32:45 +01:00
Родитель d04c579f97 078a198906
Коммит ca764aaf02
5 изменённых файлов: 81 добавлений и 103 удалений

Просмотреть файл

@ -54,8 +54,6 @@ static inline phys_addr_t get_max_mapped(void)
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
void init_memory_mapping_high(void);
extern void initmem_init(void);
extern void free_initmem(void);

Просмотреть файл

@ -963,6 +963,14 @@ void __init setup_arch(char **cmdline_p)
max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
max_pfn_mapped = max_low_pfn_mapped;
#ifdef CONFIG_X86_64
if (max_pfn > max_low_pfn) {
max_pfn_mapped = init_memory_mapping(1UL<<32,
max_pfn<<PAGE_SHIFT);
/* can we preserve max_low_pfn ? */
max_low_pfn = max_pfn;
}
#endif
memblock.current_limit = get_max_mapped();
/*

Просмотреть файл

@ -606,63 +606,9 @@ kernel_physical_mapping_init(unsigned long start,
/*
 * initmem_init - boot-time memory setup for the non-NUMA (flat) case.
 *
 * Registers all of memory as one active region (first argument is
 * presumably the node id 0 — confirm against
 * memblock_x86_register_active_regions()'s prototype), then maps the
 * memory above the low range via init_memory_mapping_high().
 */
void __init initmem_init(void)
{
	/* Whole [0, max_pfn) range belongs to the single flat "node". */
	memblock_x86_register_active_regions(0, 0, max_pfn);
	/* Map high memory now that the active regions are registered. */
	init_memory_mapping_high();
}
#endif
/*
 * Context handed to mapping_work_fn() through work_with_active_regions():
 * describes the physical-address window to map and accumulates the
 * highest pfn that init_memory_mapping() reported.
 */
struct mapping_work_data {
	unsigned long start;		/* window start, physical address in bytes */
	unsigned long end;		/* window end, physical address in bytes (exclusive) */
	unsigned long pfn_mapped;	/* max init_memory_mapping() return seen so far */
};
/*
 * Per-region callback for work_with_active_regions(): map the overlap of
 * [start_pfn, end_pfn) with the byte-address window described by @datax
 * and remember the highest pfn init_memory_mapping() reports.
 *
 * Always returns 0 so the iteration visits every active region.
 */
static int __init_refok
mapping_work_fn(unsigned long start_pfn, unsigned long end_pfn, void *datax)
{
	struct mapping_work_data *d = datax;
	unsigned long lo = max_t(unsigned long, start_pfn << PAGE_SHIFT, d->start);
	unsigned long hi = min_t(unsigned long, end_pfn << PAGE_SHIFT, d->end);

	if (lo < hi) {
		unsigned long mapped = init_memory_mapping(lo, hi);

		if (mapped > d->pfn_mapped)
			d->pfn_mapped = mapped;
	}

	return 0;
}
/*
 * Run init_memory_mapping() over each active memory region intersecting
 * the byte-address window [start, end) and return the highest pfn mapped
 * (0 when no region overlapped the window).
 */
static unsigned long __init_refok
init_memory_mapping_active_regions(unsigned long start, unsigned long end)
{
	struct mapping_work_data data = {
		.start		= start,
		.end		= end,
		.pfn_mapped	= 0,
	};

	work_with_active_regions(MAX_NUMNODES, mapping_work_fn, &data);

	return data.pfn_mapped;
}
/*
 * Map the memory above 4GB, region by region, once active regions are
 * known.  Updates max_pfn_mapped / max_low_pfn and raises memblock's
 * allocation limit to cover the newly mapped range.  A no-op when
 * nothing lies above max_low_pfn.
 */
void __init_refok init_memory_mapping_high(void)
{
	if (max_pfn <= max_low_pfn)
		return;

	max_pfn_mapped = init_memory_mapping_active_regions(1UL << 32,
							    max_pfn << PAGE_SHIFT);
	/* can we preserve max_low_pfn ? */
	max_low_pfn = max_pfn;

	memblock.current_limit = get_max_mapped();
}
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];

Просмотреть файл

@ -543,8 +543,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
if (!numa_meminfo_cover_memory(mi))
return -EINVAL;
init_memory_mapping_high();
/* Finally register nodes. */
for_each_node_mask(nid, node_possible_map) {
u64 start = (u64)max_pfn << PAGE_SHIFT;
@ -564,6 +562,15 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
return 0;
}
/**
 * dummy_numa_init - Fallback dummy NUMA init
*
* Used if there's no underlying NUMA architecture, NUMA initialization
* fails, or NUMA is disabled on the command line.
*
* Must online at least one node and add memory blocks that cover all
* allowed memory. This function must not fail.
*/
static int __init dummy_numa_init(void)
{
printk(KERN_INFO "%s\n",
@ -577,57 +584,64 @@ static int __init dummy_numa_init(void)
return 0;
}
/*
 * numa_init - run one NUMA detection method from a clean slate.
 * @init_func: detection routine (ACPI, AMD or the dummy fallback) that
 *             populates the NUMA bookkeeping state.
 *
 * Resets all NUMA state, invokes @init_func, then cleans up and registers
 * the resulting memory configuration.  Returns 0 on success or a negative
 * value from @init_func / numa_cleanup_meminfo() / numa_register_memblks().
 */
static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	/* Forget any apicid->node mapping left by a previous attempt. */
	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	/* Reset all global NUMA bookkeeping before running @init_func. */
	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	remove_all_active_ranges();
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;
	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	/*
	 * Clear per-cpu node hints that point at nodes this configuration
	 * did not bring online.
	 */
	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();
	return 0;
}
/*
 * initmem_init - top-level NUMA initialization.
 *
 * Tries each available init method in order of preference through
 * numa_init(): ACPI (when CONFIG_ACPI_NUMA), then AMD northbridge
 * (when CONFIG_AMD_NUMA), stopping at the first that succeeds.  When
 * NUMA is disabled on the command line (numa_off) or all methods fail,
 * falls back to dummy_numa_init(), which must not fail.
 */
void __init initmem_init(void)
{
	int ret;

	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		ret = numa_init(x86_acpi_numa_init);
		if (!ret)
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		ret = numa_init(amd_numa_init);
		if (!ret)
			return;
#endif
	}

	/* Last resort; dummy_numa_init() must succeed. */
	numa_init(dummy_numa_init);
}
unsigned long __init numa_free_all_bootmem(void)

Просмотреть файл

@ -301,6 +301,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
const u64 max_addr = max_pfn << PAGE_SHIFT;
u8 *phys_dist = NULL;
size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]);
int dfl_phys_nid;
int i, j, ret;
if (!emu_cmdline)
@ -357,6 +358,19 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
node_distance(i, j);
}
/* determine the default phys nid to use for unmapped nodes */
dfl_phys_nid = NUMA_NO_NODE;
for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) {
if (emu_nid_to_phys[i] != NUMA_NO_NODE) {
dfl_phys_nid = emu_nid_to_phys[i];
break;
}
}
if (dfl_phys_nid == NUMA_NO_NODE) {
pr_warning("NUMA: Warning: can't determine default physical node, disabling emulation\n");
goto no_emu;
}
/* commit */
*numa_meminfo = ei;
@ -377,7 +391,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
/* make sure all emulated nodes are mapped to a physical node */
for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
if (emu_nid_to_phys[i] == NUMA_NO_NODE)
emu_nid_to_phys[i] = 0;
emu_nid_to_phys[i] = dfl_phys_nid;
/*
* Transform distance table. numa_set_distance() ignores all
@ -417,9 +431,7 @@ void __cpuinit numa_add_cpu(int cpu)
{
int physnid, nid;
nid = numa_cpu_node(cpu);
if (nid == NUMA_NO_NODE)
nid = early_cpu_to_node(cpu);
nid = early_cpu_to_node(cpu);
BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
physnid = emu_nid_to_phys[nid];