From 7b16bbf97375d9fb7fc107b3f80afeb94a204e44 Mon Sep 17 00:00:00 2001
From: Dave Young
Date: Thu, 18 Oct 2012 14:33:23 +0800
Subject: [PATCH] Revert "x86/mm: Fix the size calculation of mapping tables"

Commit 722bc6b16771 ("x86/mm: Fix the size calculation of mapping
tables") tried to address the issue that the first 2/4M should use
4k pages if PSE is enabled, but the extra counts should only be
valid for x86_32.

This commit caused a kdump regression: the kdump kernel hangs.

Work is in progress to fundamentally fix the various page table
initialization issues that we have, via the design suggested by
H. Peter Anvin, but it's not yet ready to be merged.

So, to get a working kdump, revert to the last known working version,
which is the revert of this commit and of a followup fix (which was
incomplete):

   bd2753b2dda7 x86/mm: Only add extra pages count for the first
                memory range during pre-allocation

Tested kdump on physical and virtual machines.

Signed-off-by: Dave Young
Acked-by: Yinghai Lu
Acked-by: Cong Wang
Acked-by: Flavio Leitner
Tested-by: Flavio Leitner
Cc: Dan Carpenter
Cc: Cong Wang
Cc: Flavio Leitner
Cc: Tejun Heo
Cc: ianfang.cn@gmail.com
Cc: Vivek Goyal
Cc: Linus Torvalds
Cc: Andrew Morton
Cc:
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init.c | 22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ab1f6a93b527..8653b3a722be 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -29,14 +29,8 @@ int direct_gbpages
 #endif
 ;
 
-struct map_range {
-	unsigned long start;
-	unsigned long end;
-	unsigned page_size_mask;
-};
-
-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
-					  int use_pse, int use_gbpages)
+static void __init find_early_table_space(unsigned long end, int use_pse,
+					  int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
 	phys_addr_t base;
@@ -61,10 +55,6 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
 #ifdef CONFIG_X86_32
 		extra += PMD_SIZE;
 #endif
-		/* The first 2/4M doesn't use large pages. */
-		if (mr->start < PMD_SIZE)
-			extra += mr->end - mr->start;
-
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -95,6 +85,12 @@ void __init native_pagetable_reserve(u64 start, u64 end)
 	memblock_reserve(start, end - start);
 }
 
+struct map_range {
+	unsigned long start;
+	unsigned long end;
+	unsigned page_size_mask;
+};
+
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
 #else /* CONFIG_X86_64 */
@@ -267,7 +263,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
-		find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+		find_early_table_space(end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
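
A note on the estimate being reverted, for readers who don't have the
full file handy: the arithmetic can be modeled outside the kernel. The
sketch below is a minimal user-space illustration of the 4k-PTE count
estimate, not the kernel function itself. The PAGE/PMD constants mirror
x86 with 2M large pages, and first_range_end is a hypothetical stand-in
for mr[0].end (the reverted logic assumed the first range starts at 0).

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PMD_SHIFT       21
#define PMD_SIZE        (1UL << PMD_SHIFT)

/* Illustrative model of the PTE-page estimate in find_early_table_space(). */
static unsigned long estimate_ptes(unsigned long end, int use_pse,
                                   unsigned long first_range_end)
{
        unsigned long extra, ptes;

        if (use_pse) {
                /* Only the tail that 2M pages can't cover needs 4k PTEs. */
                extra = end - ((end >> PMD_SHIFT) << PMD_SHIFT);
                /*
                 * The reverted commit additionally counted the head of
                 * the first range, which never uses large pages:
                 */
                if (first_range_end < PMD_SIZE)
                        extra += first_range_end;
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else {
                /* Without PSE, every mapped 4k page needs a PTE. */
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
        }
        return ptes;
}

int main(void)
{
        /* 1 GB mapped, PSE on, first range ending at 1M (made-up numbers). */
        printf("estimated ptes: %lu\n",
               estimate_ptes(1UL << 30, 1, 1UL << 20));
        return 0;
}

With these made-up inputs the 1 GB end is 2M-aligned, so the tail
contributes nothing and the whole estimate comes from the sub-2M head
of the first range (1M, i.e. 256 PTEs). That head-of-range term is
exactly what this revert removes from find_early_table_space().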