powerpc: Cleanup memblock usage
* early_init_devtree(): Total memory size is aligned to PAGE_SIZE; however, alignment isn't enforced if memory_limit is explicitly specified. Simplify the logic and always apply PAGE_SIZE alignment. * MMU_init(): memblock regions is truncated by directly modifying memblock.memory.cnt. This is incomplete (reserved array is not truncated) and unnecessarily low level hindering further memblock improvments. Use memblock_enforce_memory_limit() instead. * wii_memory_fixups(): Unnecessarily low level direct manipulation of memblock regions. The same result can be achieved using properly abstracted operations. Reimplement using memblock API. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Yinghai Lu <yinghai@kernel.org>
Parent: c0ce8fef55
Commit: 6fbef13c4f
@@ -754,17 +754,12 @@ void __init early_init_devtree(void *params)
 	early_reserve_mem();
 	phyp_dump_reserve_mem();
 
-	limit = memory_limit;
-	if (! limit) {
-		phys_addr_t memsize;
-
-		/* Ensure that total memory size is page-aligned, because
-		 * otherwise mark_bootmem() gets upset. */
-		memblock_analyze();
-		memsize = memblock_phys_mem_size();
-		if ((memsize & PAGE_MASK) != memsize)
-			limit = memsize & PAGE_MASK;
-	}
+	/*
+	 * Ensure that total memory size is page-aligned, because otherwise
+	 * mark_bootmem() gets upset.
+	 */
+	memblock_analyze();
+	limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
 	memblock_enforce_memory_limit(limit);
 
 	memblock_analyze();
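For reference, a minimal userspace sketch of what the new one-liner evaluates to. This is not kernel code: the PAGE_SIZE value, the local ALIGN() definition and the sample numbers are stand-ins chosen for illustration. ALIGN() rounds up to a power-of-two boundary, and the GNU "?:" extension falls back to the total memory size when no explicit memory_limit was given, so the resulting limit is page-aligned either way, which is the property the mark_bootmem() comment is about.

/*
 * Userspace illustration of the idiom above (not kernel code).
 * PAGE_SIZE, ALIGN() and the sample values are made up for the demo.
 */
#include <stdio.h>

#define PAGE_SIZE 4096ULL
/* round x up to the next multiple of a; a must be a power of two */
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	unsigned long long memory_limit = 0;               /* 0: no explicit mem= limit */
	unsigned long long phys_mem_size = 0x10000800ULL;  /* pretend total memory, not page-aligned */
	unsigned long long limit;

	/* same shape as: ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE) */
	limit = ALIGN(memory_limit ?: phys_mem_size, PAGE_SIZE);
	printf("no mem= limit:        %#llx\n", limit);    /* 0x10001000 */

	memory_limit = 0x0ffffc00ULL;                      /* explicit but unaligned limit */
	limit = ALIGN(memory_limit ?: phys_mem_size, PAGE_SIZE);
	printf("unaligned mem= limit: %#llx\n", limit);    /* 0x10000000 */

	return 0;
}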
@@ -134,7 +134,7 @@ void __init MMU_init(void)
 
 	if (memblock.memory.cnt > 1) {
 #ifndef CONFIG_WII
-		memblock.memory.cnt = 1;
+		memblock_enforce_memory_limit(memblock.memory.regions[0].size);
 		memblock_analyze();
 		printk(KERN_WARNING "Only using first contiguous memory region");
 #else
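The reason memblock_enforce_memory_limit() is preferable to poking memblock.memory.cnt is that the limit has to be applied to the reserved array as well as the memory array. Below is a simplified stand-alone model of that idea only, not the kernel's memblock implementation; the structures, the clamp-by-address semantics and the sample region layout are assumptions made for the demo (they coincide with the kernel behaviour when the first bank starts at address 0, as in MMU_init()).

/*
 * Simplified model (not the kernel's memblock implementation) of why the
 * limit must be enforced on both region arrays.
 */
#include <stdio.h>

struct region { unsigned long long base, size; };

struct region_array {
	struct region r[8];
	int cnt;
};

/* Drop every region above 'limit' and trim any region straddling it. */
static void clamp_to_limit(struct region_array *a, unsigned long long limit)
{
	int i, out = 0;

	for (i = 0; i < a->cnt; i++) {
		if (a->r[i].base >= limit)
			continue;                                  /* entirely above: drop */
		if (a->r[i].base + a->r[i].size > limit)
			a->r[i].size = limit - a->r[i].base;       /* straddles: trim */
		a->r[out++] = a->r[i];
	}
	a->cnt = out;
}

static void enforce_memory_limit(struct region_array *memory,
				 struct region_array *reserved,
				 unsigned long long limit)
{
	clamp_to_limit(memory, limit);
	clamp_to_limit(reserved, limit);  /* the step "memblock.memory.cnt = 1" skipped */
}

int main(void)
{
	struct region_array memory   = { { { 0x0, 0x08000000 },
					   { 0x10000000, 0x08000000 } }, 2 };
	struct region_array reserved = { { { 0x00100000, 0x00200000 },
					   { 0x10000000, 0x00100000 } }, 2 };

	/* keep only the first contiguous bank, like MMU_init() does */
	enforce_memory_limit(&memory, &reserved, memory.r[0].size);

	printf("memory regions: %d, reserved regions: %d\n", memory.cnt, reserved.cnt);
	return 0;
}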
@@ -79,23 +79,19 @@ void __init wii_memory_fixups(void)
 	BUG_ON(memblock.memory.cnt != 2);
 	BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
 
-	p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
-	p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE);
+	/* trim unaligned tail */
+	memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE),
+			(phys_addr_t)ULLONG_MAX);
 
-	wii_hole_start = p[0].base + p[0].size;
+	/* determine hole, add & reserve them */
+	wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE);
 	wii_hole_size = p[1].base - wii_hole_start;
-
-	pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size);
-	pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
-	pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size);
-
-	p[0].size += wii_hole_size + p[1].size;
-
-	memblock.memory.cnt = 1;
+	memblock_add(wii_hole_start, wii_hole_size);
+	memblock_reserve(wii_hole_start, wii_hole_size);
 	memblock_analyze();
 
-	/* reserve the hole */
-	memblock_reserve(wii_hole_start, wii_hole_size);
+	BUG_ON(memblock.memory.cnt != 1);
+	__memblock_dump_all();
 
 	/* allow ioremapping the address space in the hole */
 	__allow_ioremap_reserved = 1;
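A stand-alone sketch of the address arithmetic done by the reworked wii_memory_fixups(): trim everything above the page-aligned end of MEM2, then compute the hole between the two banks that gets memblock_add()ed and memblock_reserve()d. The bank layout, PAGE_SIZE and ALIGN() below are example stand-ins, not taken from the patch; only the computations mirror it.

/* Userspace illustration only; bank addresses and sizes are made up. */
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

struct region { unsigned long long base, size; };

int main(void)
{
	struct region p[2] = {
		{ 0x00000000, 0x01800000 },  /* MEM1 */
		{ 0x10000000, 0x04000000 },  /* MEM2, possibly with an unaligned tail */
	};

	/* trim unaligned tail: remove from the page-aligned end of MEM2 upward */
	unsigned long long trim_from = ALIGN(p[1].base + p[1].size, PAGE_SIZE);

	/* the hole between the banks is added as memory and immediately reserved */
	unsigned long long hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE);
	unsigned long long hole_size  = p[1].base - hole_start;

	printf("remove from: %#llx\n", trim_from);
	printf("hole:        %#llx + %#llx\n", hole_start, hole_size);
	return 0;
}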