/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif

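/*
 * memstart_addr is the physical address of the start of system memory
 * (~0 until it is discovered during boot); kernstart_addr holds the
 * physical address the kernel was loaded at.
 */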
phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;
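/*
 * Poison, unreserve and free the pages holding the __init sections once
 * boot is complete, returning them to the page allocator.
 */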
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %luk freed\n",
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
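/*
 * Hand the pages that held the initrd image back to the page allocator
 * once the initrd has been unpacked.
 */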
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %luk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;
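/*
 * Register every lmb memory region, plus the vmalloc space, with
 * /proc/kcore so that debuggers can read kernel memory as an ELF core.
 */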
static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base, size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END - VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif
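/*
 * Constructors for the page table slab caches: a freshly allocated
 * PGD or PMD page must start out zeroed (all entries invalid).
 */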
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}
static const unsigned int pgtable_cache_size[2] = {
	PGD_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
#ifdef CONFIG_PPC_64K_PAGES
	"pgd_cache", "pmd_cache",
#else
	"pgd_cache", "pud_pmd_cache",
#endif /* CONFIG_PPC_64K_PAGES */
};
#ifdef CONFIG_HUGETLB_PAGE
/* Hugepages need an extra cache per hugepagesize, initialized in
 * hugetlbpage.c.  We can't put them into the tables above, because
 * HPAGE_SHIFT is not a compile-time constant. */
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+MMU_PAGE_COUNT];
#else
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif
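/*
 * Create the slab caches backing PGD and PMD allocations; the
 * constructors above hand out pre-zeroed pages.  SLAB_PANIC makes boot
 * fail loudly if either cache cannot be created.
 */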
void pgtable_cache_init(void)
{
	pgtable_cache[0] = kmem_cache_create(pgtable_cache_name[0],
					     PGD_TABLE_SIZE, PGD_TABLE_SIZE,
					     SLAB_PANIC, pgd_ctor);
	pgtable_cache[1] = kmem_cache_create(pgtable_cache_name[1],
					     PMD_TABLE_SIZE, PMD_TABLE_SIZE,
					     SLAB_PANIC, pmd_ctor);
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
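
/*
 * The vmemmap was historically always mapped with 16M pages, which is
 * wasteful on machines with little memory (small pseries partitions,
 * the PS3).  The mapping page size is therefore chosen at boot, based
 * on which sizes the MMU supports and how much RAM is present:
 *
 *  - if 16M pages are available and there is at least 1G of RAM,
 *    use 16M pages;
 *  - else if 64K pages are available, use 64K pages;
 *  - else fall back to 4K pages.
 *
 * A sketch of that selection (it lives in the hash MMU setup code,
 * not in this file) looks roughly like:
 *
 *	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
 *	    lmb_phys_mem_size() >= 0x40000000)
 *		mmu_vmemmap_psize = MMU_PAGE_16M;
 *	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
 *		mmu_vmemmap_psize = MMU_PAGE_64K;
 *	else
 *		mmu_vmemmap_psize = MMU_PAGE_4K;
 */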

/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}
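
/*
 * Populate the virtually contiguous vmemmap for a range of struct pages:
 * each backing block is allocated node-local where possible and bolted
 * into the hash table using the vmemmap page size chosen at boot.
 */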
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	for (; start < end; start += page_size) {
		int mapped;
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
			 start, p, __pa(p));

		mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
					   pgprot_val(PAGE_KERNEL),
					   mmu_vmemmap_psize, mmu_kernel_ssize);
		BUG_ON(mapped < 0);
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */