SPARC64: SPARSEMEM_VMEMMAP support
[apw@shadowen.org: style fixups]
[apw@shadowen.org: vmemmap sparc64: convert to new config options]
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Christoph Lameter <clameter@sgi.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: ef229c5a5e
Commit: 46644c2477
@@ -240,10 +240,10 @@ config ARCH_SELECT_MEMORY_MODEL
 
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
+	select SPARSEMEM_VMEMMAP_ENABLE
 
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
-	select SPARSEMEM_STATIC
 
 source "mm/Kconfig"
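The Kconfig hunk (apparently arch/sparc64/Kconfig) opts sparc64 into the generic virtual memmap: selecting SPARSEMEM_VMEMMAP_ENABLE lets mm/Kconfig offer CONFIG_SPARSEMEM_VMEMMAP, under which pfn/struct page conversion becomes plain pointer arithmetic against one virtually contiguous array instead of a per-section lookup. A minimal sketch of that arithmetic follows; the struct layout and the sk_ names are stand-ins for this note, not kernel code.

/*
 * Illustrative only: roughly what a virtually contiguous memmap buys
 * the generic mm code.  The struct layout is a stand-in.
 */
struct page { unsigned long pad[8]; };	/* not the real layout */
static struct page *vmemmap_base;	/* would be VMEMMAP_BASE in the kernel */

static inline struct page *sk_pfn_to_page(unsigned long pfn)
{
	return vmemmap_base + pfn;	/* one add, no section table walk */
}

static inline unsigned long sk_page_to_pfn(const struct page *page)
{
	return (unsigned long)(page - vmemmap_base);
}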
@@ -226,6 +226,15 @@ kvmap_dtlb_load:
 	ba,pt		%xcc, sun4v_dtlb_load
 	mov		%g5, %g3
 
+kvmap_vmemmap:
+	sub		%g4, %g5, %g5
+	srlx		%g5, 22, %g5
+	sethi		%hi(vmemmap_table), %g1
+	sllx		%g5, 3, %g5
+	or		%g1, %lo(vmemmap_table), %g1
+	ba,pt		%xcc, kvmap_dtlb_load
+	ldx		[%g1 + %g5], %g5
+
 kvmap_dtlb_nonlinear:
 	/* Catch kernel NULL pointer derefs. */
 	sethi		%hi(PAGE_SIZE), %g5
@@ -233,6 +242,13 @@ kvmap_dtlb_nonlinear:
 	bleu,pn		%xcc, kvmap_dtlb_longpath
 	nop
 
+	/* Do not use the TSB for vmemmap. */
+	mov		(VMEMMAP_BASE >> 24), %g5
+	sllx		%g5, 24, %g5
+	cmp		%g4, %g5
+	bgeu,pn		%xcc, kvmap_vmemmap
+	nop
+
 	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
 
 kvmap_dtlb_tsbmiss:
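In C terms, the two assembler hunks above (the sparc64 kernel DTLB-miss path, ktlb.S going by the labels) first test whether the faulting address lies at or above VMEMMAP_BASE and, if so, resolve it through vmemmap_table instead of the kernel TSB: subtract the base, shift right by 22 to get the 4MB chunk index, and load the pre-built TTE from the table. A hedged sketch, with invented sk_ names and constants copied from the hunks:

/*
 * Sketch of what kvmap_vmemmap computes on a DTLB miss; the real code
 * is the assembler above.
 */
#define SK_VMEMMAP_BASE		0x0000000200000000UL	/* from the pgtable.h hunk below */
#define SK_VMEMMAP_CHUNK_SHIFT	22			/* each entry maps 4MB */

extern unsigned long vmemmap_table[];	/* filled in by vmemmap_populate() */

static unsigned long sk_vmemmap_tte_for(unsigned long fault_vaddr)
{
	unsigned long idx;

	if (fault_vaddr < SK_VMEMMAP_BASE)
		return 0;	/* not a vmemmap access: fall through to the TSB lookup */

	/* sub + srlx 22: index of the 4MB chunk holding these struct pages */
	idx = (fault_vaddr - SK_VMEMMAP_BASE) >> SK_VMEMMAP_CHUNK_SHIFT;

	/* sllx 3 + ldx: fetch the preloaded 4MB TTE for the TLB load */
	return vmemmap_table[idx];
}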
@@ -1645,6 +1645,58 @@ EXPORT_SYMBOL(_PAGE_E);
 unsigned long _PAGE_CACHE __read_mostly;
 EXPORT_SYMBOL(_PAGE_CACHE);
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+
+#define VMEMMAP_CHUNK_SHIFT	22
+#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
+#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
+#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
+
+#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
+			  sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
+unsigned long vmemmap_table[VMEMMAP_SIZE];
+
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+{
+	unsigned long vstart = (unsigned long) start;
+	unsigned long vend = (unsigned long) (start + nr);
+	unsigned long phys_start = (vstart - VMEMMAP_BASE);
+	unsigned long phys_end = (vend - VMEMMAP_BASE);
+	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
+	unsigned long end = VMEMMAP_ALIGN(phys_end);
+	unsigned long pte_base;
+
+	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+		    _PAGE_CP_4U | _PAGE_CV_4U |
+		    _PAGE_P_4U | _PAGE_W_4U);
+	if (tlb_type == hypervisor)
+		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+			    _PAGE_CP_4V | _PAGE_CV_4V |
+			    _PAGE_P_4V | _PAGE_W_4V);
+
+	for (; addr < end; addr += VMEMMAP_CHUNK) {
+		unsigned long *vmem_pp =
+			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
+		void *block;
+
+		if (!(*vmem_pp & _PAGE_VALID)) {
+			block = vmemmap_alloc_block(1UL << 22, node);
+			if (!block)
+				return -ENOMEM;
+
+			*vmem_pp = pte_base | __pa(block);
+
+			printk(KERN_INFO "[%p-%p] page_structs=%lu "
+			       "node=%d entry=%lu/%lu\n", start, block, nr,
+			       node,
+			       addr >> VMEMMAP_CHUNK_SHIFT,
+			       VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
+		}
+	}
+	return 0;
+}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
 static void prot_init_common(unsigned long page_none,
 			     unsigned long page_shared,
 			     unsigned long page_copy,
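vmemmap_populate() (apparently arch/sparc64/mm/init.c) backs each 4MB-aligned slice of the virtual struct page array with one 4MB allocation and records the corresponding 4MB TTE in vmemmap_table, which the miss handler above consults. For a rough feel of the granularity, a standalone worked example follows; the 8KB page size and 64-byte struct page are assumptions, since both depend on configuration:

/*
 * Worked example of the sizing: how much physical memory one
 * vmemmap_table entry covers, under assumed (config-dependent) sizes.
 */
#include <stdio.h>

int main(void)
{
	unsigned long chunk = 1UL << 22;		/* VMEMMAP_CHUNK: 4MB of struct pages */
	unsigned long struct_page_size = 64;		/* assumption */
	unsigned long page_size = 8192;			/* assumption: 8KB PAGE_SIZE */

	unsigned long pages_per_chunk = chunk / struct_page_size;	/* 65536 */
	unsigned long ram_per_chunk = pages_per_chunk * page_size;	/* 512MB */

	printf("one vmemmap_table entry covers %lu MB of physical memory\n",
	       ram_per_chunk >> 20);
	return 0;
}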
@@ -42,6 +42,9 @@
 #define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
 #define VMALLOC_START		_AC(0x0000000100000000,UL)
 #define VMALLOC_END		_AC(0x0000000200000000,UL)
+#define VMEMMAP_BASE		_AC(0x0000000200000000,UL)
+
+#define vmemmap			((struct page *)VMEMMAP_BASE)
 
 /* XXX All of this needs to be rethought so we can take advantage
  * XXX cheetah's full 64-bit virtual address space, ie. no more hole
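The last hunk (apparently include/asm-sparc64/pgtable.h) places the virtual memmap at 0x0000000200000000, immediately after the vmalloc region, and defines the vmemmap base pointer used by the generic pfn/page arithmetic. A small sketch of how a pfn maps to the vmemmap_table chunk that must be populated before its struct page can be touched; names are illustrative and the struct page size is passed in because it is configuration dependent:

/*
 * Sketch tying the constants together: the struct page for a pfn lives
 * at VMEMMAP_BASE + pfn * sizeof(struct page), so the table entry that
 * must be valid for it is the 4MB-aligned slice of that address.
 */
#define SK_VMEMMAP_BASE		0x0000000200000000UL
#define SK_VMEMMAP_CHUNK_SHIFT	22

static inline unsigned long sk_chunk_index_for_pfn(unsigned long pfn,
						   unsigned long struct_page_size)
{
	unsigned long vaddr = SK_VMEMMAP_BASE + pfn * struct_page_size;

	return (vaddr - SK_VMEMMAP_BASE) >> SK_VMEMMAP_CHUNK_SHIFT;
}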