/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
/*
 * We use one full pte table with 4K pages. With 16K/64K/256K pages, the
 * pte table covers enough memory (32MB/512MB/2GB respectively) that both
 * FIXMAP and PKMAP can be placed in a single pte table. We use 512 pages
 * for PKMAP with the 16K/64K/256K page sizes.
 */
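/*
 * Worked example of the coverage figures above, assuming 8-byte PTEs
 * in the non-4K configurations: a 16K page holds 2^11 PTEs and so maps
 * 2^11 * 16K = 32MB; a 64K page holds 2^13 PTEs and maps
 * 2^13 * 64K = 512MB. The 2GB figure for 256K pages reflects that only
 * a quarter of the 2^15-entry table is actually used, a side effect of
 * keeping 2-level paging with 256K pages.
 */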
#ifdef CONFIG_PPC_4K_PAGES
#define PKMAP_ORDER	PTE_SHIFT
#else
#define PKMAP_ORDER	9
#endif
#define LAST_PKMAP	(1 << PKMAP_ORDER)
#ifndef CONFIG_PPC_4K_PAGES
#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
#else
#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
#endif
#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

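/*
 * A quick sketch of the resulting window, assuming PTE_SHIFT == 10
 * (4-byte PTEs) in the 4K-page case: LAST_PKMAP == 1024, so the pkmap
 * area spans 1024 * 4K = 4MB just below FIXADDR_START, and
 * PKMAP_NR()/PKMAP_ADDR() convert between a slot number and its
 * virtual address, e.g.
 *
 *	PKMAP_NR(PKMAP_ADDR(5)) == 5
 *
 * The PMD_MASK alignment in the 4K case keeps the whole window inside
 * a single pte table.
 */
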
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

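/*
 * Typical usage from a sleepable context (a sketch, not taken from any
 * particular caller of this header):
 *
 *	char *vaddr = kmap(page);
 *	memcpy(vaddr, src, len);
 *	kunmap(page);
 *
 * kmap() may sleep, so it must not be used in interrupt context; that
 * is what the atomic variants below are for.
 */
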
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	unsigned int idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
	local_flush_tlb_page(NULL, vaddr);

	return (void*) vaddr;
}

static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

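/*
 * Example of the atomic variant (a sketch; KM_USER0 is one of the
 * per-CPU slots from asm/kmap_types.h):
 *
 *	void *vto = kmap_atomic(page, KM_USER0);
 *	memcpy(vto, src, len);
 *	kunmap_atomic(vto, KM_USER0);
 *
 * The slot passed to kunmap_atomic() must match the one passed to
 * kmap_atomic(), and the code between the two calls must not sleep.
 */
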
static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_page(NULL, vaddr);
#endif
	pagefault_enable();
}

static inline struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long) ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

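/*
 * kmap_atomic_to_page() recovers the struct page behind an atomic
 * mapping (a sketch):
 *
 *	void *v = kmap_atomic(page, KM_USER0);
 *	BUG_ON(kmap_atomic_to_page(v) != page);
 *	kunmap_atomic(v, KM_USER0);
 *
 * For lowmem addresses it simply falls back to virt_to_page().
 */
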
#define flush_cache_kmaps()	flush_cache_all()

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */