2005-11-11 06:25:24 +03:00
|
|
|
#ifndef _ASM_POWERPC_PAGE_H
|
|
|
|
#define _ASM_POWERPC_PAGE_H
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright (C) 2001,2005 IBM Corporation.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <asm/asm-compat.h>
|
2006-05-17 12:00:49 +04:00
|
|
|
#include <asm/kdump.h>
|
2008-04-21 22:22:34 +04:00
|
|
|
#include <asm/types.h>
|
2005-11-11 06:25:24 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
|
|
|
|
* page size. When using 64K pages however, whether we are really supporting
|
|
|
|
* 64K pages in HW or not is irrelevant to those definitions.
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_PPC_64K_PAGES
|
|
|
|
#define PAGE_SHIFT 16
|
|
|
|
#else
|
|
|
|
#define PAGE_SHIFT 12
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Page size in bytes; ASM_CONST() makes the constant usable from both C
 * and assembly (this header is included from .S files). */
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
|
|
|
|
|
|
|
|
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
|
|
|
|
#define __HAVE_ARCH_GATE_AREA 1
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
|
|
|
|
* assign PAGE_MASK to a larger type it gets extended the way we want
|
|
|
|
* (i.e. with 1s in the high bits)
|
|
|
|
*/
|
|
|
|
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
|
|
|
|
|
2005-12-05 19:24:33 +03:00
|
|
|
/*
|
|
|
|
* KERNELBASE is the virtual address of the start of the kernel, it's often
|
|
|
|
* the same as PAGE_OFFSET, but _might not be_.
|
|
|
|
*
|
|
|
|
* The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
|
|
|
|
*
|
2008-04-21 22:22:34 +04:00
|
|
|
* PAGE_OFFSET is the virtual address of the start of lowmem.
|
|
|
|
*
|
|
|
|
* PHYSICAL_START is the physical address of the start of the kernel.
|
|
|
|
*
|
|
|
|
* MEMORY_START is the physical address of the start of lowmem.
|
|
|
|
*
|
|
|
|
* KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
|
|
|
|
* ppc32 and based on how they are set we determine MEMORY_START.
|
|
|
|
*
|
|
|
|
* For the linear mapping the following equation should be true:
|
|
|
|
* KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
|
|
|
|
*
|
|
|
|
* Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
|
|
|
|
*
|
|
|
|
* There are two ways to determine a physical address from a virtual one:
|
|
|
|
* va = pa + PAGE_OFFSET - MEMORY_START
|
|
|
|
* va = pa + KERNELBASE - PHYSICAL_START
|
2005-12-05 19:24:33 +03:00
|
|
|
*
|
|
|
|
* If you want to know something's offset from the start of the kernel you
|
|
|
|
* should subtract KERNELBASE.
|
|
|
|
*
|
|
|
|
* If you want to test if something's a kernel address, use is_kernel_addr().
|
|
|
|
*/
|
2005-12-04 10:39:23 +03:00
|
|
|
|
2008-04-21 22:22:34 +04:00
|
|
|
/* Virtual address of the start of the kernel image; usually equals
 * PAGE_OFFSET but may differ (e.g. kdump) — see the comment above. */
#define KERNELBASE ASM_CONST(CONFIG_KERNEL_START)
|
|
|
|
/* Virtual address of the start of lowmem (the linear mapping). */
#define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET)
|
|
|
|
/* Difference between the configured virtual and physical start addresses
 * of the kernel image. */
#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
|
|
|
|
|
powerpc: Make the 64-bit kernel as a position-independent executable
This implements CONFIG_RELOCATABLE for 64-bit by making the kernel as
a position-independent executable (PIE) when it is set. This involves
processing the dynamic relocations in the image in the early stages of
booting, even if the kernel is being run at the address it is linked at,
since the linker does not necessarily fill in words in the image for
which there are dynamic relocations. (In fact the linker does fill in
such words for 64-bit executables, though not for 32-bit executables,
so in principle we could avoid calling relocate() entirely when we're
running a 64-bit kernel at the linked address.)
The dynamic relocations are processed by a new function relocate(addr),
where the addr parameter is the virtual address where the image will be
run. In fact we call it twice; once before calling prom_init, and again
when starting the main kernel. This means that reloc_offset() returns
0 in prom_init (since it has been relocated to the address it is running
at), which necessitated a few adjustments.
This also changes __va and __pa to use an equivalent definition that is
simpler. With the relocatable kernel, PAGE_OFFSET and MEMORY_START are
constants (for 64-bit) whereas PHYSICAL_START is a variable (and
KERNELBASE ideally should be too, but isn't yet).
With this, relocatable kernels still copy themselves down to physical
address 0 and run there.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2008-08-30 05:43:47 +04:00
|
|
|
#if defined(CONFIG_RELOCATABLE)
|
2008-04-21 22:22:34 +04:00
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
extern phys_addr_t memstart_addr;
|
|
|
|
extern phys_addr_t kernstart_addr;
|
|
|
|
#endif
|
|
|
|
/* With CONFIG_RELOCATABLE the kernel's physical load address is only
 * known at boot time, so PHYSICAL_START is a variable (kernstart_addr),
 * not a compile-time constant. */
#define PHYSICAL_START kernstart_addr
|
|
|
|
#else
|
|
|
|
#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START)
|
powerpc: Make the 64-bit kernel as a position-independent executable
This implements CONFIG_RELOCATABLE for 64-bit by making the kernel as
a position-independent executable (PIE) when it is set. This involves
processing the dynamic relocations in the image in the early stages of
booting, even if the kernel is being run at the address it is linked at,
since the linker does not necessarily fill in words in the image for
which there are dynamic relocations. (In fact the linker does fill in
such words for 64-bit executables, though not for 32-bit executables,
so in principle we could avoid calling relocate() entirely when we're
running a 64-bit kernel at the linked address.)
The dynamic relocations are processed by a new function relocate(addr),
where the addr parameter is the virtual address where the image will be
run. In fact we call it twice; once before calling prom_init, and again
when starting the main kernel. This means that reloc_offset() returns
0 in prom_init (since it has been relocated to the address it is running
at), which necessitated a few adjustments.
This also changes __va and __pa to use an equivalent definition that is
simpler. With the relocatable kernel, PAGE_OFFSET and MEMORY_START are
constants (for 64-bit) whereas PHYSICAL_START is a variable (and
KERNELBASE ideally should be too, but isn't yet).
With this, relocatable kernels still copy themselves down to physical
address 0 and run there.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2008-08-30 05:43:47 +04:00
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
#define MEMORY_START 0UL
|
|
|
|
#elif defined(CONFIG_RELOCATABLE)
|
|
|
|
#define MEMORY_START memstart_addr
|
|
|
|
#else
|
2008-04-21 22:22:34 +04:00
|
|
|
#define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
|
|
|
|
#endif
|
2005-11-11 06:25:24 +03:00
|
|
|
|
|
|
|
#ifdef CONFIG_FLATMEM
|
2008-04-21 22:22:34 +04:00
|
|
|
/* First page frame number covered by the flat mem_map: lowmem need not
 * begin at physical address zero. */
#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)
|
|
|
|
/* True when pfn falls inside [ARCH_PFN_OFFSET, ARCH_PFN_OFFSET + max_mapnr).
 * NOTE: macro evaluates 'pfn' more than once — no side effects in the arg. */
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
|
2005-11-11 06:25:24 +03:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/* struct page for a lowmem (linear-mapped) kernel virtual address. */
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
|
|
|
|
/* Kernel virtual address for a page frame number (inverse of __pa >> shift). */
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
|
|
|
|
/* True when kaddr translates to a pfn backed by the mem_map. */
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
|
|
|
|
|
powerpc: Make the 64-bit kernel as a position-independent executable
This implements CONFIG_RELOCATABLE for 64-bit by making the kernel as
a position-independent executable (PIE) when it is set. This involves
processing the dynamic relocations in the image in the early stages of
booting, even if the kernel is being run at the address it is linked at,
since the linker does not necessarily fill in words in the image for
which there are dynamic relocations. (In fact the linker does fill in
such words for 64-bit executables, though not for 32-bit executables,
so in principle we could avoid calling relocate() entirely when we're
running a 64-bit kernel at the linked address.)
The dynamic relocations are processed by a new function relocate(addr),
where the addr parameter is the virtual address where the image will be
run. In fact we call it twice; once before calling prom_init, and again
when starting the main kernel. This means that reloc_offset() returns
0 in prom_init (since it has been relocated to the address it is running
at), which necessitated a few adjustments.
This also changes __va and __pa to use an equivalent definition that is
simpler. With the relocatable kernel, PAGE_OFFSET and MEMORY_START are
constants (for 64-bit) whereas PHYSICAL_START is a variable (and
KERNELBASE ideally should be too, but isn't yet).
With this, relocatable kernels still copy themselves down to physical
address 0 and run there.
Signed-off-by: Paul Mackerras <paulus@samba.org>
2008-08-30 05:43:47 +04:00
|
|
|
/* Physical -> linear-mapping virtual address:
 * va = pa + PAGE_OFFSET - MEMORY_START (see the layout comment above). */
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - MEMORY_START))
|
|
|
|
/* Linear-mapping virtual -> physical address; inverse of __va().
 * Only valid for lowmem addresses, not vmalloc/ioremap space. */
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
|
2005-11-11 06:25:24 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
|
|
|
|
* and needs to be executable. This means the whole heap ends
|
|
|
|
* up being executable.
|
|
|
|
*/
|
|
|
|
#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
|
|
|
|
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
|
|
|
|
|
|
|
#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
|
|
|
|
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
|
|
|
|
|
|
|
|
#ifdef __powerpc64__
|
|
|
|
#include <asm/page_64.h>
|
|
|
|
#else
|
|
|
|
#include <asm/page_32.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* align addr on a size boundary - adjust address up/down if needed */
|
|
|
|
/* Round addr up to the next 'size' boundary; size must be a power of 2.
 * Macro evaluates both arguments more than once. */
#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
|
|
|
|
/* Round addr down to a 'size' boundary; size must be a power of 2. */
#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
|
|
|
|
|
|
|
|
/* align addr on a size boundary - adjust address up if needed */
|
|
|
|
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
|
|
|
|
|
2005-12-04 10:39:15 +03:00
|
|
|
/*
|
|
|
|
* Don't compare things with KERNELBASE or PAGE_OFFSET to test for
|
|
|
|
* "kernelness", use is_kernel_addr() - it should do what you want.
|
|
|
|
*/
|
|
|
|
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
|
|
|
|
|
2005-11-11 06:25:24 +03:00
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
|
|
|
|
#undef STRICT_MM_TYPECHECKS
|
|
|
|
|
|
|
|
#ifdef STRICT_MM_TYPECHECKS
|
|
|
|
/* These are used to make use of C type-checking. */
|
|
|
|
|
|
|
|
/* PTE level */
|
|
|
|
typedef struct { pte_basic_t pte; } pte_t;
|
|
|
|
#define pte_val(x) ((x).pte)
|
|
|
|
#define __pte(x) ((pte_t) { (x) })
|
|
|
|
|
|
|
|
/* 64k pages additionally define a bigger "real PTE" type that gathers
|
|
|
|
* the "second half" part of the PTE for pseudo 64k pages
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_PPC_64K_PAGES
|
|
|
|
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
|
|
|
|
#else
|
|
|
|
typedef struct { pte_t pte; } real_pte_t;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* PMD level */
|
2007-05-08 06:46:49 +04:00
|
|
|
#ifdef CONFIG_PPC64
|
2005-11-11 06:25:24 +03:00
|
|
|
typedef struct { unsigned long pmd; } pmd_t;
|
|
|
|
#define pmd_val(x) ((x).pmd)
|
|
|
|
#define __pmd(x) ((pmd_t) { (x) })
|
|
|
|
|
|
|
|
/* PUD level exists only on 4k pages */
|
2007-05-08 06:46:49 +04:00
|
|
|
#ifndef CONFIG_PPC_64K_PAGES
|
2005-11-11 06:25:24 +03:00
|
|
|
typedef struct { unsigned long pud; } pud_t;
|
|
|
|
#define pud_val(x) ((x).pud)
|
|
|
|
#define __pud(x) ((pud_t) { (x) })
|
2007-05-08 06:46:49 +04:00
|
|
|
#endif /* !CONFIG_PPC_64K_PAGES */
|
|
|
|
#endif /* CONFIG_PPC64 */
|
2005-11-11 06:25:24 +03:00
|
|
|
|
|
|
|
/* PGD level */
|
|
|
|
typedef struct { unsigned long pgd; } pgd_t;
|
|
|
|
#define pgd_val(x) ((x).pgd)
|
|
|
|
#define __pgd(x) ((pgd_t) { (x) })
|
|
|
|
|
|
|
|
/* Page protection bits */
|
|
|
|
typedef struct { unsigned long pgprot; } pgprot_t;
|
|
|
|
#define pgprot_val(x) ((x).pgprot)
|
|
|
|
#define __pgprot(x) ((pgprot_t) { (x) })
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
/*
|
|
|
|
* .. while these make it easier on the compiler
|
|
|
|
*/
|
|
|
|
|
|
|
|
typedef pte_basic_t pte_t;
|
|
|
|
#define pte_val(x) (x)
|
|
|
|
#define __pte(x) (x)
|
|
|
|
|
|
|
|
#ifdef CONFIG_PPC_64K_PAGES
|
|
|
|
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
|
|
|
|
#else
|
|
|
|
typedef unsigned long real_pte_t;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
2007-05-08 06:46:49 +04:00
|
|
|
#ifdef CONFIG_PPC64
|
2005-11-11 06:25:24 +03:00
|
|
|
typedef unsigned long pmd_t;
|
|
|
|
#define pmd_val(x) (x)
|
|
|
|
#define __pmd(x) (x)
|
|
|
|
|
2007-05-08 06:46:49 +04:00
|
|
|
#ifndef CONFIG_PPC_64K_PAGES
|
2005-11-11 06:25:24 +03:00
|
|
|
typedef unsigned long pud_t;
|
|
|
|
#define pud_val(x) (x)
|
|
|
|
#define __pud(x) (x)
|
2007-05-08 06:46:49 +04:00
|
|
|
#endif /* !CONFIG_PPC_64K_PAGES */
|
|
|
|
#endif /* CONFIG_PPC64 */
|
2005-11-11 06:25:24 +03:00
|
|
|
|
|
|
|
typedef unsigned long pgd_t;
|
|
|
|
#define pgd_val(x) (x)
|
|
|
|
#define pgprot_val(x) (x)
|
|
|
|
|
|
|
|
typedef unsigned long pgprot_t;
|
|
|
|
#define __pgd(x) (x)
|
|
|
|
#define __pgprot(x) (x)
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
struct page;
|
|
|
|
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
|
|
|
|
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
|
|
|
|
struct page *p);
|
|
|
|
extern int page_is_ram(unsigned long pfn);
|
|
|
|
|
2006-05-30 07:51:37 +04:00
|
|
|
struct vm_area_struct;
|
|
|
|
|
2008-02-08 15:22:04 +03:00
|
|
|
typedef struct page *pgtable_t;
|
|
|
|
|
2006-03-27 13:15:35 +04:00
|
|
|
#include <asm-generic/memory_model.h>
|
2005-11-11 06:25:24 +03:00
|
|
|
#endif /* __ASSEMBLY__ */
|
|
|
|
|
|
|
|
#endif /* _ASM_POWERPC_PAGE_H */
|