Pull rationalise-regions into release branch

Tony Luck 2005-08-29 15:50:32 -07:00
Parents bcdd3a9114 1b66776da7
Commit 3290580285
8 changed files with 50 additions and 47 deletions

View file

@@ -35,7 +35,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
                 return -ENOMEM;
 #ifdef CONFIG_HUGETLB_PAGE
-        if (REGION_NUMBER(addr) == REGION_HPAGE)
+        if (REGION_NUMBER(addr) == RGN_HPAGE)
                 addr = 0;
 #endif
         if (!addr)

View file

@@ -76,7 +76,7 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
                 return -EINVAL;
         if (addr & ~HPAGE_MASK)
                 return -EINVAL;
-        if (REGION_NUMBER(addr) != REGION_HPAGE)
+        if (REGION_NUMBER(addr) != RGN_HPAGE)
                 return -EINVAL;
         return 0;
@@ -87,7 +87,7 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int writ
         struct page *page;
         pte_t *ptep;
-        if (REGION_NUMBER(addr) != REGION_HPAGE)
+        if (REGION_NUMBER(addr) != RGN_HPAGE)
                 return ERR_PTR(-EINVAL);
         ptep = huge_pte_offset(mm, addr);
@@ -142,8 +142,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
                 return -ENOMEM;
         if (len & ~HPAGE_MASK)
                 return -EINVAL;
-        /* This code assumes that REGION_HPAGE != 0. */
-        if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
+        /* This code assumes that RGN_HPAGE != 0. */
+        if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
                 addr = HPAGE_REGION_BASE;
         else
                 addr = ALIGN(addr, HPAGE_SIZE);

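All three hunks above hinge on REGION_NUMBER() returning the top three bits of a virtual address, so that RGN_HPAGE (region 4, defined in the new page.h block later in this commit) identifies the huge-page window. Below is a minimal user-space sketch of that classification; region_number() is a hypothetical stand-in for the kernel's REGION_NUMBER() macro, and the sample addresses are arbitrary.

/* classify addresses by ia64 region number (user-space demo, 64-bit long assumed) */
#include <stdio.h>

#define RGN_SHIFT 61
#define RGN_HPAGE 4 /* huge-page region */

/* Stand-in for REGION_NUMBER(): the top three bits of the address. */
static unsigned long region_number(unsigned long addr)
{
    return addr >> RGN_SHIFT;
}

int main(void)
{
    unsigned long hpage_addr = 0x8000000000010000UL; /* lands in region 4 */
    unsigned long user_addr  = 0x2000000000010000UL; /* lands in region 1 */

    /* Same shape as the REGION_NUMBER(addr) != RGN_HPAGE tests in the hunks above. */
    printf("0x%016lx -> region %lu%s\n", hpage_addr, region_number(hpage_addr),
           region_number(hpage_addr) == RGN_HPAGE ? " (huge-page range)" : "");
    printf("0x%016lx -> region %lu%s\n", user_addr, region_number(user_addr),
           region_number(user_addr) == RGN_HPAGE ? " (huge-page range)" : "");
    return 0;
}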
View file

@@ -23,7 +23,7 @@
 #define __SLOW_DOWN_IO do { } while (0)
 #define SLOW_DOWN_IO do { } while (0)
-#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */
+#define __IA64_UNCACHED_OFFSET RGN_BASE(RGN_UNCACHED)
 /*
  * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but

View file

@@ -19,6 +19,7 @@
 #define ia64_rid(ctx,addr) (((ctx) << 3) | (addr >> 61))
+# include <asm/page.h>
 # ifndef __ASSEMBLY__
 #include <linux/compiler.h>
@@ -122,7 +123,7 @@ reload_context (nv_mm_context_t context)
         unsigned long rid_incr = 0;
         unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;
-        old_rr4 = ia64_get_rr(0x8000000000000000UL);
+        old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
         rid = context << 3;     /* make space for encoding the region number */
         rid_incr = 1 << 8;
@@ -134,6 +135,10 @@ reload_context (nv_mm_context_t context)
         rr4 = rr0 + 4*rid_incr;
 #ifdef CONFIG_HUGETLB_PAGE
         rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
+# if RGN_HPAGE != 4
+# error "reload_context assumes RGN_HPAGE is 4"
+# endif
 #endif
         ia64_set_rr(0x0000000000000000UL, rr0);

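For context on the reload_context() hunks: rid = context << 3 leaves the low three bits free so each of the eight regions gets its own region ID, rid_incr = 1 << 8 steps the rid field (which starts at bit 8 of a region register), and the 0xfc mask carries the huge-page page-size bits of the old rr4 across the reload; the new preprocessor check documents that this only works while RGN_HPAGE is region 4. A rough user-space sketch of that arithmetic follows; the rr0 layout (rid, page size, VE bit) mirrors the surrounding reload_context() code that the hunk does not show, and all values here are made up for illustration.

#include <stdio.h>

#define PAGE_SHIFT 14 /* assume 16KB base pages for this sketch */

int main(void)
{
    unsigned long context = 0x123;      /* example mm context number */
    unsigned long old_rr4 = 28UL << 2;  /* pretend rr4 advertised 256MB huge pages */
    unsigned long rid, rid_incr, rr0, rr4;

    rid = context << 3; /* low 3 bits left free to encode the region number */
    rid_incr = 1 << 8;  /* region-ID field of a region register starts at bit 8 */

    rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1; /* rid, base page size, VE bit */
    rr4 = rr0 + 4 * rid_incr;                 /* region 4 (RGN_HPAGE) gets rid + 4 */

    /* Keep the huge-page size bits (2..7) from the previous rr4, as the hunk does. */
    rr4 = (rr4 & ~0xfcUL) | (old_rr4 & 0xfcUL);

    printf("rr0 = 0x%lx\n", rr0);
    printf("rr4 = 0x%lx (page-size bits 0x%lx preserved)\n", rr4, rr4 & 0xfcUL);
    return 0;
}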
View file

@@ -12,6 +12,19 @@
 #include <asm/intrinsics.h>
 #include <asm/types.h>
+/*
+ * The top three bits of an IA64 address are its Region Number.
+ * Different regions are assigned to different purposes.
+ */
+#define RGN_SHIFT (61)
+#define RGN_BASE(r) (__IA64_UL_CONST(r)<<RGN_SHIFT)
+#define RGN_BITS (RGN_BASE(-1))
+#define RGN_KERNEL 7 /* Identity mapped region */
+#define RGN_UNCACHED 6 /* Identity mapped I/O region */
+#define RGN_GATE 5 /* Gate page, Kernel text, etc */
+#define RGN_HPAGE 4 /* For Huge TLB pages */
 /*
  * PAGE_SHIFT determines the actual kernel page size.
  */
@@ -36,10 +49,9 @@
 #define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */
 #ifdef CONFIG_HUGETLB_PAGE
-# define REGION_HPAGE (4UL) /* note: this is hardcoded in reload_context()!*/
-# define REGION_SHIFT 61
-# define HPAGE_REGION_BASE (REGION_HPAGE << REGION_SHIFT)
+# define HPAGE_REGION_BASE RGN_BASE(RGN_HPAGE)
 # define HPAGE_SHIFT hpage_shift
 # define HPAGE_SHIFT_DEFAULT 28 /* check ia64 SDM for architecture supported size */
 # define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
@@ -130,16 +142,13 @@ typedef union ia64_va {
 #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
 #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
-#define REGION_SIZE REGION_NUMBER(1)
-#define REGION_KERNEL 7
 #ifdef CONFIG_HUGETLB_PAGE
 # define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
                              | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
 # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
 # define is_hugepage_only_range(mm, addr, len) \
-        (REGION_NUMBER(addr) == REGION_HPAGE && \
-         REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
+        (REGION_NUMBER(addr) == RGN_HPAGE && \
+         REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
 extern unsigned int hpage_shift;
 #endif
@@ -197,7 +206,7 @@ get_order (unsigned long size)
 # define __pgprot(x) (x)
 #endif /* !STRICT_MM_TYPECHECKS */
-#define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000)
+#define PAGE_OFFSET RGN_BASE(RGN_KERNEL)
 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
                                VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \

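The new RGN_* block above is the heart of the series: every hard-coded region literal elsewhere becomes RGN_BASE(n) for the appropriate region number. A standalone sketch (assuming a 64-bit unsigned long, with UL_CONST standing in for the kernel's __IA64_UL_CONST) showing that the macros reproduce the old constants:

#include <stdio.h>

/* User-space stand-ins for the new page.h macros. */
#define UL_CONST(x)  ((unsigned long)(x))
#define RGN_SHIFT    (61)
#define RGN_BASE(r)  (UL_CONST(r) << RGN_SHIFT)
#define RGN_BITS     (RGN_BASE(-1))

#define RGN_KERNEL   7
#define RGN_UNCACHED 6
#define RGN_GATE     5
#define RGN_HPAGE    4

int main(void)
{
    /* Each region base is simply its number shifted into the top three bits. */
    printf("RGN_KERNEL   base 0x%016lx\n", RGN_BASE(RGN_KERNEL));   /* old PAGE_OFFSET       */
    printf("RGN_UNCACHED base 0x%016lx\n", RGN_BASE(RGN_UNCACHED)); /* old uncached offset   */
    printf("RGN_GATE     base 0x%016lx\n", RGN_BASE(RGN_GATE));     /* old GATE_ADDR         */
    printf("RGN_HPAGE    base 0x%016lx\n", RGN_BASE(RGN_HPAGE));    /* old HPAGE_REGION_BASE */
    printf("RGN_BITS     mask 0x%016lx\n", RGN_BITS);               /* top three bits        */
    return 0;
}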
View file

@@ -204,21 +204,18 @@ ia64_phys_addr_valid (unsigned long addr)
 #define set_pte(ptep, pteval) (*(ptep) = (pteval))
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-#define RGN_SIZE (1UL << 61)
-#define RGN_KERNEL 7
-#define VMALLOC_START 0xa000000200000000UL
+#define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-# define VMALLOC_END_INIT (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
 # define VMALLOC_END vmalloc_end
 extern unsigned long vmalloc_end;
 #else
-# define VMALLOC_END (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
 #endif
 /* fs/proc/kcore.c */
-#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
-#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)
+#define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
+#define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
 /*
  * Conversion functions: convert page frame number (pfn) and a protection value to a page

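The pgtable.h rewrites are value-preserving: RGN_BASE(RGN_GATE) is 0xa000000000000000, so VMALLOC_START is still 0xa000000200000000 and the kcore helpers still offset against the same base. A quick user-space check (assuming a 64-bit unsigned long):

#include <assert.h>
#include <stdio.h>

#define RGN_SHIFT   61
#define RGN_BASE(r) ((unsigned long)(r) << RGN_SHIFT)
#define RGN_GATE    5

int main(void)
{
    unsigned long vmalloc_start = RGN_BASE(RGN_GATE) + 0x200000000UL;

    /* The new expressions equal the removed literals. */
    assert(RGN_BASE(RGN_GATE) == 0xa000000000000000UL);
    assert(vmalloc_start == 0xa000000200000000UL);

    printf("VMALLOC_START = 0x%016lx\n", vmalloc_start);
    return 0;
}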
View file

@@ -65,7 +65,6 @@
 #define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT)
 #define AS_MASK ((u64)AS_BITMASK << AS_SHIFT)
-#define REGION_BITS 0xe000000000000000UL
 /*
@@ -79,38 +78,30 @@
 #define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT)
-/*
- * Base addresses for various address ranges.
- */
-#define CACHED 0xe000000000000000UL
-#define UNCACHED 0xc000000000000000UL
-#define UNCACHED_PHYS 0x8000000000000000UL
 /*
  * Virtual Mode Local & Global MMR space.
  */
 #define SH1_LOCAL_MMR_OFFSET 0x8000000000UL
 #define SH2_LOCAL_MMR_OFFSET 0x0200000000UL
 #define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET)
-#define LOCAL_MMR_SPACE (UNCACHED | LOCAL_MMR_OFFSET)
-#define LOCAL_PHYS_MMR_SPACE (UNCACHED_PHYS | LOCAL_MMR_OFFSET)
+#define LOCAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET)
+#define LOCAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET)
 #define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL
 #define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL
 #define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET)
-#define GLOBAL_MMR_SPACE (UNCACHED | GLOBAL_MMR_OFFSET)
+#define GLOBAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET)
 /*
  * Physical mode addresses
  */
-#define GLOBAL_PHYS_MMR_SPACE (UNCACHED_PHYS | GLOBAL_MMR_OFFSET)
+#define GLOBAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET)
 /*
  * Clear region & AS bits.
  */
-#define TO_PHYS_MASK (~(REGION_BITS | AS_MASK))
+#define TO_PHYS_MASK (~(RGN_BITS | AS_MASK))
 /*
@@ -135,10 +126,10 @@
 /*
  * general address defines
  */
-#define CAC_BASE (CACHED | AS_CAC_SPACE)
-#define AMO_BASE (UNCACHED | AS_AMO_SPACE)
-#define AMO_PHYS_BASE (UNCACHED_PHYS | AS_AMO_SPACE)
-#define GET_BASE (CACHED | AS_GET_SPACE)
+#define CAC_BASE (PAGE_OFFSET | AS_CAC_SPACE)
+#define AMO_BASE (__IA64_UNCACHED_OFFSET | AS_AMO_SPACE)
+#define AMO_PHYS_BASE (RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE)
+#define GET_BASE (PAGE_OFFSET | AS_GET_SPACE)
 /*
  * Convert Memory addresses between various addressing modes.
@@ -183,8 +174,8 @@
 /*
  * Macros to test for address type.
  */
-#define IS_AMO_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_BASE)
-#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (REGION_BITS | AS_MASK)) == AMO_PHYS_BASE)
+#define IS_AMO_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE)
+#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE)
 /*
@@ -199,7 +190,7 @@
 #define TIO_SWIN_BASE(n, w) (TIO_IO_BASE(n) + \
                              ((u64) (w) << TIO_SWIN_SIZE_BITS))
 #define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n))
-#define TIO_IO_BASE(n) (UNCACHED | NASID_SPACE(n))
+#define TIO_IO_BASE(n) (__IA64_UNCACHED_OFFSET | NASID_SPACE(n))
 #define BWIN_SIZE (1UL << BWIN_SIZE_BITS)
 #define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE)
 #define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS))

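The SN2 header drops its private CACHED/UNCACHED/UNCACHED_PHYS/REGION_BITS constants and reuses the generic macros; the substitutions keep the same values (PAGE_OFFSET is 0xe000..., __IA64_UNCACHED_OFFSET is 0xc000..., RGN_BASE(RGN_HPAGE) is 0x8000..., RGN_BITS is 0xe000...). A terse check, again with user-space stand-ins for the page.h macros:

#include <assert.h>

#define RGN_SHIFT    61
#define RGN_BASE(r)  ((unsigned long)(r) << RGN_SHIFT)
#define RGN_BITS     (RGN_BASE(-1))
#define RGN_KERNEL   7
#define RGN_UNCACHED 6
#define RGN_HPAGE    4

int main(void)
{
    assert(RGN_BASE(RGN_KERNEL)   == 0xe000000000000000UL); /* replaces CACHED        */
    assert(RGN_BASE(RGN_UNCACHED) == 0xc000000000000000UL); /* replaces UNCACHED      */
    assert(RGN_BASE(RGN_HPAGE)    == 0x8000000000000000UL); /* replaces UNCACHED_PHYS */
    assert(RGN_BITS               == 0xe000000000000000UL); /* replaces REGION_BITS   */
    return 0;
}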
View file

@@ -19,12 +19,13 @@
 #include <asm/pal.h>
 #include <asm/percpu.h>
-#define GATE_ADDR __IA64_UL_CONST(0xa000000000000000)
+#define GATE_ADDR RGN_BASE(RGN_GATE)
 /*
  * 0xa000000000000000+2*PERCPU_PAGE_SIZE
  * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
  */
-#define KERNEL_START __IA64_UL_CONST(0xa000000100000000)
+#define KERNEL_START (GATE_ADDR+0x100000000)
 #define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
 #ifndef __ASSEMBLY__
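
Finally, GATE_ADDR becomes RGN_BASE(RGN_GATE) and KERNEL_START is expressed relative to it; both still name the old addresses (0xa000000000000000 and 0xa000000100000000). A last small sketch, with the same user-space assumptions as above:

#include <assert.h>

#define RGN_SHIFT    61
#define RGN_BASE(r)  ((unsigned long)(r) << RGN_SHIFT)
#define RGN_GATE     5

#define GATE_ADDR    RGN_BASE(RGN_GATE)
#define KERNEL_START (GATE_ADDR + 0x100000000UL)

int main(void)
{
    /* Same addresses as the removed __IA64_UL_CONST literals. */
    assert(GATE_ADDR    == 0xa000000000000000UL);
    assert(KERNEL_START == 0xa000000100000000UL);
    return 0;
}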