[PATCH] hugepage: is_aligned_hugepage_range() cleanup
Quite a long time back, prepare_hugepage_range() replaced
is_aligned_hugepage_range() as the callback from mm/mmap.c to arch code
to verify if an address range is suitable for a hugepage mapping.

is_aligned_hugepage_range() stuck around, but only to implement
prepare_hugepage_range() on archs which didn't implement their own.
Most archs (everything except ia64 and powerpc) used the same
implementation of is_aligned_hugepage_range().  On powerpc, which
implements its own prepare_hugepage_range(), the custom version was
never used.

In addition, "is_aligned_hugepage_range()" was a bad name, because it
suggests it returns true iff the given range is a good hugepage range,
whereas in fact it returns 0-or-error (so the sense is reversed).

This patch cleans up by abolishing is_aligned_hugepage_range().
Instead prepare_hugepage_range() is defined directly.  Most archs use
the default version, which simply checks the given region is aligned
to the size of a hugepage.  ia64 and powerpc define custom versions.
The ia64 one simply checks that the range is in the correct address
space region in addition to being suitably aligned.  The powerpc
version (just as previously) checks for suitable addresses, and if
necessary performs low-level MMU frobbing to set up new areas for use
by hugepages.

No libhugetlbfs testsuite regressions on ppc64 (POWER5 LPAR).

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Zhang Yanmin <yanmin.zhang@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent: 3915bcf38f
Commit: 42b88befd6
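A note on the reversed sense called out in the message: prepare_hugepage_range()
returns 0 on success and a negative errno on failure, so callers must treat any
non-zero result as an error.  A minimal sketch of the expected calling
convention follows; map_hugepage_region() is an illustrative name, not a
function from this patch or the kernel.

/* Hedged sketch: how a caller is expected to use prepare_hugepage_range(). */
static int map_hugepage_region(unsigned long addr, unsigned long len)
{
	int ret;

	/* 0 means the range is usable; non-zero is an errno (e.g. -EINVAL). */
	ret = prepare_hugepage_range(addr, len);
	if (ret)
		return ret;

	/* ... go on to establish the hugepage mapping ... */
	return 0;
}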
@@ -48,18 +48,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return (pte_t *) pmd;
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	return 0;
-}
-
 #if 0	/* This is just for testing */
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
@@ -68,9 +68,10 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
 /*
- * This function checks for proper alignment of input addr and len parameters.
+ * Don't actually need to do any preparation, but need to make sure
+ * the address is in the right region.
  */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
@@ -133,21 +133,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return __pte(old);
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	if (! (within_hugepage_low_range(addr, len)
-	       || within_hugepage_high_range(addr, len)) )
-		return -EINVAL;
-	return 0;
-}
-
 struct slb_flush_info {
 	struct mm_struct *mm;
 	u16 newareas;
@@ -84,18 +84,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return entry;
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	return 0;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
@@ -84,18 +84,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return entry;
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	return 0;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
@@ -263,18 +263,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	return entry;
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-	if (len & ~HPAGE_MASK)
-		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
-		return -EINVAL;
-	return 0;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
@@ -57,6 +57,7 @@
 
 # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 # define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
 # define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
 #endif /* CONFIG_HUGETLB_PAGE */
 
@@ -36,7 +36,6 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 			int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 			pmd_t *pmd, int write);
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
 int pmd_huge(pmd_t pmd);
 void hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot);
@@ -54,8 +53,18 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
 #endif
 
 #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define prepare_hugepage_range(addr, len)	\
-	is_aligned_hugepage_range(addr, len)
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
 #else
 int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
@@ -95,7 +104,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)		0
 #define hugetlb_report_node_meminfo(n, buf)	0
 #define follow_huge_pmd(mm, addr, pmd, write)	NULL
-#define is_aligned_hugepage_range(addr, len)	0
 #define prepare_hugepage_range(addr, len)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(mm, addr, len)	0
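To summarise the override mechanism in the generic hugetlb header hunks above:
an arch that needs more than the alignment check defines
ARCH_HAS_PREPARE_HUGEPAGE_RANGE in its headers and supplies its own
out-of-line prepare_hugepage_range(); all other archs silently pick up the
generic static inline.  A hedged sketch of what an opting-in arch provides
(illustrative only; the real ia64 and powerpc versions are in the hunks
above):

/* In the arch's page header: */
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE

/* In the arch's hugetlbpage.c (sketch; the body is arch-specific): */
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if ((len & ~HPAGE_MASK) || (addr & ~HPAGE_MASK))
		return -EINVAL;
	/* arch-specific range checks and/or MMU setup would go here */
	return 0;
}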