[PATCH] hugetlb: prepare_hugepage_range check offset too
(David:)

If hugetlbfs_file_mmap() returns a failure to do_mmap_pgoff() - for example, because the given file offset is not hugepage aligned - then do_mmap_pgoff will go to the unmap_and_free_vma backout path. But at this stage the vma hasn't been marked as hugepage, and the backout path will call unmap_region() on it. That will eventually call down to the non-hugepage version of unmap_page_range(). On ppc64, at least, that will cause serious problems if there are any existing hugepage pagetable entries in the vicinity - for example if there are any other hugepage mappings under the same PUD. unmap_page_range() will trigger a bad_pud() on the hugepage pud entries. I suspect this will also cause bad problems on ia64, though I don't have a machine to test it on.

(Hugh:)

prepare_hugepage_range() should check file offset alignment when it checks virtual address and length, to stop MAP_FIXED with a bad huge offset from unmapping before it fails further down. PowerPC should apply the same prepare_hugepage_range alignment checks as ia64 and all the others do.

Then none of the alignment checks in hugetlbfs_file_mmap are required (nor is the check for too small a mapping); but even so, move up setting of VM_HUGETLB and add a comment to warn of what David Gibson discovered - if hugetlbfs_file_mmap fails before setting it, do_mmap_pgoff's unmap_region when unwinding from error will go the non-huge way, which may cause bad behaviour on architectures (powerpc and ia64) which segregate their huge mappings into a separate region of the address space.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
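For illustration only (not part of the patch): a minimal userspace sketch of the case being fixed. The hugetlbfs path and the 2 MB huge page size are assumptions for the example; on a kernel with this patch, a file offset that is not a multiple of the huge page size is rejected by prepare_hugepage_range() from get_unmapped_area(), before any vma is created or any existing mapping is unmapped, rather than failing later in hugetlbfs_file_mmap() and taking do_mmap_pgoff's non-huge unwind path.

	/* Hypothetical test program: map a hugetlbfs file at an offset that is
	 * page aligned but not hugepage aligned.  Assumes a hugetlbfs mount at
	 * /mnt/huge and, for the example, 2 MB huge pages. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define EXAMPLE_HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed huge page size */

	int main(void)
	{
		int fd = open("/mnt/huge/testfile", O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Offset of one 4 KB page: not a multiple of the huge page size,
		 * so prepare_hugepage_range() now fails this mmap with EINVAL. */
		void *p = mmap(NULL, EXAMPLE_HPAGE_SIZE, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 4096);
		if (p == MAP_FAILED)
			perror("mmap");	/* expected: Invalid argument */
		else
			munmap(p, EXAMPLE_HPAGE_SIZE);
		close(fd);
		return 0;
	}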
This commit is contained in:
Parent: 69ae9e3ee4
Commit: 68589bc353
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -70,8 +70,10 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
  * Don't actually need to do any preparation, but need to make sure
  * the address is in the right region.
  */
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
+	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
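A worked sanity check of the mask arithmetic in the new pgoff test, under assumed example sizes (4 KB base pages, 16 MB huge pages; the real values are per-architecture and not taken from this patch). Since pgoff counts PAGE_SIZE units, a hugepage-aligned file offset is one whose low (HPAGE_SHIFT - PAGE_SHIFT) bits of pgoff are zero, and ~HPAGE_MASK >> PAGE_SHIFT is exactly that low-bit mask:

	/* Standalone sketch of the alignment arithmetic; not kernel code. */
	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assumed 4 KB base pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define HPAGE_SHIFT	24			/* assumed 16 MB huge pages */
	#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
	#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

	int main(void)
	{
		/* ~HPAGE_MASK >> PAGE_SHIFT == HPAGE_SIZE / PAGE_SIZE - 1 */
		assert((~HPAGE_MASK >> PAGE_SHIFT) == HPAGE_SIZE / PAGE_SIZE - 1);

		unsigned long aligned   = HPAGE_SIZE / PAGE_SIZE;	/* offset = one huge page */
		unsigned long unaligned = aligned + 1;			/* off by one small page */

		printf("pgoff %lu: %s\n", aligned,
		       (aligned & (~HPAGE_MASK >> PAGE_SHIFT)) ? "-EINVAL" : "ok");
		printf("pgoff %lu: %s\n", unaligned,
		       (unaligned & (~HPAGE_MASK >> PAGE_SHIFT)) ? "-EINVAL" : "ok");
		return 0;
	}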
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -491,11 +491,15 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 	return 0;
 }
 
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
 {
 	int err = 0;
 
-	if ( (addr+len) < addr )
+	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+		return -EINVAL;
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
 		return -EINVAL;
 
 	if (addr < 0x100000000UL)
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -62,24 +62,19 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 	loff_t len, vma_len;
 	int ret;
 
-	if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
-		return -EINVAL;
-
-	if (vma->vm_start & ~HPAGE_MASK)
-		return -EINVAL;
-
-	if (vma->vm_end & ~HPAGE_MASK)
-		return -EINVAL;
-
-	if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
-		return -EINVAL;
+	/*
+	 * vma alignment has already been checked by prepare_hugepage_range.
+	 * If you add any error returns here, do so after setting VM_HUGETLB,
+	 * so is_vm_hugetlb_page tests below unmap_region go the right way
+	 * when do_mmap_pgoff unwinds (may be important on powerpc and ia64).
+	 */
+	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
+	vma->vm_ops = &hugetlb_vm_ops;
 
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
 	mutex_lock(&inode->i_mutex);
 	file_accessed(file);
-	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
-	vma->vm_ops = &hugetlb_vm_ops;
 
 	ret = -ENOMEM;
 	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -60,8 +60,11 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
+						pgoff_t pgoff)
 {
+	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
+		return -EINVAL;
 	if (len & ~HPAGE_MASK)
 		return -EINVAL;
 	if (addr & ~HPAGE_MASK)
@@ -69,7 +72,8 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 #else
-int prepare_hugepage_range(unsigned long addr, unsigned long len);
+int prepare_hugepage_range(unsigned long addr, unsigned long len,
+						pgoff_t pgoff);
 #endif
 
 #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
@@ -107,7 +111,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)		0
 #define hugetlb_report_node_meminfo(n, buf)	0
 #define follow_huge_pmd(mm, addr, pmd, write)	NULL
-#define prepare_hugepage_range(addr, len)	(-EINVAL)
+#define prepare_hugepage_range(addr,len,pgoff)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(mm, addr, len)	0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1379,7 +1379,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		 * Check if the given range is hugepage aligned, and
 		 * can be made suitable for hugepages.
 		 */
-		ret = prepare_hugepage_range(addr, len);
+		ret = prepare_hugepage_range(addr, len, pgoff);
 	} else {
 		/*
 		 * Ensure that a normal request is not falling in a