metag: hugetlb: convert to vm_unmapped_area()
Convert hugetlb_get_unmapped_area_new_pmd() to use vm_unmapped_area()
rather than searching the virtual address space itself. This fixes the
following errors in linux-next due to the specified members being
removed after other architectures have already been converted:

arch/metag/mm/hugetlbpage.c: In function 'hugetlb_get_unmapped_area_new_pmd':
arch/metag/mm/hugetlbpage.c:199: error: 'struct mm_struct' has no member named 'cached_hole_size'
arch/metag/mm/hugetlbpage.c:200: error: 'struct mm_struct' has no member named 'free_area_cache'
arch/metag/mm/hugetlbpage.c:215: error: 'struct mm_struct' has no member named 'cached_hole_size'

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Michel Lespinasse <walken@google.com>
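For background, vm_unmapped_area() is the common helper that replaces each
architecture's hand-rolled gap search: the caller fills in a
struct vm_unmapped_area_info and gets back either a suitable start address
or a negative errno encoded in the unsigned long. A minimal sketch of the
pattern this commit adopts — the limits and alignment mask mirror the metag
hunk below; the function name is illustrative only, not part of the commit:

/*
 * Sketch of the vm_unmapped_area() calling convention, assuming the
 * struct vm_unmapped_area_info fields of this kernel era. This is a
 * bottom-up search; setting info.flags = VM_UNMAPPED_AREA_TOPDOWN
 * would search downwards from high_limit instead.
 */
static unsigned long example_get_unmapped_area(unsigned long len)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;				/* 0 => bottom-up search */
	info.length = len;			/* size of the gap needed */
	info.low_limit = TASK_UNMAPPED_BASE;	/* search window start */
	info.high_limit = TASK_SIZE;		/* search window end */
	info.align_mask = PAGE_MASK & HUGEPT_MASK; /* huge-PTE-table alignment */
	info.align_offset = 0;
	return vm_unmapped_area(&info);		/* gap start, or negative errno */
}

Because failure already comes back as an errno-encoded value, the second hunk
can return the helper's result directly instead of translating 0 into -EINVAL
as the old open-coded search did (the failure code thereby becomes
vm_unmapped_area()'s -ENOMEM rather than -EINVAL).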
Parent: c838e72a35
Commit: f75c28d896
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -192,43 +192,15 @@ new_search:
 static unsigned long
 hugetlb_get_unmapped_area_new_pmd(unsigned long len)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	unsigned long start_addr, addr;
-
-	if (ALIGN_HUGEPT(len) > mm->cached_hole_size)
-		start_addr = mm->free_area_cache;
-	else
-		start_addr = TASK_UNMAPPED_BASE;
-
-new_search:
-	addr = ALIGN_HUGEPT(start_addr);
+	struct vm_unmapped_area_info info;
 
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto new_search;
-			}
-			return 0;
-		}
-		/* skip ahead if we've aligned right over some vmas */
-		if (vma && vma->vm_end <= addr)
-			continue;
-		if (!vma || ALIGN_HUGEPT(addr + len) <= vma->vm_start) {
-#if HPAGE_SHIFT < HUGEPT_SHIFT
-			if (len & HUGEPT_MASK)
-				mm->context.part_huge = addr + len;
-#endif
-			return addr;
-		}
-		addr = ALIGN_HUGEPT(vma->vm_end);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & HUGEPT_MASK;
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -266,11 +238,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	 * Find an unmapped naturally aligned set of 4MB blocks that we can use
 	 * for huge pages.
 	 */
-	addr = hugetlb_get_unmapped_area_new_pmd(len);
-	if (likely(addr))
-		return addr;
-
-	return -EINVAL;
+	return hugetlb_get_unmapped_area_new_pmd(len);
 }
 
 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/