mm, thp: allow fallback when pte_alloc_one() fails for huge pmd
The transparent hugepages feature is careful not to invoke the oom killer when a hugepage cannot be allocated.

pte_alloc_one() failing in __do_huge_pmd_anonymous_page(), however, currently results in VM_FAULT_OOM, which invokes the pagefault oom killer to kill a memory-hogging task. This is unnecessary since it's possible to drop the reference to the hugepage and fall back to allocating a small page.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: aa2e878efa
Commit: edad9d2c33
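To make the intended control flow easier to follow, here is a minimal sketch of the fallback pattern the patch enables. Every name in it (fault_ctx, alloc_huge_page, map_huge_page, put_huge_page, map_small_page, anonymous_fault) is a hypothetical stand-in for illustration, not a kernel interface; the real change is the two hunks in the diff below.

struct fault_ctx;
struct page;

/* Hypothetical helpers standing in for the real huge- and small-page paths. */
struct page *alloc_huge_page(struct fault_ctx *ctx);
int map_huge_page(struct fault_ctx *ctx, struct page *hpage);	/* can fail, e.g. page table allocation */
void put_huge_page(struct page *hpage);
int map_small_page(struct fault_ctx *ctx);

int anonymous_fault(struct fault_ctx *ctx)
{
        struct page *hpage = alloc_huge_page(ctx);

        if (hpage) {
                if (map_huge_page(ctx, hpage) == 0)
                        return 0;	/* huge mapping installed */
                /*
                 * Setup failed after the hugepage was allocated: drop the
                 * reference instead of reporting out-of-memory and letting
                 * the oom killer pick a victim ...
                 */
                put_huge_page(hpage);
        }
        /* ... and fall back to mapping an ordinary small page. */
        return map_small_page(ctx);
}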
@@ -640,11 +640,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
 	VM_BUG_ON(!PageCompound(page));
 	pgtable = pte_alloc_one(mm, haddr);
-	if (unlikely(!pgtable)) {
-		mem_cgroup_uncharge_page(page);
-		put_page(page);
+	if (unlikely(!pgtable))
 		return VM_FAULT_OOM;
-	}
 
 	clear_huge_page(page, haddr, HPAGE_PMD_NR);
 	__SetPageUptodate(page);
@@ -723,8 +720,14 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			put_page(page);
 			goto out;
 		}
+		if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
+							  page))) {
+			mem_cgroup_uncharge_page(page);
+			put_page(page);
+			goto out;
+		}
 
-		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
+		return 0;
 	}
 out:
 	/*