thp: setup huge zero page on non-write page fault
All code paths seem to be covered, so we can now map the huge zero page
on a read page fault.  We set it up in do_huge_pmd_anonymous_page() when
the area around the fault address is suitable for THP and the fault is a
read.  If setting up the huge zero page fails (ENOMEM), we fall back to
handle_pte_fault() as THP normally does.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: c5a647d09f
Commit: 80371957f0
mm/huge_memory.c

@@ -733,6 +733,16 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			return VM_FAULT_OOM;
 		if (unlikely(khugepaged_enter(vma)))
 			return VM_FAULT_OOM;
+		if (!(flags & FAULT_FLAG_WRITE)) {
+			pgtable_t pgtable;
+			pgtable = pte_alloc_one(mm, haddr);
+			if (unlikely(!pgtable))
+				return VM_FAULT_OOM;
+			spin_lock(&mm->page_table_lock);
+			set_huge_zero_page(pgtable, mm, vma, haddr, pmd);
+			spin_unlock(&mm->page_table_lock);
+			return 0;
+		}
 		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 					  vma, haddr, numa_node_id(), 0);
 		if (unlikely(!page)) {
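For context, set_huge_zero_page() is a helper added earlier in this series,
not part of this hunk.  Reconstructed here as a rough sketch of the
kernel-internal code (details may differ from the merged tree), it installs
a write-protected huge PMD pointing at the zero page and deposits the
preallocated page table so the mapping can be split later:

/* Approximate shape of the helper called above; from the same patch
 * series, reconstructed for illustration only. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd)
{
	pmd_t entry;
	entry = pfn_pmd(huge_zero_pfn, vma->vm_page_prot);
	entry = pmd_wrprotect(entry);	/* read-only: any write must COW */
	entry = pmd_mkhuge(entry);
	set_pmd_at(mm, haddr, pmd, entry);
	pgtable_trans_huge_deposit(mm, pgtable);	/* kept for a later split */
	mm->nr_ptes++;
}

The deposited page table is what makes the pte_alloc_one() call in the hunk
necessary: even though no PTEs are populated now, a table must be on hand if
the huge zero mapping is ever split back into small pages.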
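The behaviour is easy to exercise from userspace.  The following demo is
hypothetical (not from the patch); the 2M huge page size and the
MADV_HUGEPAGE hint are assumptions about the target system.  It maps an
anonymous region, reads first so the path above installs the huge zero
page, then writes to force the copy-on-write fault:

/* Hypothetical demo: exercise the new read-fault path.
 * Assumes 2M huge pages and a THP-enabled kernel. */
#include <stdio.h>
#include <sys/mman.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)

int main(void)
{
	/* Over-allocate so we can carve out a 2M-aligned range. */
	size_t len = 2 * HPAGE_SIZE;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	char *huge = (char *)(((unsigned long)p + HPAGE_SIZE - 1) &
			      ~(HPAGE_SIZE - 1));
	madvise(huge, HPAGE_SIZE, MADV_HUGEPAGE);	/* hint THP */

	char c = huge[0];	/* read fault: maps the huge zero page */
	huge[0] = 1;		/* write fault: COW to a real huge page */

	printf("first read saw %d\n", c);
	munmap(p, len);
	return 0;
}

Before this commit, the first read would have allocated a full huge page
(or fallen back to small pages); after it, the read costs only a PMD entry
plus one deposited page table.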