[PATCH] Hugetlb: Reorganize hugetlb_fault to prepare for COW
This patch splits the "no_page()" type activity into its own function,
hugetlb_no_page().  hugetlb_fault() becomes the entry point for hugetlb
faults and delegates to the appropriate handler depending on the type of
fault.  Right now we still have only hugetlb_no_page(), but a later patch
introduces a COW fault.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "Seth, Rohit" <rohit.seth@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 86e5216f8d
parent 85ef47f74a
 mm/hugetlb.c | 34 +++++++++++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)
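For orientation, here is a simplified sketch of the entry point this patch
introduces (condensed from the third hunk below; the hugetlb_cow() branch
mentioned in the comment is a hypothetical name for what the later COW
patch adds):

	int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			  unsigned long address, int write_access)
	{
		pte_t *ptep;

		/* Find or allocate the huge-page pte slot for this address. */
		ptep = huge_pte_alloc(mm, address);
		if (!ptep)
			return VM_FAULT_OOM;

		/* No mapping yet: delegate to the no-page handler. */
		if (pte_none(*ptep))
			return hugetlb_no_page(mm, vma, address, ptep);

		/*
		 * A later patch adds the write-fault case here, e.g.
		 * "if (write_access) return hugetlb_cow(...);"
		 * (hypothetical name).
		 */

		/* Another thread instantiated the pte before our check. */
		return VM_FAULT_MINOR;
	}

The delegation keeps hugetlb_fault() small: it only resolves the pte slot
and classifies the fault, leaving page-cache lookup and locking to the
type-specific handler.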
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -376,20 +376,15 @@ out:
 	return page;
 }
 
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, int write_access)
+int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pte_t *ptep)
 {
 	int ret = VM_FAULT_SIGBUS;
 	unsigned long idx;
 	unsigned long size;
-	pte_t *pte;
 	struct page *page;
 	struct address_space *mapping;
 
-	pte = huge_pte_alloc(mm, address);
-	if (!pte)
-		goto out;
-
 	mapping = vma->vm_file->f_mapping;
 	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
 		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
@@ -408,11 +403,11 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto backout;
 
 	ret = VM_FAULT_MINOR;
-	if (!pte_none(*pte))
+	if (!pte_none(*ptep))
 		goto backout;
 
 	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
-	set_huge_pte_at(mm, address, pte, make_huge_pte(vma, page));
+	set_huge_pte_at(mm, address, ptep, make_huge_pte(vma, page));
 	spin_unlock(&mm->page_table_lock);
 	unlock_page(page);
 out:
@@ -426,6 +421,27 @@ backout:
 	goto out;
 }
 
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	pte_t *ptep;
+	pte_t entry;
+
+	ptep = huge_pte_alloc(mm, address);
+	if (!ptep)
+		return VM_FAULT_OOM;
+
+	entry = *ptep;
+	if (pte_none(entry))
+		return hugetlb_no_page(mm, vma, address, ptep);
+
+	/*
+	 * We could get here if another thread instantiated the pte
+	 * before the test above.
+	 */
+	return VM_FAULT_MINOR;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)