pagewalk: only split huge pages when necessary
Right now, if a mm_walk has either ->pte_entry or ->pmd_entry set, it will
unconditionally split any transparent huge pages it runs into.  In practice,
that means that anyone doing a cat /proc/$pid/smaps will unconditionally
break down every huge page in the process and depend on khugepaged to
re-collapse it later.  This is fairly suboptimal.

This patch changes that behavior.  It teaches the ->pmd_entry handlers
(there are five of them) that they must break down the THPs themselves.
Also, the _generic_ code will never break down a THP unless a ->pte_entry
handler is actually set.

This means that the ->pmd_entry handlers can now choose to deal with THPs
without breaking them down.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Eric B Munson <emunson@mgebm.net>
Tested-by: Eric B Munson <emunson@mgebm.net>
Cc: Michael J Wolf <mjwolf@us.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 278df9f451
Commit: 033193275b
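With this change, a walker that does not care about huge pmds must break them
down itself.  Purely as an illustration (the handler name below is
hypothetical and not part of this patch), a ->pmd_entry callback can keep the
old behaviour by splitting the pmd before doing its per-pte work:

	/* Illustrative sketch, not part of this patch. */
	static int example_split_pmd_entry(pmd_t *pmd, unsigned long addr,
					   unsigned long end, struct mm_walk *walk)
	{
		/*
		 * Opt out of THP handling: break a transparent huge page
		 * covering this range back into normal ptes.
		 */
		split_huge_page_pmd(walk->mm, pmd);

		/*
		 * From here on, a ->pte_entry callback (or an explicit
		 * pte_offset_map_lock() walk) only sees ordinary ptes.
		 */
		return 0;
	}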
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -343,6 +343,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	struct page *page;
 	int mapcount;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
@@ -467,6 +469,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
@@ -623,6 +627,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	int err = 0;
 
+	split_huge_page_pmd(walk->mm, pmd);
+
 	/* find the first VMA at or above 'addr' */
 	vma = find_vma(walk->mm, addr);
 	for (; addr != end; addr += PAGE_SIZE) {
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -914,6 +914,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
  * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
  * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
+ *	       this handler is required to be able to handle
+ *	       pmd_trans_huge() pmds.  They may simply choose to
+ *	       split_huge_page() instead of handling it explicitly.
  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
  * @pte_hole: if set, called for each hole at all levels
  * @hugetlb_entry: if set, called for each hugetlb entry
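For contrast, and again only as an illustration (the handler name, the use of
walk->private, and the accounting below are made up, and HPAGE_PMD_SIZE is
assumed to be available), a ->pmd_entry handler may instead deal with the
huge pmd in place; a real handler must also cope with a pmd that is
concurrently being split, as later patches in this series do for smaps:

	/* Illustrative sketch, not part of this patch. */
	static int example_thp_pmd_entry(pmd_t *pmd, unsigned long addr,
					 unsigned long end, struct mm_walk *walk)
	{
		unsigned long *thp_bytes = walk->private;	/* hypothetical counter */

		spin_lock(&walk->mm->page_table_lock);
		if (pmd_trans_huge(*pmd)) {
			/* Account the whole huge mapping without splitting it. */
			*thp_bytes += HPAGE_PMD_SIZE;
			spin_unlock(&walk->mm->page_table_lock);
			return 0;
		}
		spin_unlock(&walk->mm->page_table_lock);

		/* Not huge: per-pte processing proceeds as before. */
		return 0;
	}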
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4763,7 +4763,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	VM_BUG_ON(pmd_trans_huge(*pmd));
+	split_huge_page_pmd(walk->mm, pmd);
+
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE)
 		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
@@ -4925,8 +4926,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	pte_t *pte;
 	spinlock_t *ptl;
 
+	split_huge_page_pmd(walk->mm, pmd);
 retry:
-	VM_BUG_ON(pmd_trans_huge(*pmd));
 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	for (; addr != end; addr += PAGE_SIZE) {
 		pte_t ptent = *(pte++);
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -33,19 +33,35 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 
 	pmd = pmd_offset(pud, addr);
 	do {
+again:
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(walk->mm, pmd);
-		if (pmd_none_or_clear_bad(pmd)) {
+		if (pmd_none(*pmd)) {
 			if (walk->pte_hole)
 				err = walk->pte_hole(addr, next, walk);
 			if (err)
 				break;
 			continue;
 		}
+		/*
+		 * This implies that each ->pmd_entry() handler
+		 * needs to know about pmd_trans_huge() pmds
+		 */
 		if (walk->pmd_entry)
 			err = walk->pmd_entry(pmd, addr, next, walk);
-		if (!err && walk->pte_entry)
-			err = walk_pte_range(pmd, addr, next, walk);
+		if (err)
+			break;
+
+		/*
+		 * Check this here so we only break down trans_huge
+		 * pages when we _need_ to
+		 */
+		if (!walk->pte_entry)
+			continue;
+
+		split_huge_page_pmd(walk->mm, pmd);
+		if (pmd_none_or_clear_bad(pmd))
+			goto again;
+		err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
 			break;
 	} while (pmd++, addr = next, addr != end);