mm/huge_memory.c: use zap_deposited_table() more
Depending on the flags of the PMD being zapped, there may or may not be a deposited pgtable to be freed. In two of the three cases this is open coded, while the third uses the zap_deposited_table() helper. This patch converts the others to use the helper to clean things up a bit.

Link: http://lkml.kernel.org/r/20170411174233.21902-2-oohall@gmail.com
Cc: Reza Arbab <arbab@linux.vnet.ibm.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: linux-nvdimm@ml01.01.org
Cc: Oliver O'Halloran <oohall@gmail.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent: bfe1c56645
Commit: c14a6eb44d
@@ -1615,8 +1615,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (is_huge_zero_pmd(orig_pmd))
 			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
 	} else if (is_huge_zero_pmd(orig_pmd)) {
-		pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
-		atomic_long_dec(&tlb->mm->nr_ptes);
+		zap_deposited_table(tlb->mm, pmd);
 		spin_unlock(ptl);
 		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
 	} else {
@@ -1625,10 +1624,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 		VM_BUG_ON_PAGE(!PageHead(page), page);
 		if (PageAnon(page)) {
-			pgtable_t pgtable;
-			pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
-			pte_free(tlb->mm, pgtable);
-			atomic_long_dec(&tlb->mm->nr_ptes);
+			zap_deposited_table(tlb->mm, pmd);
 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
 		} else {
 			if (arch_needs_pgtable_deposit())
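For context, the lines removed in both hunks are the open-coded form of the deposited-pgtable teardown. A minimal sketch of what zap_deposited_table() does at this point in the tree, inferred from those removed lines rather than quoted from this commit, looks like this:

/*
 * Sketch of the helper the two call sites above now share (mm/huge_memory.c
 * internal). Inferred from the open-coded sequence this patch removes:
 * withdraw the deposited page table, free it, and drop the mm's
 * page-table count.
 */
static void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	atomic_long_dec(&mm->nr_ptes);
}

After the change, the huge-zero-page and anonymous-page paths go through the same helper as the file-backed path, which already called it behind arch_needs_pgtable_deposit() (visible as unchanged context at the end of the second hunk), so the withdraw/free/counter-decrement sequence lives in one place.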