mm/thp/migration: switch from flush_tlb_range to flush_pmd_tlb_range
We remove one instance of flush_tlb_range here. That was added by commit
f714f4f20e ("mm: numa: call MMU notifiers on THP migration"), but
pmdp_huge_clear_flush_notify should have done the required flush for us.
Hence remove the extra flush.
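
For context, the generic version of that helper (mm/pgtable-generic.c)
already ends in a TLB flush; around this release it read roughly as
follows, which is why a flush_tlb_range right after it is redundant:

pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(!pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	/* this is the flush that the removed flush_tlb_range() duplicated */
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}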
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: bcf6691797
Commit: 458aa76d13
include/asm-generic/pgtable.h
@@ -783,6 +783,23 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 }
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 
+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * ARCHes with special requirements for evicting THP backing TLB entries can
+ * implement this. Otherwise also, it can help optimize normal TLB flush in
+ * THP regime. stock flush_tlb_range() typically has optimization to nuke the
+ * entire TLB if flush span is greater than a threshold, which will
+ * likely be true for a single huge page. Thus a single thp flush will
+ * invalidate the entire TLB which is not desirable.
+ * e.g. see arch/arc: flush_pmd_tlb_range
+ */
+#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#else
+#define flush_pmd_tlb_range(vma, addr, end)	BUILD_BUG()
+#endif
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range
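As an illustration of the threshold behaviour the new comment describes,
here is a hedged sketch; it is not taken from any real architecture, and
FLUSH_ALL_THRESHOLD, sketch_flush_tlb_range and the per-page loop are all
invented for the example:

/* Hypothetical flush_tlb_range(): beyond some span it is cheaper to
 * invalidate the whole TLB than to evict entries one at a time. */
#define FLUSH_ALL_THRESHOLD	(32 * PAGE_SIZE)	/* invented cutoff */

static void sketch_flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (end - start >= FLUSH_ALL_THRESHOLD) {
		/* a 2 MiB THP always exceeds such a cutoff, so the
		 * whole TLB gets nuked for one huge-page mapping */
		local_flush_tlb_all();
	} else {
		unsigned long addr;

		for (addr = start; addr < end; addr += PAGE_SIZE)
			local_flush_tlb_page(vma, addr);
	}
}

An arch-specific flush_pmd_tlb_range() can instead evict just the one
huge-page entry.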
mm/migrate.c
@@ -1773,7 +1773,10 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 		put_page(new_page);
 		goto out_fail;
 	}
-
+	/*
+	 * We are not sure a pending tlb flush here is for a huge page
+	 * mapping or not. Hence use the tlb range variant
+	 */
 	if (mm_tlb_flush_pending(mm))
 		flush_tlb_range(vma, mmun_start, mmun_end);
 
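For reference, mm_tlb_flush_pending() in kernels of this vintage
(include/linux/mm_types.h) was roughly the snippet below; it reports only
that some flush for this mm is pending, with no hint of the mapping's
granularity, hence the conservative flush_tlb_range above:

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	return mm->tlb_flush_pending;
}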
@@ -1829,12 +1832,11 @@ fail_putback:
 	page_add_anon_rmap(new_page, vma, mmun_start, true);
 	pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
 	set_pmd_at(mm, mmun_start, pmd, entry);
-	flush_tlb_range(vma, mmun_start, mmun_end);
 	update_mmu_cache_pmd(vma, address, &entry);
 
 	if (page_count(page) != 2) {
 		set_pmd_at(mm, mmun_start, pmd, orig_entry);
-		flush_tlb_range(vma, mmun_start, mmun_end);
+		flush_pmd_tlb_range(vma, mmun_start, mmun_end);
 		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
 		update_mmu_cache_pmd(vma, address, &entry);
 		page_remove_rmap(new_page, true);
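The notify variant called two lines above the removed flush wraps
pmdp_huge_clear_flush (which itself flushes via flush_pmd_tlb_range)
together with the MMU-notifier call; approximately, from
include/linux/mmu_notifier.h of that era:

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, ___haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
	___pmd;								\
})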
mm/pgtable-generic.c
@@ -84,20 +84,6 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
-#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
-
-/*
- * ARCHes with special requirements for evicting THP backing TLB entries can
- * implement this. Otherwise also, it can help optimize normal TLB flush in
- * THP regime. stock flush_tlb_range() typically has optimization to nuke the
- * entire TLB if flush span is greater than a threshold, which will
- * likely be true for a single huge page. Thus a single thp flush will
- * invalidate the entire TLB which is not desirable.
- * e.g. see arch/arc: flush_pmd_tlb_range
- */
-#define flush_pmd_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
-#endif
-
 #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
 int pmdp_set_access_flags(struct vm_area_struct *vma,
 			  unsigned long address, pmd_t *pmdp,
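With the fallback now centralized in asm-generic/pgtable.h, an
architecture that can evict a single huge-page TLB entry cheaply opts out
of it like this; the guard macro and function signature come from the
diff above, while the body is a hypothetical placeholder:

/* in the arch's asm/pgtable.h */
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE

/* in the arch's TLB code: invalidate only the PMD-sized mapping
 * rather than letting the flush_tlb_range() fallback nuke the TLB */
void flush_pmd_tlb_range(struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	/* arch-specific single huge-page TLB invalidate goes here */
}

arch/arc provides such an implementation, as the comment's example notes.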