powerpc/mm/radix: Avoid flushing the PWC on every flush_tlb_range

We currently flush the PWC there only because flush_tlb_range() is used by
THP pmd collapsing; give that path a dedicated flush function instead.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Benjamin Herrenschmidt authored 2017-07-19 14:49:06 +10:00, committed by Michael Ellerman
Parent: a46cc7a90f
Commit: 424de9c6e3
3 changed files, 43 additions and 6 deletions
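
In short: the old radix__flush_tlb_range() did a full radix__flush_all_mm() (TLB plus page-walk cache) purely so the THP collapse path would get its PWC flush, while the new radix__flush_tlb_collapsed_pmd() performs that flush itself: one PWC flush for the PID, then page-sized TLB invalidations across the collapsed PMD range. The userspace sketch below only models that sequence of invalidations; every name in it (flush_pwc, flush_tlb_page, the page and PMD sizes) is an illustrative stand-in, not a kernel API.

/* Hypothetical userspace model of the collapsed-PMD flush pattern;
 * none of these helpers exist in the kernel under these names. */
#include <stdio.h>

#define PAGE_SIZE      0x10000UL   /* 64K base pages: the non-trivial case */
#define HPAGE_PMD_SIZE 0x200000UL  /* range mapped by one PMD entry (illustrative) */

/* Stand-in for a page-walk-cache flush scoped to one PID. */
static void flush_pwc(unsigned long pid)
{
	printf("PWC flush, pid %lu\n", pid);
}

/* Stand-in for a single-page TLB invalidation. */
static void flush_tlb_page(unsigned long pid, unsigned long va)
{
	printf("TLB flush, pid %lu, va 0x%lx\n", pid, va);
}

/*
 * Same shape as the new radix__flush_tlb_collapsed_pmd(): flush the PWC
 * once for the address space, then invalidate each page of the PMD range,
 * instead of discarding the whole TLB and PWC on every range flush.
 */
static void flush_collapsed_pmd(unsigned long pid, unsigned long addr)
{
	unsigned long end = addr + HPAGE_PMD_SIZE;

	flush_pwc(pid);
	for (; addr < end; addr += PAGE_SIZE)
		flush_tlb_page(pid, addr);
}

int main(void)
{
	/* Pretend a huge PMD was just collapsed at this (made-up) address. */
	flush_collapsed_pmd(42, 0x10000000UL);
	return 0;
}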

View file

@@ -36,6 +36,7 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
#define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
#endif
extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
				     unsigned long page_size);
extern void radix__flush_tlb_lpid(unsigned long lpid);

View file

@@ -804,9 +804,12 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);
	/*FIXME!! Verify whether we need this kick below */
	kick_all_cpus_sync();
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
	return pmd;
}

View file

@@ -272,11 +272,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
{
	struct mm_struct *mm = vma->vm_mm;
	/*
	 * This is currently used when collapsing THPs so we need to
	 * flush the PWC. We should fix this.
	 */
	radix__flush_all_mm(mm);
	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);
@@ -355,6 +351,43 @@ err_out:
	preempt_enable();
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(mmu_virtual_psize);
	unsigned long pid, end;
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}
	/* Otherwise first do the PWC */
	if (local)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
	else
		_tlbie_pid(pid, RIC_FLUSH_PWC);
	/* Then iterate the pages */
	end = addr + HPAGE_PMD_SIZE;
	for (; addr < end; addr += PAGE_SIZE) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
no_context:
	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{