thp: mprotect: pass vma down to page table walkers
Flushing the tlb for huge pmds requires the vma's anon_vma, so pass along the vma instead of the mm; we can always get the latter when we need it.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: c489f1257b
Commit: b36f5b0710
@@ -78,7 +78,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	pte_unmap_unlock(pte - 1, ptl);
 }
 
-static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
+static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
 {
@@ -88,14 +88,15 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(mm, pmd);
+		split_huge_page_pmd(vma->vm_mm, pmd);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
+		change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
+				 dirty_accountable);
 	} while (pmd++, addr = next, addr != end);
 }
 
-static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		int dirty_accountable)
 {
@@ -107,7 +108,8 @@ static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
+		change_pmd_range(vma, pud, addr, next, newprot,
+				 dirty_accountable);
 	} while (pud++, addr = next, addr != end);
 }
 
@@ -127,7 +129,8 @@ static void change_protection(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
+		change_pud_range(vma, pgd, addr, next, newprot,
+				 dirty_accountable);
 	} while (pgd++, addr = next, addr != end);
 	flush_tlb_range(vma, start, end);
 }
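To make the shape of the change easier to see outside the kernel tree, here is a minimal, hypothetical userspace sketch of the new calling convention. The struct definitions and walker bodies below are simplified stand-ins, not the kernel's real types or functions; only the direction of the change (pass the vma down, recover the mm through vma->vm_mm) is taken from the diff above.

/*
 * Toy userspace model (not kernel code) of the calling convention this
 * commit moves to: page-table walkers receive the vma and derive the mm
 * via vma->vm_mm, so the vma (and hence its anon_vma) is still in hand
 * at the level where the TLB flush happens.  All names here are
 * simplified stand-ins used only for illustration.
 */
#include <stdio.h>

struct mm_struct { int id; };

struct anon_vma { int id; };

struct vm_area_struct {
	struct mm_struct *vm_mm;	/* owning address space */
	struct anon_vma *anon_vma;	/* needed when flushing huge pmds */
};

/* Lowest level still only needs the mm. */
static void change_pte_range(struct mm_struct *mm, unsigned long addr)
{
	printf("change_pte_range: mm %d, addr %#lx\n", mm->id, addr);
}

/*
 * Mid level takes the vma; the mm is recovered through vma->vm_mm,
 * mirroring the split_huge_page_pmd(vma->vm_mm, pmd) call in the diff.
 */
static void change_pmd_range(struct vm_area_struct *vma, unsigned long addr)
{
	change_pte_range(vma->vm_mm, addr);
}

/* Top level keeps the vma so the final range flush can use anon_vma. */
static void change_protection(struct vm_area_struct *vma, unsigned long start)
{
	change_pmd_range(vma, start);
	printf("flush range: vma with anon_vma %d\n", vma->anon_vma->id);
}

int main(void)
{
	struct mm_struct mm = { .id = 1 };
	struct anon_vma av = { .id = 2 };
	struct vm_area_struct vma = { .vm_mm = &mm, .anon_vma = &av };

	change_protection(&vma, 0x1000);
	return 0;
}

The design point is that the vma is strictly richer than the mm: a walker that holds the vma can always reach the mm through vma->vm_mm, while the reverse is not true, which is what keeps the vma (and any anon_vma needed for huge pmds) available where the walk ends and the TLB is flushed.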