[PATCH] freepgt: free_pgtables from FIRST_USER_ADDRESS
The patches to free_pgtables by vma left problems on any architectures
which leave some user address page table entries unencapsulated by vma.
Andi has fixed the 32-bit vDSO on x86_64 to use a vma.  Now fix arm (and
arm26), whose first PAGE_SIZE is reserved (perhaps) for machine vectors.

Our calls to free_pgtables must not touch that area, and exit_mmap's
BUG_ON(nr_ptes) must allow that arm's get_pgd_slow may (or may not) have
allocated an extra page table, which its free_pgd_slow would free later.

FIRST_USER_PGD_NR has misled me and others: until all the arches define
FIRST_USER_ADDRESS instead, a hack in mmap.c to derive one from t'other.
This patch fixes the bugs, the remaining patches just clean it up.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
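For illustration, the arithmetic behind the relaxed BUG_ON can be checked
as a standalone program rather than kernel code; the constants below
assume arm circa this patch (4K pages, PMD_SHIFT 21, FIRST_USER_PGD_NR 1)
and this demo is not part of the patch:

	#include <stdio.h>

	/* Assumed arm values, mirroring the temporary mmap.c hack below */
	#define PAGE_SIZE          4096UL
	#define PMD_SHIFT          21
	#define PMD_SIZE           (1UL << PMD_SHIFT)
	#define FIRST_USER_PGD_NR  1UL	/* 1 on arm, 0 on most arches */
	#define FIRST_USER_ADDRESS (FIRST_USER_PGD_NR * PAGE_SIZE)

	int main(void)
	{
		/* Page tables allowed to survive exit_mmap: the reserved
		 * span [0, FIRST_USER_ADDRESS) rounded up to whole
		 * pmd-sized units */
		unsigned long allowed =
			(FIRST_USER_ADDRESS + PMD_SIZE - 1) >> PMD_SHIFT;
		printf("exit_mmap tolerates nr_ptes <= %lu\n", allowed);
		return 0;
	}

This prints 1 for arm, permitting the one extra page table that
get_pgd_slow may have allocated; where FIRST_USER_ADDRESS works out to 0,
the bound collapses to 0 and the check is exactly as strict as the old
BUG_ON(mm->nr_ptes).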
Parent: 021740dc30
Commit: e2cdef8c84

 mm/mmap.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1612,6 +1612,11 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 	validate_mm(mm);
 }
 
+#ifndef FIRST_USER_ADDRESS	/* temporary hack */
+#define THIS_IS_ARM	FIRST_USER_PGD_NR
+#define FIRST_USER_ADDRESS	(THIS_IS_ARM * PAGE_SIZE)
+#endif
+
 /*
  * Get rid of page table information in the indicated region.
  *
@@ -1630,7 +1635,7 @@ static void unmap_region(struct mm_struct *mm,
 	tlb = tlb_gather_mmu(mm, 0);
 	unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
-	free_pgtables(&tlb, vma, prev? prev->vm_end: 0,
+	free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
 		next? next->vm_start: 0);
 	tlb_finish_mmu(tlb, start, end);
 	spin_unlock(&mm->page_table_lock);
@@ -1910,7 +1915,7 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
-	free_pgtables(&tlb, vma, 0, 0);
+	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
 	mm->mmap = mm->mmap_cache = NULL;
@@ -1931,7 +1936,7 @@ void exit_mmap(struct mm_struct *mm)
 		vma = next;
 	}
 
-	BUG_ON(mm->nr_ptes);	/* This is just debugging */
+	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
 
 /* Insert vm structure into process list sorted by address
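A note on the two free_pgtables changes above: when there is no previous
vma, the floor of the page-table sweep is now FIRST_USER_ADDRESS rather
than 0, so the page table covering arm's reserved first page is left in
place.  A minimal restatement of that bound, using a hypothetical helper
name that is not in the patch:

	/* Hypothetical helper, for illustration only: the lower bound
	 * used by unmap_region(); exit_mmap() passes FIRST_USER_ADDRESS
	 * directly, since it sweeps the whole address space. */
	static inline unsigned long pgtable_floor(struct vm_area_struct *prev)
	{
		return prev ? prev->vm_end : FIRST_USER_ADDRESS;
	}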