vm: remove 'nr_accounted' calculations from the unmap_vmas() interfaces

The VM accounting makes no sense at this level, and half of the callers
didn't ever actually use the end result.  The only time we want to
unaccount the memory is when we actually remove the vma, so do the
accounting at that point instead.

This simplifies the interfaces (no need to pass down that silly page
counter to functions that really don't care), and also makes it much
more obvious what is actually going on: we do vm_[un]acct_memory() when
adding or removing the vma, not on random page walking.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Linus Torvalds 2012-05-06 13:54:06 -07:00
Parent: 7e027b14d5
Commit: 4f74d2c8e8
3 changed files with 19 additions and 20 deletions
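In short, callers no longer collect a page count as a side effect of the unmap walk; VM_ACCOUNT pages are summed and returned to the commit accounting only when the vmas themselves are torn down. The stand-alone C sketch below is a toy user-space model of that scheme, not kernel code: the struct vma, the committed counter, and the simplified remove_vma_list() are invented here purely to illustrate the shape of the accounting move described above.

#include <stdio.h>
#include <stdlib.h>

#define VM_ACCOUNT 0x1UL

/* Toy vma: [start, end) in pages, plus flags and a list link. */
struct vma {
	unsigned long start, end, flags;
	struct vma *next;
};

/* Stand-in for the global committed-memory counter (vm_committed_as). */
static long committed;

static void vm_unacct_memory(long pages)
{
	committed -= pages;
}

/*
 * New scheme: the unmap walk does no accounting at all; when the vma
 * list is removed, VM_ACCOUNT pages are summed once and unaccounted
 * in a single call, mirroring remove_vma_list() after this patch.
 */
static void remove_vma_list(struct vma *vma)
{
	unsigned long nr_accounted = 0;

	while (vma) {
		struct vma *next = vma->next;

		if (vma->flags & VM_ACCOUNT)
			nr_accounted += vma->end - vma->start;
		free(vma);
		vma = next;
	}
	vm_unacct_memory(nr_accounted);
}

int main(void)
{
	struct vma *a = malloc(sizeof(*a));
	struct vma *b = malloc(sizeof(*b));

	*a = (struct vma){ .start = 0, .end = 4, .flags = VM_ACCOUNT, .next = b };
	*b = (struct vma){ .start = 8, .end = 10, .flags = 0, .next = NULL };
	committed = 4;		/* only the VM_ACCOUNT vma was charged */

	remove_vma_list(a);
	printf("committed after teardown: %ld\n", committed);	/* prints 0 */
	return 0;
}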

include/linux/mm.h

@@ -896,9 +896,8 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-void unmap_vmas(struct mmu_gather *tlb,
-		struct vm_area_struct *start_vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted);
+void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+		unsigned long start, unsigned long end);
 
 /**
  * mm_walk - callbacks for walk_page_range

mm/memory.c

@@ -1295,7 +1295,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
+		unsigned long end_addr,
 		struct zap_details *details)
 {
 	unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,9 +1307,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (end <= vma->vm_start)
 		return;
 
-	if (vma->vm_flags & VM_ACCOUNT)
-		*nr_accounted += (end - start) >> PAGE_SHIFT;
-
 	if (unlikely(is_pfn_mapping(vma)))
 		untrack_pfn_vma(vma, 0, 0);
 
@@ -1339,7 +1336,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  *
  * Unmap all pages in the vma list.
  *
@@ -1354,13 +1350,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted)
+		unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted, NULL);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1379,14 +1375,13 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = start + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
-		unmap_single_vma(&tlb, vma, start, end, &nr_accounted, details);
+		unmap_single_vma(&tlb, vma, start, end, details);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	tlb_finish_mmu(&tlb, start, end);
 }
@@ -1406,13 +1401,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
-	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	unmap_single_vma(&tlb, vma, address, end, details);
 	mmu_notifier_invalidate_range_end(mm, address, end);
 	tlb_finish_mmu(&tlb, address, end);
 }

mm/mmap.c

@@ -1889,15 +1889,20 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+	unsigned long nr_accounted = 0;
+
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
 	do {
 		long nrpages = vma_pages(vma);
 
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += nrpages;
 		mm->total_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
+	vm_unacct_memory(nr_accounted);
 	validate_mm(mm);
 }
 
@@ -1912,13 +1917,11 @@ static void unmap_region(struct mm_struct *mm,
 {
 	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
 	struct mmu_gather tlb;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, start, end, &nr_accounted);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : 0);
 	tlb_finish_mmu(&tlb, start, end);
@@ -2305,8 +2308,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, 0, -1);
 
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(&tlb, 0, -1);
@@ -2315,8 +2317,12 @@ void exit_mmap(struct mm_struct *mm)
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma)
+	while (vma) {
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += vma_pages(vma);
 		vma = remove_vma(vma);
+	}
+	vm_unacct_memory(nr_accounted);
 
 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }