Merge branch 'vm-cleanups' (unmap_vmas() interface cleanup)

This series sanitizes the interface to unmap_vmas().  The crazy interface
annoyed me no end when I was looking at unmap_single_vma(), which we can
spend quite a lot of time in (especially with loads that have a lot of
small fork/exec's: shell scripts etc).

Moving the nr_accounted calculations to where they belong at least
clarifies things a little.  I hope to come back to look at the
performance of this later, but if/when I get back to it I at least won't
have to see the crazy interfaces any more.

* vm-cleanups:
  vm: remove 'nr_accounted' calculations from the unmap_vmas() interfaces
  vm: simplify unmap_vmas() calling convention
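
The net caller-side effect of the two commits, as a rough before/after sketch (distilled from the unmap_region() hunk in mm/mmap.c below; not standalone code):

	/* Before: every caller threaded accounting state through unmap_vmas() */
	unsigned long nr_accounted = 0;
	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
	vm_unacct_memory(nr_accounted);

	/* After: unmap_vmas() takes just the range; VM_ACCOUNT pages are counted
	 * and unaccounted where the VMAs are actually torn down, i.e. in
	 * remove_vma_list() and in the final VMA-freeing loop of exit_mmap(). */
	unmap_vmas(&tlb, vma, start, end);
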
Linus Torvalds 2012-05-21 08:37:07 -07:00
Parents: 76e10d158e 4f74d2c8e8
Commit: dddbd5414b
3 changed files with 25 additions and 27 deletions

include/linux/mm.h

@@ -896,10 +896,8 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
 void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-void unmap_vmas(struct mmu_gather *tlb,
-		struct vm_area_struct *start_vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
-		struct zap_details *);
+void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+		unsigned long start, unsigned long end);
 
 /**
  * mm_walk - callbacks for walk_page_range

mm/memory.c

@@ -1295,7 +1295,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
+		unsigned long end_addr,
 		struct zap_details *details)
 {
 	unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,9 +1307,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 	if (end <= vma->vm_start)
 		return;
 
-	if (vma->vm_flags & VM_ACCOUNT)
-		*nr_accounted += (end - start) >> PAGE_SHIFT;
-
 	if (unlikely(is_pfn_mapping(vma)))
 		untrack_pfn_vma(vma, 0, 0);
 
@@ -1339,8 +1336,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Unmap all pages in the vma list.
  *
@@ -1355,15 +1350,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr, unsigned long *nr_accounted,
-		struct zap_details *details)
+		unsigned long end_addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
 	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-		unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
-				details);
+		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
 	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1376,19 +1369,21 @@ void unmap_vmas(struct mmu_gather *tlb,
  *
  * Caller must protect the VMA list
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
-	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
+	unsigned long end = start + size;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-	tlb_finish_mmu(&tlb, address, end);
+	mmu_notifier_invalidate_range_start(mm, start, end);
+	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+		unmap_single_vma(&tlb, vma, start, end, details);
+	mmu_notifier_invalidate_range_end(mm, start, end);
+	tlb_finish_mmu(&tlb, start, end);
 }
 
@@ -1406,13 +1401,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
 	unsigned long end = address + size;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
-	unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+	unmap_single_vma(&tlb, vma, address, end, details);
 	mmu_notifier_invalidate_range_end(mm, address, end);
 	tlb_finish_mmu(&tlb, address, end);
 }
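
One detail worth calling out from the zap_page_range() hunk above: because unmap_vmas() no longer takes a zap_details pointer, zap_page_range() now walks the VMA list itself (bracketed by the mmu_notifier calls) and passes details straight to unmap_single_vma(). From a caller's point of view nothing changes; a typical use is still of the form (illustrative sketch, in the spirit of the MADV_DONTNEED path):

	/* Drop all user pages in [start, start + len); NULL details means a
	 * plain unmap with no truncation/shared-cache bookkeeping. */
	zap_page_range(vma, start, len, NULL);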

mm/mmap.c

@@ -1889,15 +1889,20 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+	unsigned long nr_accounted = 0;
+
 	/* Update high watermark before we lower total_vm */
 	update_hiwater_vm(mm);
 	do {
 		long nrpages = vma_pages(vma);
 
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += nrpages;
 		mm->total_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
 	} while (vma);
+	vm_unacct_memory(nr_accounted);
 	validate_mm(mm);
 }
@@ -1912,13 +1917,11 @@ static void unmap_region(struct mm_struct *mm,
 {
 	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
 	struct mmu_gather tlb;
-	unsigned long nr_accounted = 0;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm, 0);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : 0);
 	tlb_finish_mmu(&tlb, start, end);
@@ -2305,8 +2308,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu(&tlb, mm, 1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
-	vm_unacct_memory(nr_accounted);
+	unmap_vmas(&tlb, vma, 0, -1);
 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(&tlb, 0, -1);
 
@@ -2315,8 +2317,12 @@ void exit_mmap(struct mm_struct *mm)
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
-	while (vma)
+	while (vma) {
+		if (vma->vm_flags & VM_ACCOUNT)
+			nr_accounted += vma_pages(vma);
 		vma = remove_vma(vma);
+	}
+	vm_unacct_memory(nr_accounted);
 
 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }