mm: rename RECLAIM_SWAP to RECLAIM_UNMAP

The name SWAP implies that we are dealing with anonymous pages only.  In
fact, the original patch that introduced the min_unmapped_ratio logic
was to fix an issue related to file pages.  Rename it to RECLAIM_UNMAP
to match what it does.

Historically, commit a6dc60f897 ("vmscan: rename sc.may_swap to
may_unmap") renamed .may_swap to .may_unmap, leaving RECLAIM_SWAP
behind.  commit 2e2e425989 ("vmscan,memcg: reintroduce sc->may_swap")
reintroduced .may_swap for memory controller.

Signed-off-by: Zhihui Zhang <zzhsuny@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Zhihui Zhang 2015-06-24 16:56:42 -07:00 committed by Linus Torvalds
Parent f012a84aff
Commit 95bbc0c721
1 changed file with 6 additions and 6 deletions

View file

@@ -3597,7 +3597,7 @@ int zone_reclaim_mode __read_mostly;
 #define RECLAIM_OFF 0
 #define RECLAIM_ZONE (1<<0)	/* Run shrink_inactive_list on the zone */
 #define RECLAIM_WRITE (1<<1)	/* Writeout pages during reclaim */
-#define RECLAIM_SWAP (1<<2)	/* Swap pages out during reclaim */
+#define RECLAIM_UNMAP (1<<2)	/* Unmap pages during reclaim */

 /*
  * Priority for ZONE_RECLAIM. This determines the fraction of pages
@@ -3639,12 +3639,12 @@ static long zone_pagecache_reclaimable(struct zone *zone)
 	long delta = 0;

 	/*
-	 * If RECLAIM_SWAP is set, then all file pages are considered
+	 * If RECLAIM_UNMAP is set, then all file pages are considered
 	 * potentially reclaimable. Otherwise, we have to worry about
 	 * pages like swapcache and zone_unmapped_file_pages() provides
 	 * a better estimate
 	 */
-	if (zone_reclaim_mode & RECLAIM_SWAP)
+	if (zone_reclaim_mode & RECLAIM_UNMAP)
 		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
 	else
 		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
@@ -3675,15 +3675,15 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.order = order,
 		.priority = ZONE_RECLAIM_PRIORITY,
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
-		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+		.may_unmap = !!(zone_reclaim_mode & RECLAIM_UNMAP),
 		.may_swap = 1,
 	};

 	cond_resched();
 	/*
-	 * We need to be able to allocate from the reserves for RECLAIM_SWAP
+	 * We need to be able to allocate from the reserves for RECLAIM_UNMAP
 	 * and we also need to be able to write out pages for RECLAIM_WRITE
-	 * and RECLAIM_SWAP.
+	 * and RECLAIM_UNMAP.
 	 */
 	p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
 	lockdep_set_current_reclaim_state(gfp_mask);