2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* linux/mm/vmscan.c
|
|
|
|
*
|
|
|
|
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
|
|
|
|
*
|
|
|
|
* Swap reorganised 29.12.95, Stephen Tweedie.
|
|
|
|
* kswapd added: 7.1.96 sct
|
|
|
|
* Removed kswapd_ctl limits, and swap out as many pages as needed
|
|
|
|
* to bring the system back to freepages.high: 2.4.97, Rik van Riel.
|
|
|
|
* Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
|
|
|
|
* Multiqueue VM started 5.8.00, Rik van Riel.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/module.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities to include
those headers directly instead of assuming their availability. As this
conversion needs to touch a large number of source files, the following
script was used as the basis of the conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the following.
* Scan files for gfp and slab usages and update includes so that
only the necessary includes are there, i.e. if only gfp is used,
gfp.h; if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and tries to place the new include so that its order conforms
to its surroundings. It is put in the include block that contains
core kernel includes, in the same order as the rest -
alphabetical, Christmas tree, reverse Christmas tree, or at the end if
there doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have a fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
widely available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build tests were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers, which should be easily discoverable on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 11:04:11 +03:00
|
|
|
#include <linux/gfp.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <linux/kernel_stat.h>
|
|
|
|
#include <linux/swap.h>
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/highmem.h>
|
2006-09-27 12:50:00 +04:00
|
|
|
#include <linux/vmstat.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <linux/file.h>
|
|
|
|
#include <linux/writeback.h>
|
|
|
|
#include <linux/blkdev.h>
|
|
|
|
#include <linux/buffer_head.h> /* for try_to_release_page(),
|
|
|
|
buffer_heads_over_limit */
|
|
|
|
#include <linux/mm_inline.h>
|
|
|
|
#include <linux/pagevec.h>
|
|
|
|
#include <linux/backing-dev.h>
|
|
|
|
#include <linux/rmap.h>
|
|
|
|
#include <linux/topology.h>
|
|
|
|
#include <linux/cpu.h>
|
|
|
|
#include <linux/cpuset.h>
|
|
|
|
#include <linux/notifier.h>
|
|
|
|
#include <linux/rwsem.h>
|
2006-03-22 11:09:04 +03:00
|
|
|
#include <linux/delay.h>
|
2006-06-27 13:53:33 +04:00
|
|
|
#include <linux/kthread.h>
|
2006-12-07 07:34:23 +03:00
|
|
|
#include <linux/freezer.h>
|
2008-02-07 11:13:56 +03:00
|
|
|
#include <linux/memcontrol.h>
|
2008-07-25 12:48:52 +04:00
|
|
|
#include <linux/delayacct.h>
|
2008-10-19 07:26:53 +04:00
|
|
|
#include <linux/sysctl.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
#include <asm/tlbflush.h>
|
|
|
|
#include <asm/div64.h>
|
|
|
|
|
|
|
|
#include <linux/swapops.h>
|
|
|
|
|
2006-03-22 11:08:33 +03:00
|
|
|
#include "internal.h"
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
struct scan_control {
|
|
|
|
/* Incremented by the number of inactive pages that were scanned */
|
|
|
|
unsigned long nr_scanned;
|
|
|
|
|
vmscan: bail out of direct reclaim after swap_cluster_max pages
When the VM is under pressure, it can happen that several direct reclaim
processes are in the pageout code simultaneously. It also happens that
the reclaiming processes run into mostly referenced, mapped and dirty
pages in the first round.
This results in multiple direct reclaim processes having a lower
pageout priority, which corresponds to a higher target of pages to
scan.
This in turn can result in each direct reclaim process freeing
many pages. Together, they can end up freeing way too many pages.
This kicks useful data out of memory (in some cases more than half
of all memory is swapped out). It also impacts performance by
keeping tasks stuck in the pageout code for too long.
A 30% improvement in hackbench has been observed with this patch.
The fix is relatively simple: in shrink_zone() we can check how many
pages we have already freed; direct reclaim tasks break out of the
scanning loop once they have freed enough pages and have reached
a lower priority level.
We do not break out of shrink_zone() when priority == DEF_PRIORITY,
to ensure that equal pressure is applied to every zone in the common
case.
However, in order to do this we do need to know how many pages we already
freed, so move nr_reclaimed into scan_control.
akpm: a historical interlude...
We tried this in 2004:
:commit e468e46a9bea3297011d5918663ce6d19094cf87
:Author: akpm <akpm>
:Date: Thu Jun 24 15:53:52 2004 +0000
:
:[PATCH] vmscan.c: dont reclaim too many pages
:
: The shrink_zone() logic can, under some circumstances, cause far too many
: pages to be reclaimed. Say, we're scanning at high priority and suddenly hit
: a large number of reclaimable pages on the LRU.
: Change things so we bale out when SWAP_CLUSTER_MAX pages have been reclaimed.
And we reverted it in 2006:
:commit 210fe530305ee50cd889fe9250168228b2994f32
:Author: Andrew Morton <akpm@osdl.org>
:Date: Fri Jan 6 00:11:14 2006 -0800
:
: [PATCH] vmscan: balancing fix
:
: Revert a patch which went into 2.6.8-rc1. The changelog for that patch was:
:
: The shrink_zone() logic can, under some circumstances, cause far too many
: pages to be reclaimed. Say, we're scanning at high priority and suddenly
: hit a large number of reclaimable pages on the LRU.
:
: Change things so we bale out when SWAP_CLUSTER_MAX pages have been
: reclaimed.
:
: Problem is, this change caused significant imbalance in inter-zone scan
: balancing by truncating scans of larger zones.
:
: Suppose, for example, ZONE_HIGHMEM is 10x the size of ZONE_NORMAL. The zone
: balancing algorithm would require that if we're scanning 100 pages of
: ZONE_HIGHMEM, we should scan 10 pages of ZONE_NORMAL. But this logic will
: cause the scanning of ZONE_HIGHMEM to bale out after only 32 pages are
: reclaimed. Thus effectively causing smaller zones to be scanned relatively
: harder than large ones.
:
: Now I need to remember what the workload was which caused me to write this
: patch originally, then fix it up in a different way...
And we haven't demonstrated that whatever problem caused that reversion is
not being reintroduced by this change in 2008.
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
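(For illustration, a minimal sketch of the bail-out described above, as it
might sit in shrink_zone()'s scanning loop; the surrounding loop and the
priority variable are assumed from context, and the reclaim-target field
name follows the scan_control struct shown below in this file.)

	/*
	 * Sketch: direct reclaim stops scanning once it has reclaimed
	 * enough, but never at DEF_PRIORITY, so that equal pressure is
	 * still applied to every zone in the common case.
	 */
	if (sc->nr_reclaimed >= sc->nr_to_reclaim &&
	    priority < DEF_PRIORITY)
		break;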
2009-01-07 01:40:01 +03:00
|
|
|
/* Number of pages freed so far during a call to shrink_zones() */
|
|
|
|
unsigned long nr_reclaimed;
|
|
|
|
|
2009-12-15 04:59:10 +03:00
|
|
|
/* How many pages shrink_list() should reclaim */
|
|
|
|
unsigned long nr_to_reclaim;
|
|
|
|
|
vmscan: kill hibernation specific reclaim logic and unify it
shrink_all_zones() was introduced by commit d6277db4ab (swsusp: rework
memory shrinker) to improve hibernation performance, and
sc.swap_cluster_max was introduced by commit a06fe4d307 (Speed freeing
memory for suspend).
Commit a06fe4d307 said:
Without the patch:
Freed 14600 pages in 1749 jiffies = 32.61 MB/s (Anomalous!)
Freed 88563 pages in 14719 jiffies = 23.50 MB/s
Freed 205734 pages in 32389 jiffies = 24.81 MB/s
With the patch:
Freed 68252 pages in 496 jiffies = 537.52 MB/s
Freed 116464 pages in 569 jiffies = 798.54 MB/s
Freed 209699 pages in 705 jiffies = 1161.89 MB/s
At the time, that patch was well worth it. However, modern hardware
trends and recent VM improvements have eroded its value. For several
reasons, I think we should remove shrink_all_zones() entirely.
details:
1) In the old days, shrink_zone()'s slowness was mainly caused by pointless
io-throttling when there was no i/o congestion,
but the current shrink_zone() is sane, not slow.
2) shrink_all_zones() tries to shrink all pages at once, but that doesn't work
well on NUMA systems.
example)
The system has 4GB of memory, each node has 2GB, and hibernation needs 1GB.
optimal)
steal 500MB from each node.
shrink_all_zones)
steal 1GB from node-0.
Oops, the cache balancing logic is broken. ;)
Unfortunately, desktop systems have moved to NUMA nowadays.
(Side note: if hibernation required 2GB, shrink_all_zones() could never
succeed on the above machine.)
3) If a node has several in-flight I/O pages, shrink_all_zones() produces
pretty bad results.
scenario) hibernation needs 1GB
1) shrink_all_zones() tries to reclaim 1GB from Node-0
2) but it only reclaims 990MB
3) stupidly, shrink_all_zones() then tries to reclaim 1GB from Node-1
4) it reclaims 990MB
Oh well: it reclaimed twice as much as required.
The current shrink_zone(), on the other hand, has sane bail-out logic,
so it doesn't over-reclaim, and by using it we lose shrink_all_zones()'s risk.
4) The split-LRU VM always keeps the active/inactive ratio very carefully.
Shrinking only the inactive list breaks that assumption and creates
unnecessary OOM risk; it is obviously suboptimal.
Now shrink_all_memory() is only a wrapper around do_try_to_free_pages().
This brings good reviewability and debuggability, and solves the problems above.
side note: unifying the reclaim logic has two good side effects.
- It fixes a recursive reclaim bug in shrink_all_memory():
it forgot to use PF_MEMALLOC, meaning the system could get stuck in a deadlock.
- shrink_all_memory() now has lockdep awareness, which brings good debuggability.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
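(A hedged sketch of the resulting wrapper, assuming the scan_control fields
shown below in this file and abbreviating the zonelist/reclaim_state
bookkeeping; not the verbatim implementation.)

	unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
	{
		struct scan_control sc = {
			.gfp_mask	  = GFP_HIGHUSER_MOVABLE,
			.may_swap	  = 1,
			.may_unmap	  = 1,
			.may_writepage	  = 1,
			.nr_to_reclaim	  = nr_to_reclaim,
			.hibernation_mode = 1,
			.swappiness	  = vm_swappiness,
			.order		  = 0,
		};
		struct zonelist *zonelist = node_zonelist(numa_node_id(),
							  sc.gfp_mask);
		unsigned long nr_reclaimed;

		/* PF_MEMALLOC guards against the recursive-reclaim bug above */
		current->flags |= PF_MEMALLOC;
		nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
		current->flags &= ~PF_MEMALLOC;

		return nr_reclaimed;
	}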
2009-12-15 04:59:12 +03:00
|
|
|
unsigned long hibernation_mode;
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/* This context's GFP mask */
|
2005-10-21 11:18:50 +04:00
|
|
|
gfp_t gfp_mask;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
int may_writepage;
|
|
|
|
|
2009-04-01 02:19:30 +04:00
|
|
|
/* Can mapped pages be reclaimed? */
|
|
|
|
int may_unmap;
|
2006-01-19 04:42:30 +03:00
|
|
|
|
2009-04-21 23:24:57 +04:00
|
|
|
/* Can pages be swapped as part of reclaim? */
|
|
|
|
int may_swap;
|
|
|
|
|
2006-06-23 13:03:18 +04:00
|
|
|
int swappiness;
|
2006-09-26 10:31:27 +04:00
|
|
|
|
2007-07-17 15:03:16 +04:00
|
|
|
int order;
|
2008-02-07 11:13:56 +03:00
|
|
|
|
2010-05-25 01:32:37 +04:00
|
|
|
/*
|
|
|
|
* Intend to reclaim enough contiguous memory rather than to reclaim
|
|
|
|
* a sufficient amount of memory, i.e. it's the mode for high-order allocation.
|
|
|
|
*/
|
|
|
|
bool lumpy_reclaim_mode;
|
|
|
|
|
2008-02-07 11:13:56 +03:00
|
|
|
/* Which cgroup do we reclaim from */
|
|
|
|
struct mem_cgroup *mem_cgroup;
|
|
|
|
|
2009-04-01 02:23:31 +04:00
|
|
|
/*
|
|
|
|
* Nodemask of nodes allowed by the caller. If NULL, all nodes
|
|
|
|
* are scanned.
|
|
|
|
*/
|
|
|
|
nodemask_t *nodemask;
|
2005-04-17 02:20:36 +04:00
|
|
|
};
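(For context, roughly how a direct-reclaim entry point such as
try_to_free_pages() fills this structure in; a sketch only, and exact
values vary by caller.)

	struct scan_control sc = {
		.gfp_mask	= gfp_mask,
		.may_writepage	= !laptop_mode,
		.nr_to_reclaim	= SWAP_CLUSTER_MAX,
		.may_unmap	= 1,
		.may_swap	= 1,
		.swappiness	= vm_swappiness,
		.order		= order,
		.mem_cgroup	= NULL,		/* global reclaim */
		.nodemask	= nodemask,
	};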
|
|
|
|
|
|
|
|
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
|
|
|
|
|
|
|
|
#ifdef ARCH_HAS_PREFETCH
|
|
|
|
#define prefetch_prev_lru_page(_page, _base, _field) \
|
|
|
|
do { \
|
|
|
|
if ((_page)->lru.prev != _base) { \
|
|
|
|
struct page *prev; \
|
|
|
|
\
|
|
|
|
prev = lru_to_page(&(_page->lru)); \
|
|
|
|
prefetch(&prev->_field); \
|
|
|
|
} \
|
|
|
|
} while (0)
|
|
|
|
#else
|
|
|
|
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef ARCH_HAS_PREFETCHW
|
|
|
|
#define prefetchw_prev_lru_page(_page, _base, _field) \
|
|
|
|
do { \
|
|
|
|
if ((_page)->lru.prev != _base) { \
|
|
|
|
struct page *prev; \
|
|
|
|
\
|
|
|
|
prev = lru_to_page(&(_page->lru)); \
|
|
|
|
prefetchw(&prev->_field); \
|
|
|
|
} \
|
|
|
|
} while (0)
|
|
|
|
#else
|
|
|
|
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
|
|
|
|
#endif
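(These helpers are used in the LRU scan loops; a trimmed example in the
style of shrink_active_list(), with the local variable names assumed.)

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		/* warm up the previous entry's flags word before it is needed */
		prefetchw_prev_lru_page(page, &l_hold, flags);
		/* ... isolate and classify the page ... */
	}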
|
|
|
|
|
|
|
|
/*
|
|
|
|
* From 0 .. 100. Higher means more swappy.
|
|
|
|
*/
|
|
|
|
int vm_swappiness = 60;
|
2006-06-23 13:03:47 +04:00
|
|
|
long vm_total_pages; /* The total number of pages which the VM controls */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
static LIST_HEAD(shrinker_list);
|
|
|
|
static DECLARE_RWSEM(shrinker_rwsem);
|
|
|
|
|
2008-03-05 01:28:39 +03:00
|
|
|
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
|
2009-01-08 05:08:23 +03:00
|
|
|
#define scanning_global_lru(sc) (!(sc)->mem_cgroup)
|
2008-02-07 11:14:29 +03:00
|
|
|
#else
|
2009-01-08 05:08:23 +03:00
|
|
|
#define scanning_global_lru(sc) (1)
|
2008-02-07 11:14:29 +03:00
|
|
|
#endif
|
|
|
|
|
2009-01-08 05:08:15 +03:00
|
|
|
static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
|
|
|
|
struct scan_control *sc)
|
|
|
|
{
|
2009-01-08 05:08:23 +03:00
|
|
|
if (!scanning_global_lru(sc))
|
2009-01-08 05:08:20 +03:00
|
|
|
return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
|
|
|
|
|
2009-01-08 05:08:15 +03:00
|
|
|
return &zone->reclaim_stat;
|
|
|
|
}
|
|
|
|
|
2009-09-22 04:03:09 +04:00
|
|
|
static unsigned long zone_nr_lru_pages(struct zone *zone,
|
|
|
|
struct scan_control *sc, enum lru_list lru)
|
2009-01-08 05:08:16 +03:00
|
|
|
{
|
2009-01-08 05:08:23 +03:00
|
|
|
if (!scanning_global_lru(sc))
|
2009-01-08 05:08:19 +03:00
|
|
|
return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
|
|
|
|
|
2009-01-08 05:08:16 +03:00
|
|
|
return zone_page_state(zone, NR_LRU_BASE + lru);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* Add a shrinker callback to be called from the vm
|
|
|
|
*/
|
2007-07-17 15:03:17 +04:00
|
|
|
void register_shrinker(struct shrinker *shrinker)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2007-07-17 15:03:17 +04:00
|
|
|
shrinker->nr = 0;
|
|
|
|
down_write(&shrinker_rwsem);
|
|
|
|
list_add_tail(&shrinker->list, &shrinker_list);
|
|
|
|
up_write(&shrinker_rwsem);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2007-07-17 15:03:17 +04:00
|
|
|
EXPORT_SYMBOL(register_shrinker);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove one
|
|
|
|
*/
|
2007-07-17 15:03:17 +04:00
|
|
|
void unregister_shrinker(struct shrinker *shrinker)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
down_write(&shrinker_rwsem);
|
|
|
|
list_del(&shrinker->list);
|
|
|
|
up_write(&shrinker_rwsem);
|
|
|
|
}
|
2007-07-17 15:03:17 +04:00
|
|
|
EXPORT_SYMBOL(unregister_shrinker);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
#define SHRINK_BATCH 128
|
|
|
|
/*
|
|
|
|
* Call the shrink functions to age shrinkable caches
|
|
|
|
*
|
|
|
|
* Here we assume it costs one seek to replace a lru page and that it also
|
|
|
|
* takes a seek to recreate a cache object. With this in mind we age equal
|
|
|
|
* percentages of the lru and ageable caches. This should balance the seeks
|
|
|
|
* generated by these structures.
|
|
|
|
*
|
2007-10-20 03:27:18 +04:00
|
|
|
* If the vm encountered mapped pages on the LRU it increases the pressure on
|
2005-04-17 02:20:36 +04:00
|
|
|
* slab to avoid swapping.
|
|
|
|
*
|
|
|
|
* We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
|
|
|
|
*
|
|
|
|
* `lru_pages' represents the number of on-LRU pages in all the zones which
|
|
|
|
* are eligible for the caller's allocation attempt. It is used for balancing
|
|
|
|
* slab reclaim versus page reclaim.
|
2005-06-22 04:14:35 +04:00
|
|
|
*
|
|
|
|
* Returns the number of slab objects which we shrunk.
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2006-03-22 11:08:19 +03:00
|
|
|
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
|
|
|
|
unsigned long lru_pages)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct shrinker *shrinker;
|
2006-03-22 11:08:19 +03:00
|
|
|
unsigned long ret = 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
if (scanned == 0)
|
|
|
|
scanned = SWAP_CLUSTER_MAX;
|
|
|
|
|
|
|
|
if (!down_read_trylock(&shrinker_rwsem))
|
2005-06-22 04:14:35 +04:00
|
|
|
return 1; /* Assume we'll be able to shrink next time */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
list_for_each_entry(shrinker, &shrinker_list, list) {
|
|
|
|
unsigned long long delta;
|
|
|
|
unsigned long total_scan;
|
2007-07-17 15:03:17 +04:00
|
|
|
unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
delta = (4 * scanned) / shrinker->seeks;
|
2005-11-29 00:44:15 +03:00
|
|
|
delta *= max_pass;
|
2005-04-17 02:20:36 +04:00
|
|
|
do_div(delta, lru_pages + 1);
|
|
|
|
shrinker->nr += delta;
|
2005-11-29 00:44:15 +03:00
|
|
|
if (shrinker->nr < 0) {
|
2009-04-01 02:23:29 +04:00
|
|
|
printk(KERN_ERR "shrink_slab: %pF negative objects to "
|
|
|
|
"delete nr=%ld\n",
|
|
|
|
shrinker->shrink, shrinker->nr);
|
2005-11-29 00:44:15 +03:00
|
|
|
shrinker->nr = max_pass;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Avoid risking looping forever due to too large nr value:
|
|
|
|
* never try to free more than twice the estimated number of
|
|
|
|
* freeable entries.
|
|
|
|
*/
|
|
|
|
if (shrinker->nr > max_pass * 2)
|
|
|
|
shrinker->nr = max_pass * 2;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
total_scan = shrinker->nr;
|
|
|
|
shrinker->nr = 0;
|
|
|
|
|
|
|
|
while (total_scan >= SHRINK_BATCH) {
|
|
|
|
long this_scan = SHRINK_BATCH;
|
|
|
|
int shrink_ret;
|
2005-06-22 04:14:35 +04:00
|
|
|
int nr_before;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-07-17 15:03:17 +04:00
|
|
|
nr_before = (*shrinker->shrink)(0, gfp_mask);
|
|
|
|
shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (shrink_ret == -1)
|
|
|
|
break;
|
2005-06-22 04:14:35 +04:00
|
|
|
if (shrink_ret < nr_before)
|
|
|
|
ret += nr_before - shrink_ret;
|
2006-06-30 12:55:45 +04:00
|
|
|
count_vm_events(SLABS_SCANNED, this_scan);
|
2005-04-17 02:20:36 +04:00
|
|
|
total_scan -= this_scan;
|
|
|
|
|
|
|
|
cond_resched();
|
|
|
|
}
|
|
|
|
|
|
|
|
shrinker->nr += total_scan;
|
|
|
|
}
|
|
|
|
up_read(&shrinker_rwsem);
|
2005-06-22 04:14:35 +04:00
|
|
|
return ret;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
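(To make the aging formula above concrete, a worked example with
illustrative numbers: scanned = 1024 LRU pages, shrinker->seeks = 2,
max_pass = 10000 cache objects, lru_pages = 100000.)

	delta  = (4 * 1024) / 2        =      2048
	delta *= 10000                 =  20480000
	delta /= (100000 + 1)          ~=      204

So about 204 of the 10000 objects (~2%) are queued for scanning while ~1%
of the LRU was scanned; the 4/seeks factor scales cache pressure by the
seek cost of recreating an object, per the cost model in the comment above.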
|
|
|
|
|
|
|
|
static inline int is_page_cache_freeable(struct page *page)
|
|
|
|
{
|
2009-09-22 04:03:00 +04:00
|
|
|
/*
|
|
|
|
* A freeable page cache page is referenced only by the caller
|
|
|
|
* that isolated the page, the page cache radix tree and
|
|
|
|
* optional buffer heads at page->private.
|
|
|
|
*/
|
2009-09-22 04:02:59 +04:00
|
|
|
return page_count(page) - page_has_private(page) == 2;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
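(Reading the check above with concrete reference counts; illustrative only.)

	page_count(page) - page_has_private(page) == 2

	clean pagecache page (isolator + radix tree):   2 - 0 == 2  -> freeable
	same page with buffer heads at page->private:   3 - 1 == 2  -> freeable
	any extra reference, e.g. get_user_pages():     3 - 0 == 3  -> keep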
|
|
|
|
|
|
|
|
static int may_write_to_queue(struct backing_dev_info *bdi)
|
|
|
|
{
|
2006-01-08 12:00:47 +03:00
|
|
|
if (current->flags & PF_SWAPWRITE)
|
2005-04-17 02:20:36 +04:00
|
|
|
return 1;
|
|
|
|
if (!bdi_write_congested(bdi))
|
|
|
|
return 1;
|
|
|
|
if (bdi == current->backing_dev_info)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We detected a synchronous write error writing a page out. Probably
|
|
|
|
* -ENOSPC. We need to propagate that into the address_space for a subsequent
|
|
|
|
* fsync(), msync() or close().
|
|
|
|
*
|
|
|
|
* The tricky part is that after writepage we cannot touch the mapping: nothing
|
|
|
|
* prevents it from being freed up. But we have a ref on the page and once
|
|
|
|
* that page is locked, the mapping is pinned.
|
|
|
|
*
|
|
|
|
* We're allowed to run sleeping lock_page() here because we know the caller has
|
|
|
|
* __GFP_FS.
|
|
|
|
*/
|
|
|
|
static void handle_write_error(struct address_space *mapping,
|
|
|
|
struct page *page, int error)
|
|
|
|
{
|
|
|
|
lock_page(page);
|
2007-05-08 11:23:25 +04:00
|
|
|
if (page_mapping(page) == mapping)
|
|
|
|
mapping_set_error(mapping, error);
|
2005-04-17 02:20:36 +04:00
|
|
|
unlock_page(page);
|
|
|
|
}
|
|
|
|
|
2007-08-23 01:01:26 +04:00
|
|
|
/* Request for sync pageout. */
|
|
|
|
enum pageout_io {
|
|
|
|
PAGEOUT_IO_ASYNC,
|
|
|
|
PAGEOUT_IO_SYNC,
|
|
|
|
};
|
|
|
|
|
2006-06-23 13:03:38 +04:00
|
|
|
/* possible outcome of pageout() */
|
|
|
|
typedef enum {
|
|
|
|
/* failed to write page out, page is locked */
|
|
|
|
PAGE_KEEP,
|
|
|
|
/* move page to the active list, page is locked */
|
|
|
|
PAGE_ACTIVATE,
|
|
|
|
/* page has been sent to the disk successfully, page is unlocked */
|
|
|
|
PAGE_SUCCESS,
|
|
|
|
/* page is clean and locked */
|
|
|
|
PAGE_CLEAN,
|
|
|
|
} pageout_t;
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
[PATCH] vmscan: rename functions
We have:
try_to_free_pages
->shrink_caches(struct zone **zones, ..)
->shrink_zone(struct zone *, ...)
->shrink_cache(struct zone *, ...)
->shrink_list(struct list_head *, ...)
->refill_inactive_list((struct zone *, ...)
which is fairly irrational.
Rename things so that we have
try_to_free_pages
->shrink_zones(struct zone **zones, ..)
->shrink_zone(struct zone *, ...)
->shrink_inactive_list(struct zone *, ...)
->shrink_page_list(struct list_head *, ...)
->shrink_active_list(struct zone *, ...)
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-22 11:08:21 +03:00
|
|
|
* pageout is called by shrink_page_list() for each dirty page.
|
|
|
|
* Calls ->writepage().
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2007-08-23 01:01:26 +04:00
|
|
|
static pageout_t pageout(struct page *page, struct address_space *mapping,
|
|
|
|
enum pageout_io sync_writeback)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If the page is dirty, only perform writeback if that write
|
|
|
|
* will be non-blocking, to prevent this allocation from being
|
|
|
|
* stalled by pagecache activity. But note that there may be
|
|
|
|
* stalls if we need to run get_block(). We could test
|
|
|
|
* PagePrivate for that.
|
|
|
|
*
|
2009-12-15 04:58:49 +03:00
|
|
|
* If this process is currently in __generic_file_aio_write() against
|
2005-04-17 02:20:36 +04:00
|
|
|
* this page's queue, we can perform writeback even if that
|
|
|
|
* will block.
|
|
|
|
*
|
|
|
|
* If the page is swapcache, write it back even if that would
|
|
|
|
* block, for some throttling. This happens by accident, because
|
|
|
|
* swap_backing_dev_info is bust: it doesn't reflect the
|
|
|
|
* congestion state of the swapdevs. Easy to fix, if needed.
|
|
|
|
*/
|
|
|
|
if (!is_page_cache_freeable(page))
|
|
|
|
return PAGE_KEEP;
|
|
|
|
if (!mapping) {
|
|
|
|
/*
|
|
|
|
* Some data journaling orphaned pages can have
|
|
|
|
* page->mapping == NULL while being dirty with clean buffers.
|
|
|
|
*/
|
2009-04-03 19:42:36 +04:00
|
|
|
if (page_has_private(page)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (try_to_free_buffers(page)) {
|
|
|
|
ClearPageDirty(page);
|
2008-04-30 11:55:07 +04:00
|
|
|
printk("%s: orphaned page\n", __func__);
|
2005-04-17 02:20:36 +04:00
|
|
|
return PAGE_CLEAN;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return PAGE_KEEP;
|
|
|
|
}
|
|
|
|
if (mapping->a_ops->writepage == NULL)
|
|
|
|
return PAGE_ACTIVATE;
|
|
|
|
if (!may_write_to_queue(mapping->backing_dev_info))
|
|
|
|
return PAGE_KEEP;
|
|
|
|
|
|
|
|
if (clear_page_dirty_for_io(page)) {
|
|
|
|
int res;
|
|
|
|
struct writeback_control wbc = {
|
|
|
|
.sync_mode = WB_SYNC_NONE,
|
|
|
|
.nr_to_write = SWAP_CLUSTER_MAX,
|
[PATCH] writeback: fix range handling
When a writeback_control's `start' and `end' fields are used to
indicate a one-byte-range starting at file offset zero, the required
values of .start=0,.end=0 mean that the ->writepages() implementation
has no way of telling that it is being asked to perform a range
request. Because we're currently overloading (start == 0 && end == 0)
to mean "this is not a write-a-range request".
To make all this sane, the patch changes range of writeback_control.
So caller does: If it is calling ->writepages() to write pages, it
sets range (range_start/end or range_cyclic) always.
And if range_cyclic is true, ->writepages() thinks the range is
cyclic, otherwise it just uses range_start and range_end.
This patch does,
- Add LLONG_MAX, LLONG_MIN, ULLONG_MAX to include/linux/kernel.h
-1 is usually ok for range_end (type is long long). But, if someone did,
range_end += val; range_end is "val - 1"
u64val = range_end >> bits; u64val is "~(0ULL)"
or something, they are wrong. So, this adds LLONG_MAX to avoid nasty
things, and uses LLONG_MAX for range_end.
- All callers of ->writepages() sets range_start/end or range_cyclic.
- Fix updates of ->writeback_index. It seems already bit strange.
If it starts at 0 and ended by check of nr_to_write, this last
index may reduce chance to scan end of file. So, this updates
->writeback_index only if range_cyclic is true or whole-file is
scanned.
Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Anton Altaparmakov <aia21@cantab.net>
Cc: Steven French <sfrench@us.ibm.com>
Cc: "Vladimir V. Saveliev" <vs@namesys.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-23 13:03:26 +04:00
|
|
|
.range_start = 0,
|
|
|
|
.range_end = LLONG_MAX,
|
2005-04-17 02:20:36 +04:00
|
|
|
.nonblocking = 1,
|
|
|
|
.for_reclaim = 1,
|
|
|
|
};
|
|
|
|
|
|
|
|
SetPageReclaim(page);
|
|
|
|
res = mapping->a_ops->writepage(page, &wbc);
|
|
|
|
if (res < 0)
|
|
|
|
handle_write_error(mapping, page, res);
|
2005-12-16 01:28:17 +03:00
|
|
|
if (res == AOP_WRITEPAGE_ACTIVATE) {
|
2005-04-17 02:20:36 +04:00
|
|
|
ClearPageReclaim(page);
|
|
|
|
return PAGE_ACTIVATE;
|
|
|
|
}
|
2007-08-23 01:01:26 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Wait on writeback if requested to. This happens when
|
|
|
|
* direct reclaiming a large contiguous area and the
|
|
|
|
* first attempt to free a range of pages fails.
|
|
|
|
*/
|
|
|
|
if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
|
|
|
|
wait_on_page_writeback(page);
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!PageWriteback(page)) {
|
|
|
|
/* synchronous write or broken a_ops? */
|
|
|
|
ClearPageReclaim(page);
|
|
|
|
}
|
2006-09-27 12:50:00 +04:00
|
|
|
inc_zone_page_state(page, NR_VMSCAN_WRITE);
|
2005-04-17 02:20:36 +04:00
|
|
|
return PAGE_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
return PAGE_CLEAN;
|
|
|
|
}
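(How a caller consumes the pageout_t result; a sketch in the style of
shrink_page_list(), with the goto labels assumed from that function.)

	switch (pageout(page, mapping, sync_writeback)) {
	case PAGE_KEEP:
		goto keep_locked;
	case PAGE_ACTIVATE:
		goto activate_locked;
	case PAGE_SUCCESS:
		/* page was written and unlocked; re-check its state */
		if (PageWriteback(page) || PageDirty(page))
			goto keep;
		break;		/* otherwise try to free it below */
	case PAGE_CLEAN:
		break;		/* no write needed; try to free it below */
	}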
|
|
|
|
|
2006-10-17 11:09:36 +04:00
|
|
|
/*
|
2008-07-26 06:45:30 +04:00
|
|
|
* Same as remove_mapping, but if the page is removed from the mapping, it
|
|
|
|
* gets returned with a refcount of 0.
|
2006-10-17 11:09:36 +04:00
|
|
|
*/
|
2008-07-26 06:45:30 +04:00
|
|
|
static int __remove_mapping(struct address_space *mapping, struct page *page)
|
2006-01-08 12:00:48 +03:00
|
|
|
{
|
2006-09-26 10:31:23 +04:00
|
|
|
BUG_ON(!PageLocked(page));
|
|
|
|
BUG_ON(mapping != page_mapping(page));
|
2006-01-08 12:00:48 +03:00
|
|
|
|
2008-07-26 06:45:32 +04:00
|
|
|
spin_lock_irq(&mapping->tree_lock);
|
2006-01-08 12:00:48 +03:00
|
|
|
/*
|
2006-09-27 12:50:02 +04:00
|
|
|
* The non-racy check for a busy page.
|
|
|
|
*
|
|
|
|
* Must be careful with the order of the tests. When someone has
|
|
|
|
* a ref to the page, it may be possible that they dirty it then
|
|
|
|
* drop the reference. So if PageDirty is tested before page_count
|
|
|
|
* here, then the following race may occur:
|
|
|
|
*
|
|
|
|
* get_user_pages(&page);
|
|
|
|
* [user mapping goes away]
|
|
|
|
* write_to(page);
|
|
|
|
* !PageDirty(page) [good]
|
|
|
|
* SetPageDirty(page);
|
|
|
|
* put_page(page);
|
|
|
|
* !page_count(page) [good, discard it]
|
|
|
|
*
|
|
|
|
* [oops, our write_to data is lost]
|
|
|
|
*
|
|
|
|
* Reversing the order of the tests ensures such a situation cannot
|
|
|
|
* escape unnoticed. The smp_rmb is needed to ensure the page->flags
|
|
|
|
* load is not satisfied before that of page->_count.
|
|
|
|
*
|
|
|
|
* Note that if SetPageDirty is always performed via set_page_dirty,
|
|
|
|
* and thus under tree_lock, then this ordering is not required.
|
2006-01-08 12:00:48 +03:00
|
|
|
*/
|
2008-07-26 06:45:30 +04:00
|
|
|
if (!page_freeze_refs(page, 2))
|
2006-01-08 12:00:48 +03:00
|
|
|
goto cannot_free;
|
2008-07-26 06:45:30 +04:00
|
|
|
/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
|
|
|
|
if (unlikely(PageDirty(page))) {
|
|
|
|
page_unfreeze_refs(page, 2);
|
2006-01-08 12:00:48 +03:00
|
|
|
goto cannot_free;
|
2008-07-26 06:45:30 +04:00
|
|
|
}
|
2006-01-08 12:00:48 +03:00
|
|
|
|
|
|
|
if (PageSwapCache(page)) {
|
|
|
|
swp_entry_t swap = { .val = page_private(page) };
|
|
|
|
__delete_from_swap_cache(page);
|
2008-07-26 06:45:32 +04:00
|
|
|
spin_unlock_irq(&mapping->tree_lock);
|
2009-06-17 02:32:52 +04:00
|
|
|
swapcache_free(swap, page);
|
2008-07-26 06:45:30 +04:00
|
|
|
} else {
|
|
|
|
__remove_from_page_cache(page);
|
2008-07-26 06:45:32 +04:00
|
|
|
spin_unlock_irq(&mapping->tree_lock);
|
2009-05-29 01:34:28 +04:00
|
|
|
mem_cgroup_uncharge_cache_page(page);
|
2006-01-08 12:00:48 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
cannot_free:
|
2008-07-26 06:45:32 +04:00
|
|
|
spin_unlock_irq(&mapping->tree_lock);
|
2006-01-08 12:00:48 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-07-26 06:45:30 +04:00
|
|
|
/*
|
|
|
|
* Attempt to detach a locked page from its ->mapping. If it is dirty or if
|
|
|
|
* someone else has a ref on the page, abort and return 0. If it was
|
|
|
|
* successfully detached, return 1. Assumes the caller has a single ref on
|
|
|
|
* this page.
|
|
|
|
*/
|
|
|
|
int remove_mapping(struct address_space *mapping, struct page *page)
|
|
|
|
{
|
|
|
|
if (__remove_mapping(mapping, page)) {
|
|
|
|
/*
|
|
|
|
* Unfreezing the refcount with 1 rather than 2 effectively
|
|
|
|
* drops the pagecache ref for us without requiring another
|
|
|
|
* atomic operation.
|
|
|
|
*/
|
|
|
|
page_unfreeze_refs(page, 1);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
Unevictable LRU Infrastructure
When the system contains lots of mlocked or otherwise unevictable pages,
the pageout code (kswapd) can spend lots of time scanning over these
pages. Worse still, the presence of lots of unevictable pages can confuse
kswapd into thinking that more aggressive pageout modes are required,
resulting in all kinds of bad behaviour.
Infrastructure to manage pages excluded from reclaim--i.e., hidden from
vmscan. Based on a patch by Larry Woodman of Red Hat. Reworked to
maintain "unevictable" pages on a separate per-zone LRU list, to "hide"
them from vmscan.
Kosaki Motohiro added the support for the memory controller unevictable
lru list.
Pages on the unevictable list have both PG_unevictable and PG_lru set.
Thus, PG_unevictable is analogous to and mutually exclusive with
PG_active--it specifies which LRU list the page is on.
The unevictable infrastructure is enabled by a new mm Kconfig option
[CONFIG_]UNEVICTABLE_LRU.
A new function 'page_evictable(page, vma)' in vmscan.c tests whether or
not a page may be evictable. Subsequent patches will add the various
!evictable tests. We'll want to keep these tests light-weight for use in
shrink_active_list() and, possibly, the fault path.
To avoid races between tasks putting pages [back] onto an LRU list and
tasks that might be moving the page from non-evictable to evictable state,
the new function 'putback_lru_page()' -- inverse to 'isolate_lru_page()'
-- tests the "evictability" of a page after placing it on the LRU, before
dropping the reference. If the page has become unevictable,
putback_lru_page() will redo the 'putback', thus moving the page to the
unevictable list. This way, we avoid "stranding" evictable pages on the
unevictable list.
[akpm@linux-foundation.org: fix fallout from out-of-order merge]
[riel@redhat.com: fix UNEVICTABLE_LRU and !PROC_PAGE_MONITOR build]
[nishimura@mxp.nes.nec.co.jp: remove redundant mapping check]
[kosaki.motohiro@jp.fujitsu.com: unevictable-lru-infrastructure: putback_lru_page()/unevictable page handling rework]
[kosaki.motohiro@jp.fujitsu.com: kill unnecessary lock_page() in vmscan.c]
[kosaki.motohiro@jp.fujitsu.com: revert migration change of unevictable lru infrastructure]
[kosaki.motohiro@jp.fujitsu.com: revert to unevictable-lru-infrastructure-kconfig-fix.patch]
[kosaki.motohiro@jp.fujitsu.com: restore patch failure of vmstat-unevictable-and-mlocked-pages-vm-events.patch]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Debugged-by: Benjamin Kidwell <benjkidwell@yahoo.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:39 +04:00
|
|
|
/**
|
|
|
|
* putback_lru_page - put previously isolated page onto appropriate LRU list
|
|
|
|
* @page: page to be put back to appropriate lru list
|
|
|
|
*
|
|
|
|
* Add previously isolated @page to appropriate LRU list.
|
|
|
|
* Page may still be unevictable for other reasons.
|
|
|
|
*
|
|
|
|
* lru_lock must not be held, interrupts must be enabled.
|
|
|
|
*/
|
|
|
|
void putback_lru_page(struct page *page)
|
|
|
|
{
|
|
|
|
int lru;
|
|
|
|
int active = !!TestClearPageActive(page);
|
2008-10-19 07:26:40 +04:00
|
|
|
int was_unevictable = PageUnevictable(page);
|
2008-10-19 07:26:39 +04:00
|
|
|
|
|
|
|
VM_BUG_ON(PageLRU(page));
|
|
|
|
|
|
|
|
redo:
|
|
|
|
ClearPageUnevictable(page);
|
|
|
|
|
|
|
|
if (page_evictable(page, NULL)) {
|
|
|
|
/*
|
|
|
|
* For evictable pages, we can use the cache.
|
|
|
|
* In event of a race, worst case is we end up with an
|
|
|
|
* unevictable page on [in]active list.
|
|
|
|
* We know how to handle that.
|
|
|
|
*/
|
2009-09-22 04:02:58 +04:00
|
|
|
lru = active + page_lru_base_type(page);
|
2008-10-19 07:26:39 +04:00
|
|
|
lru_cache_add_lru(page, lru);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Put unevictable pages directly on zone's unevictable
|
|
|
|
* list.
|
|
|
|
*/
|
|
|
|
lru = LRU_UNEVICTABLE;
|
|
|
|
add_page_to_unevictable_list(page);
|
2009-10-27 02:50:00 +03:00
|
|
|
/*
|
|
|
|
* When racing with an mlock clearing (page is
|
|
|
|
* unlocked), make sure that if the other thread does
|
|
|
|
* not observe our setting of PG_lru and fails
|
|
|
|
* isolation, we see PG_mlocked cleared below and move
|
|
|
|
* the page back to the evictable list.
|
|
|
|
*
|
|
|
|
* The other side is TestClearPageMlocked().
|
|
|
|
*/
|
|
|
|
smp_mb();
|
2008-10-19 07:26:39 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* page's status can change while we move it among lru lists. If an evictable
|
|
|
|
* page is on the unevictable list, it will never be freed. To avoid that,
|
|
|
|
* check again after we have added it to the list.
|
|
|
|
*/
|
|
|
|
if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
|
|
|
|
if (!isolate_lru_page(page)) {
|
|
|
|
put_page(page);
|
|
|
|
goto redo;
|
|
|
|
}
|
|
|
|
/* This means someone else dropped this page from the LRU,
|
|
|
|
* so it will be freed or put back to the LRU again. There is
|
|
|
|
* nothing to do here.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
|
2008-10-19 07:26:40 +04:00
|
|
|
if (was_unevictable && lru != LRU_UNEVICTABLE)
|
|
|
|
count_vm_event(UNEVICTABLE_PGRESCUED);
|
|
|
|
else if (!was_unevictable && lru == LRU_UNEVICTABLE)
|
|
|
|
count_vm_event(UNEVICTABLE_PGCULLED);
|
|
|
|
|
2008-10-19 07:26:39 +04:00
|
|
|
put_page(page); /* drop ref from isolate */
|
|
|
|
}
|
|
|
|
|
vmscan: factor out page reference checks
The used-once mapped file page detection patchset.
It is meant to help workloads with large amounts of shortly used file
mappings, like rtorrent hashing a file or git when dealing with loose
objects (git gc on a bigger site?).
Right now, the VM activates referenced mapped file pages on first
encounter on the inactive list and it takes a full memory cycle to
reclaim them again. When those pages dominate memory, the system
no longer has a meaningful notion of 'working set' and is required
to give up the active list to make reclaim progress. Obviously,
this results in rather bad scanning latencies and the wrong pages
being reclaimed.
This patch makes the VM be more careful about activating mapped file
pages in the first place. The minimum granted lifetime without
another memory access becomes an inactive list cycle instead of the
full memory cycle, which is more natural given the mentioned loads.
This test resembles a hashing rtorrent process. Sequentially, 32MB
chunks of a file are mapped into memory, hashed (sha1) and unmapped
again. While this happens, every 5 seconds a process is launched and
its execution time taken:
python2.4 -c 'import pydoc'
old: max=2.31s mean=1.26s (0.34)
new: max=1.25s mean=0.32s (0.32)
find /etc -type f
old: max=2.52s mean=1.44s (0.43)
new: max=1.92s mean=0.12s (0.17)
vim -c ':quit'
old: max=6.14s mean=4.03s (0.49)
new: max=3.48s mean=2.41s (0.25)
mplayer --help
old: max=8.08s mean=5.74s (1.02)
new: max=3.79s mean=1.32s (0.81)
overall hash time (stdev):
old: time=1192.30 (12.85) thruput=25.78mb/s (0.27)
new: time=1060.27 (32.58) thruput=29.02mb/s (0.88) (-11%)
I also tested kernbench with regular IO streaming in the background to
see whether the delayed activation of frequently used mapped file
pages had a negative impact on performance in the presence of pressure
on the inactive list. The patch made no significant difference in
timing, neither for kernbench nor for the streaming IO throughput.
The first patch submission raised concerns about the cost of the extra
faults for actually activated pages on machines that have no hardware
support for young page table entries.
I created an artificial worst case scenario on an ARM machine with
around 300MHz and 64MB of memory to figure out the dimensions
involved. The test would mmap a file of 20MB, then
1. touch all its pages to fault them in
2. force one full scan cycle on the inactive file LRU
-- old: mapping pages activated
-- new: mapping pages inactive
3. touch the mapping pages again
-- old and new: fault exceptions to set the young bits
4. force another full scan cycle on the inactive file LRU
5. touch the mapping pages one last time
-- new: fault exceptions to set the young bits
The test showed an overall increase of 6% in time over 100 iterations
of the above (old: ~212sec, new: ~225sec). 13 secs total overhead /
(100 * 5k pages), ignoring the execution time of the test itself,
makes for about 25us overhead for every page that gets actually
activated. Note:
1. File mapping the size of one third of main memory, _completely_
in active use across memory pressure - i.e., most pages referenced
within one LRU cycle. This should be rare to non-existent,
especially on such embedded setups.
2. Many huge activation batches. Those batches only occur when the
working set fluctuates. If it changes completely between every full
LRU cycle, you have problematic reclaim overhead anyway.
3. Access of activated pages at maximum speed: sequential loads from
every single page without doing anything in between. In reality,
the extra faults will get distributed between actual operations on
the data.
So even if a workload manages to get the VM into the situation of
activating a third of memory in one go on such a setup, it will take
2.2 seconds instead of 2.1 without the patch.
Comparing the numbers (and my user-experience over several months),
I think this change is an overall improvement to the VM.
Patch 1 is only refactoring to break up that ugly compound conditional
in shrink_page_list() and make it easy to document and add new checks
in a readable fashion.
Patch 2 gets rid of the obsolete page_mapping_inuse(). It's not
strictly related to #3, but it was in the original submission and is a
net simplification, so I kept it.
Patch 3 implements used-once detection of mapped file pages.
This patch:
Moving the big conditional into its own predicate function makes the code
a bit easier to read and allows for better commenting on the checks
one-by-one.
This is just cleaning up, no semantics should have been changed.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2010-03-06 00:42:19 +03:00
|
|
|
enum page_references {
|
|
|
|
PAGEREF_RECLAIM,
|
|
|
|
PAGEREF_RECLAIM_CLEAN,
|
2010-03-06 00:42:22 +03:00
|
|
|
PAGEREF_KEEP,
|
2010-03-06 00:42:19 +03:00
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
	referenced_page = TestClearPageReferenced(page);
	/* Lumpy reclaim - ignore references */
	if (sc->lumpy_reclaim_mode)
		return PAGEREF_RECLAIM;

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageAnon(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}
	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page)
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

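As a summary (editor's addition, derived from the function above and ignoring
the lumpy-reclaim and VM_LOCKED early exits), the decisions reduce to:

/*
 * young ptes? | PG_referenced? | anon? -> page_check_references()
 * ------------+----------------+---------------------------------------
 *     no      |       no       |  any  -> PAGEREF_RECLAIM
 *     no      |       yes      |  any  -> PAGEREF_RECLAIM_CLEAN
 *     yes     |       any      |  yes  -> PAGEREF_ACTIVATE
 *     yes     |       yes      |  no   -> PAGEREF_ACTIVATE
 *     yes     |       no       |  no   -> PAGEREF_KEEP (PG_referenced set)
 *
 * ("young ptes" = page_referenced() found referencing page tables;
 *  "PG_referenced" = TestClearPageReferenced() returned true.)
 */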
[PATCH] vmscan: rename functions
We have:
try_to_free_pages
  ->shrink_caches(struct zone **zones, ..)
    ->shrink_zone(struct zone *, ...)
      ->shrink_cache(struct zone *, ...)
        ->shrink_list(struct list_head *, ...)
      ->refill_inactive_list(struct zone *, ...)
which is fairly irrational.
Rename things so that we have
try_to_free_pages
  ->shrink_zones(struct zone **zones, ..)
    ->shrink_zone(struct zone *, ...)
      ->shrink_inactive_list(struct zone *, ...)
        ->shrink_page_list(struct list_head *, ...)
      ->shrink_active_list(struct zone *, ...)
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-22 11:08:21 +03:00
/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
					struct scan_control *sc,
					enum pageout_io sync_writeback)
{
	LIST_HEAD(ret_pages);
	struct pagevec freed_pvec;
	int pgactivate = 0;
	unsigned long nr_reclaimed = 0;

	cond_resched();

	pagevec_init(&freed_pvec, 1);
	while (!list_empty(page_list)) {
		enum page_references references;
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));

		sc->nr_scanned++;
mlock: mlocked pages are unevictable
Make sure that mlocked pages also live on the unevictable LRU, so kswapd
will not scan them over and over again.
This is achieved through various strategies:
1) add yet another page flag--PG_mlocked--to indicate that
the page is locked for efficient testing in vmscan and,
optionally, the fault path. This allows early culling of
unevictable pages, preventing them from getting to
page_referenced()/try_to_unmap(). Also allows separate
accounting of mlock'd pages, as Nick's original patch
did.
Note: Nick's original mlock patch used a PG_mlocked
flag. I had removed this in favor of the PG_unevictable
flag + an mlock_count [new page struct member]. I
restored the PG_mlocked flag to eliminate the new
count field.
2) add the mlock/unevictable infrastructure to mm/mlock.c,
with internal APIs in mm/internal.h. This is a rework
of Nick's original patch to these files, taking into
account that mlocked pages are now kept on the unevictable
LRU list.
3) update vmscan.c:page_evictable() to check PageMlocked()
and, if a vma is passed in, the vm_flags. Note that the vma
will only be passed in for new pages in the fault path,
and then only if the "cull unevictable pages in fault
path" patch is included.
4) add try_to_unlock() to rmap.c to walk a page's rmap and
ClearPageMlocked() if no other vmas have it mlocked.
Reuses as much of try_to_unmap() as possible. This
effectively replaces the use of one of the lru list links
as an mlock count. If this mechanism lets pages in mlocked
vmas leak through w/o PG_mlocked set [I don't know that it
does], we should catch them later in try_to_unmap(). One
hopes this will be rare, as it will be relatively expensive.
Original mm/internal.h, mm/rmap.c and mm/mlock.c changes:
Signed-off-by: Nick Piggin <npiggin@suse.de>
splitlru: introduce __get_user_pages():
New munlock processing needs GUP_FLAGS_IGNORE_VMA_PERMISSIONS,
because the current get_user_pages() can't grab PROT_NONE pages and
therefore PROT_NONE pages can't be munlocked.
[akpm@linux-foundation.org: fix this for pagemap-pass-mm-into-pagewalkers.patch]
[akpm@linux-foundation.org: untangle patch interdependencies]
[akpm@linux-foundation.org: fix things after out-of-order merging]
[hugh@veritas.com: fix page-flags mess]
[lee.schermerhorn@hp.com: fix munlock page table walk - now requires 'mm']
[kosaki.motohiro@jp.fujitsu.com: build fix]
[kosaki.motohiro@jp.fujitsu.com: fix truncate race and several comments]
[kosaki.motohiro@jp.fujitsu.com: splitlru: introduce __get_user_pages()]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:44 +04:00
		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;
Unevictable LRU Infrastructure
When the system contains lots of mlocked or otherwise unevictable pages,
the pageout code (kswapd) can spend lots of time scanning over these
pages. Worse still, the presence of lots of unevictable pages can confuse
kswapd into thinking that more aggressive pageout modes are required,
resulting in all kinds of bad behaviour.
Infrastructure to manage pages excluded from reclaim--i.e., hidden from
vmscan. Based on a patch by Larry Woodman of Red Hat. Reworked to
maintain "unevictable" pages on a separate per-zone LRU list, to "hide"
them from vmscan.
Kosaki Motohiro added the support for the memory controller unevictable
lru list.
Pages on the unevictable list have both PG_unevictable and PG_lru set.
Thus, PG_unevictable is analogous to and mutually exclusive with
PG_active--it specifies which LRU list the page is on.
The unevictable infrastructure is enabled by a new mm Kconfig option
[CONFIG_]UNEVICTABLE_LRU.
A new function 'page_evictable(page, vma)' in vmscan.c tests whether or
not a page may be evictable. Subsequent patches will add the various
!evictable tests. We'll want to keep these tests light-weight for use in
shrink_active_list() and, possibly, the fault path.
To avoid races between tasks putting pages [back] onto an LRU list and
tasks that might be moving the page from non-evictable to evictable state,
the new function 'putback_lru_page()' -- inverse to 'isolate_lru_page()'
-- tests the "evictability" of a page after placing it on the LRU, before
dropping the reference. If the page has become unevictable,
putback_lru_page() will redo the 'putback', thus moving the page to the
unevictable list. This way, we avoid "stranding" evictable pages on the
unevictable list.
[akpm@linux-foundation.org: fix fallout from out-of-order merge]
[riel@redhat.com: fix UNEVICTABLE_LRU and !PROC_PAGE_MONITOR build]
[nishimura@mxp.nes.nec.co.jp: remove redundant mapping check]
[kosaki.motohiro@jp.fujitsu.com: unevictable-lru-infrastructure: putback_lru_page()/unevictable page handling rework]
[kosaki.motohiro@jp.fujitsu.com: kill unnecessary lock_page() in vmscan.c]
[kosaki.motohiro@jp.fujitsu.com: revert migration change of unevictable lru infrastructure]
[kosaki.motohiro@jp.fujitsu.com: revert to unevictable-lru-infrastructure-kconfig-fix.patch]
[kosaki.motohiro@jp.fujitsu.com: restore patch failure of vmstat-unevictable-and-mlocked-pages-vm-events.patch]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Debugged-by: Benjamin Kidwell <benjkidwell@yahoo.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:39 +04:00
		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;
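		/*
		 * Editor's note (not in the original source): may_enter_fs
		 * gates pageout paths that can re-enter filesystem code.
		 * File-backed pages need __GFP_FS for ->writepage(); swap
		 * cache pages only issue block I/O, so __GFP_IO suffices.
		 */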
		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			/*
			 * Synchronous reclaim is performed in two passes,
			 * first an asynchronous pass over the list to
			 * start parallel writeback, and a second synchronous
			 * pass to wait for the IO to complete.  Wait here
			 * for any page for which writeback has already
			 * started.
			 */
			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
				wait_on_page_writeback(page);
			else
				goto keep_locked;
		}
		references = page_check_references(page, sc);
		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
mlock: mlocked pages are unevictable
Make sure that mlocked pages also live on the unevictable LRU, so kswapd
will not scan them over and over again.
This is achieved through various strategies:
1) add yet another page flag--PG_mlocked--to indicate that
the page is locked for efficient testing in vmscan and,
optionally, fault path. This allows early culling of
unevictable pages, preventing them from getting to
page_referenced()/try_to_unmap(). Also allows separate
accounting of mlock'd pages, as Nick's original patch
did.
Note: Nick's original mlock patch used a PG_mlocked
flag. I had removed this in favor of the PG_unevictable
flag + an mlock_count [new page struct member]. I
restored the PG_mlocked flag to eliminate the new
count field.
2) add the mlock/unevictable infrastructure to mm/mlock.c,
with internal APIs in mm/internal.h. This is a rework
of Nick's original patch to these files, taking into
account that mlocked pages are now kept on unevictable
LRU list.
3) update vmscan.c:page_evictable() to check PageMlocked()
and, if vma passed in, the vm_flags. Note that the vma
will only be passed in for new pages in the fault path;
and then only if the "cull unevictable pages in fault
path" patch is included.
4) add try_to_unlock() to rmap.c to walk a page's rmap and
ClearPageMlocked() if no other vmas have it mlocked.
Reuses as much of try_to_unmap() as possible. This
effectively replaces the use of one of the lru list links
as an mlock count. If this mechanism let's pages in mlocked
vmas leak through w/o PG_mlocked set [I don't know that it
does], we should catch them later in try_to_unmap(). One
hopes this will be rare, as it will be relatively expensive.
Original mm/internal.h, mm/rmap.c and mm/mlock.c changes:
Signed-off-by: Nick Piggin <npiggin@suse.de>
splitlru: introduce __get_user_pages():
New munlock processing need to GUP_FLAGS_IGNORE_VMA_PERMISSIONS.
because current get_user_pages() can't grab PROT_NONE pages theresore it
cause PROT_NONE pages can't munlock.
[akpm@linux-foundation.org: fix this for pagemap-pass-mm-into-pagewalkers.patch]
[akpm@linux-foundation.org: untangle patch interdependencies]
[akpm@linux-foundation.org: fix things after out-of-order merging]
[hugh@veritas.com: fix page-flags mess]
[lee.schermerhorn@hp.com: fix munlock page table walk - now requires 'mm']
[kosaki.motohiro@jp.fujitsu.com: build fix]
[kosaki.motohiro@jp.fujitsu.com: fix truncate race and several comments]
[kosaki.motohiro@jp.fujitsu.com: splitlru: introduce __get_user_pages()]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:44 +04:00
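As a rough sketch of the page_evictable() check described in point 3 of
the message above (the merged function also consults the page's mapping,
so this is illustrative, not the final code):

	int page_evictable(struct page *page, struct vm_area_struct *vma)
	{
		if (PageMlocked(page))
			return 0;	/* culled early as unevictable */
		if (vma && (vma->vm_flags & VM_LOCKED))
			return 0;	/* new page in an mlocked vma */
		return 1;
	}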
|
|
|
if (PageAnon(page) && !PageSwapCache(page)) {
|
2008-11-20 02:36:37 +03:00
|
|
|
if (!(sc->gfp_mask & __GFP_IO))
|
|
|
|
goto keep_locked;
|
2009-01-07 01:39:39 +03:00
|
|
|
if (!add_to_swap(page))
|
2005-04-17 02:20:36 +04:00
|
|
|
goto activate_locked;
|
2008-11-20 02:36:37 +03:00
|
|
|
may_enter_fs = 1;
|
2008-10-19 07:26:44 +04:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
mapping = page_mapping(page);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The page is mapped into the page tables of one or more
|
|
|
|
* processes. Try to unmap it here.
|
|
|
|
*/
|
|
|
|
if (page_mapped(page) && mapping) {
|
2009-09-16 13:50:10 +04:00
|
|
|
switch (try_to_unmap(page, TTU_UNMAP)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
case SWAP_FAIL:
|
|
|
|
goto activate_locked;
|
|
|
|
case SWAP_AGAIN:
|
|
|
|
goto keep_locked;
|
2008-10-19 07:26:44 +04:00
|
|
|
case SWAP_MLOCK:
|
|
|
|
goto cull_mlocked;
|
2005-04-17 02:20:36 +04:00
|
|
|
case SWAP_SUCCESS:
|
|
|
|
; /* try to free the page below */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (PageDirty(page)) {
|
2010-03-06 00:42:19 +03:00
|
|
|
if (references == PAGEREF_RECLAIM_CLEAN)
|
2005-04-17 02:20:36 +04:00
|
|
|
goto keep_locked;
|
2008-03-24 22:29:52 +03:00
|
|
|
if (!may_enter_fs)
|
2005-04-17 02:20:36 +04:00
|
|
|
goto keep_locked;
|
2006-02-01 14:05:28 +03:00
|
|
|
if (!sc->may_writepage)
|
2005-04-17 02:20:36 +04:00
|
|
|
goto keep_locked;
|
|
|
|
|
|
|
|
/* Page is dirty, try to write it out here */
|
2007-08-23 01:01:26 +04:00
|
|
|
switch (pageout(page, mapping, sync_writeback)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
case PAGE_KEEP:
|
|
|
|
goto keep_locked;
|
|
|
|
case PAGE_ACTIVATE:
|
|
|
|
goto activate_locked;
|
|
|
|
case PAGE_SUCCESS:
|
2008-03-24 22:29:52 +03:00
|
|
|
if (PageWriteback(page) || PageDirty(page))
|
2005-04-17 02:20:36 +04:00
|
|
|
goto keep;
|
|
|
|
/*
|
|
|
|
* A synchronous write - probably a ramdisk. Go
|
|
|
|
* ahead and try to reclaim the page.
|
|
|
|
*/
|
2008-08-02 14:01:03 +04:00
|
|
|
if (!trylock_page(page))
|
2005-04-17 02:20:36 +04:00
|
|
|
goto keep;
|
|
|
|
if (PageDirty(page) || PageWriteback(page))
|
|
|
|
goto keep_locked;
|
|
|
|
mapping = page_mapping(page);
|
|
|
|
case PAGE_CLEAN:
|
|
|
|
; /* try to free the page below */
|
|
|
|
}
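/*
 * For reference, the pageout() results handled above: PAGE_KEEP
 * (write failed, keep the page), PAGE_ACTIVATE (move it to the
 * active list), PAGE_SUCCESS (write has been started) and
 * PAGE_CLEAN (already clean, ready to be freed).
 */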
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the page has buffers, try to free the buffer mappings
|
|
|
|
* associated with this page. If we succeed we try to free
|
|
|
|
* the page as well.
|
|
|
|
*
|
|
|
|
* We do this even if the page is PageDirty().
|
|
|
|
* try_to_release_page() does not perform I/O, but it is
|
|
|
|
* possible for a page to have PageDirty set, but it is actually
|
|
|
|
* clean (all its buffers are clean). This happens if the
|
|
|
|
* buffers were written out directly, with submit_bh(). ext3
|
Unevictable LRU Infrastructure
When the system contains lots of mlocked or otherwise unevictable pages,
the pageout code (kswapd) can spend lots of time scanning over these
pages. Worse still, the presence of lots of unevictable pages can confuse
kswapd into thinking that more aggressive pageout modes are required,
resulting in all kinds of bad behaviour.
Infrastructure to manage pages excluded from reclaim--i.e., hidden from
vmscan. Based on a patch by Larry Woodman of Red Hat. Reworked to
maintain "unevictable" pages on a separate per-zone LRU list, to "hide"
them from vmscan.
Kosaki Motohiro added the support for the memory controller unevictable
lru list.
Pages on the unevictable list have both PG_unevictable and PG_lru set.
Thus, PG_unevictable is analogous to and mutually exclusive with
PG_active--it specifies which LRU list the page is on.
The unevictable infrastructure is enabled by a new mm Kconfig option
[CONFIG_]UNEVICTABLE_LRU.
A new function 'page_evictable(page, vma)' in vmscan.c tests whether or
not a page may be evictable. Subsequent patches will add the various
!evictable tests. We'll want to keep these tests light-weight for use in
shrink_active_list() and, possibly, the fault path.
To avoid races between tasks putting pages [back] onto an LRU list and
tasks that might be moving the page from non-evictable to evictable state,
the new function 'putback_lru_page()' -- inverse to 'isolate_lru_page()'
-- tests the "evictability" of a page after placing it on the LRU, before
dropping the reference. If the page has become unevictable,
putback_lru_page() will redo the 'putback', thus moving the page to the
unevictable list. This way, we avoid "stranding" evictable pages on the
unevictable list.
[akpm@linux-foundation.org: fix fallout from out-of-order merge]
[riel@redhat.com: fix UNEVICTABLE_LRU and !PROC_PAGE_MONITOR build]
[nishimura@mxp.nes.nec.co.jp: remove redundant mapping check]
[kosaki.motohiro@jp.fujitsu.com: unevictable-lru-infrastructure: putback_lru_page()/unevictable page handling rework]
[kosaki.motohiro@jp.fujitsu.com: kill unnecessary lock_page() in vmscan.c]
[kosaki.motohiro@jp.fujitsu.com: revert migration change of unevictable lru infrastructure]
[kosaki.motohiro@jp.fujitsu.com: revert to unevictable-lru-infrastructure-kconfig-fix.patch]
[kosaki.motohiro@jp.fujitsu.com: restore patch failure of vmstat-unevictable-and-mlocked-pages-vm-events.patch]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Debugged-by: Benjamin Kidwell <benjkidwell@yahoo.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:39 +04:00
|
|
|
* will do this, as well as the blockdev mapping.
|
2005-04-17 02:20:36 +04:00
|
|
|
* try_to_release_page() will discover that cleanness and will
|
|
|
|
* drop the buffers and mark the page clean - it can be freed.
|
|
|
|
*
|
|
|
|
* Rarely, pages can have buffers and no ->mapping. These are
|
|
|
|
* the pages which were not successfully invalidated in
|
|
|
|
* truncate_complete_page(). We try to drop those buffers here
|
|
|
|
* and if that worked, and the page is no longer mapped into
|
|
|
|
* process address space (page_count == 1) it can be freed.
|
|
|
|
* Otherwise, leave the page on the LRU so it is swappable.
|
|
|
|
*/
|
2009-04-03 19:42:36 +04:00
|
|
|
if (page_has_private(page)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!try_to_release_page(page, sc->gfp_mask))
|
|
|
|
goto activate_locked;
|
2008-07-26 06:45:30 +04:00
|
|
|
if (!mapping && page_count(page) == 1) {
|
|
|
|
unlock_page(page);
|
|
|
|
if (put_page_testzero(page))
|
|
|
|
goto free_it;
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* rare race with speculative reference.
|
|
|
|
* the speculative reference will free
|
|
|
|
* this page shortly, so we may
|
|
|
|
* increment nr_reclaimed here (and
|
|
|
|
* leave it off the LRU).
|
|
|
|
*/
|
|
|
|
nr_reclaimed++;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2008-07-26 06:45:30 +04:00
|
|
|
if (!mapping || !__remove_mapping(mapping, page))
|
2006-01-08 12:00:48 +03:00
|
|
|
goto keep_locked;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-10-19 07:26:58 +04:00
|
|
|
/*
|
|
|
|
* At this point, we have no other references and there is
|
|
|
|
* no way to pick any more up (removed from LRU, removed
|
|
|
|
* from pagecache). Can use non-atomic bitops now (and
|
|
|
|
* we obviously don't have to worry about waking up a process
|
|
|
|
* waiting on the page lock, because there are no references.
|
|
|
|
*/
|
|
|
|
__clear_page_locked(page);
|
2008-07-26 06:45:30 +04:00
|
|
|
free_it:
|
2006-03-22 11:08:20 +03:00
|
|
|
nr_reclaimed++;
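/*
 * pagevec_add() returns the number of slots left in the pagevec;
 * once it fills up (returns 0) the whole batch is freed in one go
 * and the pagevec is reset.
 */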
|
2008-07-26 06:45:30 +04:00
|
|
|
if (!pagevec_add(&freed_pvec, page)) {
|
|
|
|
__pagevec_free(&freed_pvec);
|
|
|
|
pagevec_reinit(&freed_pvec);
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
continue;
|
|
|
|
|
2008-10-19 07:26:44 +04:00
|
|
|
cull_mlocked:
|
2009-01-07 01:39:38 +03:00
|
|
|
if (PageSwapCache(page))
|
|
|
|
try_to_free_swap(page);
|
2008-10-19 07:26:44 +04:00
|
|
|
unlock_page(page);
|
|
|
|
putback_lru_page(page);
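/*
 * putback_lru_page() (see the "Unevictable LRU Infrastructure"
 * message earlier) re-checks page_evictable() after the putback,
 * roughly:
 *
 *	lru = !page_evictable(page, NULL) ? LRU_UNEVICTABLE
 *					  : page_lru(page);
 *	add_page_to_lru_list(zone, page, lru);
 *
 * so a still-mlocked page lands on the unevictable list instead
 * of being rescanned over and over.
 */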
|
|
|
|
continue;
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
activate_locked:
|
2008-10-19 07:26:23 +04:00
|
|
|
/* Not a candidate for swapping, so reclaim swap space. */
|
|
|
|
if (PageSwapCache(page) && vm_swap_full())
|
2009-01-07 01:39:36 +03:00
|
|
|
try_to_free_swap(page);
|
2008-10-19 07:26:39 +04:00
|
|
|
VM_BUG_ON(PageActive(page));
|
2005-04-17 02:20:36 +04:00
|
|
|
SetPageActive(page);
|
|
|
|
pgactivate++;
|
|
|
|
keep_locked:
|
|
|
|
unlock_page(page);
|
|
|
|
keep:
|
|
|
|
list_add(&page->lru, &ret_pages);
|
2008-10-19 07:26:44 +04:00
|
|
|
VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
list_splice(&ret_pages, page_list);
|
|
|
|
if (pagevec_count(&freed_pvec))
|
2008-07-26 06:45:30 +04:00
|
|
|
__pagevec_free(&freed_pvec);
|
2006-06-30 12:55:45 +04:00
|
|
|
count_vm_events(PGACTIVATE, pgactivate);
|
2006-03-22 11:08:20 +03:00
|
|
|
return nr_reclaimed;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2007-07-17 15:03:16 +04:00
|
|
|
/*
|
|
|
|
* Attempt to remove the specified page from its LRU. Only take this page
|
|
|
|
* if it is of the appropriate PageActive status. Pages which are being
|
|
|
|
* freed elsewhere are also ignored.
|
|
|
|
*
|
|
|
|
* page: page to consider
|
|
|
|
* mode: one of the LRU isolation modes defined above
|
|
|
|
*
|
|
|
|
* returns 0 on success, -ve errno on failure.
|
|
|
|
*/
|
2008-10-19 07:26:32 +04:00
|
|
|
int __isolate_lru_page(struct page *page, int mode, int file)
|
2007-07-17 15:03:16 +04:00
|
|
|
{
|
|
|
|
int ret = -EINVAL;
|
|
|
|
|
|
|
|
/* Only take pages on the LRU. */
|
|
|
|
if (!PageLRU(page))
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When checking the active state, we need to be sure we are
|
|
|
|
* dealing with comparable boolean values. Take the logical not
|
|
|
|
* of each.
|
|
|
|
*/
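/*
 * For reference, the isolation modes of this era are
 * ISOLATE_INACTIVE (0), ISOLATE_ACTIVE (1) and ISOLATE_BOTH (2),
 * so for the first two the comparison below requires the page's
 * active state to match the requested mode.
 */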
|
|
|
|
if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
|
|
|
|
return ret;
|
|
|
|
|
2009-09-22 04:02:59 +04:00
|
|
|
if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
|
2008-10-19 07:26:32 +04:00
|
|
|
return ret;
|
|
|
|
|
2008-10-19 07:26:39 +04:00
|
|
|
/*
|
|
|
|
* When this function is being called for lumpy reclaim, we
|
|
|
|
* initially look into all LRU pages, active, inactive and
|
|
|
|
* unevictable; only give shrink_page_list evictable pages.
|
|
|
|
*/
|
|
|
|
if (PageUnevictable(page))
|
|
|
|
return ret;
|
|
|
|
|
2007-07-17 15:03:16 +04:00
|
|
|
ret = -EBUSY;
|
memcg: synchronized LRU
A big patch for changing memcg's LRU semantics.
Now,
- page_cgroup is linked to mem_cgroup's its own LRU (per zone).
- LRU of page_cgroup is not synchronous with global LRU.
- page and page_cgroup is one-to-one and statically allocated.
- To find page_cgroup is on what LRU, you have to check pc->mem_cgroup as
- lru = page_cgroup_zoneinfo(pc, nid_of_pc, zid_of_pc);
- SwapCache is handled.
And, when we handle LRU list of page_cgroup, we do following.
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc); .....................(1)
mz = page_cgroup_zoneinfo(pc);
spin_lock(&mz->lru_lock);
.....add to LRU
spin_unlock(&mz->lru_lock);
unlock_page_cgroup(pc);
But (1) is spin_lock and we have to be afraid of dead-lock with zone->lru_lock.
So, trylock() is used at (1), now. Without (1), we can't trust "mz" is correct.
This is a trial to remove this dirty nesting of locks.
This patch changes mz->lru_lock to be zone->lru_lock.
Then, above sequence will be written as
spin_lock(&zone->lru_lock); # in vmscan.c or swap.c via global LRU
mem_cgroup_add/remove/etc_lru() {
pc = lookup_page_cgroup(page);
mz = page_cgroup_zoneinfo(pc);
if (PageCgroupUsed(pc)) {
....add to LRU
}
}
spin_unlock(&zone->lru_lock); # in vmscan.c or swap.c via global LRU
This is much simpler.
(*) We're safe even if we don't take lock_page_cgroup(pc). Because..
1. When pc->mem_cgroup can be modified.
- at charge.
- at account_move().
2. at charge
the PCG_USED bit is not set before pc->mem_cgroup is fixed.
3. at account_move()
the page is isolated and not on LRU.
Pros.
- easy for maintenance.
- memcg can make use of laziness of pagevec.
- we don't have to duplicated LRU/Active/Unevictable bit in page_cgroup.
- LRU status of memcg will be synchronized with global LRU's one.
- # of locks are reduced.
- account_move() is simplified very much.
Cons.
- may increase cost of LRU rotation.
(no impact if memcg is not configured.)
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-01-08 05:08:01 +03:00
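/*
 * Illustration of the simplified locking described above: callers
 * of this function already hold zone->lru_lock, e.g.
 *
 *	spin_lock_irq(&zone->lru_lock);
 *	if (__isolate_lru_page(page, mode, file) == 0)
 *		mem_cgroup_del_lru(page);
 *	spin_unlock_irq(&zone->lru_lock);
 *
 * so memcg LRU bookkeeping needs no nested lock_page_cgroup().
 */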
|
|
|
|
2007-07-17 15:03:16 +04:00
|
|
|
if (likely(get_page_unless_zero(page))) {
|
|
|
|
/*
|
|
|
|
* Be careful not to clear PageLRU until after we're
|
|
|
|
* sure the page is not being freed elsewhere -- the
|
|
|
|
* page release code relies on it.
|
|
|
|
*/
|
|
|
|
ClearPageLRU(page);
|
|
|
|
ret = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* zone->lru_lock is heavily contended. Some of the functions that
|
|
|
|
* shrink the lists perform better by taking out a batch of pages
|
|
|
|
* and working on them outside the LRU lock.
|
|
|
|
*
|
|
|
|
* For pagecache intensive workloads, this function is the hottest
|
|
|
|
* spot in the kernel (apart from copy_*_user functions).
|
|
|
|
*
|
|
|
|
* Appropriate locks must be held before calling this function.
|
|
|
|
*
|
|
|
|
* @nr_to_scan: The number of pages to look through on the list.
|
|
|
|
* @src: The LRU list to pull pages off.
|
|
|
|
* @dst: The temp list to put pages on to.
|
|
|
|
* @scanned: The number of pages that were scanned.
|
2007-07-17 15:03:16 +04:00
|
|
|
* @order: The caller's attempted allocation order
|
|
|
|
* @mode: One of the LRU isolation modes
|
2008-10-19 07:26:32 +04:00
|
|
|
* @file: True [1] if isolating file [!anon] pages
|
2005-04-17 02:20:36 +04:00
|
|
|
*
|
|
|
|
* returns how many pages were moved onto *@dst.
|
|
|
|
*/
|
2006-03-22 11:08:19 +03:00
|
|
|
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
|
|
|
|
struct list_head *src, struct list_head *dst,
|
2008-10-19 07:26:32 +04:00
|
|
|
unsigned long *scanned, int order, int mode, int file)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2006-03-22 11:08:19 +03:00
|
|
|
unsigned long nr_taken = 0;
|
2006-03-22 11:08:23 +03:00
|
|
|
unsigned long scan;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-03-22 11:08:23 +03:00
|
|
|
for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
|
2007-07-17 15:03:16 +04:00
|
|
|
struct page *page;
|
|
|
|
unsigned long pfn;
|
|
|
|
unsigned long end_pfn;
|
|
|
|
unsigned long page_pfn;
|
|
|
|
int zone_id;
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
page = lru_to_page(src);
|
|
|
|
prefetchw_prev_lru_page(page, src, flags);
|
|
|
|
|
2006-09-26 10:30:55 +04:00
|
|
|
VM_BUG_ON(!PageLRU(page));
|
2006-03-22 11:07:59 +03:00
|
|
|
|
2008-10-19 07:26:32 +04:00
|
|
|
switch (__isolate_lru_page(page, mode, file)) {
|
2007-07-17 15:03:16 +04:00
|
|
|
case 0:
|
|
|
|
list_move(&page->lru, dst);
|
2009-06-18 03:27:21 +04:00
|
|
|
mem_cgroup_del_lru(page);
|
2006-03-22 11:08:03 +03:00
|
|
|
nr_taken++;
|
2007-07-17 15:03:16 +04:00
|
|
|
break;
|
|
|
|
|
|
|
|
case -EBUSY:
|
|
|
|
/* else it is being freed elsewhere */
|
|
|
|
list_move(&page->lru, src);
|
2009-06-18 03:27:21 +04:00
|
|
|
mem_cgroup_rotate_lru_list(page, page_lru(page));
|
2007-07-17 15:03:16 +04:00
|
|
|
continue;
|
2006-03-22 11:07:58 +03:00
|
|
|
|
2007-07-17 15:03:16 +04:00
|
|
|
default:
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!order)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Attempt to take all pages in the order aligned region
|
|
|
|
* surrounding the tag page. Only take those pages of
|
|
|
|
* the same active state as that tag page. We may safely
|
|
|
|
* round the target page pfn down to the requested order
|
|
|
|
* as the mem_map is guaranteed valid out to MAX_ORDER,
|
|
|
|
* where that page is in a different zone we will detect
|
|
|
|
* it from its zone id and abort this block scan.
|
|
|
|
*/
|
|
|
|
zone_id = page_zone_id(page);
|
|
|
|
page_pfn = page_to_pfn(page);
|
|
|
|
pfn = page_pfn & ~((1 << order) - 1);
|
|
|
|
end_pfn = pfn + (1 << order);
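/*
 * e.g. with order = 3 the tag page's pfn is rounded down to a
 * multiple of 8 and the surrounding 8-page block is scanned.
 */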
|
|
|
|
for (; pfn < end_pfn; pfn++) {
|
|
|
|
struct page *cursor_page;
|
|
|
|
|
|
|
|
/* The target page is in the block, ignore it. */
|
|
|
|
if (unlikely(pfn == page_pfn))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Avoid holes within the zone. */
|
|
|
|
if (unlikely(!pfn_valid_within(pfn)))
|
|
|
|
break;
|
|
|
|
|
|
|
|
cursor_page = pfn_to_page(pfn);
|
2008-10-19 07:26:32 +04:00
|
|
|
|
2007-07-17 15:03:16 +04:00
|
|
|
/* Check that we have not crossed a zone boundary. */
|
|
|
|
if (unlikely(page_zone_id(cursor_page) != zone_id))
|
|
|
|
continue;
|
2009-09-22 04:01:43 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we don't have enough swap space, reclaiming of
|
|
|
|
* anon page which don't already have a swap slot is
|
|
|
|
* pointless.
|
|
|
|
*/
|
|
|
|
if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
|
|
|
|
!PageSwapCache(cursor_page))
|
|
|
|
continue;
|
|
|
|
|
2009-06-17 02:33:24 +04:00
|
|
|
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
|
2007-07-17 15:03:16 +04:00
|
|
|
list_move(&cursor_page->lru, dst);
|
2009-06-23 03:57:55 +04:00
|
|
|
mem_cgroup_del_lru(cursor_page);
|
2007-07-17 15:03:16 +04:00
|
|
|
nr_taken++;
|
|
|
|
scan++;
|
|
|
|
}
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
*scanned = scan;
|
|
|
|
return nr_taken;
|
|
|
|
}
|
|
|
|
|
2008-02-07 11:13:56 +03:00
|
|
|
static unsigned long isolate_pages_global(unsigned long nr,
|
|
|
|
struct list_head *dst,
|
|
|
|
unsigned long *scanned, int order,
|
|
|
|
int mode, struct zone *z,
|
2008-10-19 07:26:32 +04:00
|
|
|
int active, int file)
|
2008-02-07 11:13:56 +03:00
|
|
|
{
|
2008-10-19 07:26:32 +04:00
|
|
|
int lru = LRU_BASE;
|
2008-02-07 11:13:56 +03:00
|
|
|
if (active)
|
2008-10-19 07:26:32 +04:00
|
|
|
lru += LRU_ACTIVE;
|
|
|
|
if (file)
|
|
|
|
lru += LRU_FILE;
|
|
|
|
return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
|
2009-09-22 04:02:56 +04:00
|
|
|
mode, file);
|
2008-02-07 11:13:56 +03:00
|
|
|
}
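/*
 * The lru index built above selects one of the four evictable lists:
 * LRU_BASE is LRU_INACTIVE_ANON, and adding LRU_ACTIVE and/or LRU_FILE
 * yields LRU_ACTIVE_ANON, LRU_INACTIVE_FILE or LRU_ACTIVE_FILE.
 */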
|
|
|
|
|
2007-07-17 15:03:16 +04:00
|
|
|
/*
|
|
|
|
* clear_active_flags() is a helper for shrink_active_list(), clearing
|
|
|
|
* any active bits from the pages in the list.
|
|
|
|
*/
|
2008-10-19 07:26:32 +04:00
|
|
|
static unsigned long clear_active_flags(struct list_head *page_list,
|
|
|
|
unsigned int *count)
|
2007-07-17 15:03:16 +04:00
|
|
|
{
|
|
|
|
int nr_active = 0;
|
2008-10-19 07:26:32 +04:00
|
|
|
int lru;
|
2007-07-17 15:03:16 +04:00
|
|
|
struct page *page;
|
|
|
|
|
2008-10-19 07:26:32 +04:00
|
|
|
list_for_each_entry(page, page_list, lru) {
|
2009-09-22 04:02:58 +04:00
|
|
|
lru = page_lru_base_type(page);
|
2007-07-17 15:03:16 +04:00
|
|
|
if (PageActive(page)) {
|
2008-10-19 07:26:32 +04:00
|
|
|
lru += LRU_ACTIVE;
|
2007-07-17 15:03:16 +04:00
|
|
|
ClearPageActive(page);
|
|
|
|
nr_active++;
|
|
|
|
}
|
2008-10-19 07:26:32 +04:00
|
|
|
count[lru]++;
|
|
|
|
}
|
2007-07-17 15:03:16 +04:00
|
|
|
|
|
|
|
return nr_active;
|
|
|
|
}
|
|
|
|
|
vmscan: move isolate_lru_page() to vmscan.c
On large memory systems, the VM can spend way too much time scanning
through pages that it cannot (or should not) evict from memory. Not only
does it use up CPU time, but it also provokes lock contention and can
leave large systems under memory pressure in a catatonic state.
This patch series improves VM scalability by:
1) putting filesystem backed, swap backed and unevictable pages
onto their own LRUs, so the system only scans the pages that it
can/should evict from memory
2) switching to two handed clock replacement for the anonymous LRUs,
so the number of pages that need to be scanned when the system
starts swapping is bound to a reasonable number
3) keeping unevictable pages off the LRU completely, so the
VM does not waste CPU time scanning them. ramfs, ramdisk,
SHM_LOCKED shared memory segments and mlock()ed VMA pages
are kept on the unevictable list.
This patch:
isolate_lru_page logically belongs in vmscan.c rather than migrate.c.
It is a tough call, because we don't need that function without memory
migration, so there is a valid argument for having it in migrate.c. However a
subsequent patch needs to make use of it in the core mm, so we can happily
move it to vmscan.c.
Also, make the function a little more generic by not requiring that it
adds an isolated page to a given list. Callers can do that.
Note that we now have '__isolate_lru_page()', that does
something quite different, visible outside of vmscan.c
for use with memory controller. Methinks we need to
rationalize these names/purposes. --lts
[akpm@linux-foundation.org: fix mm/memory_hotplug.c build]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:09 +04:00
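For illustration, a typical caller now does the list handling itself
(a sketch in the style of the migration code of this era, not verbatim):

	if (!isolate_lru_page(page))
		list_add_tail(&page->lru, &pagelist);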
|
|
|
/**
|
|
|
|
* isolate_lru_page - tries to isolate a page from its LRU list
|
|
|
|
* @page: page to isolate from its LRU list
|
|
|
|
*
|
|
|
|
* Isolates a @page from an LRU list, clears PageLRU and adjusts the
|
|
|
|
* vmstat statistic corresponding to whatever LRU list the page was on.
|
|
|
|
*
|
|
|
|
* Returns 0 if the page was removed from an LRU list.
|
|
|
|
* Returns -EBUSY if the page was not on an LRU list.
|
|
|
|
*
|
|
|
|
* The returned page will have PageLRU() cleared. If it was found on
|
2008-10-19 07:26:39 +04:00
|
|
|
* the active list, it will have PageActive set. If it was found on
|
|
|
|
* the unevictable list, it will have the PageUnevictable bit set. That flag
|
|
|
|
* may need to be cleared by the caller before letting the page go.
|
2008-10-19 07:26:09 +04:00
|
|
|
*
|
|
|
|
* The vmstat statistic corresponding to the list on which the page was
|
|
|
|
* found will be decremented.
|
|
|
|
*
|
|
|
|
* Restrictions:
|
|
|
|
* (1) Must be called with an elevated refcount on the page. This is a
|
|
|
|
* fundamental difference from isolate_lru_pages (which is called
|
|
|
|
* without a stable reference).
|
|
|
|
* (2) the lru_lock must not be held.
|
|
|
|
* (3) interrupts must be enabled.
|
|
|
|
*/
|
|
|
|
int isolate_lru_page(struct page *page)
|
|
|
|
{
|
|
|
|
int ret = -EBUSY;
|
|
|
|
|
|
|
|
if (PageLRU(page)) {
|
|
|
|
struct zone *zone = page_zone(page);
|
|
|
|
|
|
|
|
spin_lock_irq(&zone->lru_lock);
|
|
|
|
if (PageLRU(page) && get_page_unless_zero(page)) {
|
2008-10-19 07:26:39 +04:00
|
|
|
int lru = page_lru(page);
|
vmscan: move isolate_lru_page() to vmscan.c
On large memory systems, the VM can spend way too much time scanning
through pages that it cannot (or should not) evict from memory. Not only
does it use up CPU time, but it also provokes lock contention and can
leave large systems under memory pressure in a catatonic state.
This patch series improves VM scalability by:
1) putting filesystem backed, swap backed and unevictable pages
onto their own LRUs, so the system only scans the pages that it
can/should evict from memory
2) switching to two handed clock replacement for the anonymous LRUs,
so the number of pages that need to be scanned when the system
starts swapping is bound to a reasonable number
3) keeping unevictable pages off the LRU completely, so the
VM does not waste CPU time scanning them. ramfs, ramdisk,
SHM_LOCKED shared memory segments and mlock()ed VMA pages
are kept on the unevictable list.
This patch:
isolate_lru_page logically belongs in vmscan.c rather than migrate.c.
It is a tough call, because we don't need that function without memory migration,
so there is a valid argument to have it in migrate.c. However, a
subsequent patch needs to make use of it in the core mm, so we can happily
move it to vmscan.c.
Also, make the function a little more generic by not requiring that it
adds an isolated page to a given list. Callers can do that.
Note that we now have '__isolate_lru_page()', which does
something quite different, visible outside of vmscan.c
for use with the memory controller. Methinks we need to
rationalize these names/purposes. --lts
[akpm@linux-foundation.org: fix mm/memory_hotplug.c build]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:09 +04:00
|
|
|
ret = 0;
|
|
|
|
ClearPageLRU(page);
|
2008-10-19 07:26:32 +04:00
|
|
|
|
|
|
|
del_page_from_lru_list(zone, page, lru);
|
vmscan: move isolate_lru_page() to vmscan.c
2008-10-19 07:26:09 +04:00
|
|
|
}
|
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
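A hedged caller-side sketch tying the restrictions above together (the list
name and the follow_page() provenance of the reference are illustrative, not
taken from any particular caller in the tree):

	LIST_HEAD(pagelist);

	/*
	 * We already hold a reference on the page (restriction 1), e.g.
	 * from follow_page(); lru_lock is not held (2) and IRQs are on (3).
	 */
	if (isolate_lru_page(page) == 0)	/* 0 on success, -EBUSY otherwise */
		list_add_tail(&page->lru, &pagelist);	/* caller manages it now */

This is the "callers can do that" shape from the changelog above: the function
only detaches the page from its LRU; queueing it anywhere is the caller's
business.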
|
|
|
|
|
2009-09-22 04:01:38 +04:00
|
|
|
/*
|
|
|
|
* Are there way too many processes in the direct reclaim path already?
|
|
|
|
*/
|
|
|
|
static int too_many_isolated(struct zone *zone, int file,
|
|
|
|
struct scan_control *sc)
|
|
|
|
{
|
|
|
|
unsigned long inactive, isolated;
|
|
|
|
|
|
|
|
if (current_is_kswapd())
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!scanning_global_lru(sc))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (file) {
|
|
|
|
inactive = zone_page_state(zone, NR_INACTIVE_FILE);
|
|
|
|
isolated = zone_page_state(zone, NR_ISOLATED_FILE);
|
|
|
|
} else {
|
|
|
|
inactive = zone_page_state(zone, NR_INACTIVE_ANON);
|
|
|
|
isolated = zone_page_state(zone, NR_ISOLATED_ANON);
|
|
|
|
}
|
|
|
|
|
|
|
|
return isolated > inactive;
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
[PATCH] vmscan: rename functions
We have:
try_to_free_pages
->shrink_caches(struct zone **zones, ..)
->shrink_zone(struct zone *, ...)
->shrink_cache(struct zone *, ...)
->shrink_list(struct list_head *, ...)
->refill_inactive_list((struct zone *, ...)
which is fairly irrational.
Rename things so that we have
try_to_free_pages
->shrink_zones(struct zone **zones, ..)
->shrink_zone(struct zone *, ...)
->shrink_inactive_list(struct zone *, ...)
->shrink_page_list(struct list_head *, ...)
->shrink_active_list(struct zone *, ...)
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-22 11:08:21 +03:00
|
|
|
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
|
|
|
|
* of reclaimed pages
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
[PATCH] vmscan: rename functions
2006-03-22 11:08:21 +03:00
|
|
|
static unsigned long shrink_inactive_list(unsigned long max_scan,
|
2008-10-19 07:26:36 +04:00
|
|
|
struct zone *zone, struct scan_control *sc,
|
|
|
|
int priority, int file)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
LIST_HEAD(page_list);
|
|
|
|
struct pagevec pvec;
|
2006-03-22 11:08:19 +03:00
|
|
|
unsigned long nr_scanned = 0;
|
2006-03-22 11:08:20 +03:00
|
|
|
unsigned long nr_reclaimed = 0;
|
2009-01-08 05:08:15 +03:00
|
|
|
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
|
2009-06-17 02:31:40 +04:00
|
|
|
|
2009-09-22 04:01:38 +04:00
|
|
|
while (unlikely(too_many_isolated(zone, file, sc))) {
|
2009-10-27 02:49:35 +03:00
|
|
|
congestion_wait(BLK_RW_ASYNC, HZ/10);
|
2009-09-22 04:01:38 +04:00
|
|
|
|
|
|
|
/* We are about to die and free our memory. Return now. */
|
|
|
|
if (fatal_signal_pending(current))
|
|
|
|
return SWAP_CLUSTER_MAX;
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
pagevec_init(&pvec, 1);
|
|
|
|
|
|
|
|
lru_add_drain();
|
|
|
|
spin_lock_irq(&zone->lru_lock);
|
2006-03-22 11:08:19 +03:00
|
|
|
do {
|
2005-04-17 02:20:36 +04:00
|
|
|
struct page *page;
|
2006-03-22 11:08:19 +03:00
|
|
|
unsigned long nr_taken;
|
|
|
|
unsigned long nr_scan;
|
|
|
|
unsigned long nr_freed;
|
2007-07-17 15:03:16 +04:00
|
|
|
unsigned long nr_active;
|
2008-10-19 07:26:32 +04:00
|
|
|
unsigned int count[NR_LRU_LISTS] = { 0, };
|
2010-05-25 01:32:37 +04:00
|
|
|
int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
|
2009-09-22 04:01:37 +04:00
|
|
|
unsigned long nr_anon;
|
|
|
|
unsigned long nr_file;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-09-22 04:01:36 +04:00
|
|
|
if (scanning_global_lru(sc)) {
|
2010-05-25 01:32:40 +04:00
|
|
|
nr_taken = isolate_pages_global(SWAP_CLUSTER_MAX,
|
|
|
|
&page_list, &nr_scan,
|
|
|
|
sc->order, mode,
|
|
|
|
zone, 0, file);
|
2009-09-22 04:01:36 +04:00
|
|
|
zone->pages_scanned += nr_scan;
|
|
|
|
if (current_is_kswapd())
|
|
|
|
__count_zone_vm_events(PGSCAN_KSWAPD, zone,
|
|
|
|
nr_scan);
|
|
|
|
else
|
|
|
|
__count_zone_vm_events(PGSCAN_DIRECT, zone,
|
|
|
|
nr_scan);
|
2010-05-25 01:32:40 +04:00
|
|
|
} else {
|
|
|
|
nr_taken = mem_cgroup_isolate_pages(SWAP_CLUSTER_MAX,
|
|
|
|
&page_list, &nr_scan,
|
|
|
|
sc->order, mode,
|
|
|
|
zone, sc->mem_cgroup,
|
|
|
|
0, file);
|
|
|
|
/*
|
|
|
|
* mem_cgroup_isolate_pages() keeps track of
|
|
|
|
* scanned pages on its own.
|
|
|
|
*/
|
2009-09-22 04:01:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (nr_taken == 0)
|
|
|
|
goto done;
|
|
|
|
|
2008-10-19 07:26:32 +04:00
|
|
|
nr_active = clear_active_flags(&page_list, count);
|
2007-08-23 01:01:25 +04:00
|
|
|
__count_vm_events(PGDEACTIVATE, nr_active);
|
2007-07-17 15:03:16 +04:00
|
|
|
|
2008-10-19 07:26:32 +04:00
|
|
|
__mod_zone_page_state(zone, NR_ACTIVE_FILE,
|
|
|
|
-count[LRU_ACTIVE_FILE]);
|
|
|
|
__mod_zone_page_state(zone, NR_INACTIVE_FILE,
|
|
|
|
-count[LRU_INACTIVE_FILE]);
|
|
|
|
__mod_zone_page_state(zone, NR_ACTIVE_ANON,
|
|
|
|
-count[LRU_ACTIVE_ANON]);
|
|
|
|
__mod_zone_page_state(zone, NR_INACTIVE_ANON,
|
|
|
|
-count[LRU_INACTIVE_ANON]);
|
|
|
|
|
2009-09-22 04:01:37 +04:00
|
|
|
nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
|
|
|
|
nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
|
|
|
|
__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
|
|
|
|
__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
|
2009-01-08 05:08:20 +03:00
|
|
|
|
2009-12-15 04:59:48 +03:00
|
|
|
reclaim_stat->recent_scanned[0] += nr_anon;
|
|
|
|
reclaim_stat->recent_scanned[1] += nr_file;
|
2009-01-08 05:08:20 +03:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
|
2006-03-22 11:08:19 +03:00
|
|
|
nr_scanned += nr_scan;
|
2007-08-23 01:01:26 +04:00
|
|
|
nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are direct reclaiming for contiguous pages and we do
|
|
|
|
* not reclaim everything in the list, try again and wait
|
|
|
|
* for IO to complete. This will stall high-order allocations
|
|
|
|
* but that should be acceptable to the caller
|
|
|
|
*/
|
|
|
|
if (nr_freed < nr_taken && !current_is_kswapd() &&
|
2010-05-25 01:32:37 +04:00
|
|
|
sc->lumpy_reclaim_mode) {
|
2009-07-09 16:52:32 +04:00
|
|
|
congestion_wait(BLK_RW_ASYNC, HZ/10);
|
2007-08-23 01:01:26 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The attempt at page out may have made some
|
|
|
|
* of the pages active, mark them inactive again.
|
|
|
|
*/
|
2008-10-19 07:26:32 +04:00
|
|
|
nr_active = clear_active_flags(&page_list, count);
|
2007-08-23 01:01:26 +04:00
|
|
|
count_vm_events(PGDEACTIVATE, nr_active);
|
|
|
|
|
|
|
|
nr_freed += shrink_page_list(&page_list, sc,
|
|
|
|
PAGEOUT_IO_SYNC);
|
|
|
|
}
|
|
|
|
|
2006-03-22 11:08:20 +03:00
|
|
|
nr_reclaimed += nr_freed;
|
2009-09-22 04:01:36 +04:00
|
|
|
|
2006-01-06 11:11:20 +03:00
|
|
|
local_irq_disable();
|
2009-09-22 04:01:36 +04:00
|
|
|
if (current_is_kswapd())
|
2006-06-30 12:55:45 +04:00
|
|
|
__count_vm_events(KSWAPD_STEAL, nr_freed);
|
2006-12-30 03:48:59 +03:00
|
|
|
__count_zone_vm_events(PGSTEAL, zone, nr_freed);
|
2006-01-06 11:11:20 +03:00
|
|
|
|
|
|
|
spin_lock(&zone->lru_lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* Put back any unfreeable pages.
|
|
|
|
*/
|
|
|
|
while (!list_empty(&page_list)) {
|
Unevictable LRU Infrastructure
2008-10-19 07:26:39 +04:00
|
|
|
int lru;
|
2005-04-17 02:20:36 +04:00
|
|
|
page = lru_to_page(&page_list);
|
2006-09-26 10:30:55 +04:00
|
|
|
VM_BUG_ON(PageLRU(page));
|
2005-04-17 02:20:36 +04:00
|
|
|
list_del(&page->lru);
|
Unevictable LRU Infrastructure
2008-10-19 07:26:39 +04:00
|
|
|
if (unlikely(!page_evictable(page, NULL))) {
|
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
putback_lru_page(page);
|
|
|
|
spin_lock_irq(&zone->lru_lock);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
SetPageLRU(page);
|
|
|
|
lru = page_lru(page);
|
|
|
|
add_page_to_lru_list(zone, page, lru);
|
2009-09-22 04:01:45 +04:00
|
|
|
if (is_active_lru(lru)) {
|
2009-09-22 04:02:56 +04:00
|
|
|
int file = is_file_lru(lru);
|
2009-01-08 05:08:15 +03:00
|
|
|
reclaim_stat->recent_rotated[file]++;
|
2008-10-19 07:26:32 +04:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!pagevec_add(&pvec, page)) {
|
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
__pagevec_release(&pvec);
|
|
|
|
spin_lock_irq(&zone->lru_lock);
|
|
|
|
}
|
|
|
|
}
|
2009-09-22 04:01:37 +04:00
|
|
|
__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
|
|
|
|
__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
|
|
|
|
|
2006-03-22 11:08:19 +03:00
|
|
|
} while (nr_scanned < max_scan);
|
2009-09-22 04:01:36 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
done:
|
2009-09-22 04:01:36 +04:00
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
pagevec_release(&pvec);
|
2006-03-22 11:08:20 +03:00
|
|
|
return nr_reclaimed;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
[PATCH] vmscan: Fix temp_priority race
The temp_priority field in zone is racy, as we can walk through a reclaim
path, and just before we copy it into prev_priority, it can be overwritten
(say with DEF_PRIORITY) by another reclaimer.
The same bug is contained in both try_to_free_pages and balance_pgdat, but
it is fixed slightly differently. In balance_pgdat, we keep a separate
priority record per zone in a local array. In try_to_free_pages there is
no need to do this, as the priority level is the same for all zones that we
reclaim from.
Impact of this bug is that temp_priority is copied into prev_priority, and
setting this artificially high causes reclaimers to set distress
artificially low. They then fail to reclaim mapped pages, when they are,
in fact, under severe memory pressure (their priority may be as low as 0).
This causes the OOM killer to fire incorrectly.
From: Andrew Morton <akpm@osdl.org>
__zone_reclaim() isn't modifying zone->prev_priority. But zone->prev_priority
is used in the decision whether or not to bring mapped pages onto the inactive
list. Hence there's a risk here that __zone_reclaim() will fail because
zone->prev_priority is large (ie: low urgency) and lots of mapped pages end up
stuck on the active list.
Fix that up by decreasing (ie making more urgent) zone->prev_priority as
__zone_reclaim() scans the zone's pages.
This bug perhaps explains why ZONE_RECLAIM_PRIORITY was created. It should be
possible to remove that now, and to just start out at DEF_PRIORITY?
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-10-28 21:38:24 +04:00
|
|
|
/*
|
|
|
|
* We are about to scan this zone at a certain priority level. If that priority
|
|
|
|
* level is smaller (ie: more urgent) than the previous priority, then note
|
|
|
|
* that priority level within the zone. This is done so that when the next
|
|
|
|
* process comes in to scan this zone, it will immediately start out at this
|
|
|
|
* priority level rather than having to build up its own scanning priority.
|
|
|
|
* Here, this priority affects only the reclaim-mapped threshold.
|
|
|
|
*/
|
|
|
|
static inline void note_zone_scanning_priority(struct zone *zone, int priority)
|
|
|
|
{
|
|
|
|
if (priority < zone->prev_priority)
|
|
|
|
zone->prev_priority = priority;
|
|
|
|
}
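A hedged sketch of the intended call pattern; the surrounding descent loop is
illustrative only (DEF_PRIORITY is the usual starting urgency, and the
shrink_zone() call stands in for whatever scanning the caller actually does):

	/* Note each more-urgent priority level as reclaim descends. */
	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
		note_zone_scanning_priority(zone, priority);
		shrink_zone(priority, zone, sc);
	}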
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* This moves pages from the active list to the inactive list.
|
|
|
|
*
|
|
|
|
* We move them the other way if the page is referenced by one or more
|
|
|
|
* processes, from rmap.
|
|
|
|
*
|
|
|
|
* If the pages are mostly unmapped, the processing is fast and it is
|
|
|
|
* appropriate to hold zone->lru_lock across the whole operation. But if
|
|
|
|
* the pages are mapped, the processing is slow (page_referenced()) so we
|
|
|
|
* should drop zone->lru_lock around each page. It's impossible to balance
|
|
|
|
* this, so instead we remove the pages from the LRU while processing them.
|
|
|
|
* It is safe to rely on PG_active against the non-LRU pages in here because
|
|
|
|
* nobody will play with that bit on a non-LRU page.
|
|
|
|
*
|
|
|
|
* The downside is that we have to touch page->_count against each page.
|
|
|
|
* But we had to alter page->flags anyway.
|
|
|
|
*/
|
2008-02-07 11:14:37 +03:00
|
|
|
|
2009-06-17 02:33:13 +04:00
|
|
|
static void move_active_pages_to_lru(struct zone *zone,
|
|
|
|
struct list_head *list,
|
|
|
|
enum lru_list lru)
|
|
|
|
{
|
|
|
|
unsigned long pgmoved = 0;
|
|
|
|
struct pagevec pvec;
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
pagevec_init(&pvec, 1);
|
|
|
|
|
|
|
|
while (!list_empty(list)) {
|
|
|
|
page = lru_to_page(list);
|
|
|
|
|
|
|
|
VM_BUG_ON(PageLRU(page));
|
|
|
|
SetPageLRU(page);
|
|
|
|
|
|
|
|
list_move(&page->lru, &zone->lru[lru].list);
|
|
|
|
mem_cgroup_add_lru_list(page, lru);
|
|
|
|
pgmoved++;
|
|
|
|
|
|
|
|
if (!pagevec_add(&pvec, page) || list_empty(list)) {
|
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
if (buffer_heads_over_limit)
|
|
|
|
pagevec_strip(&pvec);
|
|
|
|
__pagevec_release(&pvec);
|
|
|
|
spin_lock_irq(&zone->lru_lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
|
|
|
|
if (!is_active_lru(lru))
|
|
|
|
__count_vm_events(PGDEACTIVATE, pgmoved);
|
|
|
|
}
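Caller-side, the helper above is invoked once per destination list. A hedged
sketch of how shrink_active_list() drains its two local lists under the lock
(simplified; the LRU_ACTIVE/LRU_BASE/LRU_FILE arithmetic selects the anon or
file flavour of each list, and the counter updates are elided):

	spin_lock_irq(&zone->lru_lock);
	/* Survivors go back to the active list... */
	move_active_pages_to_lru(zone, &l_active,
						LRU_ACTIVE + file * LRU_FILE);
	/* ...everything else is deactivated. */
	move_active_pages_to_lru(zone, &l_inactive,
						LRU_BASE + file * LRU_FILE);
	spin_unlock_irq(&zone->lru_lock);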
|
2008-02-07 11:14:37 +03:00
|
|
|
|
[PATCH] vmscan: rename functions
2006-03-22 11:08:21 +03:00
|
|
|
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
|
2008-10-19 07:26:32 +04:00
|
|
|
struct scan_control *sc, int priority, int file)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2009-09-22 04:01:35 +04:00
|
|
|
unsigned long nr_taken;
|
2006-03-22 11:08:19 +03:00
|
|
|
unsigned long pgscanned;
|
2009-06-17 02:33:05 +04:00
|
|
|
unsigned long vm_flags;
|
2005-04-17 02:20:36 +04:00
|
|
|
LIST_HEAD(l_hold); /* The pages which were snipped off */
|
vmscan: make mapped executable pages the first class citizen
Protect referenced PROT_EXEC mapped pages from being deactivated.
PROT_EXEC (or its internal representation VM_EXEC) pages normally belong to some
currently running executables and their linked libraries; they should really be
cached aggressively to provide good user experiences.
Thanks to Johannes Weiner for the advice to reuse the VMA walk in
page_referenced() to get the PROT_EXEC bit.
[more details]
( The consequences of this patch will have to be discussed together with
Rik van Riel's recent patch "vmscan: evict use-once pages first". )
( Some of the good points and insights are taken into this changelog.
Thanks to all the involved people for the great LKML discussions. )
the problem
===========
For a typical desktop, the most precious working set is composed of
*actively accessed*
(1) memory mapped executables
(2) and their anonymous pages
(3) and other files
(4) and the dcache/icache/.. slabs
while the least important data are
(5) infrequently used or use-once files
For a typical desktop, one major problem is bursty and large amounts of (5)
use-once files flushing out the working set.
Inside the working set, (4) dcache/icache have already been too sticky ;-)
So we only have to care (2) anonymous and (1)(3) file pages.
anonymous pages
===============
Anonymous pages are effectively immune to the streaming IO attack, because we
now have separate file/anon LRU lists. When the use-once files crowd into the
file LRU, the list's "quality" is significantly lowered. Therefore the scan
balance policy in get_scan_ratio() will choose to scan the (low quality) file
LRU much more frequently than the anon LRU.
file pages
==========
Rik proposed to *not* scan the active file LRU when the inactive list grows
larger than the active list. This guarantees that when there is use-once streaming
IO, and the working set is not too large (so that active_size < inactive_size),
the active file LRU will *not* be scanned at all. So the not-too-large working
set can be well protected.
But there are also situations where the file working set is a bit large so that
(active_size >= inactive_size), or the streaming IOs are not purely use-once.
In these cases, the active list will be scanned slowly, because the current
shrink_active_list() policy is to deactivate active pages regardless of their
referenced bits. The deactivated pages become susceptible to the streaming IO
attack: the inactive list could be scanned fast (500MB / 50MBps = 10s) so that
the deactivated pages don't have enough time to get re-referenced, because a
user tends to switch between windows at intervals from seconds to minutes.
This patch holds mapped executable pages in the active list as long as they
are referenced during each full scan of the active list. Because the active
list is normally scanned much slower, they get longer grace time (eg. 100s)
for further references, which better matches the pace of user operations.
Therefore this patch greatly prolongs the in-cache time of executable code,
when there are moderate memory pressures.
before patch: guaranteed to be cached if reference intervals < I
after patch: guaranteed to be cached if reference intervals < I+A
(except when randomly reclaimed by the lumpy reclaim)
where
A = time to fully scan the active file LRU
I = time to fully scan the inactive file LRU
Note that normally A >> I.
side effects
============
This patch is safe in general; it restores the pre-2.6.28 mmap() behavior
but in a much smaller and well targeted scope.
One may worry about someone abusing the PROT_EXEC heuristic. But as
Andrew Morton stated, there are other tricks to getting that sort of boost.
Another concern is the PROT_EXEC mapped pages growing large in rare cases,
and therefore hurting reclaim efficiency. But a sane application targeted at a
large audience will never use PROT_EXEC for data mappings. If some home-made
application tries to abuse that bit, it shall be aware of the consequences.
If it is abused to the scale of 2/3 of total memory, it gains nothing but overhead.
benchmarks
==========
1) memory tight desktop
1.1) brief summary
- clock time and major faults are reduced by 50%;
- pswpin numbers are reduced to ~1/3.
That means X desktop responsiveness is doubled under high memory/swap pressure.
1.2) test scenario
- nfsroot gnome desktop with 512M physical memory
- run some programs, and switch between the existing windows
after starting each new program.
1.3) progress timing (seconds)
before after programs
0.02 0.02 N xeyes
0.75 0.76 N firefox
2.02 1.88 N nautilus
3.36 3.17 N nautilus --browser
5.26 4.89 N gthumb
7.12 6.47 N gedit
9.22 8.16 N xpdf /usr/share/doc/shared-mime-info/shared-mime-info-spec.pdf
13.58 12.55 N xterm
15.87 14.57 N mlterm
18.63 17.06 N gnome-terminal
21.16 18.90 N urxvt
26.24 23.48 N gnome-system-monitor
28.72 26.52 N gnome-help
32.15 29.65 N gnome-dictionary
39.66 36.12 N /usr/games/sol
43.16 39.27 N /usr/games/gnometris
48.65 42.56 N /usr/games/gnect
53.31 47.03 N /usr/games/gtali
58.60 52.05 N /usr/games/iagno
65.77 55.42 N /usr/games/gnotravex
70.76 61.47 N /usr/games/mahjongg
76.15 67.11 N /usr/games/gnome-sudoku
86.32 75.15 N /usr/games/glines
92.21 79.70 N /usr/games/glchess
103.79 88.48 N /usr/games/gnomine
113.84 96.51 N /usr/games/gnotski
124.40 102.19 N /usr/games/gnibbles
137.41 114.93 N /usr/games/gnobots2
155.53 125.02 N /usr/games/blackjack
179.85 135.11 N /usr/games/same-gnome
224.49 154.50 N /usr/bin/gnome-window-properties
248.44 162.09 N /usr/bin/gnome-default-applications-properties
282.62 173.29 N /usr/bin/gnome-at-properties
323.72 188.21 N /usr/bin/gnome-typing-monitor
363.99 199.93 N /usr/bin/gnome-at-visual
394.21 206.95 N /usr/bin/gnome-sound-properties
435.14 224.49 N /usr/bin/gnome-at-mobility
463.05 234.11 N /usr/bin/gnome-keybinding-properties
503.75 248.59 N /usr/bin/gnome-about-me
554.00 276.27 N /usr/bin/gnome-display-properties
615.48 304.39 N /usr/bin/gnome-network-preferences
693.03 342.01 N /usr/bin/gnome-mouse-properties
759.90 388.58 N /usr/bin/gnome-appearance-properties
937.90 508.47 N /usr/bin/gnome-control-center
1109.75 587.57 N /usr/bin/gnome-keyboard-properties
1399.05 758.16 N : oocalc
1524.64 830.03 N : oodraw
1684.31 900.03 N : ooimpress
1874.04 993.91 N : oomath
2115.12 1081.89 N : ooweb
2369.02 1161.99 N : oowriter
Note that the last ": oo*" commands are actually commented out.
1.4) vmstat numbers (some relevant ones are marked with *)
before after
nr_free_pages 1293 3898
nr_inactive_anon 59956 53460
nr_active_anon 26815 30026
nr_inactive_file 2657 3218
nr_active_file 2019 2806
nr_unevictable 4 4
nr_mlock 4 4
nr_anon_pages 26706 27859
*nr_mapped 3542 4469
nr_file_pages 72232 67681
nr_dirty 1 0
nr_writeback 123 19
nr_slab_reclaimable 3375 3534
nr_slab_unreclaimable 11405 10665
nr_page_table_pages 8106 7864
nr_unstable 0 0
nr_bounce 0 0
*nr_vmscan_write 394776 230839
nr_writeback_temp 0 0
numa_hit 6843353 3318676
numa_miss 0 0
numa_foreign 0 0
numa_interleave 1719 1719
numa_local 6843353 3318676
numa_other 0 0
*pgpgin 5954683 2057175
*pgpgout 1578276 922744
*pswpin 1486615 512238
*pswpout 394568 230685
pgalloc_dma 277432 56602
pgalloc_dma32 6769477 3310348
pgalloc_normal 0 0
pgalloc_movable 0 0
pgfree 7048396 3371118
pgactivate 2036343 1471492
pgdeactivate 2189691 1612829
pgfault 3702176 3100702
*pgmajfault 452116 201343
pgrefill_dma 12185 7127
pgrefill_dma32 334384 653703
pgrefill_normal 0 0
pgrefill_movable 0 0
pgsteal_dma 74214 22179
pgsteal_dma32 3334164 1638029
pgsteal_normal 0 0
pgsteal_movable 0 0
pgscan_kswapd_dma 1081421 1216199
pgscan_kswapd_dma32 58979118 46002810
pgscan_kswapd_normal 0 0
pgscan_kswapd_movable 0 0
pgscan_direct_dma 2015438 1086109
pgscan_direct_dma32 55787823 36101597
pgscan_direct_normal 0 0
pgscan_direct_movable 0 0
pginodesteal 3461 7281
slabs_scanned 564864 527616
kswapd_steal 2889797 1448082
kswapd_inodesteal 14827 14835
pageoutrun 43459 21562
allocstall 9653 4032
pgrotated 384216 228631
1.5) free numbers at the end of the tests
before patch:
total used free shared buffers cached
Mem: 474 467 7 0 0 236
-/+ buffers/cache: 230 243
Swap: 1023 418 605
after patch:
total used free shared buffers cached
Mem: 474 457 16 0 0 236
-/+ buffers/cache: 221 253
Swap: 1023 404 619
2) memory flushing in a file server
2.1) brief summary
The number of major faults dropped from 50 to 3 during 10% cache-hot reads.
That means this patch successfully stops major faults when the active file
list is slowly scanned while there is partially cache-hot streaming IO.
2.2) test scenario
Do 100000 pread(size=110 pages, offset=(i*100) pages), where 10% of the
pages will be activated:
for i in `seq 0 100 10000000`; do echo $i 110; done > pattern-hot-10
iotrace.rb --load pattern-hot-10 --play /b/sparse
vmmon nr_mapped nr_active_file nr_inactive_file pgmajfault pgdeactivate pgfree
and monitor /proc/vmstat during the time. The test box has 2G memory.
I carried out tests on fresh booted console as well as X desktop, and
fetched the vmstat numbers on
(1) begin: shortly after the big read IO starts;
(2) end: just before the big read IO stops;
(3) restore: the big read IO stops and the zsh working set restored
(4) restore X: after IO, switch back and forth between the urxvt and firefox
windows to restore their working set.
2.3) console mode results
nr_mapped nr_active_file nr_inactive_file pgmajfault pgdeactivate pgfree
2.6.29 VM_EXEC protection ON:
begin: 2481 2237 8694 630 0 574299
end: 275 231976 233914 633 776271 20933042
restore: 370 232154 234524 691 777183 20958453
2.6.29 VM_EXEC protection ON (second run):
begin: 2434 2237 8493 629 0 574195
end: 284 231970 233536 632 771918 20896129
restore: 399 232218 234789 690 774526 20957909
2.6.30-rc4-mm VM_EXEC protection OFF:
begin: 2479 2344 9659 210 0 579643
end: 284 232010 234142 260 772776 20917184
restore: 379 232159 234371 301 774888 20967849
The above console numbers show that
- The startup pgmajfault of 2.6.30-rc4-mm is merely 1/3 that of 2.6.29.
I'd attribute that improvement to the mmap readahead improvements :-)
- The pgmajfault increment during the file copy is 633-630=3 vs 260-210=50.
That's a huge improvement - which means with the VM_EXEC protection logic,
active mmap pages are pretty safe even under partially cache-hot streaming IO.
- when active:inactive file lru size reaches 1:1, their scan rates are 1:20.8
under 10% cache hot IO. (computed with formula Dpgdeactivate:Dpgfree)
That roughly means the active mmap pages get 20.8 times more chances to get
re-referenced to stay in memory.
- The absolute nr_mapped drops considerably to 1/9 during the big IO, and the
dropped pages are mostly inactive ones. The patch has almost no impact in
this aspect, which means it won't unnecessarily increase memory pressure.
(In contrast, your 20% mmap protection ratio will keep them all, and
therefore eliminate the extra 41 major faults to restore working set
of zsh etc.)
The iotrace.rb read throughput is
151.194384MB/s 284.198252s 100001x 450560b --load pattern-hot-10 --play /b/sparse
which means the inactive list is rotated at the speed of 250MB/s,
so a full scan of it takes about 3.5 seconds, while a full scan
of the active file list takes about 77 seconds.
2.4) X mode results
We can reach roughly the same conclusions for X desktop:
nr_mapped nr_active_file nr_inactive_file pgmajfault pgdeactivate pgfree
2.6.30-rc4-mm VM_EXEC protection ON:
begin: 9740 8920 64075 561 0 678360
end: 768 218254 220029 565 798953 21057006
restore: 857 218543 220987 606 799462 21075710
restore X: 2414 218560 225344 797 799462 21080795
2.6.30-rc4-mm VM_EXEC protection OFF:
begin: 9368 5035 26389 554 0 633391
end: 770 218449 221230 661 646472 17832500
restore: 1113 218466 220978 710 649881 17905235
restore X: 2687 218650 225484 947 802700 21083584
- the absolute nr_mapped drops considerably (to 1/13 of the original size)
during the streaming IO.
- the delta of pgmajfault is 3 vs 107 during IO, or 236 vs 393
during the whole process.
Cc: Elladan <elladan@eskimo.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-06-17 02:33:12 +04:00
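Distilled, the heuristic argued for above is a two-level test inside the
shrink_active_list() walk shown below; a hedged, condensed sketch, with the
deactivation bookkeeping reduced to the essentials:

	if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
		nr_rotated++;
		/* Referenced executable file pages earn another lap. */
		if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
			list_add(&page->lru, &l_active);
			continue;
		}
	}
	ClearPageActive(page);		/* deactivate everything else */
	list_add(&page->lru, &l_inactive);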
|
|
|
LIST_HEAD(l_active);
|
2008-10-19 07:26:14 +04:00
|
|
|
LIST_HEAD(l_inactive);
|
2005-04-17 02:20:36 +04:00
|
|
|
struct page *page;
|
2009-01-08 05:08:15 +03:00
|
|
|
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
|
2009-09-22 04:01:35 +04:00
|
|
|
unsigned long nr_rotated = 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
lru_add_drain();
|
|
|
|
spin_lock_irq(&zone->lru_lock);
|
2009-01-08 05:08:23 +03:00
|
|
|
if (scanning_global_lru(sc)) {
|
2010-05-25 01:32:40 +04:00
|
|
|
nr_taken = isolate_pages_global(nr_pages, &l_hold,
|
|
|
|
&pgscanned, sc->order,
|
|
|
|
ISOLATE_ACTIVE, zone,
|
|
|
|
1, file);
|
2008-02-07 11:14:37 +03:00
|
|
|
zone->pages_scanned += pgscanned;
|
2010-05-25 01:32:40 +04:00
|
|
|
} else {
|
|
|
|
nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
|
|
|
|
&pgscanned, sc->order,
|
|
|
|
ISOLATE_ACTIVE, zone,
|
|
|
|
sc->mem_cgroup, 1, file);
|
|
|
|
/*
|
|
|
|
* mem_cgroup_isolate_pages() keeps track of
|
|
|
|
* scanned pages on its own.
|
|
|
|
*/
|
2008-10-19 07:26:32 +04:00
|
|
|
}
|
2010-05-25 01:32:40 +04:00
|
|
|
|
2009-09-22 04:02:56 +04:00
|
|
|
reclaim_stat->recent_scanned[file] += nr_taken;
|
2008-02-07 11:14:37 +03:00
|
|
|
|
2009-06-17 02:33:13 +04:00
|
|
|
__count_zone_vm_events(PGREFILL, zone, pgscanned);
|
2008-10-19 07:26:32 +04:00
|
|
|
if (file)
|
2009-09-22 04:01:35 +04:00
|
|
|
__mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
|
2008-10-19 07:26:32 +04:00
|
|
|
else
|
2009-09-22 04:01:35 +04:00
|
|
|
__mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
|
2009-09-22 04:01:37 +04:00
|
|
|
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
|
|
|
|
while (!list_empty(&l_hold)) {
|
|
|
|
cond_resched();
|
|
|
|
page = lru_to_page(&l_hold);
|
|
|
|
list_del(&page->lru);
|
2008-10-19 07:26:35 +04:00
|
|
|
|
Unevictable LRU Infrastructure
2008-10-19 07:26:39 +04:00
|
|
|
if (unlikely(!page_evictable(page, NULL))) {
|
|
|
|
putback_lru_page(page);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2010-03-06 00:42:22 +03:00
|
|
|
if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
|
2009-09-22 04:01:35 +04:00
|
|
|
nr_rotated++;
|
vmscan: make mapped executable pages the first class citizen
2009-06-17 02:33:12 +04:00
|
|
|
/*
|
|
|
|
* Identify referenced, file-backed active pages and
|
|
|
|
* give them one more trip around the active list. So
|
|
|
|
* that executable code get better chances to stay in
|
|
|
|
* memory under moderate memory pressure. Anon pages
|
|
|
|
* are not likely to be evicted by use-once streaming
|
|
|
|
* IO, plus JVM can create lots of anon VM_EXEC pages,
|
|
|
|
* so we ignore them here.
|
|
|
|
*/
|
2009-10-27 02:49:53 +03:00
|
|
|
if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
|
vmscan: make mapped executable pages the first class citizen
2009-06-17 02:33:12 +04:00
|
|
|
list_add(&page->lru, &l_active);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2008-10-19 07:26:35 +04:00
|
|
|
|
2009-09-22 04:01:44 +04:00
|
|
|
ClearPageActive(page); /* we are de-activating */
|
2005-04-17 02:20:36 +04:00
|
|
|
list_add(&page->lru, &l_inactive);
|
|
|
|
}
|
|
|
|
|
2009-01-07 01:40:13 +03:00
|
|
|
/*
|
vmscan: make mapped executable pages the first class citizen
2009-06-17 02:33:12 +04:00
|
|
|
* Move pages back to the lru list.
|
2009-01-07 01:40:13 +03:00
|
|
|
*/
|
2008-12-01 05:00:35 +03:00
|
|
|
spin_lock_irq(&zone->lru_lock);
|
2008-10-19 07:26:34 +04:00
|
|
|
/*
|
vmscan: make mapped executable pages the first class citizen
2009-06-17 02:33:12 +04:00
|
|
|
* Count referenced pages from currently used mappings as rotated,
|
|
|
|
* even though only some of them are actually re-activated. This
|
|
|
|
* helps balance scan pressure between file and anonymous pages in
|
|
|
|
* get_scan_ratio.
|
2008-10-19 07:26:35 +04:00
|
|
|
*/
|
2009-09-22 04:02:56 +04:00
|
|
|
reclaim_stat->recent_rotated[file] += nr_rotated;
|
2008-10-19 07:26:34 +04:00
|
|
|
|
2009-06-17 02:33:13 +04:00
|
|
|
move_active_pages_to_lru(zone, &l_active,
|
|
|
|
LRU_ACTIVE + file * LRU_FILE);
|
|
|
|
move_active_pages_to_lru(zone, &l_inactive,
|
|
|
|
LRU_BASE + file * LRU_FILE);
|
2009-09-22 04:01:37 +04:00
|
|
|
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
|
2006-06-30 12:55:45 +04:00
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2009-01-08 05:08:18 +03:00
|
|
|
static int inactive_anon_is_low_global(struct zone *zone)
|
2009-01-08 05:08:14 +03:00
|
|
|
{
|
|
|
|
unsigned long active, inactive;
|
|
|
|
|
|
|
|
active = zone_page_state(zone, NR_ACTIVE_ANON);
|
|
|
|
inactive = zone_page_state(zone, NR_INACTIVE_ANON);
|
|
|
|
|
|
|
|
if (inactive * zone->inactive_ratio < active)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
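/*
 * Example with illustrative numbers only: if zone->inactive_ratio were 3,
 * then 900MB of inactive anon against 3GB of active anon reads as "low"
 * (900 * 3 < 3072), so some active anon pages should be deactivated.
 */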
|
|
|
|
|
2009-01-08 05:08:18 +03:00
|
|
|
/**
|
|
|
|
* inactive_anon_is_low - check if anonymous pages need to be deactivated
|
|
|
|
* @zone: zone to check
|
|
|
|
* @sc: scan control of this context
|
|
|
|
*
|
|
|
|
* Returns true if the zone does not have enough inactive anon pages,
|
|
|
|
* meaning some active anon pages need to be deactivated.
|
|
|
|
*/
|
|
|
|
static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
|
|
|
|
{
|
|
|
|
int low;
|
|
|
|
|
2009-01-08 05:08:23 +03:00
|
|
|
if (scanning_global_lru(sc))
|
2009-01-08 05:08:18 +03:00
|
|
|
low = inactive_anon_is_low_global(zone);
|
|
|
|
else
|
2009-01-08 05:08:25 +03:00
|
|
|
low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
|
2009-01-08 05:08:18 +03:00
|
|
|
return low;
|
|
|
|
}
|
|
|
|
|
2009-06-17 02:32:28 +04:00
|
|
|
static int inactive_file_is_low_global(struct zone *zone)
|
|
|
|
{
|
|
|
|
unsigned long active, inactive;
|
|
|
|
|
|
|
|
active = zone_page_state(zone, NR_ACTIVE_FILE);
|
|
|
|
inactive = zone_page_state(zone, NR_INACTIVE_FILE);
|
|
|
|
|
|
|
|
return (active > inactive);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* inactive_file_is_low - check if file pages need to be deactivated
|
|
|
|
* @zone: zone to check
|
|
|
|
* @sc: scan control of this context
|
|
|
|
*
|
|
|
|
* When the system is doing streaming IO, memory pressure here
|
|
|
|
* ensures that active file pages get deactivated, until more
|
|
|
|
* than half of the file pages are on the inactive list.
|
|
|
|
*
|
|
|
|
* Once we get to that situation, protect the system's working
|
|
|
|
* set from being evicted by disabling active file page aging.
|
|
|
|
*
|
|
|
|
* This uses a different ratio than the anonymous pages, because
|
|
|
|
* the page cache uses a use-once replacement algorithm.
|
|
|
|
*/
|
|
|
|
static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
|
|
|
|
{
|
|
|
|
int low;
|
|
|
|
|
|
|
|
if (scanning_global_lru(sc))
|
|
|
|
low = inactive_file_is_low_global(zone);
|
|
|
|
else
|
|
|
|
low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
|
|
|
|
return low;
|
|
|
|
}
|
|
|
|
|
2009-12-15 04:59:48 +03:00
|
|
|
static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
|
|
|
|
int file)
|
|
|
|
{
|
|
|
|
if (file)
|
|
|
|
return inactive_file_is_low(zone, sc);
|
|
|
|
else
|
|
|
|
return inactive_anon_is_low(zone, sc);
|
|
|
|
}
|
|
|
|
|
2008-10-19 07:26:32 +04:00
|
|
|
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
|
2008-10-19 07:26:14 +04:00
|
|
|
struct zone *zone, struct scan_control *sc, int priority)
|
|
|
|
{
|
2008-10-19 07:26:32 +04:00
|
|
|
int file = is_file_lru(lru);
|
|
|
|
|
2009-12-15 04:59:48 +03:00
|
|
|
if (is_active_lru(lru)) {
|
|
|
|
if (inactive_list_is_low(zone, sc, file))
|
|
|
|
shrink_active_list(nr_to_scan, zone, sc, priority, file);
|
2008-10-19 07:26:34 +04:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-10-19 07:26:36 +04:00
|
|
|
return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
|
2008-10-19 07:26:32 +04:00
|
|
|
}
|
|
|
|
|
vmscan: prevent get_scan_ratio() rounding errors
get_scan_ratio() calculates a percentage, and if the percentage is < 1% it
is rounded down to 0%, causing us to completely ignore scanning anon/file
pages to reclaim memory even when the total number of anon/file pages is
very large.
To avoid the underflow, we don't use a percentage; instead we directly
calculate how many pages should be scanned. This way we still get several
scanned pages for a share below 1%.
This has some benefits:
1. it increases our calculation precision;
2. it makes our scanning smoother. Without this, if percent[x]
underflows, shrink_zone() doesn't scan any pages and then suddenly scans
all pages when priority reaches zero. With this, even when priority isn't
zero, shrink_zone() gets a chance to scan some pages.
Note, this patch doesn't really change the logic, it just increases
precision. For a system with a lot of memory, this might slightly change
behavior. For example, in a sequential file read workload, without the
patch we don't swap any anon pages; with it, if the anon memory size is
bigger than 16G, we will see one anon page swapped. The 16G is calculated
as PAGE_SIZE * priority(4096) * (fp/ap), where fp/ap is assumed to be 1024,
which is common in this workload. So the impact is not a big deal.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
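The 16G works out to 2^12 * 2^12 * 2^10 bytes; a minimal user-space sketch
of that arithmetic (the 4096-byte page size and the 4096 priority divisor
are the values quoted above, and fp/ap = 1024 is the stated workload
assumption):

#include <stdio.h>

int main(void)
{
	unsigned long long page_size  = 4096;	/* bytes per page */
	unsigned long long prio_div   = 4096;	/* scan >>= priority at priority 12 */
	unsigned long long fp_over_ap = 1024;	/* assumed file:anon rotation ratio */

	/* smallest anon size that still yields one scanned anon page */
	printf("%lluG\n", (page_size * prio_div * fp_over_ap) >> 30);	/* prints 16 */
	return 0;
}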
2010-05-25 01:32:36 +04:00
|
|
|
/*
|
|
|
|
* Smallish @nr_to_scan's are deposited in @nr_saved_scan,
|
|
|
|
* until we have collected @swap_cluster_max pages to scan.
|
|
|
|
*/
|
|
|
|
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
|
|
|
|
unsigned long *nr_saved_scan)
|
|
|
|
{
|
|
|
|
unsigned long nr;
|
|
|
|
|
|
|
|
*nr_saved_scan += nr_to_scan;
|
|
|
|
nr = *nr_saved_scan;
|
|
|
|
|
|
|
|
if (nr >= SWAP_CLUSTER_MAX)
|
|
|
|
*nr_saved_scan = 0;
|
|
|
|
else
|
|
|
|
nr = 0;
|
|
|
|
|
|
|
|
return nr;
|
|
|
|
}
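A user-space usage sketch of the batching above (SWAP_CLUSTER_MAX is
assumed to be 32 here purely for illustration): requests smaller than the
threshold return 0 and accumulate in *nr_saved_scan until a whole batch
can be released at once.

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32	/* assumed threshold for this demo */

static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= SWAP_CLUSTER_MAX)
		*nr_saved_scan = 0;	/* release the whole batch */
	else
		nr = 0;			/* keep banking */

	return nr;
}

int main(void)
{
	unsigned long saved = 0;
	int i;

	for (i = 0; i < 5; i++)	/* five small requests of 10 pages each */
		printf("request %d -> scan %lu\n", i,
		       nr_scan_try_batch(10, &saved));
	return 0;		/* prints 0, 0, 0, 40, 0 */
}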
|
|
|
|
|
2008-10-19 07:26:32 +04:00
|
|
|
/*
|
|
|
|
* Determine how aggressively the anon and file LRU lists should be
|
|
|
|
* scanned. The relative value of each set of LRU lists is determined
|
|
|
|
* by looking at the fraction of the pages scanned we did rotate back
|
|
|
|
* onto the active list instead of evicting.
|
|
|
|
*
|
vmscan: prevent get_scan_ratio() rounding errors
2010-05-25 01:32:36 +04:00
|
|
|
* nr[0] = anon pages to scan; nr[1] = file pages to scan
|
2008-10-19 07:26:32 +04:00
|
|
|
*/
|
vmscan: prevent get_scan_ratio() rounding errors
2010-05-25 01:32:36 +04:00
|
|
|
static void get_scan_count(struct zone *zone, struct scan_control *sc,
|
|
|
|
unsigned long *nr, int priority)
|
2008-10-19 07:26:32 +04:00
|
|
|
{
|
|
|
|
unsigned long anon, file, free;
|
|
|
|
unsigned long anon_prio, file_prio;
|
|
|
|
unsigned long ap, fp;
|
2009-01-08 05:08:15 +03:00
|
|
|
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
|
vmscan: prevent get_scan_ratio() rounding errors
2010-05-25 01:32:36 +04:00
|
|
|
u64 fraction[2], denominator;
|
|
|
|
enum lru_list l;
|
|
|
|
int noswap = 0;
|
|
|
|
|
|
|
|
/* If we have no swap space, do not bother scanning anon pages. */
|
|
|
|
if (!sc->may_swap || (nr_swap_pages <= 0)) {
|
|
|
|
noswap = 1;
|
|
|
|
fraction[0] = 0;
|
|
|
|
fraction[1] = 1;
|
|
|
|
denominator = 1;
|
|
|
|
goto out;
|
|
|
|
}
|
2008-10-19 07:26:32 +04:00
|
|
|
|
2009-09-22 04:03:09 +04:00
|
|
|
anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
|
|
|
|
zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
|
|
|
|
file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
|
|
|
|
zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
|
2009-01-07 01:39:41 +03:00
|
|
|
|
2009-01-08 05:08:23 +03:00
|
|
|
if (scanning_global_lru(sc)) {
|
2009-01-08 05:08:17 +03:00
|
|
|
free = zone_page_state(zone, NR_FREE_PAGES);
|
|
|
|
/* If we have very few page cache pages,
|
|
|
|
force-scan anon pages. */
|
2009-06-17 02:32:12 +04:00
|
|
|
if (unlikely(file + free <= high_wmark_pages(zone))) {
|
vmscan: prevent get_scan_ratio() rounding errors
2010-05-25 01:32:36 +04:00
|
|
|
fraction[0] = 1;
|
|
|
|
fraction[1] = 0;
|
|
|
|
denominator = 1;
|
|
|
|
goto out;
|
2009-01-08 05:08:17 +03:00
|
|
|
}
|
2008-10-19 07:26:32 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* OK, so we have swap space and a fair amount of page cache
|
|
|
|
* pages. We use the recently rotated / recently scanned
|
|
|
|
* ratios to determine how valuable each cache is.
|
|
|
|
*
|
|
|
|
* Because workloads change over time (and to avoid overflow)
|
|
|
|
* we keep these statistics as a floating average, which ends
|
|
|
|
* up weighing recent references more than old ones.
|
|
|
|
*
|
|
|
|
* anon in [0], file in [1]
|
|
|
|
*/
|
2009-01-08 05:08:15 +03:00
|
|
|
if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
|
2008-10-19 07:26:32 +04:00
|
|
|
spin_lock_irq(&zone->lru_lock);
|
2009-01-08 05:08:15 +03:00
|
|
|
reclaim_stat->recent_scanned[0] /= 2;
|
|
|
|
reclaim_stat->recent_rotated[0] /= 2;
|
2008-10-19 07:26:32 +04:00
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
}
|
|
|
|
|
2009-01-08 05:08:15 +03:00
|
|
|
if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
|
2008-10-19 07:26:32 +04:00
|
|
|
spin_lock_irq(&zone->lru_lock);
|
2009-01-08 05:08:15 +03:00
|
|
|
reclaim_stat->recent_scanned[1] /= 2;
|
|
|
|
reclaim_stat->recent_rotated[1] /= 2;
|
2008-10-19 07:26:32 +04:00
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* With swappiness at 100, anonymous and file have the same priority.
|
|
|
|
* This scanning priority is essentially the inverse of IO cost.
|
|
|
|
*/
|
|
|
|
anon_prio = sc->swappiness;
|
|
|
|
file_prio = 200 - sc->swappiness;
|
|
|
|
|
|
|
|
/*
|
2008-11-20 02:36:44 +03:00
|
|
|
* The amount of pressure on anon vs file pages is inversely
|
|
|
|
* proportional to the fraction of recently scanned pages on
|
|
|
|
* each list that were recently referenced and in active use.
|
2008-10-19 07:26:32 +04:00
|
|
|
*/
|
2009-01-08 05:08:15 +03:00
|
|
|
ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
|
|
|
|
ap /= reclaim_stat->recent_rotated[0] + 1;
|
2008-10-19 07:26:32 +04:00
|
|
|
|
2009-01-08 05:08:15 +03:00
|
|
|
fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
|
|
|
|
fp /= reclaim_stat->recent_rotated[1] + 1;
|
2008-10-19 07:26:32 +04:00
|
|
|
|
vmscan: prevent get_scan_ratio() rounding errors
2010-05-25 01:32:36 +04:00
|
|
|
fraction[0] = ap;
|
|
|
|
fraction[1] = fp;
|
|
|
|
denominator = ap + fp + 1;
|
|
|
|
out:
|
|
|
|
for_each_evictable_lru(l) {
|
|
|
|
int file = is_file_lru(l);
|
|
|
|
unsigned long scan;
|
2009-06-17 02:32:29 +04:00
|
|
|
|
vmscan: prevent get_scan_ratio() rounding errors
2010-05-25 01:32:36 +04:00
|
|
|
scan = zone_nr_lru_pages(zone, sc, l);
|
|
|
|
if (priority || noswap) {
|
|
|
|
scan >>= priority;
|
|
|
|
scan = div64_u64(scan * fraction[file], denominator);
|
|
|
|
}
|
|
|
|
nr[l] = nr_scan_try_batch(scan,
|
|
|
|
&reclaim_stat->nr_saved_scan[l]);
|
|
|
|
}
|
2009-06-17 02:32:29 +04:00
|
|
|
}
|
2008-10-19 07:26:32 +04:00
|
|
|
|
2010-05-25 01:32:37 +04:00
|
|
|
static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If we need a large contiguous chunk of memory, or have
|
|
|
|
* trouble getting a small set of contiguous pages, we
|
|
|
|
* will reclaim both active and inactive pages.
|
|
|
|
*/
|
|
|
|
if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
|
|
|
|
sc->lumpy_reclaim_mode = 1;
|
|
|
|
else if (sc->order && priority < DEF_PRIORITY - 2)
|
|
|
|
sc->lumpy_reclaim_mode = 1;
|
|
|
|
else
|
|
|
|
sc->lumpy_reclaim_mode = 0;
|
|
|
|
}
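The mode selection above comes down to the allocation order and how badly
reclaim is struggling. Below is a standalone user-space restatement,
assuming the usual values PAGE_ALLOC_COSTLY_ORDER == 3 and
DEF_PRIORITY == 12; it is a sketch, not the kernel function itself.

#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3
#define DEF_PRIORITY 12

/* Mirrors set_lumpy_reclaim_mode(): lumpy reclaim kicks in immediately
 * for costly orders, and for any high-order request once priority has
 * dropped more than two levels below the default. */
static int lumpy_reclaim_mode(int order, int priority)
{
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return 1;
	if (order && priority < DEF_PRIORITY - 2)
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", lumpy_reclaim_mode(4, DEF_PRIORITY)); /* 1: costly order */
	printf("%d\n", lumpy_reclaim_mode(2, DEF_PRIORITY)); /* 0: not struggling yet */
	printf("%d\n", lumpy_reclaim_mode(2, 9));            /* 1: order > 0, struggling */
	return 0;
}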
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
|
|
|
|
*/
|
vmscan: bail out of direct reclaim after swap_cluster_max pages
When the VM is under pressure, it can happen that several direct reclaim
processes are in the pageout code simultaneously. It also happens that
the reclaiming processes run into mostly referenced, mapped and dirty
pages in the first round.
This results in multiple direct reclaim processes having a lower
pageout priority, which corresponds to a higher target of pages to
scan.
This in turn can result in each direct reclaim process freeing
many pages. Together, they can end up freeing way too many pages.
This kicks useful data out of memory (in some cases more than half
of all memory is swapped out). It also impacts performance by
keeping tasks stuck in the pageout code for too long.
A 30% improvement in hackbench has been observed with this patch.
The fix is relatively simple: in shrink_zone() we can check how many
pages we have already freed, so direct reclaim tasks break out of the
scanning loop once they have freed enough pages and have reached
a lower priority level.
We do not break out of shrink_zone() when priority == DEF_PRIORITY,
to ensure that equal pressure is applied to every zone in the common
case.
However, in order to do this we do need to know how many pages we already
freed, so move nr_reclaimed into scan_control.
akpm: a historical interlude...
We tried this in 2004:
:commit e468e46a9bea3297011d5918663ce6d19094cf87
:Author: akpm <akpm>
:Date: Thu Jun 24 15:53:52 2004 +0000
:
:[PATCH] vmscan.c: dont reclaim too many pages
:
: The shrink_zone() logic can, under some circumstances, cause far too many
: pages to be reclaimed. Say, we're scanning at high priority and suddenly hit
: a large number of reclaimable pages on the LRU.
: Change things so we bale out when SWAP_CLUSTER_MAX pages have been reclaimed.
And we reverted it in 2006:
:commit 210fe530305ee50cd889fe9250168228b2994f32
:Author: Andrew Morton <akpm@osdl.org>
:Date: Fri Jan 6 00:11:14 2006 -0800
:
: [PATCH] vmscan: balancing fix
:
: Revert a patch which went into 2.6.8-rc1. The changelog for that patch was:
:
: The shrink_zone() logic can, under some circumstances, cause far too many
: pages to be reclaimed. Say, we're scanning at high priority and suddenly
: hit a large number of reclaimable pages on the LRU.
:
: Change things so we bale out when SWAP_CLUSTER_MAX pages have been
: reclaimed.
:
: Problem is, this change caused significant imbalance in inter-zone scan
: balancing by truncating scans of larger zones.
:
: Suppose, for example, ZONE_HIGHMEM is 10x the size of ZONE_NORMAL. The zone
: balancing algorithm would require that if we're scanning 100 pages of
: ZONE_HIGHMEM, we should scan 10 pages of ZONE_NORMAL. But this logic will
: cause the scanning of ZONE_HIGHMEM to bale out after only 32 pages are
: reclaimed. Thus effectively causing smaller zones to be scanned relatively
: harder than large ones.
:
: Now I need to remember what the workload was which caused me to write this
: patch originally, then fix it up in a different way...
And we haven't demonstrated that whatever problem caused that reversion is
not being reintroduced by this change in 2008.
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-01-07 01:40:01 +03:00
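The resulting bail-out test is small enough to restate on its own. A
hedged user-space sketch of the rule follows (a simplification; the real
check sits in shrink_zone() below):

#include <stdio.h>

#define DEF_PRIORITY 12
#define SWAP_CLUSTER_MAX 32UL

/* Direct reclaim stops early once the target is met, but never on the
 * DEF_PRIORITY pass, so equal pressure still lands on every zone. */
static int should_bail(unsigned long nr_reclaimed,
		       unsigned long nr_to_reclaim, int priority)
{
	return nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY;
}

int main(void)
{
	printf("%d\n", should_bail(64, SWAP_CLUSTER_MAX, DEF_PRIORITY));     /* 0 */
	printf("%d\n", should_bail(64, SWAP_CLUSTER_MAX, DEF_PRIORITY - 1)); /* 1 */
	return 0;
}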
|
|
|
static void shrink_zone(int priority, struct zone *zone,
|
2006-03-22 11:08:20 +03:00
|
|
|
struct scan_control *sc)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2008-10-19 07:26:14 +04:00
|
|
|
unsigned long nr[NR_LRU_LISTS];
|
2006-03-22 11:08:18 +03:00
|
|
|
unsigned long nr_to_scan;
|
2008-10-19 07:26:14 +04:00
|
|
|
enum lru_list l;
|
2009-01-07 01:40:02 +03:00
|
|
|
unsigned long nr_reclaimed = sc->nr_reclaimed;
|
2009-12-15 04:59:10 +03:00
|
|
|
unsigned long nr_to_reclaim = sc->nr_to_reclaim;
|
2008-10-19 07:26:55 +04:00
|
|
|
|
2010-05-25 01:32:36 +04:00
|
|
|
get_scan_count(zone, sc, nr, priority);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-05-25 01:32:37 +04:00
|
|
|
set_lumpy_reclaim_mode(priority, sc);
|
|
|
|
|
2008-10-19 07:26:34 +04:00
|
|
|
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
|
|
|
|
nr[LRU_INACTIVE_FILE]) {
|
Unevictable LRU Infrastructure
When the system contains lots of mlocked or otherwise unevictable pages,
the pageout code (kswapd) can spend lots of time scanning over these
pages. Worse still, the presence of lots of unevictable pages can confuse
kswapd into thinking that more aggressive pageout modes are required,
resulting in all kinds of bad behaviour.
Infrastructure to manage pages excluded from reclaim--i.e., hidden from
vmscan. Based on a patch by Larry Woodman of Red Hat. Reworked to
maintain "unevictable" pages on a separate per-zone LRU list, to "hide"
them from vmscan.
Kosaki Motohiro added support for the memory controller's unevictable
lru list.
Pages on the unevictable list have both PG_unevictable and PG_lru set.
Thus, PG_unevictable is analogous to and mutually exclusive with
PG_active--it specifies which LRU list the page is on.
The unevictable infrastructure is enabled by a new mm Kconfig option
[CONFIG_]UNEVICTABLE_LRU.
A new function 'page_evictable(page, vma)' in vmscan.c tests whether or
not a page may be evictable. Subsequent patches will add the various
!evictable tests. We'll want to keep these tests light-weight for use in
shrink_active_list() and, possibly, the fault path.
To avoid races between tasks putting pages [back] onto an LRU list and
tasks that might be moving the page from non-evictable to evictable state,
the new function 'putback_lru_page()' -- inverse to 'isolate_lru_page()'
-- tests the "evictability" of a page after placing it on the LRU, before
dropping the reference. If the page has become unevictable,
putback_lru_page() will redo the 'putback', thus moving the page to the
unevictable list. This way, we avoid "stranding" evictable pages on the
unevictable list.
[akpm@linux-foundation.org: fix fallout from out-of-order merge]
[riel@redhat.com: fix UNEVICTABLE_LRU and !PROC_PAGE_MONITOR build]
[nishimura@mxp.nes.nec.co.jp: remove redundant mapping check]
[kosaki.motohiro@jp.fujitsu.com: unevictable-lru-infrastructure: putback_lru_page()/unevictable page handling rework]
[kosaki.motohiro@jp.fujitsu.com: kill unnecessary lock_page() in vmscan.c]
[kosaki.motohiro@jp.fujitsu.com: revert migration change of unevictable lru infrastructure]
[kosaki.motohiro@jp.fujitsu.com: revert to unevictable-lru-infrastructure-kconfig-fix.patch]
[kosaki.motohiro@jp.fujitsu.com: restore patch failure of vmstat-unevictable-and-mlocked-pages-vm-events.patch]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Debugged-by: Benjamin Kidwell <benjkidwell@yahoo.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:39 +04:00
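The putback race avoidance described above can be sketched in a few
lines. This is an illustrative user-space model, not the kernel's actual
putback_lru_page(); the struct and all helpers here are stand-ins.

#include <stdio.h>
#include <stdbool.h>

struct page { bool unevictable; int lru; };
enum { LRU_EVICTABLE, LRU_UNEVICTABLE };

static void add_to_lru(struct page *p, int lru) { p->lru = lru; }
static void del_from_lru(struct page *p)        { (void)p; }

/* Place the page on a list first, then re-check evictability before the
 * reference would be dropped; if another task changed the state in that
 * window, redo the putback so no evictable page is stranded. */
static void putback_lru_page_sketch(struct page *p)
{
	int lru;
redo:
	lru = p->unevictable ? LRU_UNEVICTABLE : LRU_EVICTABLE;
	add_to_lru(p, lru);
	if ((p->unevictable ? LRU_UNEVICTABLE : LRU_EVICTABLE) != lru) {
		del_from_lru(p);
		goto redo;
	}
	/* ...drop the page reference here... */
}

int main(void)
{
	struct page p = { .unevictable = true };
	putback_lru_page_sketch(&p);
	printf("page ended up on the %s list\n",
	       p.lru == LRU_UNEVICTABLE ? "unevictable" : "evictable");
	return 0;
}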
|
|
|
for_each_evictable_lru(l) {
|
2008-10-19 07:26:14 +04:00
|
|
|
if (nr[l]) {
|
2009-12-15 04:59:14 +03:00
|
|
|
nr_to_scan = min_t(unsigned long,
|
|
|
|
nr[l], SWAP_CLUSTER_MAX);
|
2008-10-19 07:26:14 +04:00
|
|
|
nr[l] -= nr_to_scan;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-01-07 01:40:02 +03:00
|
|
|
nr_reclaimed += shrink_list(l, nr_to_scan,
|
|
|
|
zone, sc, priority);
|
2008-10-19 07:26:14 +04:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2009-01-07 01:40:01 +03:00
|
|
|
/*
|
|
|
|
* On large memory systems, scan >> priority can become
|
|
|
|
* really large. This is fine for the starting priority;
|
|
|
|
* we want to put equal scanning pressure on each zone.
|
|
|
|
* However, if the VM has a harder time freeing pages,
|
|
|
|
* with multiple processes reclaiming pages, the total
|
|
|
|
* freeing target can get unreasonably large.
|
|
|
|
*/
|
2009-12-15 04:59:15 +03:00
|
|
|
if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
|
2009-01-07 01:40:01 +03:00
|
|
|
break;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2009-01-07 01:40:02 +03:00
|
|
|
sc->nr_reclaimed = nr_reclaimed;
|
|
|
|
|
2008-10-19 07:26:34 +04:00
|
|
|
/*
|
|
|
|
* Even if we did not try to evict anon pages at all, we want to
|
|
|
|
* rebalance the anon lru active/inactive ratio.
|
|
|
|
*/
|
2009-06-17 02:32:44 +04:00
|
|
|
if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
|
2008-10-19 07:26:34 +04:00
|
|
|
shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
|
|
|
|
|
2007-03-01 07:13:21 +03:00
|
|
|
throttle_vm_writeout(sc->gfp_mask);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is the direct reclaim path, for page-allocating processes. We only
|
|
|
|
* try to reclaim pages from zones which will satisfy the caller's allocation
|
|
|
|
* request.
|
|
|
|
*
|
2009-06-17 02:32:12 +04:00
|
|
|
* We reclaim from a zone even if that zone is over high_wmark_pages(zone).
|
|
|
|
* Because:
|
2005-04-17 02:20:36 +04:00
|
|
|
* a) The caller may be trying to free *extra* pages to satisfy a higher-order
|
|
|
|
* allocation or
|
2009-06-17 02:32:12 +04:00
|
|
|
* b) The target zone may be at high_wmark_pages(zone) but the lower zones
|
|
|
|
* must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
|
|
|
|
* zone defense algorithm.
|
2005-04-17 02:20:36 +04:00
|
|
|
*
|
|
|
|
* If a zone is deemed to be full of pinned pages then just give it a light
|
|
|
|
* scan and then give up on it.
|
|
|
|
*/
|
2010-05-25 01:32:40 +04:00
|
|
|
static int shrink_zones(int priority, struct zonelist *zonelist,
|
2006-03-22 11:08:20 +03:00
|
|
|
struct scan_control *sc)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2008-04-28 13:12:16 +04:00
|
|
|
enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
|
2008-04-28 13:12:17 +04:00
|
|
|
struct zoneref *z;
|
2008-04-28 13:12:16 +04:00
|
|
|
struct zone *zone;
|
2010-05-25 01:32:40 +04:00
|
|
|
int progress = 0;
|
2008-02-07 11:14:37 +03:00
|
|
|
|
2009-04-01 02:23:31 +04:00
|
|
|
for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
|
|
|
|
sc->nodemask) {
|
2006-01-06 11:11:15 +03:00
|
|
|
if (!populated_zone(zone))
|
2005-04-17 02:20:36 +04:00
|
|
|
continue;
|
2008-02-07 11:14:37 +03:00
|
|
|
/*
|
|
|
|
* Make sure memory controller reclaim has only a small influence
|
|
|
|
* on the global LRU.
|
|
|
|
*/
|
2009-01-08 05:08:23 +03:00
|
|
|
if (scanning_global_lru(sc)) {
|
2008-02-07 11:14:37 +03:00
|
|
|
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
|
|
|
|
continue;
|
|
|
|
note_zone_scanning_priority(zone, priority);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-03-06 00:41:55 +03:00
|
|
|
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
|
2008-02-07 11:14:37 +03:00
|
|
|
continue; /* Let kswapd poll it */
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Ignore cpuset limitation here. We just want to reduce
|
|
|
|
* the number of pages we use, regardless of memory shortage.
|
|
|
|
*/
|
|
|
|
mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
|
|
|
|
priority);
|
|
|
|
}
|
2006-09-26 10:31:27 +04:00
|
|
|
|
2009-01-07 01:40:01 +03:00
|
|
|
shrink_zone(priority, zone, sc);
|
2010-05-25 01:32:40 +04:00
|
|
|
progress = 1;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2010-05-25 01:32:40 +04:00
|
|
|
return progress;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2008-10-19 07:26:32 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* This is the main entry point to direct page reclaim.
|
|
|
|
*
|
|
|
|
* If a full scan of the inactive list fails to free enough memory then we
|
|
|
|
* are "out of memory" and something needs to be killed.
|
|
|
|
*
|
|
|
|
* If the caller is !__GFP_FS then the probability of a failure is reasonably
|
|
|
|
* high - the zone may be full of dirty or under-writeback pages, which this
|
2009-09-23 21:37:09 +04:00
|
|
|
* caller can't do much about. We kick the writeback threads and take explicit
|
|
|
|
* naps in the hope that some of these pages can be written. But if the
|
|
|
|
* allocating task holds filesystem locks which prevent writeout this might not
|
|
|
|
* work, and the allocation attempt will fail.
|
page allocator: smarter retry of costly-order allocations
Because of page order checks in __alloc_pages(), hugepage (and similarly
large order) allocations will not retry unless explicitly marked
__GFP_REPEAT. However, the current retry logic is nearly an infinite
loop (or runs until reclaim makes no progress whatsoever). For these costly
allocations, that seems like overkill and could potentially never
terminate. Mel observed that allowing current __GFP_REPEAT semantics for
hugepage allocations essentially killed the system. I believe this is
because we may continue to reclaim small orders of pages all over, but
never have enough to satisfy the hugepage allocation request. This is
clearly only a problem for large order allocations, of which hugepages
are the most obvious (to me).
Modify try_to_free_pages() to indicate how many pages were reclaimed.
Use that information in __alloc_pages() to eventually fail a large
__GFP_REPEAT allocation when we've reclaimed an order of pages equal to
or greater than the allocation's order. This relies on lumpy reclaim
functioning as advertised. Due to fragmentation, lumpy reclaim may not
be able to free up the order needed in one invocation, so multiple
iterations may be required. In other words, the more fragmented memory
is, the more retry attempts __GFP_REPEAT will make (particularly for
higher order allocations).
This changes the semantics of __GFP_REPEAT subtly, but *only* for
allocations > PAGE_ALLOC_COSTLY_ORDER. With this patch, for those size
allocations, we will try up to some point (at least 1<<order reclaimed
pages), rather than forever (which is the case for allocations <=
PAGE_ALLOC_COSTLY_ORDER).
This change improves the /proc/sys/vm/nr_hugepages interface with a
follow-on patch that makes pool allocations use __GFP_REPEAT. Rather
than administrators repeatedly echo'ing a particular value into the
sysctl, and forcing reclaim into action manually, this change allows for
the sysctl to attempt a reasonable effort itself. Similarly, dynamic
pool growth should be more successful under load, as lumpy reclaim can
try to free up pages, rather than failing right away.
Choosing to reclaim only up to the order of the requested allocation
strikes a balance between not failing hugepage allocations and returning
to the caller when it's unlikely to ever succeed. Because of lumpy
reclaim, if we have freed the order requested, hopefully it has been in
big chunks and those chunks will allow our allocation to succeed. If
that isn't the case after freeing up the current order, I don't think it
is likely to succeed in the future, although it is possible given a
particular fragmentation pattern.
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Tested-by: Mel Gorman <mel@csn.ul.ie>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-04-29 11:58:25 +04:00
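The retry bound described above reduces to a single comparison. Below is
a minimal user-space sketch of the policy, assuming
PAGE_ALLOC_COSTLY_ORDER == 3; it is simplified from the real
__alloc_pages() loop, and the function name is illustrative.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_ALLOC_COSTLY_ORDER 3

/* __GFP_REPEAT keeps retrying small orders indefinitely, but a costly
 * order gives up once reclaim has freed at least 1 << order pages. */
static bool keep_retrying(unsigned int order, unsigned long pages_reclaimed)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
	return pages_reclaimed < (1UL << order);
}

int main(void)
{
	printf("%d\n", keep_retrying(9, 100)); /* 1: under 512 pages freed */
	printf("%d\n", keep_retrying(9, 512)); /* 0: enough reclaimed, fail */
	return 0;
}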
|
|
|
*
|
|
|
|
* returns: 0, if no pages reclaimed
|
|
|
|
* else, the number of pages reclaimed
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2008-04-28 13:12:12 +04:00
|
|
|
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
|
2008-04-28 13:12:17 +04:00
|
|
|
struct scan_control *sc)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
int priority;
|
2008-06-13 02:21:27 +04:00
|
|
|
unsigned long ret = 0;
|
2006-03-22 11:08:19 +03:00
|
|
|
unsigned long total_scanned = 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct reclaim_state *reclaim_state = current->reclaim_state;
|
|
|
|
unsigned long lru_pages = 0;
|
2008-04-28 13:12:17 +04:00
|
|
|
struct zoneref *z;
|
2008-04-28 13:12:16 +04:00
|
|
|
struct zone *zone;
|
2008-04-28 13:12:17 +04:00
|
|
|
enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
|
2009-12-15 04:59:10 +03:00
|
|
|
unsigned long writeback_threshold;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-05-25 01:32:08 +04:00
|
|
|
get_mems_allowed();
|
2008-07-25 12:48:52 +04:00
|
|
|
delayacct_freepages_start();
|
|
|
|
|
2009-01-08 05:08:23 +03:00
|
|
|
if (scanning_global_lru(sc))
|
2008-02-07 11:14:37 +03:00
|
|
|
count_vm_event(ALLOCSTALL);
|
|
|
|
/*
|
|
|
|
* mem_cgroup will not do shrink_slab.
|
|
|
|
*/
|
2009-01-08 05:08:23 +03:00
|
|
|
if (scanning_global_lru(sc)) {
|
2008-04-28 13:12:16 +04:00
|
|
|
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-02-07 11:14:37 +03:00
|
|
|
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
|
|
|
|
continue;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-09-22 04:01:42 +04:00
|
|
|
lru_pages += zone_reclaimable_pages(zone);
|
2008-02-07 11:14:37 +03:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
|
2008-02-07 11:13:56 +03:00
|
|
|
sc->nr_scanned = 0;
|
2005-11-29 00:44:07 +03:00
|
|
|
if (!priority)
|
|
|
|
disable_swap_token();
|
2010-05-25 01:32:40 +04:00
|
|
|
ret = shrink_zones(priority, zonelist, sc);
|
2008-02-07 11:13:56 +03:00
|
|
|
/*
|
|
|
|
* Don't shrink slabs when reclaiming memory from
|
|
|
|
* over limit cgroups
|
|
|
|
*/
|
2009-01-08 05:08:23 +03:00
|
|
|
if (scanning_global_lru(sc)) {
|
2008-04-28 13:12:17 +04:00
|
|
|
shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
|
2008-02-07 11:14:29 +03:00
|
|
|
if (reclaim_state) {
|
2009-01-07 01:40:01 +03:00
|
|
|
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
|
2008-02-07 11:14:29 +03:00
|
|
|
reclaim_state->reclaimed_slab = 0;
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2008-02-07 11:13:56 +03:00
|
|
|
total_scanned += sc->nr_scanned;
|
2009-12-15 04:59:10 +03:00
|
|
|
if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
|
2009-01-07 01:40:01 +03:00
|
|
|
ret = sc->nr_reclaimed;
|
2005-04-17 02:20:36 +04:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to write back as many pages as we just scanned. This
|
|
|
|
* tends to cause slow streaming writers to write data to the
|
|
|
|
* disk smoothly, at the dirtying rate, which is nice. But
|
|
|
|
* that's undesirable in laptop mode, where we *want* lumpy
|
|
|
|
* writeout. So in laptop mode, write out the whole world.
|
|
|
|
*/
|
2009-12-15 04:59:10 +03:00
|
|
|
writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
|
|
|
|
if (total_scanned > writeback_threshold) {
|
2009-09-09 11:08:54 +04:00
|
|
|
wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
|
2008-02-07 11:13:56 +03:00
|
|
|
sc->may_writepage = 1;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Take a nap, wait for some writeback to complete */
|
vmscan: kill hibernation specific reclaim logic and unify it
shrink_all_zone() was introduced by commit d6277db4ab (swsusp: rework
memory shrinker) for hibernate performance improvement. and
sc.swap_cluster_max was introduced by commit a06fe4d307 (Speed freeing
memory for suspend).
commit a06fe4d307 said
Without the patch:
Freed 14600 pages in 1749 jiffies = 32.61 MB/s (Anomolous!)
Freed 88563 pages in 14719 jiffies = 23.50 MB/s
Freed 205734 pages in 32389 jiffies = 24.81 MB/s
With the patch:
Freed 68252 pages in 496 jiffies = 537.52 MB/s
Freed 116464 pages in 569 jiffies = 798.54 MB/s
Freed 209699 pages in 705 jiffies = 1161.89 MB/s
At that time, their patch was well worth it. However, modern hardware
trends and recent VM improvements have eroded its value. For several
reasons, I think we should remove shrink_all_zones() entirely.
detail:
1) In the old days, shrink_zone()'s slowness was mainly caused by pointless
io-throttling when there was no I/O congestion,
but the current shrink_zone() is sane and not slow.
2) shrink_all_zones() tries to shrink all pages at once, but that doesn't
work well on NUMA systems.
example)
The system has 4GB of memory, each node has 2GB, and hibernation needs 1GB.
optimal)
steal 500MB from each node.
shrink_all_zones)
steal 1GB from node-0.
Oh, the cache balancing logic was broken. ;)
Unfortunately, desktop systems have moved to NUMA nowadays.
(Side note: if hibernation requires 2GB, shrink_all_zones() can never
succeed on the above machine.)
3) If a node has several pages with I/O in flight, shrink_all_zones()
produces pretty bad results.
scenario) hibernation needs 1GB
1) shrink_all_zones() tries to reclaim 1GB from Node-0
2) but it only reclaims 990MB
3) stupidly, shrink_all_zones() tries to reclaim 1GB from Node-1
4) it reclaims 990MB
Oh well, it reclaimed twice as much as required.
On the other hand, the current shrink_zone() has sane bail-out logic,
so it doesn't over-reclaim, and we lose shrink_all_zones()'s risk.
4) The SplitLRU VM always keeps the active/inactive ratio very carefully;
shrinking only the inactive list breaks that assumption and creates
unnecessary OOM risk. It is obviously suboptimal.
Now shrink_all_memory() is only a wrapper around do_try_to_free_pages().
This brings good reviewability and debuggability, and solves the above
problems.
side note: the reclaim logic unification has two good side effects.
- It fixes a recursive reclaim bug in shrink_all_memory(): it forgot to
use PF_MEMALLOC, meaning the system could get stuck in a deadlock.
- shrink_all_memory() now has lockdep awareness, which brings good
debuggability.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-12-15 04:59:12 +03:00
|
|
|
if (!sc->hibernation_mode && sc->nr_scanned &&
|
|
|
|
priority < DEF_PRIORITY - 2)
|
2009-07-09 16:52:32 +04:00
|
|
|
congestion_wait(BLK_RW_ASYNC, HZ/10);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2008-07-30 09:33:42 +04:00
|
|
|
/* top priority shrink_zones still had more to do? don't OOM, then */
|
2010-05-25 01:32:40 +04:00
|
|
|
if (ret && scanning_global_lru(sc))
|
2009-01-07 01:40:01 +03:00
|
|
|
ret = sc->nr_reclaimed;
|
2005-04-17 02:20:36 +04:00
|
|
|
out:
|
[PATCH] vmscan: Fix temp_priority race
The temp_priority field in zone is racy, as we can walk through a reclaim
path, and just before we copy it into prev_priority, it can be overwritten
(say with DEF_PRIORITY) by another reclaimer.
The same bug is contained in both try_to_free_pages and balance_pgdat, but
it is fixed slightly differently. In balance_pgdat, we keep a separate
priority record per zone in a local array. In try_to_free_pages there is
no need to do this, as the priority level is the same for all zones that we
reclaim from.
Impact of this bug is that temp_priority is copied into prev_priority, and
setting this artificially high causes reclaimers to set distress
artificially low. They then fail to reclaim mapped pages, when they are,
in fact, under severe memory pressure (their priority may be as low as 0).
This causes the OOM killer to fire incorrectly.
From: Andrew Morton <akpm@osdl.org>
__zone_reclaim() isn't modifying zone->prev_priority. But zone->prev_priority
is used in the decision whether or not to bring mapped pages onto the inactive
list. Hence there's a risk here that __zone_reclaim() will fail because
zone->prev_priority is large (ie: low urgency) and lots of mapped pages end up
stuck on the active list.
Fix that up by decreasing (ie making more urgent) zone->prev_priority as
__zone_reclaim() scans the zone's pages.
This bug perhaps explains why ZONE_RECLAIM_PRIORITY was created. It should be
possible to remove that now, and to just start out at DEF_PRIORITY?
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-10-28 21:38:24 +04:00
|
|
|
/*
|
|
|
|
* Now that we've scanned all the zones at this priority level, note
|
|
|
|
* that level within the zone so that the next thread which performs
|
|
|
|
* scanning of this zone will immediately start out at this priority
|
|
|
|
* level. This affects only the decision whether or not to bring
|
|
|
|
* mapped pages onto the inactive list.
|
|
|
|
*/
|
|
|
|
if (priority < 0)
|
|
|
|
priority = 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-01-08 05:08:23 +03:00
|
|
|
if (scanning_global_lru(sc)) {
|
2008-04-28 13:12:16 +04:00
|
|
|
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
|
2008-02-07 11:14:37 +03:00
|
|
|
|
|
|
|
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
zone->prev_priority = priority;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-07-25 12:48:52 +04:00
|
|
|
delayacct_freepages_end();
|
2010-05-25 01:32:08 +04:00
|
|
|
put_mems_allowed();
|
2008-07-25 12:48:52 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-04-28 13:12:12 +04:00
|
|
|
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
|
2009-04-01 02:23:31 +04:00
|
|
|
gfp_t gfp_mask, nodemask_t *nodemask)
|
2008-02-07 11:13:56 +03:00
|
|
|
{
|
|
|
|
struct scan_control sc = {
|
|
|
|
.gfp_mask = gfp_mask,
|
|
|
|
.may_writepage = !laptop_mode,
|
2009-12-15 04:59:10 +03:00
|
|
|
.nr_to_reclaim = SWAP_CLUSTER_MAX,
|
2009-04-01 02:19:30 +04:00
|
|
|
.may_unmap = 1,
|
2009-04-21 23:24:57 +04:00
|
|
|
.may_swap = 1,
|
2008-02-07 11:13:56 +03:00
|
|
|
.swappiness = vm_swappiness,
|
|
|
|
.order = order,
|
|
|
|
.mem_cgroup = NULL,
|
2009-04-01 02:23:31 +04:00
|
|
|
.nodemask = nodemask,
|
2008-02-07 11:13:56 +03:00
|
|
|
};
|
|
|
|
|
2008-04-28 13:12:17 +04:00
|
|
|
return do_try_to_free_pages(zonelist, &sc);
|
2008-02-07 11:13:56 +03:00
|
|
|
}
|
|
|
|
|
2008-03-05 01:28:39 +03:00
|
|
|
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
|
2008-02-07 11:13:56 +03:00
|
|
|
|
2009-09-24 02:56:39 +04:00
|
|
|
unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
|
|
|
|
gfp_t gfp_mask, bool noswap,
|
|
|
|
unsigned int swappiness,
|
|
|
|
struct zone *zone, int nid)
|
|
|
|
{
|
|
|
|
struct scan_control sc = {
|
|
|
|
.may_writepage = !laptop_mode,
|
|
|
|
.may_unmap = 1,
|
|
|
|
.may_swap = !noswap,
|
|
|
|
.swappiness = swappiness,
|
|
|
|
.order = 0,
|
|
|
|
.mem_cgroup = mem,
|
|
|
|
};
|
|
|
|
nodemask_t nm = nodemask_of_node(nid);
|
|
|
|
|
|
|
|
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
|
|
|
|
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
|
|
|
|
sc.nodemask = &nm;
|
|
|
|
sc.nr_reclaimed = 0;
|
|
|
|
sc.nr_scanned = 0;
|
|
|
|
/*
|
|
|
|
* NOTE: Although we can get the priority field, using it
|
|
|
|
* here is not a good idea, since it limits the pages we can scan.
|
|
|
|
* If we don't reclaim here, the shrink_zone from balance_pgdat
|
|
|
|
* will pick up pages from other mem cgroups as well. We hack
|
|
|
|
* the priority and make it zero.
|
|
|
|
*/
|
|
|
|
shrink_zone(0, zone, &sc);
|
|
|
|
return sc.nr_reclaimed;
|
|
|
|
}
|
|
|
|
|
2008-02-07 11:14:02 +03:00
|
|
|
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
|
2009-01-08 05:08:24 +03:00
|
|
|
gfp_t gfp_mask,
|
|
|
|
bool noswap,
|
|
|
|
unsigned int swappiness)
|
2008-02-07 11:13:56 +03:00
|
|
|
{
|
2009-09-24 02:56:39 +04:00
|
|
|
struct zonelist *zonelist;
|
2008-02-07 11:13:56 +03:00
|
|
|
struct scan_control sc = {
|
|
|
|
.may_writepage = !laptop_mode,
|
2009-04-01 02:19:30 +04:00
|
|
|
.may_unmap = 1,
|
2009-04-21 23:24:57 +04:00
|
|
|
.may_swap = !noswap,
|
2009-12-15 04:59:10 +03:00
|
|
|
.nr_to_reclaim = SWAP_CLUSTER_MAX,
|
2009-01-08 05:08:24 +03:00
|
|
|
.swappiness = swappiness,
|
2008-02-07 11:13:56 +03:00
|
|
|
.order = 0,
|
|
|
|
.mem_cgroup = mem_cont,
|
2009-04-01 02:23:31 +04:00
|
|
|
.nodemask = NULL, /* we don't care about placement */
|
2008-02-07 11:13:56 +03:00
|
|
|
};
|
|
|
|
|
2008-04-28 13:12:17 +04:00
|
|
|
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
|
|
|
|
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
|
|
|
|
zonelist = NODE_DATA(numa_node_id())->node_zonelists;
|
|
|
|
return do_try_to_free_pages(zonelist, &sc);
|
2008-02-07 11:13:56 +03:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-12-15 04:58:53 +03:00
|
|
|
/* is kswapd sleeping prematurely? */
|
2009-12-15 04:58:55 +03:00
|
|
|
static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
|
2009-12-15 04:58:53 +03:00
|
|
|
{
|
2009-12-15 04:58:55 +03:00
|
|
|
int i;
|
2009-12-15 04:58:53 +03:00
|
|
|
|
|
|
|
/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
|
|
|
|
if (remaining)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
/* If after HZ/10, a zone is below the high mark, it's premature */
|
2009-12-15 04:58:55 +03:00
|
|
|
for (i = 0; i < pgdat->nr_zones; i++) {
|
|
|
|
struct zone *zone = pgdat->node_zones + i;
|
|
|
|
|
|
|
|
if (!populated_zone(zone))
|
|
|
|
continue;
|
|
|
|
|
2010-03-06 00:41:55 +03:00
|
|
|
if (zone->all_unreclaimable)
|
2010-01-16 04:01:25 +03:00
|
|
|
continue;
|
|
|
|
|
2009-12-15 04:58:53 +03:00
|
|
|
if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
|
|
|
|
0, 0))
|
|
|
|
return 1;
|
2009-12-15 04:58:55 +03:00
|
|
|
}
|
2009-12-15 04:58:53 +03:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* For kswapd, balance_pgdat() will work across all this node's zones until
|
2009-06-17 02:32:12 +04:00
|
|
|
* they are all at high_wmark_pages(zone).
|
2005-04-17 02:20:36 +04:00
|
|
|
*
|
|
|
|
* Returns the number of pages which were actually freed.
|
|
|
|
*
|
|
|
|
* There is special handling here for zones which are full of pinned pages.
|
|
|
|
* This can happen if the pages are all mlocked, or if they are all used by
|
|
|
|
* device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
|
|
|
|
* What we do is to detect the case where all pages in the zone have been
|
|
|
|
* scanned twice and there has been zero successful reclaim. Mark the zone as
|
|
|
|
* dead and from now on, only perform a short scan. Basically we're polling
|
|
|
|
* the zone for when the problem goes away.
|
|
|
|
*
|
|
|
|
* kswapd scans the zones in the highmem->normal->dma direction. It skips
|
2009-06-17 02:32:12 +04:00
|
|
|
* zones which have free_pages > high_wmark_pages(zone), but once a zone is
|
|
|
|
* found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
|
|
|
|
* lower zones regardless of the number of free pages in the lower zones. This
|
|
|
|
* interoperates with the page allocator fallback scheme to ensure that aging
|
|
|
|
* of pages is balanced across the zones.
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2006-06-23 13:03:18 +04:00
|
|
|
static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
int all_zones_ok;
|
|
|
|
int priority;
|
|
|
|
int i;
|
2006-03-22 11:08:19 +03:00
|
|
|
unsigned long total_scanned;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct reclaim_state *reclaim_state = current->reclaim_state;
|
2006-03-22 11:08:18 +03:00
|
|
|
struct scan_control sc = {
|
|
|
|
.gfp_mask = GFP_KERNEL,
|
2009-04-01 02:19:30 +04:00
|
|
|
.may_unmap = 1,
|
2009-04-21 23:24:57 +04:00
|
|
|
.may_swap = 1,
|
2009-12-15 04:59:10 +03:00
|
|
|
/*
|
|
|
|
* kswapd doesn't want to be bailed out of reclaim, because
|
|
|
|
* we want to put equal scanning pressure on each zone.
|
|
|
|
*/
|
|
|
|
.nr_to_reclaim = ULONG_MAX,
|
2006-06-23 13:03:18 +04:00
|
|
|
.swappiness = vm_swappiness,
|
2007-07-17 15:03:16 +04:00
|
|
|
.order = order,
|
2008-02-07 11:13:56 +03:00
|
|
|
.mem_cgroup = NULL,
|
2006-03-22 11:08:18 +03:00
|
|
|
};
|
2006-10-28 21:38:24 +04:00
|
|
|
/*
|
|
|
|
* temp_priority is used to remember the scanning priority at which
|
2009-06-17 02:32:12 +04:00
|
|
|
* this zone was successfully refilled to
|
|
|
|
* free_pages == high_wmark_pages(zone).
|
2006-10-28 21:38:24 +04:00
|
|
|
*/
|
|
|
|
int temp_priority[MAX_NR_ZONES];
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
loop_again:
|
|
|
|
total_scanned = 0;
|
vmscan: bail out of direct reclaim after swap_cluster_max pages
When the VM is under pressure, it can happen that several direct reclaim
processes are in the pageout code simultaneously. It also happens that
the reclaiming processes run into mostly referenced, mapped and dirty
pages in the first round.
This results in multiple direct reclaim processes having a lower
pageout priority, which corresponds to a higher target of pages to
scan.
This in turn can result in each direct reclaim process freeing
many pages. Together, they can end up freeing way too many pages.
This kicks useful data out of memory (in some cases more than half
of all memory is swapped out). It also impacts performance by
keeping tasks stuck in the pageout code for too long.
A 30% improvement in hackbench has been observed with this patch.
The fix is relatively simple: in shrink_zone() we can check how many
pages we have already freed; direct reclaim tasks break out of the
scanning loop if they have already freed enough pages and have reached
a lower priority level.
We do not break out of shrink_zone() when priority == DEF_PRIORITY,
to ensure that equal pressure is applied to every zone in the common
case.
However, in order to do this we do need to know how many pages we already
freed, so move nr_reclaimed into scan_control.
akpm: a historical interlude...
We tried this in 2004:
:commit e468e46a9bea3297011d5918663ce6d19094cf87
:Author: akpm <akpm>
:Date: Thu Jun 24 15:53:52 2004 +0000
:
:[PATCH] vmscan.c: dont reclaim too many pages
:
: The shrink_zone() logic can, under some circumstances, cause far too many
: pages to be reclaimed. Say, we're scanning at high priority and suddenly hit
: a large number of reclaimable pages on the LRU.
: Change things so we bale out when SWAP_CLUSTER_MAX pages have been reclaimed.
And we reverted it in 2006:
:commit 210fe530305ee50cd889fe9250168228b2994f32
:Author: Andrew Morton <akpm@osdl.org>
:Date: Fri Jan 6 00:11:14 2006 -0800
:
: [PATCH] vmscan: balancing fix
:
: Revert a patch which went into 2.6.8-rc1. The changelog for that patch was:
:
: The shrink_zone() logic can, under some circumstances, cause far too many
: pages to be reclaimed. Say, we're scanning at high priority and suddenly
: hit a large number of reclaimable pages on the LRU.
:
: Change things so we bale out when SWAP_CLUSTER_MAX pages have been
: reclaimed.
:
: Problem is, this change caused significant imbalance in inter-zone scan
: balancing by truncating scans of larger zones.
:
: Suppose, for example, ZONE_HIGHMEM is 10x the size of ZONE_NORMAL. The zone
: balancing algorithm would require that if we're scanning 100 pages of
: ZONE_HIGHMEM, we should scan 10 pages of ZONE_NORMAL. But this logic will
: cause the scanning of ZONE_HIGHMEM to bale out after only 32 pages are
: reclaimed. Thus effectively causing smaller zones to be scanned relatively
: harder than large ones.
:
: Now I need to remember what the workload was which caused me to write this
: patch originally, then fix it up in a different way...
And we haven't demonstrated that whatever problem caused that reversion is
not being reintroduced by this change in 2008.
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-01-07 01:40:01 +03:00
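The bail-out itself sits in shrink_zone(); roughly, a hedged sketch of the check this changelog adds (not the verbatim kernel lines):

	/* Direct reclaimers stop once they have freed enough, but never
	 * at DEF_PRIORITY, so the first pass still puts equal pressure
	 * on every zone. kswapd opts out by setting nr_to_reclaim to
	 * ULONG_MAX in the scan_control above. */
	if (sc->nr_reclaimed >= sc->nr_to_reclaim && priority < DEF_PRIORITY)
		break;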
|
|
|
sc.nr_reclaimed = 0;
|
2006-06-12 02:22:26 +04:00
|
|
|
sc.may_writepage = !laptop_mode;
|
2006-06-30 12:55:45 +04:00
|
|
|
count_vm_event(PAGEOUTRUN);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
[PATCH] vmscan: Fix temp_priority race
The temp_priority field in zone is racy, as we can walk through a reclaim
path, and just before we copy it into prev_priority, it can be overwritten
(say with DEF_PRIORITY) by another reclaimer.
The same bug is contained in both try_to_free_pages and balance_pgdat, but
it is fixed slightly differently. In balance_pgdat, we keep a separate
priority record per zone in a local array. In try_to_free_pages there is
no need to do this, as the priority level is the same for all zones that we
reclaim from.
Impact of this bug is that temp_priority is copied into prev_priority, and
setting this artificially high causes reclaimers to set distress
artificially low. They then fail to reclaim mapped pages, when they are,
in fact, under severe memory pressure (their priority may be as low as 0).
This causes the OOM killer to fire incorrectly.
From: Andrew Morton <akpm@osdl.org>
__zone_reclaim() isn't modifying zone->prev_priority. But zone->prev_priority
is used in the decision whether or not to bring mapped pages onto the inactive
list. Hence there's a risk here that __zone_reclaim() will fail because
zone->prev_priority ir large (ie: low urgency) and lots of mapped pages end up
stuck on the active list.
Fix that up by decreasing (ie making more urgent) zone->prev_priority as
__zone_reclaim() scans the zone's pages.
This bug perhaps explains why ZONE_RECLAIM_PRIORITY was created. It should be
possible to remove that now, and to just start out at DEF_PRIORITY?
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-10-28 21:38:24 +04:00
|
|
|
for (i = 0; i < pgdat->nr_zones; i++)
|
|
|
|
temp_priority[i] = DEF_PRIORITY;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
|
|
|
|
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
|
|
|
|
unsigned long lru_pages = 0;
|
2009-12-15 04:58:55 +03:00
|
|
|
int has_under_min_watermark_zone = 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2005-11-29 00:44:07 +03:00
|
|
|
/* The swap token gets in the way of swapout... */
|
|
|
|
if (!priority)
|
|
|
|
disable_swap_token();
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
all_zones_ok = 1;
|
|
|
|
|
2006-06-23 13:03:18 +04:00
|
|
|
/*
|
|
|
|
* Scan in the highmem->dma direction for the highest
|
|
|
|
* zone which needs scanning
|
|
|
|
*/
|
|
|
|
for (i = pgdat->nr_zones - 1; i >= 0; i--) {
|
|
|
|
struct zone *zone = pgdat->node_zones + i;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-06-23 13:03:18 +04:00
|
|
|
if (!populated_zone(zone))
|
|
|
|
continue;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-03-06 00:41:55 +03:00
|
|
|
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
|
2006-06-23 13:03:18 +04:00
|
|
|
continue;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-10-19 07:26:34 +04:00
|
|
|
/*
|
|
|
|
* Do some background aging of the anon list, to give
|
|
|
|
* pages a chance to be referenced before reclaiming.
|
|
|
|
*/
|
2009-01-08 05:08:18 +03:00
|
|
|
if (inactive_anon_is_low(zone, &sc))
|
2008-10-19 07:26:34 +04:00
|
|
|
shrink_active_list(SWAP_CLUSTER_MAX, zone,
|
|
|
|
&sc, priority, 0);
|
|
|
|
|
2009-06-17 02:32:12 +04:00
|
|
|
if (!zone_watermark_ok(zone, order,
|
|
|
|
high_wmark_pages(zone), 0, 0)) {
|
2006-06-23 13:03:18 +04:00
|
|
|
end_zone = i;
|
2006-12-07 07:32:01 +03:00
|
|
|
break;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
2006-12-07 07:32:01 +03:00
|
|
|
if (i < 0)
|
|
|
|
goto out;
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
for (i = 0; i <= end_zone; i++) {
|
|
|
|
struct zone *zone = pgdat->node_zones + i;
|
|
|
|
|
2009-09-22 04:01:42 +04:00
|
|
|
lru_pages += zone_reclaimable_pages(zone);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now scan the zone in the dma->highmem direction, stopping
|
|
|
|
* at the last zone which needs scanning.
|
|
|
|
*
|
|
|
|
* We do this because the page allocator works in the opposite
|
|
|
|
* direction. This prevents the page allocator from allocating
|
|
|
|
* pages behind kswapd's direction of progress, which would
|
|
|
|
* cause too much scanning of the lower zones.
|
|
|
|
*/
|
|
|
|
for (i = 0; i <= end_zone; i++) {
|
|
|
|
struct zone *zone = pgdat->node_zones + i;
|
2005-06-22 04:14:35 +04:00
|
|
|
int nr_slab;
|
2009-09-24 02:56:39 +04:00
|
|
|
int nid, zid;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-01-06 11:11:15 +03:00
|
|
|
if (!populated_zone(zone))
|
2005-04-17 02:20:36 +04:00
|
|
|
continue;
|
|
|
|
|
2010-03-06 00:41:55 +03:00
|
|
|
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
|
2005-04-17 02:20:36 +04:00
|
|
|
continue;
|
|
|
|
|
2006-10-28 21:38:24 +04:00
|
|
|
temp_priority[i] = priority;
|
2005-04-17 02:20:36 +04:00
|
|
|
sc.nr_scanned = 0;
|
2006-10-28 21:38:24 +04:00
|
|
|
note_zone_scanning_priority(zone, priority);
|
2009-09-24 02:56:39 +04:00
|
|
|
|
|
|
|
nid = pgdat->node_id;
|
|
|
|
zid = zone_idx(zone);
|
|
|
|
/*
|
|
|
|
* Call soft limit reclaim before calling shrink_zone.
|
|
|
|
* For now we ignore the return value
|
|
|
|
*/
|
|
|
|
mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
|
|
|
|
nid, zid);
|
mm: prevent kswapd from freeing excessive amounts of lowmem
The current VM can get itself into trouble fairly easily on systems with a
small ZONE_HIGHMEM, which is common on i686 computers with 1GB of memory.
On one side, page_alloc() will allocate down to zone->pages_low, while on
the other side, kswapd() and balance_pgdat() will try to free memory from
every zone, until every zone has more free pages than zone->pages_high.
Highmem can be filled up to zone->pages_low with page tables, ramfs,
vmalloc allocations and other unswappable things quite easily and without
many bad side effects, since we still have a huge ZONE_NORMAL to do future
allocations from.
However, as long as the number of free pages in the highmem zone is below
zone->pages_high, kswapd will continue swapping things out from
ZONE_NORMAL, too!
Sami Farin managed to get his system into a stage where kswapd had freed
about 700MB of low memory and was still "going strong".
The attached patch will make kswapd stop paging out data from zones when
there is more than enough memory free. We do go above zone->pages_high in
order to keep pressure between zones equal in normal circumstances, but the
patch should prevent the kind of excesses that made Sami's computer totally
unusable.
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-10-16 12:24:50 +04:00
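As a worked example (numbers hypothetical): if high_wmark_pages(zone) is 128, the 8*high_wmark_pages(zone) test below lets kswapd keep shrinking the zone only while it has fewer than 8 * 128 = 1024 free pages; past that point the zone is left alone rather than draining ZONE_NORMAL to overfill a small ZONE_HIGHMEM.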
|
|
|
/*
|
|
|
|
* We put equal pressure on every zone, unless one
|
|
|
|
* zone has way too many pages free already.
|
|
|
|
*/
|
2009-06-17 02:32:12 +04:00
|
|
|
if (!zone_watermark_ok(zone, order,
|
|
|
|
8*high_wmark_pages(zone), end_zone, 0))
|
2009-01-07 01:40:01 +03:00
|
|
|
shrink_zone(priority, zone, &sc);
|
2005-04-17 02:20:36 +04:00
|
|
|
reclaim_state->reclaimed_slab = 0;
|
2005-06-22 04:14:35 +04:00
|
|
|
nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
|
|
|
|
lru_pages);
|
2009-01-07 01:40:01 +03:00
|
|
|
sc.nr_reclaimed += reclaim_state->reclaimed_slab;
|
2005-04-17 02:20:36 +04:00
|
|
|
total_scanned += sc.nr_scanned;
|
2010-03-06 00:41:55 +03:00
|
|
|
if (zone->all_unreclaimable)
|
2005-04-17 02:20:36 +04:00
|
|
|
continue;
|
2010-03-06 00:41:55 +03:00
|
|
|
if (nr_slab == 0 &&
|
|
|
|
zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
|
|
|
|
zone->all_unreclaimable = 1;
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* If we've done a decent amount of scanning and
|
|
|
|
* the reclaim ratio is low, start doing writepage
|
|
|
|
* even in laptop mode
|
|
|
|
*/
|
|
|
|
if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
|
2009-01-07 01:40:01 +03:00
|
|
|
total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
|
2005-04-17 02:20:36 +04:00
|
|
|
sc.may_writepage = 1;
|
2009-12-15 04:58:55 +03:00
|
|
|
|
2010-03-06 00:41:45 +03:00
|
|
|
if (!zone_watermark_ok(zone, order,
|
|
|
|
high_wmark_pages(zone), end_zone, 0)) {
|
|
|
|
all_zones_ok = 0;
|
|
|
|
/*
|
|
|
|
* We are still under the min watermark. This
|
|
|
|
* means that we have a GFP_ATOMIC allocation
|
|
|
|
* failure risk. Hurry up!
|
|
|
|
*/
|
|
|
|
if (!zone_watermark_ok(zone, order,
|
|
|
|
min_wmark_pages(zone), end_zone, 0))
|
|
|
|
has_under_min_watermark_zone = 1;
|
|
|
|
}
|
2009-12-15 04:58:55 +03:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
if (all_zones_ok)
|
|
|
|
break; /* kswapd: all done */
|
|
|
|
/*
|
|
|
|
* OK, kswapd is getting into trouble. Take a nap, then take
|
|
|
|
* another pass across the zones.
|
|
|
|
*/
|
2009-12-15 04:58:55 +03:00
|
|
|
if (total_scanned && (priority < DEF_PRIORITY - 2)) {
|
|
|
|
if (has_under_min_watermark_zone)
|
|
|
|
count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
|
|
|
|
else
|
|
|
|
congestion_wait(BLK_RW_ASYNC, HZ/10);
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We do this so kswapd doesn't build up large priorities for
|
|
|
|
* example when it is freeing in parallel with allocators. It
|
|
|
|
* matches the direct reclaim path behaviour in terms of impact
|
|
|
|
* on zone->*_priority.
|
|
|
|
*/
|
2009-01-07 01:40:01 +03:00
|
|
|
if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
|
2005-04-17 02:20:36 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
out:
|
2006-10-28 21:38:24 +04:00
|
|
|
/*
|
|
|
|
* Note within each zone the priority level at which this zone was
|
|
|
|
* brought into a happy state, so that the next thread which scans this
|
|
|
|
* zone will start out at that priority level.
|
|
|
|
*/
|
2005-04-17 02:20:36 +04:00
|
|
|
for (i = 0; i < pgdat->nr_zones; i++) {
|
|
|
|
struct zone *zone = pgdat->node_zones + i;
|
|
|
|
|
2006-10-28 21:38:24 +04:00
|
|
|
zone->prev_priority = temp_priority[i];
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
if (!all_zones_ok) {
|
|
|
|
cond_resched();
|
[PATCH] swsusp: Improve handling of highmem
Currently swsusp saves the contents of highmem pages by copying them to the
normal zone which is quite inefficient (eg. it requires two normal pages
to be used for saving one highmem page). This may be improved by using
highmem for saving the contents of saveable highmem pages.
Namely, during the suspend phase of the suspend-resume cycle we try to
allocate as many free highmem pages as there are saveable highmem pages.
If there are not enough highmem image pages to store the contents of all of
the saveable highmem pages, some of them will be stored in the "normal"
memory. Next, we allocate as many free "normal" pages as needed to store
the (remaining) image data. We use a memory bitmap to mark the allocated
free pages (ie. highmem as well as "normal" image pages).
Now, we use another memory bitmap to mark all of the saveable pages
(highmem as well as "normal") and the contents of the saveable pages are
copied into the image pages. Then, the second bitmap is used to save the
pfns corresponding to the saveable pages and the first one is used to save
their data.
During the resume phase the pfns of the pages that were saveable during the
suspend are loaded from the image and used to mark the "unsafe" page
frames. Next, we try to allocate as many free highmem page frames as needed to
load all of the image data that had been in the highmem before the suspend
and we allocate so many free "normal" page frames that the total number of
allocated free pages (highmem and "normal") is equal to the size of the
image. While doing this we have to make sure that there will be some extra
free "normal" and "safe" page frames for two lists of PBEs constructed
later.
Now, the image data are loaded, if possible, into their "original" page
frames. The image data that cannot be written into their "original" page
frames are loaded into "safe" page frames and their "original" kernel
virtual addresses, as well as the addresses of the "safe" pages containing
their copies, are stored in one of two lists of PBEs.
One list of PBEs is for the copies of "normal" suspend pages (ie. "normal"
pages that were saveable during the suspend) and it is used in the same way
as previously (ie. by the architecture-dependent parts of swsusp). The
other list of PBEs is for the copies of highmem suspend pages. The pages
in this list are restored (in a reversible way) right before the
arch-dependent code is called.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-07 07:34:18 +03:00
|
|
|
|
|
|
|
try_to_freeze();
|
|
|
|
|
2009-01-07 01:40:33 +03:00
|
|
|
/*
|
|
|
|
* Fragmentation may mean that the system cannot be
|
|
|
|
* rebalanced for high-order allocations in all zones.
|
|
|
|
* At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
|
|
|
|
* it means the zones have been fully scanned and are still
|
|
|
|
* not balanced. For high-order allocations, there is
|
|
|
|
* little point trying all over again as kswapd may
|
|
|
|
* loop forever.
|
|
|
|
*
|
|
|
|
* Instead, recheck all watermarks at order-0 as they
|
|
|
|
* are the most important. If watermarks are ok, kswapd will go
|
|
|
|
* back to sleep. High-order users can still perform direct
|
|
|
|
* reclaim if they wish.
|
|
|
|
*/
|
|
|
|
if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
|
|
|
|
order = sc.order = 0;
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
goto loop_again;
|
|
|
|
}
|
|
|
|
|
2009-01-07 01:40:01 +03:00
|
|
|
return sc.nr_reclaimed;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The background pageout daemon, started as a kernel thread
|
2008-10-19 07:26:32 +04:00
|
|
|
* from the init process.
|
2005-04-17 02:20:36 +04:00
|
|
|
*
|
|
|
|
* This basically trickles out pages so that we have _some_
|
|
|
|
* free memory available even if there is no other activity
|
|
|
|
* that frees anything up. This is needed for things like routing
|
|
|
|
* etc, where we otherwise might have all activity going on in
|
|
|
|
* asynchronous contexts that cannot page things out.
|
|
|
|
*
|
|
|
|
* If there are applications that are active memory-allocators
|
|
|
|
* (most normal use), this basically shouldn't matter.
|
|
|
|
*/
|
|
|
|
static int kswapd(void *p)
|
|
|
|
{
|
|
|
|
unsigned long order;
|
|
|
|
pg_data_t *pgdat = (pg_data_t*)p;
|
|
|
|
struct task_struct *tsk = current;
|
|
|
|
DEFINE_WAIT(wait);
|
|
|
|
struct reclaim_state reclaim_state = {
|
|
|
|
.reclaimed_slab = 0,
|
|
|
|
};
|
2009-03-13 07:19:46 +03:00
|
|
|
const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
lockdep: annotate reclaim context (__GFP_NOFS)
Here is another version, with the incremental patch rolled up, and
added reclaim context annotation to kswapd, and allocation tracing
to slab allocators (which may only ever reach the page allocator
in rare cases, so it is good to put annotations here too).
Haven't tested this version as such, but it should be getting closer
to merge worthy ;)
--
After noticing some code in mm/filemap.c accidentally performing a __GFP_FS
allocation when it should not have been, I thought it might be a good idea to
try to catch this kind of thing with lockdep.
I coded up a little idea that seems to work. Unfortunately the system has to
actually be in __GFP_FS page reclaim, then take the lock, before it will mark
it. But at least that might still be some orders of magnitude more common
(and more debuggable) than an actual deadlock condition, so we have some
improvement I hope (the concept is no less complete than discovery of a lock's
interrupt contexts).
I guess we could even do the same thing with __GFP_IO (normal reclaim), and
even GFP_NOIO locks too... but filesystems will have the most locks and fiddly
code paths, so let's start there and see how it goes.
It *seems* to work. I did a quick test.
=================================
[ INFO: inconsistent lock state ]
2.6.28-rc6-00007-ged31348-dirty #26
---------------------------------
inconsistent {in-reclaim-W} -> {ov-reclaim-W} usage.
modprobe/8526 [HC0[0]:SC0[0]:HE1:SE1] takes:
(testlock){--..}, at: [<ffffffffa0020055>] brd_init+0x55/0x216 [brd]
{in-reclaim-W} state was registered at:
[<ffffffff80267bdb>] __lock_acquire+0x75b/0x1a60
[<ffffffff80268f71>] lock_acquire+0x91/0xc0
[<ffffffff8070f0e1>] mutex_lock_nested+0xb1/0x310
[<ffffffffa002002b>] brd_init+0x2b/0x216 [brd]
[<ffffffff8020903b>] _stext+0x3b/0x170
[<ffffffff80272ebf>] sys_init_module+0xaf/0x1e0
[<ffffffff8020c3fb>] system_call_fastpath+0x16/0x1b
[<ffffffffffffffff>] 0xffffffffffffffff
irq event stamp: 3929
hardirqs last enabled at (3929): [<ffffffff8070f2b5>] mutex_lock_nested+0x285/0x310
hardirqs last disabled at (3928): [<ffffffff8070f089>] mutex_lock_nested+0x59/0x310
softirqs last enabled at (3732): [<ffffffff8061f623>] sk_filter+0x83/0xe0
softirqs last disabled at (3730): [<ffffffff8061f5b6>] sk_filter+0x16/0xe0
other info that might help us debug this:
1 lock held by modprobe/8526:
#0: (testlock){--..}, at: [<ffffffffa0020055>] brd_init+0x55/0x216 [brd]
stack backtrace:
Pid: 8526, comm: modprobe Not tainted 2.6.28-rc6-00007-ged31348-dirty #26
Call Trace:
[<ffffffff80265483>] print_usage_bug+0x193/0x1d0
[<ffffffff80266530>] mark_lock+0xaf0/0xca0
[<ffffffff80266735>] mark_held_locks+0x55/0xc0
[<ffffffffa0020000>] ? brd_init+0x0/0x216 [brd]
[<ffffffff802667ca>] trace_reclaim_fs+0x2a/0x60
[<ffffffff80285005>] __alloc_pages_internal+0x475/0x580
[<ffffffff8070f29e>] ? mutex_lock_nested+0x26e/0x310
[<ffffffffa0020000>] ? brd_init+0x0/0x216 [brd]
[<ffffffffa002006a>] brd_init+0x6a/0x216 [brd]
[<ffffffffa0020000>] ? brd_init+0x0/0x216 [brd]
[<ffffffff8020903b>] _stext+0x3b/0x170
[<ffffffff8070f8b9>] ? mutex_unlock+0x9/0x10
[<ffffffff8070f83d>] ? __mutex_unlock_slowpath+0x10d/0x180
[<ffffffff802669ec>] ? trace_hardirqs_on_caller+0x12c/0x190
[<ffffffff80272ebf>] sys_init_module+0xaf/0x1e0
[<ffffffff8020c3fb>] system_call_fastpath+0x16/0x1b
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-01-21 10:12:39 +03:00
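As a hedged usage sketch (both helpers come from this lockdep work): a thread that performs reclaim brackets the work with the annotation so lockdep can correlate locks taken under reclaim with allocations made while holding those locks.

	lockdep_set_current_reclaim_state(GFP_KERNEL);	/* reclaim may touch fs/io locks */
	/* ... reclaim work ... */
	lockdep_clear_current_reclaim_state();

kswapd below only sets the state once, for the lifetime of the thread.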
|
|
|
lockdep_set_current_reclaim_state(GFP_KERNEL);
|
|
|
|
|
2009-01-01 02:42:29 +03:00
|
|
|
if (!cpumask_empty(cpumask))
|
2008-04-05 05:11:10 +04:00
|
|
|
set_cpus_allowed_ptr(tsk, cpumask);
|
2005-04-17 02:20:36 +04:00
|
|
|
current->reclaim_state = &reclaim_state;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Tell the memory management that we're a "memory allocator",
|
|
|
|
* and that if we need more memory we should get access to it
|
|
|
|
* regardless (see "__alloc_pages()"). "kswapd" should
|
|
|
|
* never get caught in the normal page freeing logic.
|
|
|
|
*
|
|
|
|
* (Kswapd normally doesn't need memory anyway, but sometimes
|
|
|
|
* you need a small amount of memory in order to be able to
|
|
|
|
* page out something else, and this flag essentially protects
|
|
|
|
* us from recursively trying to free more memory as we're
|
|
|
|
* trying to free the first piece of memory in the first place).
|
|
|
|
*/
|
2006-01-08 12:00:47 +03:00
|
|
|
tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
|
2007-07-17 15:03:35 +04:00
|
|
|
set_freezable();
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
order = 0;
|
|
|
|
for ( ; ; ) {
|
|
|
|
unsigned long new_order;
|
2009-12-15 04:58:33 +03:00
|
|
|
int ret;
|
2005-06-25 10:13:50 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
|
|
|
|
new_order = pgdat->kswapd_max_order;
|
|
|
|
pgdat->kswapd_max_order = 0;
|
|
|
|
if (order < new_order) {
|
|
|
|
/*
|
|
|
|
* Don't sleep if someone wants a larger 'order'
|
|
|
|
* allocation
|
|
|
|
*/
|
|
|
|
order = new_order;
|
|
|
|
} else {
|
2009-12-15 04:58:53 +03:00
|
|
|
if (!freezing(current) && !kthread_should_stop()) {
|
|
|
|
long remaining = 0;
|
|
|
|
|
|
|
|
/* Try to sleep for a short interval */
|
2009-12-15 04:58:55 +03:00
|
|
|
if (!sleeping_prematurely(pgdat, order, remaining)) {
|
2009-12-15 04:58:53 +03:00
|
|
|
remaining = schedule_timeout(HZ/10);
|
|
|
|
finish_wait(&pgdat->kswapd_wait, &wait);
|
|
|
|
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* After a short sleep, check if it was a
|
|
|
|
* premature sleep. If not, then go fully
|
|
|
|
* to sleep until explicitly woken up
|
|
|
|
*/
|
2009-12-15 04:58:55 +03:00
|
|
|
if (!sleeping_prematurely(pgdat, order, remaining))
|
2009-12-15 04:58:53 +03:00
|
|
|
schedule();
|
|
|
|
else {
|
|
|
|
if (remaining)
|
2009-12-15 04:58:55 +03:00
|
|
|
count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
|
2009-12-15 04:58:53 +03:00
|
|
|
else
|
2009-12-15 04:58:55 +03:00
|
|
|
count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
|
2009-12-15 04:58:53 +03:00
|
|
|
}
|
|
|
|
}
|
2007-05-07 01:50:48 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
order = pgdat->kswapd_max_order;
|
|
|
|
}
|
|
|
|
finish_wait(&pgdat->kswapd_wait, &wait);
|
|
|
|
|
2009-12-15 04:58:33 +03:00
|
|
|
ret = try_to_freeze();
|
|
|
|
if (kthread_should_stop())
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We can speed up thawing tasks if we don't call balance_pgdat
|
|
|
|
* after returning from the refrigerator
|
|
|
|
*/
|
|
|
|
if (!ret)
|
2007-05-07 01:50:48 +04:00
|
|
|
balance_pgdat(pgdat, order);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A zone is low on free memory, so wake its kswapd task to service it.
|
|
|
|
*/
|
|
|
|
void wakeup_kswapd(struct zone *zone, int order)
|
|
|
|
{
|
|
|
|
pg_data_t *pgdat;
|
|
|
|
|
2006-01-06 11:11:15 +03:00
|
|
|
if (!populated_zone(zone))
|
2005-04-17 02:20:36 +04:00
|
|
|
return;
|
|
|
|
|
|
|
|
pgdat = zone->zone_pgdat;
|
2009-06-17 02:32:12 +04:00
|
|
|
if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
|
2005-04-17 02:20:36 +04:00
|
|
|
return;
|
|
|
|
if (pgdat->kswapd_max_order < order)
|
|
|
|
pgdat->kswapd_max_order = order;
|
[PATCH] cpuset: rework cpuset_zone_allowed api
Elaborate the API for calling cpuset_zone_allowed(), so that users have to
explicitly choose between the two variants:
cpuset_zone_allowed_hardwall()
cpuset_zone_allowed_softwall()
Until now, whether or not you got the hardwall flavor depended solely on
whether or not you or'd in the __GFP_HARDWALL gfp flag to the gfp_mask
argument.
If you didn't specify __GFP_HARDWALL, you implicitly got the softwall
version.
Unfortunately, this meant that users would end up with the softwall version
without thinking about it. Since only the softwall version might sleep,
this led to bugs with possible sleeping in interrupt context on more than
one occasion.
The hardwall version requires that the current task's mems_allowed allows
the node of the specified zone (or that you're in interrupt or that
__GFP_THISNODE is set or that you're on a one cpuset system.)
The softwall version, depending on the gfp_mask, might allow a node if it
was allowed in the nearest enclosing cpuset marked mem_exclusive (which
requires taking the cpuset lock 'callback_mutex' to evaluate.)
This patch removes the cpuset_zone_allowed() call, and forces the caller to
explicitly choose between the hardwall and the softwall case.
If the caller wants the gfp_mask to determine this choice, they should (1)
be sure they can sleep or that __GFP_HARDWALL is set, and (2) invoke the
cpuset_zone_allowed_softwall() routine.
This adds another 100 or 200 bytes to the kernel text space, due to the few
lines of nearly duplicate code at the top of both cpuset_zone_allowed_*
routines. It should save a few instructions executed for the calls that
turned into calls of cpuset_zone_allowed_hardwall, thanks to not having to
set (before the call) then check (within the call) the __GFP_HARDWALL flag.
For the most critical call, from get_page_from_freelist(), the same
instructions are executed as before -- the old cpuset_zone_allowed()
routine it used to call is the same code as the
cpuset_zone_allowed_softwall() routine that it calls now.
Not a perfect win, but it seems worth it, to reduce the chance of hitting a
sleeping-with-irqs-off complaint again.
Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-12-13 11:34:25 +03:00
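For contrast with the hardwall call used below, a hedged sketch of a softwall caller (illustrative zonelist walk; the wakeup path must use the hardwall variant because it cannot sleep):

	/* Softwall: may also accept a node allowed by the nearest
	 * enclosing mem_exclusive cpuset; evaluating that can take
	 * callback_mutex and sleep, so process context only. */
	if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
		continue;	/* skip zones outside this task's cpuset */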
|
|
|
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
|
2005-04-17 02:20:36 +04:00
|
|
|
return;
|
2005-09-13 12:25:07 +04:00
|
|
|
if (!waitqueue_active(&pgdat->kswapd_wait))
|
2005-04-17 02:20:36 +04:00
|
|
|
return;
|
2005-09-13 12:25:07 +04:00
|
|
|
wake_up_interruptible(&pgdat->kswapd_wait);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
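For context, a hedged sketch of the caller side (a hypothetical loop modelled on the allocator slow path of this era): before resorting to direct reclaim, the allocator wakes kswapd for every zone it may allocate from.

	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		wakeup_kswapd(zone, order);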
|
|
|
|
|
2009-09-22 04:01:42 +04:00
|
|
|
/*
|
|
|
|
* The reclaimable count should be mostly accurate.
|
|
|
|
* The less reclaimable pages may be
|
|
|
|
* - mlocked pages, which will be moved to unevictable list when encountered
|
|
|
|
* - mapped pages, which may require several passes to be reclaimed
|
|
|
|
* - dirty pages, which are not "instantly" reclaimable
|
|
|
|
*/
|
|
|
|
unsigned long global_reclaimable_pages(void)
|
2008-10-19 07:26:32 +04:00
|
|
|
{
|
2009-09-22 04:01:42 +04:00
|
|
|
int nr;
|
|
|
|
|
|
|
|
nr = global_page_state(NR_ACTIVE_FILE) +
|
|
|
|
global_page_state(NR_INACTIVE_FILE);
|
|
|
|
|
|
|
|
if (nr_swap_pages > 0)
|
|
|
|
nr += global_page_state(NR_ACTIVE_ANON) +
|
|
|
|
global_page_state(NR_INACTIVE_ANON);
|
|
|
|
|
|
|
|
return nr;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned long zone_reclaimable_pages(struct zone *zone)
|
|
|
|
{
|
|
|
|
int nr;
|
|
|
|
|
|
|
|
nr = zone_page_state(zone, NR_ACTIVE_FILE) +
|
|
|
|
zone_page_state(zone, NR_INACTIVE_FILE);
|
|
|
|
|
|
|
|
if (nr_swap_pages > 0)
|
|
|
|
nr += zone_page_state(zone, NR_ACTIVE_ANON) +
|
|
|
|
zone_page_state(zone, NR_INACTIVE_ANON);
|
|
|
|
|
|
|
|
return nr;
|
2008-10-19 07:26:32 +04:00
|
|
|
}
|
|
|
|
|
2009-05-25 00:16:31 +04:00
|
|
|
#ifdef CONFIG_HIBERNATION
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
vmscan: kill hibernation specific reclaim logic and unify it
shrink_all_zones() was introduced by commit d6277db4ab (swsusp: rework
memory shrinker) as a hibernation performance improvement, and
sc.swap_cluster_max was introduced by commit a06fe4d307 (Speed freeing
memory for suspend).
commit a06fe4d307 said
Without the patch:
Freed 14600 pages in 1749 jiffies = 32.61 MB/s (Anomolous!)
Freed 88563 pages in 14719 jiffies = 23.50 MB/s
Freed 205734 pages in 32389 jiffies = 24.81 MB/s
With the patch:
Freed 68252 pages in 496 jiffies = 537.52 MB/s
Freed 116464 pages in 569 jiffies = 798.54 MB/s
Freed 209699 pages in 705 jiffies = 1161.89 MB/s
At that time, the patch was well worth it. However, modern hardware
trends and recent VM improvements have eroded its value. For several
reasons, I think we should remove shrink_all_zones() entirely.
detail:
1) In the old days, shrink_zone()'s slowness was mainly caused by pointless
io-throttling when there was no I/O congestion,
but the current shrink_zone() is sane and not slow.
2) shrink_all_zones() tries to shrink all pages at once, but it doesn't work
well on NUMA systems.
example)
The system has 4GB of memory, each node has 2GB, and hibernation needs 1GB.
optimal)
steal 500MB from each node.
shrink_all_zones)
steal 1GB from node-0.
Oh, the cache balancing logic was broken. ;)
Unfortunately, desktop systems have moved to NUMA nowadays.
(Side note: if hibernation requires 2GB, shrink_all_zones() can never succeed
on the machine above.)
3) If the node has several pages with I/O in flight, shrink_all_zones()
produces pretty bad results.
scenario) hibernation needs 1GB
1) shrink_all_zones() tries to reclaim 1GB from Node-0,
2) but it only reclaims 990MB,
3) so, stupidly, shrink_all_zones() tries to reclaim 1GB from Node-1,
4) and it reclaims 990MB.
Oh well, it reclaimed nearly twice as much as required.
On the other hand, the current shrink_zone() has sane bail-out logic,
so it doesn't over-reclaim and we lose shrink_all_zones()'s risk.
4) The SplitLRU VM always keeps the active/inactive ratio very carefully. Shrinking
only the inactive list breaks that assumption; it creates unnecessary OOM risk and is obviously suboptimal.
Now shrink_all_memory() is only a wrapper around do_try_to_free_pages(),
which brings good reviewability and debuggability and solves the problems above.
Side note: unifying the reclaim logic has two good side effects.
- It fixes a recursive reclaim bug in shrink_all_memory():
it forgot to use PF_MEMALLOC, which meant the system could deadlock.
- shrink_all_memory() now has lockdep awareness, which brings good debuggability.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-12-15 04:59:12 +03:00
|
|
|
* Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
|
2006-06-23 13:03:18 +04:00
|
|
|
* freed pages.
|
|
|
|
*
|
|
|
|
* Rather than trying to age LRUs the aim is to preserve the overall
|
|
|
|
* LRU order by reclaiming preferentially
|
|
|
|
* inactive > active > active referenced > active mapped
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
2009-12-15 04:59:12 +03:00
|
|
|
unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2006-06-23 13:03:18 +04:00
|
|
|
struct reclaim_state reclaim_state;
|
|
|
|
struct scan_control sc = {
|
2009-12-15 04:59:12 +03:00
|
|
|
.gfp_mask = GFP_HIGHUSER_MOVABLE,
|
|
|
|
.may_swap = 1,
|
|
|
|
.may_unmap = 1,
|
2006-06-23 13:03:18 +04:00
|
|
|
.may_writepage = 1,
|
2009-12-15 04:59:12 +03:00
|
|
|
.nr_to_reclaim = nr_to_reclaim,
|
|
|
|
.hibernation_mode = 1,
|
|
|
|
.swappiness = vm_swappiness,
|
|
|
|
.order = 0,
|
2005-04-17 02:20:36 +04:00
|
|
|
};
|
2009-12-15 04:59:12 +03:00
|
|
|
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
|
|
|
|
struct task_struct *p = current;
|
|
|
|
unsigned long nr_reclaimed;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-12-15 04:59:12 +03:00
|
|
|
p->flags |= PF_MEMALLOC;
|
|
|
|
lockdep_set_current_reclaim_state(sc.gfp_mask);
|
|
|
|
reclaim_state.reclaimed_slab = 0;
|
|
|
|
p->reclaim_state = &reclaim_state;
|
2006-06-23 13:03:18 +04:00
|
|
|
|
2009-12-15 04:59:12 +03:00
|
|
|
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
|
2009-04-01 02:19:34 +04:00
|
|
|
|
2009-12-15 04:59:12 +03:00
|
|
|
p->reclaim_state = NULL;
|
|
|
|
lockdep_clear_current_reclaim_state();
|
|
|
|
p->flags &= ~PF_MEMALLOC;
|
2006-06-23 13:03:18 +04:00
|
|
|
|
2009-12-15 04:59:12 +03:00
|
|
|
return nr_reclaimed;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2009-05-25 00:16:31 +04:00
|
|
|
#endif /* CONFIG_HIBERNATION */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* It's optimal to keep kswapds on the same CPUs as their memory, but
|
|
|
|
not required for correctness. So if the last cpu in a node goes
|
|
|
|
away, we get changed to run anywhere: as the first one comes back,
|
|
|
|
restore their cpu bindings. */
|
2006-06-27 13:54:07 +04:00
|
|
|
static int __devinit cpu_callback(struct notifier_block *nfb,
|
2006-03-22 11:08:19 +03:00
|
|
|
unsigned long action, void *hcpu)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2007-10-16 12:25:40 +04:00
|
|
|
int nid;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-05-09 13:35:10 +04:00
|
|
|
if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
|
2007-10-16 12:25:40 +04:00
|
|
|
for_each_node_state(nid, N_HIGH_MEMORY) {
|
2008-04-05 05:11:10 +04:00
|
|
|
pg_data_t *pgdat = NODE_DATA(nid);
|
2009-03-13 07:19:46 +03:00
|
|
|
const struct cpumask *mask;
|
|
|
|
|
|
|
|
mask = cpumask_of_node(pgdat->node_id);
|
2008-04-05 05:11:10 +04:00
|
|
|
|
2009-01-01 02:42:24 +03:00
|
|
|
if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
|
2005-04-17 02:20:36 +04:00
|
|
|
/* One of our CPUs online: restore mask */
|
2008-04-05 05:11:10 +04:00
|
|
|
set_cpus_allowed_ptr(pgdat->kswapd, mask);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
|
|
|
|
2006-06-27 13:53:33 +04:00
|
|
|
/*
|
|
|
|
* This kswapd start function will be called by init and node-hot-add.
|
|
|
|
* On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
|
|
|
|
*/
|
|
|
|
int kswapd_run(int nid)
|
|
|
|
{
|
|
|
|
pg_data_t *pgdat = NODE_DATA(nid);
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
if (pgdat->kswapd)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
|
|
|
|
if (IS_ERR(pgdat->kswapd)) {
|
|
|
|
/* failure at boot is fatal */
|
|
|
|
BUG_ON(system_state == SYSTEM_BOOTING);
|
|
|
|
printk(KERN_ERR "Failed to start kswapd on node %d\n", nid);
|
|
|
|
ret = -1;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-12-15 04:58:33 +03:00
|
|
|
/*
|
|
|
|
* Called by memory hotplug when all memory in a node is offlined.
|
|
|
|
*/
|
|
|
|
void kswapd_stop(int nid)
|
|
|
|
{
|
|
|
|
struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
|
|
|
|
|
|
|
|
if (kswapd)
|
|
|
|
kthread_stop(kswapd);
|
|
|
|
}
|
|
|
|
|
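A brief usage note: memory hotplug is expected to pair these two calls per node. The condition names below are hypothetical, a sketch of the pairing rather than real hotplug code:

	/* Sketch: per-node pairing from a memory hotplug path (illustrative). */
	if (node_gained_first_memory)		/* hypothetical condition */
		kswapd_run(nid);		/* start the node's kswapd */
	else if (node_lost_last_memory)		/* hypothetical condition */
		kswapd_stop(nid);		/* all of the node's memory is offline */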
2005-04-17 02:20:36 +04:00
|
|
|
static int __init kswapd_init(void)
|
|
|
|
{
|
2006-06-27 13:53:33 +04:00
|
|
|
int nid;
|
2006-03-22 11:08:19 +03:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
swap_setup();
|
2007-10-16 12:25:31 +04:00
|
|
|
for_each_node_state(nid, N_HIGH_MEMORY)
|
2006-06-27 13:53:33 +04:00
|
|
|
kswapd_run(nid);
|
2005-04-17 02:20:36 +04:00
|
|
|
hotcpu_notifier(cpu_callback, 0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
module_init(kswapd_init)
|
2006-01-19 04:42:31 +03:00
|
|
|
|
|
|
|
#ifdef CONFIG_NUMA
|
|
|
|
/*
|
|
|
|
* Zone reclaim mode
|
|
|
|
*
|
|
|
|
* If non-zero call zone_reclaim when the number of free pages falls below
|
|
|
|
* the watermarks.
|
|
|
|
*/
|
|
|
|
int zone_reclaim_mode __read_mostly;
|
|
|
|
|
2006-02-01 14:05:34 +03:00
|
|
|
#define RECLAIM_OFF 0
|
2008-07-30 09:33:41 +04:00
|
|
|
#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
|
2006-02-01 14:05:34 +03:00
|
|
|
#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
|
|
|
|
#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
|
|
|
|
|
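zone_reclaim_mode is a bitmask of the flags above. As an illustration (a sketch, not code from this file), a sysctl value of 3 combines RECLAIM_ZONE and RECLAIM_WRITE, allowing dirty pages to be written out during zone reclaim while mapped pages are left alone; the decode mirrors the scan_control setup in __zone_reclaim() below:

	/* Sketch: decoding zone_reclaim_mode == RECLAIM_ZONE | RECLAIM_WRITE (3). */
	int mode = RECLAIM_ZONE | RECLAIM_WRITE;
	int may_writepage = !!(mode & RECLAIM_WRITE);	/* 1: may write out dirty pages */
	int may_unmap = !!(mode & RECLAIM_SWAP);	/* 0: mapped pages are skipped */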
2006-02-01 14:05:32 +03:00
|
|
|
/*
|
|
|
|
* Priority for ZONE_RECLAIM. This determines the fraction of pages
|
|
|
|
* of a node considered for each zone_reclaim. 4 scans 1/16th of
|
|
|
|
* a zone.
|
|
|
|
*/
|
|
|
|
#define ZONE_RECLAIM_PRIORITY 4
|
|
|
|
|
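The fraction follows from reclaim's usual scaling of roughly lru_pages >> priority per pass, so counting down from 4 scans 1/16, 1/8, 1/4, 1/2 and finally the whole zone. A sketch of the arithmetic, with an illustrative LRU size:

	/* Sketch: per-pass scan targets for a zone with 1M LRU pages. */
	unsigned long lru_pages = 1UL << 20, scan;
	int priority;

	for (priority = ZONE_RECLAIM_PRIORITY; priority >= 0; priority--)
		scan = lru_pages >> priority;	/* 64K, 128K, 256K, 512K, 1M */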
2006-07-03 11:24:13 +04:00
|
|
|
/*
|
|
|
|
* Percentage of pages in a zone that must be unmapped for zone_reclaim to
|
|
|
|
* occur.
|
|
|
|
*/
|
|
|
|
int sysctl_min_unmapped_ratio = 1;
|
|
|
|
|
2006-09-26 10:31:52 +04:00
|
|
|
/*
|
|
|
|
* If the number of slab pages in a zone grows beyond this percentage then
|
|
|
|
* slab reclaim needs to occur.
|
|
|
|
*/
|
|
|
|
int sysctl_min_slab_ratio = 5;
|
|
|
|
|
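These ratios are turned into the per-zone page thresholds consulted below (zone->min_unmapped_pages and zone->min_slab_pages) by sysctl handlers outside this file; roughly, as a sketch of that conversion:

	/* Sketch: percentage ratios become per-zone page thresholds. */
	zone->min_unmapped_pages = (zone->present_pages * sysctl_min_unmapped_ratio) / 100;
	zone->min_slab_pages = (zone->present_pages * sysctl_min_slab_ratio) / 100;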
vmscan: properly account for the number of page cache pages zone_reclaim() can reclaim
A bug was brought to my attention against a distro kernel, but it affects
mainline, and I believe problems like this have been reported in various
guises on the mailing lists, although I don't have specific examples at the
moment.
The reported problem was that malloc() stalled for a long time (minutes in
some cases) if a large tmpfs mount was occupying a large percentage of
memory overall. The pages did not get cleaned or reclaimed by
zone_reclaim() because the zone_reclaim_mode was unsuitable, but the lists
are uselessly scanned frequently, making the CPU spin at near 100%.
This patchset intends to address that bug and bring the behaviour of
zone_reclaim() more in line with the expectations that emerged during the
investigation. It is based on top of mmotm and takes advantage of
Kosaki's work with respect to zone_reclaim().
Patch 1 fixes the heuristics that zone_reclaim() uses to determine if the
scan should go ahead. The broken heuristic is what was causing the
malloc() stall, as it uselessly scanned the LRU constantly. Currently,
zone_reclaim assumes zone_reclaim_mode is 1, and historically it
could not deal with tmpfs pages at all. This fixes up the heuristic so
that an unnecessary scan is more likely to be correctly avoided.
Patch 2 notes that zone_reclaim() returning a failure automatically means
the zone is marked full. This is not always true. It could have
failed because the GFP mask or zone_reclaim_mode were unsuitable.
Patch 3 introduces a counter zreclaim_failed that will increment each
time the zone_reclaim scan-avoidance heuristics fail. If that
counter is rapidly increasing, then zone_reclaim_mode should be
set to 0 as a temporary resolution and a bug reported, because
the scan-avoidance heuristic is still broken.
This patch:
On NUMA machines, the administrator can configure zone_reclaim_mode, which
is a more targeted form of direct reclaim. On machines with large NUMA
distances, for example, zone_reclaim_mode defaults to 1, meaning that
clean unmapped pages will be reclaimed if the zone watermarks are not
being met.
There is a heuristic that determines if the scan is worthwhile, but the
problem is that the heuristic is not being properly applied and is
basically assuming zone_reclaim_mode is 1 if it is enabled. The lack of
proper detection can manifest as high CPU usage as the LRU list is scanned
uselessly.
Historically, once enabled, it depended on NR_FILE_PAGES, which may
include swapcache pages that the reclaim_mode cannot deal with. Patch
vmscan-change-the-number-of-the-unmapped-files-in-zone-reclaim.patch by
Kosaki Motohiro noted that zone_page_state(zone, NR_FILE_PAGES) included
pages that were not file-backed, such as swapcache, and made a calculation
based on the inactive, active and mapped files. This is far superior when
zone_reclaim==1, but if RECLAIM_SWAP is set, then NR_FILE_PAGES is a
reasonable starting figure.
This patch alters how zone_reclaim() works out how many pages it might be
able to reclaim given the current reclaim_mode. If RECLAIM_SWAP is set in
the reclaim_mode, it will consider NR_FILE_PAGES as potential
candidates; otherwise it uses the file LRU counts (NR_ACTIVE_FILE +
NR_INACTIVE_FILE) minus NR_FILE_MAPPED to discount
swapcache and other non-file-backed pages. If RECLAIM_WRITE is not set,
then NR_FILE_DIRTY pages are not candidates. If RECLAIM_SWAP is
not set, then NR_FILE_MAPPED pages are not.
[kosaki.motohiro@jp.fujitsu.com: Estimate unmapped pages minus tmpfs pages]
[fengguang.wu@intel.com: Fix underflow problem in Kosaki's estimate]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-06-17 02:33:20 +04:00
|
|
|
static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
|
|
|
|
{
|
|
|
|
unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
|
|
|
|
unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
|
|
|
|
zone_page_state(zone, NR_ACTIVE_FILE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* It's possible for there to be more file mapped pages than
|
|
|
|
* accounted for by the pages on the file LRU lists because
|
|
|
|
* tmpfs pages accounted for as ANON can also be FILE_MAPPED
|
|
|
|
*/
|
|
|
|
return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Work out how many page cache pages we can reclaim in this reclaim_mode */
|
|
|
|
static long zone_pagecache_reclaimable(struct zone *zone)
|
|
|
|
{
|
|
|
|
long nr_pagecache_reclaimable;
|
|
|
|
long delta = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If RECLAIM_SWAP is set, then all file pages are considered
|
|
|
|
* potentially reclaimable. Otherwise, we have to worry about
|
|
|
|
* pages like swapcache and zone_unmapped_file_pages() provides
|
|
|
|
* a better estimate
|
|
|
|
*/
|
|
|
|
if (zone_reclaim_mode & RECLAIM_SWAP)
|
|
|
|
nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
|
|
|
|
else
|
|
|
|
nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
|
|
|
|
|
|
|
|
/* If we can't clean pages, remove dirty pages from consideration */
|
|
|
|
if (!(zone_reclaim_mode & RECLAIM_WRITE))
|
|
|
|
delta += zone_page_state(zone, NR_FILE_DIRTY);
|
|
|
|
|
|
|
|
/* Watch for any possible underflows due to delta */
|
|
|
|
if (unlikely(delta > nr_pagecache_reclaimable))
|
|
|
|
delta = nr_pagecache_reclaimable;
|
|
|
|
|
|
|
|
return nr_pagecache_reclaimable - delta;
|
|
|
|
}
|
|
|
|
|
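For a concrete example: with zone_reclaim_mode == 1 (RECLAIM_ZONE only, so neither RECLAIM_SWAP nor RECLAIM_WRITE is set), a zone with 10000 pages on its file LRUs, 3000 of them mapped and 500 dirty, yields (10000 - 3000) - 500 = 6500 reclaimable page cache pages.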
2006-01-19 04:42:31 +03:00
|
|
|
/*
|
|
|
|
* Try to free up some pages from this zone through reclaim.
|
|
|
|
*/
|
2006-03-22 11:08:18 +03:00
|
|
|
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
|
2006-01-19 04:42:31 +03:00
|
|
|
{
|
2006-03-22 11:08:22 +03:00
|
|
|
/* Minimum pages needed in order to stay on node */
|
2006-03-22 11:08:19 +03:00
|
|
|
const unsigned long nr_pages = 1 << order;
|
2006-01-19 04:42:31 +03:00
|
|
|
struct task_struct *p = current;
|
|
|
|
struct reclaim_state reclaim_state;
|
2006-03-22 11:08:18 +03:00
|
|
|
int priority;
|
2006-03-22 11:08:18 +03:00
|
|
|
struct scan_control sc = {
|
|
|
|
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
|
2009-04-01 02:19:30 +04:00
|
|
|
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
|
2009-04-21 23:24:57 +04:00
|
|
|
.may_swap = 1,
|
2009-12-15 04:59:10 +03:00
|
|
|
.nr_to_reclaim = max_t(unsigned long, nr_pages,
|
|
|
|
SWAP_CLUSTER_MAX),
|
2006-03-22 11:08:18 +03:00
|
|
|
.gfp_mask = gfp_mask,
|
2006-06-23 13:03:18 +04:00
|
|
|
.swappiness = vm_swappiness,
|
2009-04-01 02:19:38 +04:00
|
|
|
.order = order,
|
2006-03-22 11:08:18 +03:00
|
|
|
};
|
2006-09-26 10:31:53 +04:00
|
|
|
unsigned long slab_reclaimable;
|
2006-01-19 04:42:31 +03:00
|
|
|
|
|
|
|
disable_swap_token();
|
|
|
|
cond_resched();
|
2006-02-25 00:04:22 +03:00
|
|
|
/*
|
|
|
|
* We need to be able to allocate from the reserves for RECLAIM_SWAP
|
|
|
|
* and we also need to be able to write out pages for RECLAIM_WRITE
|
|
|
|
* and RECLAIM_SWAP.
|
|
|
|
*/
|
|
|
|
p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
|
2010-03-06 00:41:47 +03:00
|
|
|
lockdep_set_current_reclaim_state(gfp_mask);
|
2006-01-19 04:42:31 +03:00
|
|
|
reclaim_state.reclaimed_slab = 0;
|
|
|
|
p->reclaim_state = &reclaim_state;
|
2006-02-01 14:05:29 +03:00
|
|
|
|
2009-06-17 02:33:20 +04:00
|
|
|
if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
|
2006-09-26 10:31:52 +04:00
|
|
|
/*
|
|
|
|
* Free memory by calling shrink zone with increasing
|
|
|
|
* priorities until we have enough memory freed.
|
|
|
|
*/
|
|
|
|
priority = ZONE_RECLAIM_PRIORITY;
|
|
|
|
do {
|
[PATCH] vmscan: Fix temp_priority race
The temp_priority field in zone is racy, as we can walk through a reclaim
path, and just before we copy it into prev_priority, it can be overwritten
(say with DEF_PRIORITY) by another reclaimer.
The same bug is contained in both try_to_free_pages and balance_pgdat, but
it is fixed slightly differently. In balance_pgdat, we keep a separate
priority record per zone in a local array. In try_to_free_pages there is
no need to do this, as the priority level is the same for all zones that we
reclaim from.
Impact of this bug is that temp_priority is copied into prev_priority, and
setting this artificially high causes reclaimers to set distress
artificially low. They then fail to reclaim mapped pages, when they are,
in fact, under severe memory pressure (their priority may be as low as 0).
This causes the OOM killer to fire incorrectly.
From: Andrew Morton <akpm@osdl.org>
__zone_reclaim() isn't modifying zone->prev_priority. But zone->prev_priority
is used in the decision whether or not to bring mapped pages onto the inactive
list. Hence there's a risk here that __zone_reclaim() will fail because
zone->prev_priority is large (ie: low urgency) and lots of mapped pages end up
stuck on the active list.
Fix that up by decreasing (ie making more urgent) zone->prev_priority as
__zone_reclaim() scans the zone's pages.
This bug perhaps explains why ZONE_RECLAIM_PRIORITY was created. It should be
possible to remove that now, and to just start out at DEF_PRIORITY?
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-10-28 21:38:24 +04:00
|
|
|
note_zone_scanning_priority(zone, priority);
|
vmscan: bail out of direct reclaim after swap_cluster_max pages
When the VM is under pressure, it can happen that several direct reclaim
processes are in the pageout code simultaneously. It also happens that
the reclaiming processes run into mostly referenced, mapped and dirty
pages in the first round.
This results in multiple direct reclaim processes having a lower
pageout priority, which corresponds to a higher target of pages to
scan.
This in turn can result in each direct reclaim process freeing
many pages. Together, they can end up freeing way too many pages.
This kicks useful data out of memory (in some cases more than half
of all memory is swapped out). It also impacts performance by
keeping tasks stuck in the pageout code for too long.
A 30% improvement in hackbench has been observed with this patch.
The fix is relatively simple: in shrink_zone() we can check how many
pages we have already freed, direct reclaim tasks break out of the
scanning loop if they have already freed enough pages and have reached
a lower priority level.
We do not break out of shrink_zone() when priority == DEF_PRIORITY,
to ensure that equal pressure is applied to every zone in the common
case.
However, in order to do this we do need to know how many pages we already
freed, so move nr_reclaimed into scan_control.
akpm: a historical interlude...
We tried this in 2004:
:commit e468e46a9bea3297011d5918663ce6d19094cf87
:Author: akpm <akpm>
:Date: Thu Jun 24 15:53:52 2004 +0000
:
:[PATCH] vmscan.c: dont reclaim too many pages
:
: The shrink_zone() logic can, under some circumstances, cause far too many
: pages to be reclaimed. Say, we're scanning at high priority and suddenly hit
: a large number of reclaimable pages on the LRU.
: Change things so we bale out when SWAP_CLUSTER_MAX pages have been reclaimed.
And we reverted it in 2006:
:commit 210fe530305ee50cd889fe9250168228b2994f32
:Author: Andrew Morton <akpm@osdl.org>
:Date: Fri Jan 6 00:11:14 2006 -0800
:
: [PATCH] vmscan: balancing fix
:
: Revert a patch which went into 2.6.8-rc1. The changelog for that patch was:
:
: The shrink_zone() logic can, under some circumstances, cause far too many
: pages to be reclaimed. Say, we're scanning at high priority and suddenly
: hit a large number of reclaimable pages on the LRU.
:
: Change things so we bale out when SWAP_CLUSTER_MAX pages have been
: reclaimed.
:
: Problem is, this change caused significant imbalance in inter-zone scan
: balancing by truncating scans of larger zones.
:
: Suppose, for example, ZONE_HIGHMEM is 10x the size of ZONE_NORMAL. The zone
: balancing algorithm would require that if we're scanning 100 pages of
: ZONE_HIGHMEM, we should scan 10 pages of ZONE_NORMAL. But this logic will
: cause the scanning of ZONE_HIGHMEM to bale out after only 32 pages are
: reclaimed. Thus effectively causing smaller zones to be scanned relatively
: harder than large ones.
:
: Now I need to remember what the workload was which caused me to write this
: patch originally, then fix it up in a different way...
And we haven't demonstrated that whatever problem caused that reversion is
not being reintroduced by this change in 2008.
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-01-07 01:40:01 +03:00
|
|
|
shrink_zone(priority, zone, &sc);
|
2006-09-26 10:31:52 +04:00
|
|
|
priority--;
|
2009-01-07 01:40:01 +03:00
|
|
|
} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
|
2006-09-26 10:31:52 +04:00
|
|
|
}
|
2006-02-01 14:05:29 +03:00
|
|
|
|
2006-09-26 10:31:53 +04:00
|
|
|
slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
|
|
|
|
if (slab_reclaimable > zone->min_slab_pages) {
|
2006-02-01 14:05:35 +03:00
|
|
|
/*
|
2006-03-22 11:08:22 +03:00
|
|
|
* shrink_slab() does not currently allow us to determine how
|
2006-09-26 10:31:52 +04:00
|
|
|
* many pages were freed in this zone. So we take the current
|
|
|
|
* number of slab pages and shake the slab until it is reduced
|
|
|
|
* by the same nr_pages that we used for reclaiming unmapped
|
|
|
|
* pages.
|
2006-02-01 14:05:35 +03:00
|
|
|
*
|
2006-09-26 10:31:52 +04:00
|
|
|
* Note that shrink_slab will free memory on all zones and may
|
|
|
|
* take a long time.
|
2006-02-01 14:05:35 +03:00
|
|
|
*/
|
2006-09-26 10:31:52 +04:00
|
|
|
while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
|
2006-09-26 10:31:53 +04:00
|
|
|
zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
|
|
|
|
slab_reclaimable - nr_pages)
|
2006-09-26 10:31:52 +04:00
|
|
|
;
|
2006-09-26 10:31:53 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Update nr_reclaimed by the number of slab pages we
|
|
|
|
* reclaimed from this zone.
|
|
|
|
*/
|
2009-01-07 01:40:01 +03:00
|
|
|
sc.nr_reclaimed += slab_reclaimable -
|
2006-09-26 10:31:53 +04:00
|
|
|
zone_page_state(zone, NR_SLAB_RECLAIMABLE);
|
2006-02-01 14:05:35 +03:00
|
|
|
}
|
|
|
|
|
2006-01-19 04:42:31 +03:00
|
|
|
p->reclaim_state = NULL;
|
2006-02-25 00:04:22 +03:00
|
|
|
current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
|
2010-03-06 00:41:47 +03:00
|
|
|
lockdep_clear_current_reclaim_state();
|
2009-01-07 01:40:01 +03:00
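A minimal stand-alone model of the bail-out described above, for illustration only: the struct layout and the helper name reclaim_should_bail() are assumptions, not the exact upstream diff, but the decision logic follows the changelog.

#define DEF_PRIORITY 12				/* initial scan priority */

struct scan_control {				/* trimmed, hypothetical layout */
	unsigned long nr_reclaimed;		/* freed so far; moved here by this patch */
	unsigned long swap_cluster_max;		/* per-pass reclaim target */
};

/* Nonzero when a direct reclaimer may stop scanning this zone. */
static int reclaim_should_bail(const struct scan_control *sc,
			       int priority, int is_kswapd)
{
	/* kswapd keeps going; DEF_PRIORITY keeps inter-zone pressure equal */
	if (is_kswapd || priority == DEF_PRIORITY)
		return 0;
	return sc->nr_reclaimed > sc->swap_cluster_max;
}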
|
|
|
return sc.nr_reclaimed >= nr_pages;
|
2006-01-19 04:42:31 +03:00
|
|
|
}
|
2006-03-22 11:08:18 +03:00
|
|
|
|
|
|
|
int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
|
|
|
|
{
|
|
|
|
int node_id;
|
2007-10-17 10:26:01 +04:00
|
|
|
int ret;
|
2006-03-22 11:08:18 +03:00
|
|
|
|
|
|
|
/*
|
2006-09-26 10:31:52 +04:00
|
|
|
* Zone reclaim reclaims unmapped file backed pages and
|
|
|
|
* slab pages if we are over the defined limits.
|
2006-06-30 12:55:37 +04:00
|
|
|
*
|
2006-07-03 11:24:13 +04:00
|
|
|
* A small portion of unmapped file backed pages is needed for
|
|
|
|
* file I/O otherwise pages read by file I/O will be immediately
|
|
|
|
* thrown out if the zone is overallocated. So we do not reclaim
|
|
|
|
* if less than a specified percentage of the zone is used by
|
|
|
|
* unmapped file backed pages.
|
2006-03-22 11:08:18 +03:00
|
|
|
*/
|
vmscan: properly account for the number of page cache pages zone_reclaim() can reclaim
A bug was brought to my attention against a distro kernel, but it affects
mainline, and I believe problems like this have been reported in various
guises on the mailing lists, although I don't have specific examples at the
moment.
The reported problem was that malloc() stalled for a long time (minutes in
some cases) if a large tmpfs mount was occupying a large percentage of
memory overall. The pages did not get cleaned or reclaimed by
zone_reclaim() because the zone_reclaim_mode was unsuitable, but the lists
were uselessly scanned frequently, making the CPU spin at near 100%.
This patchset intends to address that bug and bring the behaviour of
zone_reclaim() more in line with expectations which were noticed during
investigation. It is based on top of mmotm and takes advantage of
Kosaki's work with respect to zone_reclaim().
Patch 1 fixes the heuristics that zone_reclaim() uses to determine if the
scan should go ahead. The broken heuristic is what was causing the
malloc() stall as it uselessly scanned the LRU constantly. Currently,
zone_reclaim is assuming zone_reclaim_mode is 1 and historically it
could not deal with tmpfs pages at all. This fixes up the heuristic so
that an unnecessary scan is more likely to be correctly avoided.
Patch 2 notes that zone_reclaim() returning a failure automatically means
the zone is marked full. This is not always true. It could have
failed because the GFP mask or zone_reclaim_mode were unsuitable.
Patch 3 introduces a counter zreclaim_failed that will increment each
time the zone_reclaim scan-avoidance heuristics fail. If that
counter is rapidly increasing, then zone_reclaim_mode should be
set to 0 as a temporary workaround and a bug reported, because
the scan-avoidance heuristic is still broken.
This patch:
On NUMA machines, the administrator can configure zone_reclaim_mode, which
is a more targeted form of direct reclaim. On machines with large NUMA
distances, for example, zone_reclaim_mode defaults to 1, meaning that
clean unmapped pages will be reclaimed if the zone watermarks are not
being met.
There is a heuristic that determines if the scan is worthwhile, but the
problem is that the heuristic is not being properly applied and is
basically assuming zone_reclaim_mode is 1 if it is enabled. The lack of
proper detection can manifest as high CPU usage as the LRU list is scanned
uselessly.
Historically, once enabled, it depended on NR_FILE_PAGES, which may
include swapcache pages that the reclaim_mode cannot deal with. Patch
vmscan-change-the-number-of-the-unmapped-files-in-zone-reclaim.patch by
Kosaki Motohiro noted that zone_page_state(zone, NR_FILE_PAGES) included
pages that were not file-backed, such as swapcache, and made a calculation
based on the inactive, active and mapped files. This is far superior when
zone_reclaim==1, but if RECLAIM_SWAP is set, then NR_FILE_PAGES is a
reasonable starting figure.
This patch alters how zone_reclaim() works out how many pages it might be
able to reclaim given the current reclaim_mode. If RECLAIM_SWAP is set in
the reclaim_mode, it will either consider NR_FILE_PAGES as potential
candidates or else use NR_{IN}ACTIVE_PAGES - NR_FILE_MAPPED to discount
swapcache and other non-file-backed pages. If RECLAIM_WRITE is not set,
then NR_FILE_DIRTY number of pages are not candidates. If RECLAIM_SWAP is
not set, then NR_FILE_MAPPED are not.
[kosaki.motohiro@jp.fujitsu.com: Estimate unmapped pages minus tmpfs pages]
[fengguang.wu@intel.com: Fix underflow problem in Kosaki's estimate]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-06-17 02:33:20 +04:00
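A stand-alone sketch of the estimate this patch introduces; the counter struct below is a hypothetical stand-in for zone_page_state() lookups, while the RECLAIM_WRITE/RECLAIM_SWAP bits mirror the kernel's zone_reclaim_mode flags.

#define RECLAIM_WRITE	(1 << 1)	/* writeout of dirty pages allowed */
#define RECLAIM_SWAP	(1 << 2)	/* unmapping and swapping allowed */

struct zone_counters {			/* stand-in for zone_page_state() */
	long nr_file_pages;		/* page cache, includes swapcache */
	long nr_active_file;
	long nr_inactive_file;
	long nr_file_mapped;
	long nr_file_dirty;
};

static long pagecache_reclaimable(const struct zone_counters *z, int mode)
{
	long nr, delta = 0;

	if (mode & RECLAIM_SWAP) {
		/* swapcache pages can be unmapped and swapped out too */
		nr = z->nr_file_pages;
	} else {
		/* only unmapped file-backed pages are candidates */
		nr = z->nr_active_file + z->nr_inactive_file
						- z->nr_file_mapped;
		if (nr < 0)		/* Wu Fengguang's underflow fix */
			nr = 0;
	}

	/* without RECLAIM_WRITE, dirty page cache cannot be reclaimed */
	if (!(mode & RECLAIM_WRITE))
		delta = z->nr_file_dirty;

	if (delta > nr)			/* guard the final subtraction too */
		delta = nr;

	return nr - delta;
}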
|
|
|
if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
|
|
|
|
zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
|
2009-06-17 02:33:22 +04:00
|
|
|
return ZONE_RECLAIM_FULL;
|
2006-03-22 11:08:18 +03:00
|
|
|
|
2010-03-06 00:41:55 +03:00
|
|
|
if (zone->all_unreclaimable)
|
2009-06-17 02:33:22 +04:00
|
|
|
return ZONE_RECLAIM_FULL;
|
2007-10-17 10:26:01 +04:00
|
|
|
|
2006-03-22 11:08:18 +03:00
|
|
|
/*
|
2007-10-17 10:26:01 +04:00
|
|
|
* Do not scan if the allocation should not be delayed.
|
2006-03-22 11:08:18 +03:00
|
|
|
*/
|
2007-10-17 10:26:01 +04:00
|
|
|
if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
|
2009-06-17 02:33:22 +04:00
|
|
|
return ZONE_RECLAIM_NOSCAN;
|
2006-03-22 11:08:18 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Only run zone reclaim on the local zone or on zones that do not
|
|
|
|
* have associated processors. This will favor the local processor
|
|
|
|
* over remote processors and spread off node memory allocations
|
|
|
|
* as wide as possible.
|
|
|
|
*/
|
2006-09-26 10:31:55 +04:00
|
|
|
node_id = zone_to_nid(zone);
|
2007-10-16 12:25:36 +04:00
|
|
|
if (node_state(node_id, N_CPU) && node_id != numa_node_id())
|
2009-06-17 02:33:22 +04:00
|
|
|
return ZONE_RECLAIM_NOSCAN;
|
2007-10-17 10:26:01 +04:00
|
|
|
|
|
|
|
if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
|
2009-06-17 02:33:22 +04:00
|
|
|
return ZONE_RECLAIM_NOSCAN;
|
|
|
|
|
2007-10-17 10:26:01 +04:00
|
|
|
ret = __zone_reclaim(zone, gfp_mask, order);
|
|
|
|
zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
|
|
|
|
|
2009-06-17 02:33:23 +04:00
|
|
|
if (!ret)
|
|
|
|
count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
|
|
|
|
|
2007-10-17 10:26:01 +04:00
|
|
|
return ret;
|
2006-03-22 11:08:18 +03:00
|
|
|
}
|
2006-01-19 04:42:31 +03:00
|
|
|
#endif
|
Unevictable LRU Infrastructure
When the system contains lots of mlocked or otherwise unevictable pages,
the pageout code (kswapd) can spend lots of time scanning over these
pages. Worse still, the presence of lots of unevictable pages can confuse
kswapd into thinking that more aggressive pageout modes are required,
resulting in all kinds of bad behaviour.
Infrastructure to manage pages excluded from reclaim--i.e., hidden from
vmscan. Based on a patch by Larry Woodman of Red Hat. Reworked to
maintain "unevictable" pages on a separate per-zone LRU list, to "hide"
them from vmscan.
Kosaki Motohiro added the support for the memory controller unevictable
lru list.
Pages on the unevictable list have both PG_unevictable and PG_lru set.
Thus, PG_unevictable is analogous to and mutually exclusive with
PG_active--it specifies which LRU list the page is on.
The unevictable infrastructure is enabled by a new mm Kconfig option
[CONFIG_]UNEVICTABLE_LRU.
A new function 'page_evictable(page, vma)' in vmscan.c tests whether or
not a page may be evictable. Subsequent patches will add the various
!evictable tests. We'll want to keep these tests light-weight for use in
shrink_active_list() and, possibly, the fault path.
To avoid races between tasks putting pages [back] onto an LRU list and
tasks that might be moving the page from non-evictable to evictable state,
the new function 'putback_lru_page()' -- inverse to 'isolate_lru_page()'
-- tests the "evictability" of a page after placing it on the LRU, before
dropping the reference. If the page has become unevictable,
putback_lru_page() will redo the 'putback', thus moving the page to the
unevictable list. This way, we avoid "stranding" evictable pages on the
unevictable list.
[akpm@linux-foundation.org: fix fallout from out-of-order merge]
[riel@redhat.com: fix UNEVICTABLE_LRU and !PROC_PAGE_MONITOR build]
[nishimura@mxp.nes.nec.co.jp: remove redundant mapping check]
[kosaki.motohiro@jp.fujitsu.com: unevictable-lru-infrastructure: putback_lru_page()/unevictable page handling rework]
[kosaki.motohiro@jp.fujitsu.com: kill unnecessary lock_page() in vmscan.c]
[kosaki.motohiro@jp.fujitsu.com: revert migration change of unevictable lru infrastructure]
[kosaki.motohiro@jp.fujitsu.com: revert to unevictable-lru-infrastructure-kconfig-fix.patch]
[kosaki.motohiro@jp.fujitsu.com: restore patch failure of vmstat-unevictable-and-mlocked-pages-vm-events.patch]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Debugged-by: Benjamin Kidwell <benjkidwell@yahoo.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:39 +04:00
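The putback race described above is easiest to see in code. Below is a condensed sketch of putback_lru_page(), with the event counting and VM_BUG_ON checks trimmed; it assumes helpers of this era (page_lru_base_type(), lru_cache_add_lru(), add_page_to_unevictable_list()) and is not the exact upstream function.

void putback_lru_page(struct page *page)
{
	int lru;

redo:
	ClearPageUnevictable(page);
	if (page_evictable(page, NULL)) {
		/* back onto a regular LRU list */
		lru = page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/* hide the page from vmscan */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
	}

	/*
	 * The page may have become evictable while it was being added;
	 * an evictable page stranded on the unevictable list would never
	 * be freed, so test again and redo the putback if needed.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);		/* drop isolation's new ref */
			goto redo;
		}
	}

	put_page(page);		/* drop the caller's isolate_lru_page() ref */
}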
|
|
|
|
|
|
|
/*
|
|
|
|
* page_evictable - test whether a page is evictable
|
|
|
|
* @page: the page to test
|
|
|
|
* @vma: the VMA in which the page is or will be mapped, may be NULL
|
|
|
|
*
|
|
|
|
* Test whether page is evictable--i.e., should be placed on active/inactive
|
mlock: mlocked pages are unevictable
Make sure that mlocked pages also live on the unevictable LRU, so kswapd
will not scan them over and over again.
This is achieved through various strategies:
1) add yet another page flag--PG_mlocked--to indicate that
the page is locked for efficient testing in vmscan and,
optionally, fault path. This allows early culling of
unevictable pages, preventing them from getting to
page_referenced()/try_to_unmap(). Also allows separate
accounting of mlock'd pages, as Nick's original patch
did.
Note: Nick's original mlock patch used a PG_mlocked
flag. I had removed this in favor of the PG_unevictable
flag + an mlock_count [new page struct member]. I
restored the PG_mlocked flag to eliminate the new
count field.
2) add the mlock/unevictable infrastructure to mm/mlock.c,
with internal APIs in mm/internal.h. This is a rework
of Nick's original patch to these files, taking into
account that mlocked pages are now kept on unevictable
LRU list.
3) update vmscan.c:page_evictable() to check PageMlocked()
and, if vma passed in, the vm_flags. Note that the vma
will only be passed in for new pages in the fault path;
and then only if the "cull unevictable pages in fault
path" patch is included.
4) add try_to_unlock() to rmap.c to walk a page's rmap and
ClearPageMlocked() if no other vmas have it mlocked.
Reuses as much of try_to_unmap() as possible. This
effectively replaces the use of one of the lru list links
as an mlock count. If this mechanism lets pages in mlocked
vmas leak through w/o PG_mlocked set [I don't know that it
does], we should catch them later in try_to_unmap(). One
hopes this will be rare, as it will be relatively expensive.
Original mm/internal.h, mm/rmap.c and mm/mlock.c changes:
Signed-off-by: Nick Piggin <npiggin@suse.de>
splitlru: introduce __get_user_pages():
The new munlock processing needs GUP_FLAGS_IGNORE_VMA_PERMISSIONS,
because the current get_user_pages() can't grab PROT_NONE pages; therefore
PROT_NONE pages can't be munlocked.
[akpm@linux-foundation.org: fix this for pagemap-pass-mm-into-pagewalkers.patch]
[akpm@linux-foundation.org: untangle patch interdependencies]
[akpm@linux-foundation.org: fix things after out-of-order merging]
[hugh@veritas.com: fix page-flags mess]
[lee.schermerhorn@hp.com: fix munlock page table walk - now requires 'mm']
[kosaki.motohiro@jp.fujitsu.com: build fix]
[kosaki.motohiro@jp.fujitsu.com: fix truncate race and several comments]
[kosaki.motohiro@jp.fujitsu.com: splitlru: introduce __get_user_pages()]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:44 +04:00
|
|
|
* lists vs unevictable list. The vma argument is !NULL when called from the
|
|
|
|
 * fault path to determine how to instantiate a new page.
|
2008-10-19 07:26:39 +04:00
|
|
|
*
|
|
|
|
* Reasons page might not be evictable:
|
2008-10-19 07:26:42 +04:00
|
|
|
* (1) page's mapping marked unevictable
|
2008-10-19 07:26:44 +04:00
|
|
|
* (2) page is part of an mlocked VMA
|
2008-10-19 07:26:42 +04:00
|
|
|
*
|
2008-10-19 07:26:39 +04:00
|
|
|
*/
|
|
|
|
int page_evictable(struct page *page, struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
|
2008-10-19 07:26:42 +04:00
|
|
|
if (mapping_unevictable(page_mapping(page)))
|
|
|
|
return 0;
|
|
|
|
|
2008-10-19 07:26:44 +04:00
|
|
|
if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
|
|
|
|
return 0;
|
2008-10-19 07:26:39 +04:00
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
2008-10-19 07:26:43 +04:00
|
|
|
|
|
|
|
/**
|
|
|
|
* check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
|
|
|
|
* @page: page to check evictability and move to appropriate lru list
|
|
|
|
* @zone: zone page is in
|
|
|
|
*
|
|
|
|
* Checks a page for evictability and moves the page to the appropriate
|
|
|
|
* zone lru list.
|
|
|
|
*
|
|
|
|
* Restrictions: zone->lru_lock must be held, page must be on LRU and must
|
|
|
|
* have PageUnevictable set.
|
|
|
|
*/
|
|
|
|
static void check_move_unevictable_page(struct page *page, struct zone *zone)
|
|
|
|
{
|
|
|
|
VM_BUG_ON(PageActive(page));
|
|
|
|
|
|
|
|
retry:
|
|
|
|
ClearPageUnevictable(page);
|
|
|
|
if (page_evictable(page, NULL)) {
|
2009-09-22 04:02:58 +04:00
|
|
|
enum lru_list l = page_lru_base_type(page);
|
2008-10-19 07:26:53 +04:00
|
|
|
|
2008-10-19 07:26:43 +04:00
|
|
|
__dec_zone_state(zone, NR_UNEVICTABLE);
|
|
|
|
list_move(&page->lru, &zone->lru[l].list);
|
memcg: synchronized LRU
A big patch for changing memcg's LRU semantics.
Now,
- page_cgroup is linked to mem_cgroup's its own LRU (per zone).
- LRU of page_cgroup is not synchronous with global LRU.
- page and page_cgroup is one-to-one and statically allocated.
- To find which LRU a page_cgroup is on, you have to check pc->mem_cgroup, as
- lru = page_cgroup_zoneinfo(pc, nid_of_pc, zid_of_pc);
- SwapCache is handled.
And, when we handle LRU list of page_cgroup, we do following.
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc); .....................(1)
mz = page_cgroup_zoneinfo(pc);
spin_lock(&mz->lru_lock);
.....add to LRU
spin_unlock(&mz->lru_lock);
unlock_page_cgroup(pc);
But (1) is a spin_lock and we have to be wary of deadlock with zone->lru_lock.
So, trylock() is used at (1), now. Without (1), we can't trust "mz" is correct.
This is a trial to remove this dirty nesting of locks.
This patch changes mz->lru_lock to be zone->lru_lock.
Then, above sequence will be written as
spin_lock(&zone->lru_lock); # in vmscan.c or swap.c via global LRU
mem_cgroup_add/remove/etc_lru() {
pc = lookup_page_cgroup(page);
mz = page_cgroup_zoneinfo(pc);
if (PageCgroupUsed(pc)) {
....add to LRU
}
}
spin_unlock(&zone->lru_lock); # in vmscan.c or swap.c via global LRU
This is much simpler.
(*) We're safe even if we don't take lock_page_cgroup(pc). Because..
1. When pc->mem_cgroup can be modified.
- at charge.
- at account_move().
2. at charge
the PCG_USED bit is not set before pc->mem_cgroup is fixed.
3. at account_move()
the page is isolated and not on LRU.
Pros.
- easy for maintenance.
- memcg can make use of laziness of pagevec.
- we don't have to duplicate the LRU/Active/Unevictable bits in page_cgroup.
- LRU status of memcg will be synchronized with global LRU's one.
- # of locks are reduced.
- account_move() is simplified very much.
Cons.
- may increase cost of LRU rotation.
(no impact if memcg is not configured.)
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-01-08 05:08:01 +03:00
|
|
|
mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
|
2008-10-19 07:26:43 +04:00
|
|
|
__inc_zone_state(zone, NR_INACTIVE_ANON + l);
|
|
|
|
__count_vm_event(UNEVICTABLE_PGRESCUED);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* rotate unevictable list
|
|
|
|
*/
|
|
|
|
SetPageUnevictable(page);
|
|
|
|
list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
|
2009-01-08 05:08:01 +03:00
|
|
|
mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
|
2008-10-19 07:26:43 +04:00
|
|
|
if (page_evictable(page, NULL))
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* scan_mapping_unevictable_pages - scan an address space for evictable pages
|
|
|
|
* @mapping: struct address_space to scan for evictable pages
|
|
|
|
*
|
|
|
|
* Scan all pages in mapping. Check unevictable pages for
|
|
|
|
* evictability and move them to the appropriate zone lru list.
|
|
|
|
*/
|
|
|
|
void scan_mapping_unevictable_pages(struct address_space *mapping)
|
|
|
|
{
|
|
|
|
pgoff_t next = 0;
|
|
|
|
pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
|
|
|
|
PAGE_CACHE_SHIFT;
|
|
|
|
struct zone *zone;
|
|
|
|
struct pagevec pvec;
|
|
|
|
|
|
|
|
if (mapping->nrpages == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
pagevec_init(&pvec, 0);
|
|
|
|
while (next < end &&
|
|
|
|
pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
|
|
|
|
int i;
|
|
|
|
int pg_scanned = 0;
|
|
|
|
|
|
|
|
zone = NULL;
|
|
|
|
|
|
|
|
for (i = 0; i < pagevec_count(&pvec); i++) {
|
|
|
|
struct page *page = pvec.pages[i];
|
|
|
|
pgoff_t page_index = page->index;
|
|
|
|
struct zone *pagezone = page_zone(page);
|
|
|
|
|
|
|
|
pg_scanned++;
|
|
|
|
if (page_index > next)
|
|
|
|
next = page_index;
|
|
|
|
next++;
|
|
|
|
|
|
|
|
if (pagezone != zone) {
|
|
|
|
if (zone)
|
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
zone = pagezone;
|
|
|
|
spin_lock_irq(&zone->lru_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (PageLRU(page) && PageUnevictable(page))
|
|
|
|
check_move_unevictable_page(page, zone);
|
|
|
|
}
|
|
|
|
if (zone)
|
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
pagevec_release(&pvec);
|
|
|
|
|
|
|
|
count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
2008-10-19 07:26:53 +04:00
|
|
|
|
|
|
|
/**
|
|
|
|
* scan_zone_unevictable_pages - check unevictable list for evictable pages
|
|
|
|
* @zone - zone of which to scan the unevictable list
|
|
|
|
*
|
|
|
|
* Scan @zone's unevictable LRU lists to check for pages that have become
|
|
|
|
* evictable. Move those that have to @zone's inactive list where they
|
|
|
|
* become candidates for reclaim, unless shrink_inactive_zone() decides
|
|
|
|
* to reactivate them. Pages that are still unevictable are rotated
|
|
|
|
* back onto @zone's unevictable list.
|
|
|
|
*/
|
|
|
|
#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
|
2009-01-07 01:39:45 +03:00
|
|
|
static void scan_zone_unevictable_pages(struct zone *zone)
|
2008-10-19 07:26:53 +04:00
|
|
|
{
|
|
|
|
struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
|
|
|
|
unsigned long scan;
|
|
|
|
unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
|
|
|
|
|
|
|
|
while (nr_to_scan > 0) {
|
|
|
|
unsigned long batch_size = min(nr_to_scan,
|
|
|
|
SCAN_UNEVICTABLE_BATCH_SIZE);
|
|
|
|
|
|
|
|
spin_lock_irq(&zone->lru_lock);
|
|
|
|
for (scan = 0; scan < batch_size; scan++) {
|
|
|
|
struct page *page = lru_to_page(l_unevictable);
|
|
|
|
|
|
|
|
if (!trylock_page(page))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
prefetchw_prev_lru_page(page, l_unevictable, flags);
|
|
|
|
|
|
|
|
if (likely(PageLRU(page) && PageUnevictable(page)))
|
|
|
|
check_move_unevictable_page(page, zone);
|
|
|
|
|
|
|
|
unlock_page(page);
|
|
|
|
}
|
|
|
|
spin_unlock_irq(&zone->lru_lock);
|
|
|
|
|
|
|
|
nr_to_scan -= batch_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
|
|
|
|
*
|
|
|
|
* A really big hammer: scan all zones' unevictable LRU lists to check for
|
|
|
|
* pages that have become evictable. Move those back to the zones'
|
|
|
|
* inactive list where they become candidates for reclaim.
|
|
|
|
* This occurs when, e.g., we have unswappable pages on the unevictable lists,
|
|
|
|
* and we add swap to the system. As such, it runs in the context of a task
|
|
|
|
* that has possibly/probably made some previously unevictable pages
|
|
|
|
* evictable.
|
|
|
|
*/
|
2009-01-07 01:39:44 +03:00
|
|
|
static void scan_all_zones_unevictable_pages(void)
|
2008-10-19 07:26:53 +04:00
|
|
|
{
|
|
|
|
struct zone *zone;
|
|
|
|
|
|
|
|
for_each_zone(zone) {
|
|
|
|
scan_zone_unevictable_pages(zone);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
|
|
|
|
* all nodes' unevictable lists for evictable pages
|
|
|
|
*/
|
|
|
|
unsigned long scan_unevictable_pages;
|
|
|
|
|
|
|
|
int scan_unevictable_handler(struct ctl_table *table, int write,
|
2009-09-24 02:57:19 +04:00
|
|
|
void __user *buffer,
|
2008-10-19 07:26:53 +04:00
|
|
|
size_t *length, loff_t *ppos)
|
|
|
|
{
|
2009-09-24 02:57:19 +04:00
|
|
|
proc_doulongvec_minmax(table, write, buffer, length, ppos);
|
2008-10-19 07:26:53 +04:00
|
|
|
|
|
|
|
if (write && *(unsigned long *)table->data)
|
|
|
|
scan_all_zones_unevictable_pages();
|
|
|
|
|
|
|
|
scan_unevictable_pages = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* per node 'scan_unevictable_pages' attribute. On demand re-scan of
|
|
|
|
* a specified node's per zone unevictable lists for evictable pages.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static ssize_t read_scan_unevictable_node(struct sys_device *dev,
|
|
|
|
struct sysdev_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
return sprintf(buf, "0\n"); /* always zero; should fit... */
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t write_scan_unevictable_node(struct sys_device *dev,
|
|
|
|
struct sysdev_attribute *attr,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
|
|
|
|
struct zone *zone;
|
|
|
|
unsigned long res;
|
|
|
|
unsigned long req = strict_strtoul(buf, 10, &res);
|
|
|
|
|
|
|
|
if (!req)
|
|
|
|
return 1; /* zero is no-op */
|
|
|
|
|
|
|
|
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
|
|
|
|
if (!populated_zone(zone))
|
|
|
|
continue;
|
|
|
|
scan_zone_unevictable_pages(zone);
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
|
|
|
|
read_scan_unevictable_node,
|
|
|
|
write_scan_unevictable_node);
|
|
|
|
|
|
|
|
int scan_unevictable_register_node(struct node *node)
|
|
|
|
{
|
|
|
|
return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
|
|
|
|
}
|
|
|
|
|
|
|
|
void scan_unevictable_unregister_node(struct node *node)
|
|
|
|
{
|
|
|
|
sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
|
|
|
|
}
|
|
|
|
|