memcg: remove mem_cgroup_calc_reclaim()
Now, get_scan_ratio() returns the correct value even during memcg reclaim, so mem_cgroup_calc_reclaim() can be removed. As a result, memcg reclaim now has the same anon/file reclaim-balancing capability as global reclaim. Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Balbir Singh <balbir@in.ibm.com> Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Cc: Hugh Dickins <hugh@veritas.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Родитель
3e2f41f1f6
Коммит
9439c1c95b
|
@ -97,9 +97,6 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
|
|||
int priority);
|
||||
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
|
||||
int priority);
|
||||
|
||||
extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
|
||||
int priority, enum lru_list lru);
|
||||
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
|
||||
struct zone *zone);
|
||||
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
|
||||
|
@ -244,13 +241,6 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
|
|||
{
|
||||
}
|
||||
|
||||
static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
|
||||
struct zone *zone, int priority,
|
||||
enum lru_list lru)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline bool mem_cgroup_disabled(void)
|
||||
{
|
||||
return true;
|
||||
|
|
|
@ -414,27 +414,6 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
|
|||
mem->prev_priority = priority;
|
||||
}
|
||||
|
||||
/*
 * Calculate # of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and decremented in each loop.
 * (see include/linux/mmzone.h)
 */
long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
				int priority, enum lru_list lru)
{
	long nr_pages;
	/* Locate this cgroup's per-zone bookkeeping by node id + zone index. */
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	/* Pages currently on the given LRU list of this memcg/zone pair. */
	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);

	/*
	 * Scan a smaller fraction at high priority values; as priority
	 * drops toward 0, an ever larger share of the list is scanned.
	 */
	return (nr_pages >> priority);
}
|
||||
|
||||
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
|
||||
{
|
||||
unsigned long active;
|
||||
|
|
27
mm/vmscan.c
27
mm/vmscan.c
|
@ -1466,30 +1466,23 @@ static void shrink_zone(int priority, struct zone *zone,
|
|||
get_scan_ratio(zone, sc, percent);
|
||||
|
||||
for_each_evictable_lru(l) {
|
||||
if (scan_global_lru(sc)) {
|
||||
int file = is_file_lru(l);
|
||||
int scan;
|
||||
int file = is_file_lru(l);
|
||||
int scan;
|
||||
|
||||
scan = zone_page_state(zone, NR_LRU_BASE + l);
|
||||
if (priority) {
|
||||
scan >>= priority;
|
||||
scan = (scan * percent[file]) / 100;
|
||||
}
|
||||
scan = zone_page_state(zone, NR_LRU_BASE + l);
|
||||
if (priority) {
|
||||
scan >>= priority;
|
||||
scan = (scan * percent[file]) / 100;
|
||||
}
|
||||
if (scan_global_lru(sc)) {
|
||||
zone->lru[l].nr_scan += scan;
|
||||
nr[l] = zone->lru[l].nr_scan;
|
||||
if (nr[l] >= swap_cluster_max)
|
||||
zone->lru[l].nr_scan = 0;
|
||||
else
|
||||
nr[l] = 0;
|
||||
} else {
|
||||
/*
|
||||
* This reclaim occurs not because zone memory shortage
|
||||
* but because memory controller hits its limit.
|
||||
* Don't modify zone reclaim related data.
|
||||
*/
|
||||
nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
|
||||
priority, l);
|
||||
}
|
||||
} else
|
||||
nr[l] = scan;
|
||||
}
|
||||
|
||||
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
|
||||
|
|
Загрузка…
Ссылка в новой задаче