memcg: make memcg's file mapped consistent with global VM

The global VM uses the name FILE_MAPPED, but memcg uses MAPPED_FILE for the
same statistic, which makes grepping difficult.  Replace memcg's MAPPED_FILE
with FILE_MAPPED.

The global VM also accounts mapped shared memory (shmem/tmpfs) into
FILE_MAPPED, but memcg does not.  Fix that as well.

Note:
  page_is_file_cache() only checks whether a page is SwapBacked, so it
  filters out shmem pages.  To mirror the global accounting we need to
  check PageAnon() instead.
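
For reference, a minimal sketch of the check change in
mem_cgroup_move_account() (identifiers as in this patch; the summary of
page_is_file_cache() reflects its mm_inline.h definition of this era and
is illustrative only):

  /* old check: page_is_file_cache() is roughly !PageSwapBacked(page),
   * so mapped shmem/tmpfs pages (SwapBacked) were never counted */
  if (page_is_file_cache(page) && page_mapped(page))
  	/* update MEM_CGROUP_STAT_MAPPED_FILE */;

  /* new check: any mapped, non-anonymous page is counted, including
   * mapped shmem/tmpfs, matching the global NR_FILE_MAPPED counter */
  if (page_mapped(page) && !PageAnon(page))
  	/* update MEM_CGROUP_STAT_FILE_MAPPED */;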

Cc: Balbir Singh <balbir@in.ibm.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: KAMEZAWA Hiroyuki  2009-12-15 16:47:09 -08:00, committed by Linus Torvalds
Parent: cdec2e4265
Commit: d8046582d5
3 changed files with 13 additions and 16 deletions

--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -122,7 +122,7 @@ static inline bool mem_cgroup_disabled(void)
 }
 
 extern bool mem_cgroup_oom_called(struct task_struct *task);
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
+void mem_cgroup_update_file_mapped(struct page *page, int val);
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 						gfp_t gfp_mask, int nid,
 						int zid);
@@ -287,7 +287,7 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
+static inline void mem_cgroup_update_file_mapped(struct page *page,
 							int val)
 {
 }

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -67,7 +67,7 @@ enum mem_cgroup_stat_index {
 	 */
 	MEM_CGROUP_STAT_CACHE,	   /* # of pages charged as cache */
 	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
-	MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
+	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
 	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
 	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
 	MEM_CGROUP_STAT_EVENTS,	/* sum of pagein + pageout for internal use */
@@ -1227,7 +1227,7 @@ static void record_last_oom(struct mem_cgroup *mem)
  * Currently used to update mapped file statistics, but the routine can be
  * generalized to update other statistics as well.
  */
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+void mem_cgroup_update_file_mapped(struct page *page, int val)
 {
 	struct mem_cgroup *mem;
 	struct mem_cgroup_stat *stat;
@@ -1235,9 +1235,6 @@ void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
 	int cpu;
 	struct page_cgroup *pc;
 
-	if (!page_is_file_cache(page))
-		return;
-
 	pc = lookup_page_cgroup(page);
 	if (unlikely(!pc))
 		return;
@@ -1257,7 +1254,7 @@ void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
 	stat = &mem->stat;
 	cpustat = &stat->cpustat[cpu];
 
-	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+	__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val);
 done:
 	unlock_page_cgroup(pc);
 }
@@ -1654,18 +1651,18 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
 	mem_cgroup_charge_statistics(from, pc, false);
 
 	page = pc->page;
-	if (page_is_file_cache(page) && page_mapped(page)) {
+	if (page_mapped(page) && !PageAnon(page)) {
 		cpu = smp_processor_id();
 		/* Update mapped_file data for mem_cgroup "from" */
 		stat = &from->stat;
 		cpustat = &stat->cpustat[cpu];
-		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
 						-1);
 
 		/* Update mapped_file data for mem_cgroup "to" */
 		stat = &to->stat;
 		cpustat = &stat->cpustat[cpu];
-		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+		__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
 						1);
 	}
 
@@ -2889,7 +2886,7 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 enum {
 	MCS_CACHE,
 	MCS_RSS,
-	MCS_MAPPED_FILE,
+	MCS_FILE_MAPPED,
 	MCS_PGPGIN,
 	MCS_PGPGOUT,
 	MCS_SWAP,
@@ -2933,8 +2930,8 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
 	s->stat[MCS_CACHE] += val * PAGE_SIZE;
 	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
 	s->stat[MCS_RSS] += val * PAGE_SIZE;
-	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
-	s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
+	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED);
+	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
 	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
 	s->stat[MCS_PGPGIN] += val;
 	val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);

--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -721,7 +721,7 @@ void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_mapped_file_stat(page, 1);
+		mem_cgroup_update_file_mapped(page, 1);
 	}
 }
@@ -753,8 +753,8 @@ void page_remove_rmap(struct page *page)
 		__dec_zone_page_state(page, NR_ANON_PAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
+		mem_cgroup_update_file_mapped(page, -1);
 	}
-	mem_cgroup_update_mapped_file_stat(page, -1);
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap