mm: memcg: shorten preempt-disabled section around event checks

Only the ratelimit checks themselves have to run with preemption
disabled; the resulting actions - checking for usage thresholds,
updating the soft limit tree - can and should run with preemption
enabled. (A standalone sketch of this pattern follows the diff below.)

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reported-by: Yong Zhang <yong.zhang0@gmail.com>
Tested-by: Yong Zhang <yong.zhang0@gmail.com>
Reported-by: Luis Henriques <henrix@camandro.org>
Tested-by: Luis Henriques <henrix@camandro.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Johannes Weiner 2012-01-12 17:18:23 -08:00 committed by Linus Torvalds
Parent e94c8a9cbc
Commit f53d7ce32e
1 changed file with 38 additions and 41 deletions

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -748,37 +748,32 @@ static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
         return total;
 }
 
-static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
+static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
+                                       enum mem_cgroup_events_target target)
 {
         unsigned long val, next;
 
         val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
         next = __this_cpu_read(memcg->stat->targets[target]);
         /* from time_after() in jiffies.h */
-        return ((long)next - (long)val < 0);
-}
-
-static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
-{
-        unsigned long val, next;
-
-        val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
-
-        switch (target) {
-        case MEM_CGROUP_TARGET_THRESH:
-                next = val + THRESHOLDS_EVENTS_TARGET;
-                break;
-        case MEM_CGROUP_TARGET_SOFTLIMIT:
-                next = val + SOFTLIMIT_EVENTS_TARGET;
-                break;
-        case MEM_CGROUP_TARGET_NUMAINFO:
-                next = val + NUMAINFO_EVENTS_TARGET;
-                break;
-        default:
-                return;
+        if ((long)next - (long)val < 0) {
+                switch (target) {
+                case MEM_CGROUP_TARGET_THRESH:
+                        next = val + THRESHOLDS_EVENTS_TARGET;
+                        break;
+                case MEM_CGROUP_TARGET_SOFTLIMIT:
+                        next = val + SOFTLIMIT_EVENTS_TARGET;
+                        break;
+                case MEM_CGROUP_TARGET_NUMAINFO:
+                        next = val + NUMAINFO_EVENTS_TARGET;
+                        break;
+                default:
+                        break;
+                }
+                __this_cpu_write(memcg->stat->targets[target], next);
+                return true;
         }
-
-        __this_cpu_write(memcg->stat->targets[target], next);
+        return false;
 }
 
 /*
@@ -789,25 +784,27 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 {
         preempt_disable();
         /* threshold event is triggered in finer grain than soft limit */
-        if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
-                mem_cgroup_threshold(memcg);
-                __mem_cgroup_target_update(memcg, MEM_CGROUP_TARGET_THRESH);
-                if (unlikely(__memcg_event_check(memcg,
-                             MEM_CGROUP_TARGET_SOFTLIMIT))) {
-                        mem_cgroup_update_tree(memcg, page);
-                        __mem_cgroup_target_update(memcg,
-                                                   MEM_CGROUP_TARGET_SOFTLIMIT);
-                }
+        if (unlikely(mem_cgroup_event_ratelimit(memcg,
+                                                MEM_CGROUP_TARGET_THRESH))) {
+                bool do_softlimit, do_numainfo;
+
+                do_softlimit = mem_cgroup_event_ratelimit(memcg,
+                                                MEM_CGROUP_TARGET_SOFTLIMIT);
 #if MAX_NUMNODES > 1
-                if (unlikely(__memcg_event_check(memcg,
-                                                 MEM_CGROUP_TARGET_NUMAINFO))) {
-                        atomic_inc(&memcg->numainfo_events);
-                        __mem_cgroup_target_update(memcg,
-                                                   MEM_CGROUP_TARGET_NUMAINFO);
-                }
+                do_numainfo = mem_cgroup_event_ratelimit(memcg,
+                                                MEM_CGROUP_TARGET_NUMAINFO);
 #endif
-        }
-        preempt_enable();
+                preempt_enable();
+
+                mem_cgroup_threshold(memcg);
+                if (unlikely(do_softlimit))
+                        mem_cgroup_update_tree(memcg, page);
+#if MAX_NUMNODES > 1
+                if (unlikely(do_numainfo))
+                        atomic_inc(&memcg->numainfo_events);
+#endif
+        } else
+                preempt_enable();
 }
 
 struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
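
For readers outside the kernel tree, here is a minimal standalone sketch
of the pattern the patch applies: sample every ratelimit decision while
still non-preemptible, then run the resulting actions after
preempt_enable(). This is illustrative userspace C, not the kernel code -
preempt_disable()/preempt_enable() are stubbed to no-ops, a plain global
stands in for the per-cpu event counter, and the interval names and
printf actions are made up:

#include <stdbool.h>
#include <stdio.h>

/* Stubs: in the kernel these pin the task to one CPU so the __this_cpu_*
 * accesses stay coherent; here they only mark the critical section. */
#define preempt_disable() do { } while (0)
#define preempt_enable()  do { } while (0)

#define THRESH_INTERVAL    128   /* hypothetical event intervals */
#define SOFTLIMIT_INTERVAL 1024

static unsigned long events;     /* stands in for the per-cpu counter */
static unsigned long next_thresh = THRESH_INTERVAL;
static unsigned long next_softlimit = SOFTLIMIT_INTERVAL;

/* Like mem_cgroup_event_ratelimit(): report whether the interval has
 * elapsed and, if so, arm the next target. The signed subtraction
 * mirrors the time_after()-style wraparound-safe comparison. */
static bool event_ratelimit(unsigned long *next, unsigned long interval)
{
        if ((long)*next - (long)events < 0) {
                *next = events + interval;
                return true;
        }
        return false;
}

static void check_events(void)
{
        preempt_disable();
        if (event_ratelimit(&next_thresh, THRESH_INTERVAL)) {
                /* Sample all remaining decisions while non-preemptible... */
                bool do_softlimit = event_ratelimit(&next_softlimit,
                                                    SOFTLIMIT_INTERVAL);
                preempt_enable();
                /* ...and only then do the slow work, preemptibly. */
                printf("threshold check at event %lu\n", events);
                if (do_softlimit)
                        printf("soft limit update at event %lu\n", events);
        } else
                preempt_enable();
}

int main(void)
{
        for (int i = 0; i < 4096; i++) {
                events++;
                check_events();
        }
        return 0;
}

The shape matters more than the numbers: only the cheap counter reads and
target updates stay inside the critical section, while the slow actions
(mem_cgroup_threshold() and mem_cgroup_update_tree() in the real code)
run preemptibly, and the booleans sampled under preempt_disable() keep
the ratelimiting decisions consistent.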