mm, memcg: prevent memory.swap.max load tearing
The write side of this is xchg()/smp_mb(), so that's all good. Just a few sites missing a READ_ONCE. Signed-off-by: Chris Down <chris@chrisdown.name> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Roman Gushchin <guro@fb.com> Cc: Tejun Heo <tj@kernel.org> Link: http://lkml.kernel.org/r/bbec2c3d822217334855c8877a9d28b2a6d395fb.1584034301.git.chris@chrisdown.name Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent: c3d5320086
Commit: 32d087cdd9
@@ -1525,7 +1525,7 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
 			K((u64)page_counter_read(&memcg->swap)),
-			K((u64)memcg->swap.max), memcg->swap.failcnt);
+			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
 	else {
 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
 			K((u64)page_counter_read(&memcg->memsw)),
@@ -1558,7 +1558,7 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 		unsigned long swap_max;

 		memsw_max = memcg->memsw.max;
-		swap_max = memcg->swap.max;
+		swap_max = READ_ONCE(memcg->swap.max);
 		swap_max = min(swap_max, (unsigned long)total_swap_pages);
 		max = min(max + swap_max, memsw_max);
 	}
@@ -7117,7 +7117,8 @@ bool mem_cgroup_swap_full(struct page *page)
 		return false;

 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
-		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
+		if (page_counter_read(&memcg->swap) * 2 >=
+				READ_ONCE(memcg->swap.max))
 			return true;

 	return false;
Loading…
Reference in new issue