mm, oom: add oom victim's memcg to the oom context information
The current oom report doesn't display the victim's memcg context during a global OOM situation. While this information is not strictly needed, it can be really helpful for containerized environments to locate which container has lost a process. Now that we have a single line for the oom context, we can trivially add both the oom memcg (this can be either global_oom or the specific memcg which hit its hard limit) and task_memcg, which is the victim's memcg.

Below is the single-line output in the oom report after this patch.

- global oom context information:

oom-kill:constraint=<constraint>,nodemask=<nodemask>,cpuset=<cpuset>,mems_allowed=<mems_allowed>,global_oom,task_memcg=<memcg>,task=<comm>,pid=<pid>,uid=<uid>

- memcg oom context information:

oom-kill:constraint=<constraint>,nodemask=<nodemask>,cpuset=<cpuset>,mems_allowed=<mems_allowed>,oom_memcg=<memcg>,task_memcg=<memcg>,task=<comm>,pid=<pid>,uid=<uid>

[penguin-kernel@I-love.SAKURA.ne.jp: use pr_cont() in mem_cgroup_print_oom_context()]
Link: http://lkml.kernel.org/r/201812190723.wBJ7NdkN032628@www262.sakura.ne.jp
Link: http://lkml.kernel.org/r/1542799799-36184-2-git-send-email-ufo19890607@gmail.com
Signed-off-by: yuzhoujian <yuzhoujian@didichuxing.com>
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: David Rientjes <rientjes@google.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Roman Gushchin <guro@fb.com>
Cc: Yang Shi <yang.s@alibaba-inc.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
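Purely as an illustration of the templates above (the constraint, cgroup paths, task name, pid and uid are made-up values, not output captured from this patch), the two variants could end up looking roughly like this in the kernel log:

oom-kill:constraint=CONSTRAINT_NONE,nodemask=(null),cpuset=/,mems_allowed=0,global_oom,task_memcg=/foo,task=bash,pid=1234,uid=0

oom-kill:constraint=CONSTRAINT_MEMCG,nodemask=(null),cpuset=/,mems_allowed=0,oom_memcg=/foo,task_memcg=/foo/bar,task=bash,pid=1234,uid=0

In the memcg case, task_memcg may be a descendant of oom_memcg when the hierarchy is enabled.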
Parent: ef8444ea01
Commit: f0c867d958
include/linux/memcontrol.h
@@ -526,9 +526,11 @@ void mem_cgroup_handle_over_high(void);
 
 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				struct task_struct *p);
 
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
+
 static inline void mem_cgroup_enter_user_fault(void)
 {
	WARN_ON(current->in_user_fault);
@@ -970,7 +972,12 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 }
 
 static inline void
-mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
+static inline void
+mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
+{
+}
+
mm/memcontrol.c
@@ -1293,33 +1293,40 @@ static const char *const memcg1_stat_names[] = {
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
- * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
+ * mem_cgroup_print_oom_context: Print OOM information relevant to
+ * memory controller.
  * @memcg: The memory cgroup that went over limit
  * @p: Task that is going to be killed
  *
  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
  * enabled
  */
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
 {
+	rcu_read_lock();
+
+	if (memcg) {
+		pr_cont(",oom_memcg=");
+		pr_cont_cgroup_path(memcg->css.cgroup);
+	} else
+		pr_cont(",global_oom");
+	if (p) {
+		pr_cont(",task_memcg=");
+		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
+	}
+	rcu_read_unlock();
+}
+
+/**
+ * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
+ * memory controller.
+ * @memcg: The memory cgroup that went over limit
+ */
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
+{
	struct mem_cgroup *iter;
	unsigned int i;
 
-	rcu_read_lock();
-
-	if (p) {
-		pr_info("Task in ");
-		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
-		pr_cont(" killed as a result of limit of ");
-	} else {
-		pr_info("Memory limit reached of cgroup ");
-	}
-
-	pr_cont_cgroup_path(memcg->css.cgroup);
-	pr_cont("\n");
-
-	rcu_read_unlock();
-
	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.max), memcg->memory.failcnt);
mm/oom_kill.c
@@ -435,6 +435,7 @@ static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
+	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
 }
@@ -449,7 +450,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
 
	dump_stack();
	if (is_memcg_oom(oc))
-		mem_cgroup_print_oom_info(oc->memcg, p);
+		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (is_dump_unreclaim_slabs())