2006-10-20 10:28:32 +04:00
|
|
|
#ifndef __INCLUDE_LINUX_OOM_H
|
|
|
|
#define __INCLUDE_LINUX_OOM_H
|
|
|
|
|
2007-10-17 10:25:53 +04:00
|
|
|
|
oom: badness heuristic rewrite
This is a complete rewrite of the oom killer's badness() heuristic, which is
used to determine which task to kill in oom conditions. The goal is to
make it as simple and predictable as possible so the results are better
understood and we end up killing the task which will lead to the most
memory freeing while still respecting the fine-tuning from userspace.
Instead of basing the heuristic on mm->total_vm for each task, the task's
rss and swap space are used. This is a better indication of the
amount of memory that will be freeable if the oom killed task is chosen
and subsequently exits. This helps specifically in cases where KDE or
GNOME is chosen for oom kill on desktop systems instead of a memory
hogging task.
The baseline for the heuristic is a proportion of memory that each task is
currently using in memory plus swap compared to the amount of "allowable"
memory. "Allowable," in this sense, means the system-wide resources for
unconstrained oom conditions, the set of mempolicy nodes, the mems
attached to current's cpuset, or a memory controller's limit. The
proportion is given on a scale of 0 (never kill) to 1000 (always kill),
roughly meaning that a task with a badness() score of 500 consumes
approximately 50% of allowable memory, resident in RAM or in swap space.
The proportion is always relative to the amount of "allowable" memory and
not the total amount of RAM systemwide so that mempolicies and cpusets may
operate in isolation; they need not know the true size of the
machine on which they are running if they are bound to a specific set of
nodes or mems, respectively.
Root tasks are given 3% extra memory just like __vm_enough_memory()
provides in LSMs. In the event of two tasks consuming similar amounts of
memory, it is generally better to save root's task.
Because of the change in the badness() heuristic's baseline, it is also
necessary to introduce a new user interface to tune it. It's not possible
to redefine the meaning of /proc/pid/oom_adj with a new scale since the
ABI cannot be changed for backward compatibility. Instead, a new tunable,
/proc/pid/oom_score_adj, is added that ranges from -1000 to +1000. It may
be used to polarize the heuristic such that certain tasks are never
considered for oom kill while others may always be considered. The value
is added directly into the badness() score so a value of -500, for
example, means to discount 50% of its memory consumption in comparison to
other tasks either on the system, bound to the mempolicy, in the cpuset,
or sharing the same memory controller.
/proc/pid/oom_adj is changed so that its meaning is rescaled into the
units used by /proc/pid/oom_score_adj, and vice versa. Changing one of
these per-task tunables will rescale the value of the other to an
equivalent meaning. Although /proc/pid/oom_adj was originally defined as
a bitshift on the badness score, it now shares the same linear growth as
/proc/pid/oom_score_adj but with different granularity. This is required
so the ABI is not broken with userspace applications and allows oom_adj to
be deprecated for future removal.
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
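To make the new interface concrete, the sketch below (not part of this patch; the helper name and error handling are invented for illustration) shows a userspace process adjusting its own /proc/self/oom_score_adj within the -1000..+1000 range described above.

#include <stdio.h>

/* Illustrative only: write an oom_score_adj value for the calling process.
 * -1000 means the task is never chosen; +1000 means it is always preferred. */
static int set_oom_score_adj(int adj)
{
	FILE *f = fopen("/proc/self/oom_score_adj", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", adj);
	return fclose(f);
}

int main(void)
{
	/* Discount half of this task's memory usage in the badness() comparison. */
	if (set_oom_score_adj(-500) != 0)
		perror("oom_score_adj");
	return 0;
}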
2010-08-10 04:19:46 +04:00
|
|
|
#include <linux/sched.h>
|
2007-10-17 10:25:59 +04:00
|
|
|
#include <linux/types.h>
|
2009-12-16 03:45:33 +03:00
|
|
|
#include <linux/nodemask.h>
|
2012-10-13 13:46:48 +04:00
|
|
|
#include <uapi/linux/oom.h>
|
2007-10-17 10:25:59 +04:00
|
|
|
|
|
|
|
struct zonelist;
|
|
|
|
struct notifier_block;
|
2010-08-10 04:19:43 +04:00
|
|
|
struct mem_cgroup;
|
|
|
|
struct task_struct;
|
2007-10-17 10:25:59 +04:00
|
|
|
|
2015-09-09 01:00:44 +03:00
|
|
|
/*
|
|
|
|
 * Details of the page allocation that triggered the oom killer, used to
|
|
|
|
* determine what should be killed.
|
|
|
|
*/
|
2015-09-09 01:00:36 +03:00
|
|
|
struct oom_control {
|
2015-09-09 01:00:44 +03:00
|
|
|
/* Used to determine cpuset */
|
2015-09-09 01:00:36 +03:00
|
|
|
struct zonelist *zonelist;
|
2015-09-09 01:00:44 +03:00
|
|
|
|
|
|
|
/* Used to determine mempolicy */
|
|
|
|
nodemask_t *nodemask;
|
|
|
|
|
|
|
|
/* Used to determine cpuset and node locality requirement */
|
|
|
|
const gfp_t gfp_mask;
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * order == -1 means the oom kill is required by sysrq; otherwise the
|
|
|
|
 * order is only used for display purposes.
|
|
|
|
*/
|
|
|
|
const int order;
|
2015-09-09 01:00:36 +03:00
|
|
|
};
|
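/*
 * Hedged sketch (not the actual page allocator code path) of how a caller
 * could describe a failed allocation to the oom killer through this
 * structure; example_invoke_oom() is a hypothetical helper, and only the
 * field meanings come from the definition above.  The designated
 * initializer is needed because gfp_mask and order are const.  oom_lock
 * and out_of_memory() are declared later in this header.
 */
static bool example_invoke_oom(struct zonelist *zonelist, nodemask_t *nodemask,
			       gfp_t gfp_mask, int order)
{
	struct oom_control oc = {
		.zonelist = zonelist,	/* cpuset constraint detection */
		.nodemask = nodemask,	/* mempolicy constraint detection */
		.gfp_mask = gfp_mask,
		.order = order,		/* -1 only for sysrq-forced kills */
	};
	bool killed;

	mutex_lock(&oom_lock);		/* oom_lock serializes global oom kills */
	killed = out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return killed;
}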
|
|
|
|
2007-10-17 10:25:53 +04:00
|
|
|
/*
|
|
|
|
* Types of limitations to the nodes from which allocations may occur
|
|
|
|
*/
|
|
|
|
enum oom_constraint {
|
|
|
|
CONSTRAINT_NONE,
|
|
|
|
CONSTRAINT_CPUSET,
|
|
|
|
CONSTRAINT_MEMORY_POLICY,
|
2010-08-10 04:18:54 +04:00
|
|
|
CONSTRAINT_MEMCG,
|
2007-10-17 10:25:53 +04:00
|
|
|
};
|
|
|
|
|
mm, memcg: introduce own oom handler to iterate only over its own threads
The global oom killer is serialized by the per-zonelist
try_set_zonelist_oom() which is used in the page allocator. Concurrent
oom kills are thus a rare event and only occur in systems using
mempolicies and with a large number of nodes.
Memory controller oom kills, however, can frequently be concurrent since
there is no serialization once the oom killer is called for oom conditions
in several different memcgs in parallel.
This creates massive contention on tasklist_lock since the oom killer
requires the read side for the tasklist iteration. If several memcgs are
calling the oom killer, this lock can be held for a substantial amount of
time, especially if threads continue to enter it as other threads are
exiting.
Since the exit path grabs the write side of the lock with irqs disabled in
a few different places, this can cause a soft lockup on cpus as a result
of tasklist_lock starvation.
The kernel lacks unfair writelocks, and successful calls to the oom killer
usually result in at least one thread entering the exit path, so an
alternative solution is needed.
This patch introduces a separate oom handler for memcgs so that they do
not require tasklist_lock for as much time. Instead, it iterates only
over the threads attached to the oom memcg and grabs a reference to the
selected thread before calling oom_kill_process() to ensure it doesn't
prematurely exit.
This still requires tasklist_lock for the tasklist dump, iterating
children of the selected process, and killing all other threads on the
system sharing the same memory as the selected victim. So while this
isn't a complete solution to tasklist_lock starvation, it significantly
reduces the amount of time that it is held.
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Sha Zhengju <handai.szj@taobao.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-08-01 03:43:44 +04:00
|
|
|
enum oom_scan_t {
|
|
|
|
OOM_SCAN_OK, /* scan thread and find its badness */
|
|
|
|
OOM_SCAN_CONTINUE, /* do not consider thread for oom kill */
|
|
|
|
OOM_SCAN_ABORT, /* abort the iteration and return */
|
|
|
|
OOM_SCAN_SELECT, /* always select this thread first */
|
|
|
|
};
|
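/*
 * Hedged sketch of how a victim-selection loop consumes these return
 * values; the real iteration and locking in mm/oom_kill.c differ, and
 * this helper exists only to show the intended control flow using the
 * oom_scan_process_thread() and oom_badness() declarations below.
 */
static struct task_struct *example_select_bad_process(struct oom_control *oc,
						      unsigned long totalpages)
{
	struct task_struct *p, *chosen = NULL;
	unsigned long chosen_points = 0;

	for_each_process(p) {
		unsigned long points;

		switch (oom_scan_process_thread(oc, p, totalpages)) {
		case OOM_SCAN_SELECT:
			chosen = p;		/* e.g. marked via oom_task_origin() */
			chosen_points = ULONG_MAX;
			break;
		case OOM_SCAN_CONTINUE:
			continue;		/* skip this thread entirely */
		case OOM_SCAN_ABORT:
			return NULL;		/* a victim is already exiting */
		case OOM_SCAN_OK:
			break;
		}
		points = oom_badness(p, NULL, oc->nodemask, totalpages);
		if (points > chosen_points) {
			chosen = p;
			chosen_points = points;
		}
	}
	return chosen;
}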
|
|
|
|
2015-06-25 02:57:19 +03:00
|
|
|
extern struct mutex oom_lock;
|
|
|
|
|
2012-12-12 04:02:56 +04:00
|
|
|
static inline void set_current_oom_origin(void)
|
|
|
|
{
|
2016-05-24 02:23:57 +03:00
|
|
|
current->signal->oom_flag_origin = true;
|
2012-12-12 04:02:56 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void clear_current_oom_origin(void)
|
|
|
|
{
|
2016-05-24 02:23:57 +03:00
|
|
|
current->signal->oom_flag_origin = false;
|
2012-12-12 04:02:56 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool oom_task_origin(const struct task_struct *p)
|
|
|
|
{
|
2016-05-24 02:23:57 +03:00
|
|
|
return p->signal->oom_flag_origin;
|
2012-12-12 04:02:56 +04:00
|
|
|
}
|
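/*
 * Hedged usage sketch: a task about to run an operation that can pull a
 * large amount of memory back into RAM (swapoff is the classic case) can
 * mark itself as the preferred oom victim for the duration, so an oom
 * caused by its own work kills it rather than an innocent task.
 * example_drain_swap_area() is a hypothetical stand-in for that work.
 */
static int example_drain_swap_area(void);

static int example_unuse_swap(void)
{
	int err;

	set_current_oom_origin();
	err = example_drain_swap_area();	/* may allocate heavily */
	clear_current_oom_origin();
	return err;
}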
2011-05-25 04:11:40 +04:00
|
|
|
|
2015-06-25 02:57:07 +03:00
|
|
|
extern void mark_oom_victim(struct task_struct *tsk);
|
2015-02-12 02:26:12 +03:00
|
|
|
|
2016-05-20 03:13:12 +03:00
|
|
|
#ifdef CONFIG_MMU
|
|
|
|
extern void try_oom_reaper(struct task_struct *tsk);
|
|
|
|
#else
|
|
|
|
static inline void try_oom_reaper(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-05-30 02:06:47 +04:00
|
|
|
extern unsigned long oom_badness(struct task_struct *p,
|
|
|
|
struct mem_cgroup *memcg, const nodemask_t *nodemask,
|
|
|
|
unsigned long totalpages);
|
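/*
 * Hedged sketch of the proportional heuristic described in the changelog
 * at the top of this file, not the exact mm/oom_kill.c implementation:
 * score rss plus swap as a share of "allowable" memory on a 0..1000
 * scale, discount roughly 3% for root tasks, and add oom_score_adj
 * (already expressed in the same units) directly to the result.
 */
static unsigned long example_badness(unsigned long rss_pages,
				     unsigned long swap_pages,
				     unsigned long totalpages,
				     bool is_root, long oom_score_adj)
{
	long points;

	if (oom_score_adj == OOM_SCORE_ADJ_MIN)
		return 0;			/* never consider this task */

	if (!totalpages)
		totalpages = 1;			/* guard the division */

	points = (rss_pages + swap_pages) * 1000 / totalpages;
	if (is_root)
		points -= 30;			/* ~3% of the 0..1000 scale */

	points += oom_score_adj;
	return points > 0 ? points : 1;		/* eligible tasks score at least 1 */
}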
2014-10-20 20:12:32 +04:00
|
|
|
|
2015-09-09 01:00:36 +03:00
|
|
|
extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
|
2012-08-01 03:43:44 +04:00
|
|
|
unsigned int points, unsigned long totalpages,
|
2015-09-09 01:00:36 +03:00
|
|
|
struct mem_cgroup *memcg, const char *message);
|
2012-08-01 03:43:44 +04:00
|
|
|
|
2015-09-09 01:00:36 +03:00
|
|
|
extern void check_panic_on_oom(struct oom_control *oc,
|
|
|
|
enum oom_constraint constraint,
|
2015-04-15 01:48:18 +03:00
|
|
|
struct mem_cgroup *memcg);
|
2012-08-01 03:43:48 +04:00
|
|
|
|
2015-09-09 01:00:36 +03:00
|
|
|
extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
|
|
|
|
struct task_struct *task, unsigned long totalpages);
|
2012-08-01 03:43:44 +04:00
|
|
|
|
2015-09-09 01:00:36 +03:00
|
|
|
extern bool out_of_memory(struct oom_control *oc);
|
2015-06-25 02:57:07 +03:00
|
|
|
|
2016-03-26 00:20:27 +03:00
|
|
|
extern void exit_oom_victim(struct task_struct *tsk);
|
2015-06-25 02:57:07 +03:00
|
|
|
|
2007-10-17 10:25:53 +04:00
|
|
|
extern int register_oom_notifier(struct notifier_block *nb);
|
|
|
|
extern int unregister_oom_notifier(struct notifier_block *nb);
|
|
|
|
|
2009-09-22 04:03:09 +04:00
|
|
|
extern bool oom_killer_disabled;
|
2015-02-12 02:26:24 +03:00
|
|
|
extern bool oom_killer_disable(void);
|
|
|
|
extern void oom_killer_enable(void);
|
2010-08-10 04:18:56 +04:00
|
|
|
|
2010-08-11 05:03:00 +04:00
|
|
|
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
|
|
|
|
|
2014-12-13 03:56:24 +03:00
|
|
|
static inline bool task_will_free_mem(struct task_struct *task)
|
|
|
|
{
|
2016-05-21 02:57:24 +03:00
|
|
|
struct signal_struct *sig = task->signal;
|
|
|
|
|
2014-12-13 03:56:24 +03:00
|
|
|
/*
|
|
|
|
* A coredumping process may sleep for an extended period in exit_mm(),
|
|
|
|
* so the oom killer cannot assume that the process will promptly exit
|
|
|
|
* and release memory.
|
|
|
|
*/
|
2016-05-21 02:57:24 +03:00
|
|
|
if (sig->flags & SIGNAL_GROUP_COREDUMP)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!(task->flags & PF_EXITING))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* Make sure that the whole thread group is going down */
|
|
|
|
if (!thread_group_empty(task) && !(sig->flags & SIGNAL_GROUP_EXIT))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
2014-12-13 03:56:24 +03:00
|
|
|
}
|
|
|
|
|
2010-08-10 04:18:56 +04:00
|
|
|
/* sysctls */
|
|
|
|
extern int sysctl_oom_dump_tasks;
|
|
|
|
extern int sysctl_oom_kill_allocating_task;
|
|
|
|
extern int sysctl_panic_on_oom;
|
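/*
 * Hedged illustration (userspace, not part of this header): these sysctls
 * are exposed under /proc/sys/vm/.  Below is a minimal sketch of flipping
 * one of them from a C program; the value semantics assumed here are the
 * conventional ones (0 disables, 1 enables, and panic_on_oom additionally
 * accepts 2 to panic even for constrained ooms).
 */
#include <stdio.h>

static int write_vm_sysctl(const char *name, int value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/vm/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", value);
	return fclose(f);
}

int main(void)
{
	return write_vm_sysctl("oom_dump_tasks", 1);	/* log the tasklist on oom */
}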
2007-10-17 10:25:53 +04:00
|
|
|
#endif /* __INCLUDE_LINUX_OOM_H */
|