blob: c696c265f0193e2a06127f8815e89714ca3ab0fd [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Alexey Dobriyan8ac773b2006-10-19 23:28:32 -07002#ifndef __INCLUDE_LINUX_OOM_H
3#define __INCLUDE_LINUX_OOM_H
4
David Rientjes5a3135c22007-10-16 23:25:53 -07005
Ingo Molnar3f07c012017-02-08 18:51:30 +01006#include <linux/sched/signal.h>
David Rientjes172acf62007-10-16 23:25:59 -07007#include <linux/types.h>
KAMEZAWA Hiroyuki4365a562009-12-15 16:45:33 -08008#include <linux/nodemask.h>
David Howells607ca462012-10-13 10:46:48 +01009#include <uapi/linux/oom.h>
Michal Hocko6b31d592017-08-18 15:16:15 -070010#include <linux/sched/coredump.h> /* MMF_* */
11#include <linux/mm.h> /* VM_FAULT* */
David Rientjes172acf62007-10-16 23:25:59 -070012
13struct zonelist;
14struct notifier_block;
Andrew Morton74bcbf42010-08-09 17:19:43 -070015struct mem_cgroup;
16struct task_struct;
David Rientjes172acf62007-10-16 23:25:59 -070017
/*
 * Constraint under which the OOM kill was triggered: none (global),
 * a cpuset, a mempolicy, or a memory cgroup limit.
 */
enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};
24
David Rientjes8989e4c2015-09-08 15:00:44 -070025/*
26 * Details of the page allocation that triggered the oom killer that are used to
27 * determine what should be killed.
28 */
David Rientjes6e0fc462015-09-08 15:00:36 -070029struct oom_control {
David Rientjes8989e4c2015-09-08 15:00:44 -070030 /* Used to determine cpuset */
David Rientjes6e0fc462015-09-08 15:00:36 -070031 struct zonelist *zonelist;
David Rientjes8989e4c2015-09-08 15:00:44 -070032
33 /* Used to determine mempolicy */
34 nodemask_t *nodemask;
35
Vladimir Davydov2a966b72016-07-26 15:22:33 -070036 /* Memory cgroup in which oom is invoked, or NULL for global oom */
37 struct mem_cgroup *memcg;
38
David Rientjes8989e4c2015-09-08 15:00:44 -070039 /* Used to determine cpuset and node locality requirement */
40 const gfp_t gfp_mask;
41
42 /*
43 * order == -1 means the oom kill is required by sysrq, otherwise only
44 * for display purposes.
45 */
46 const int order;
David Rientjes6e0fc462015-09-08 15:00:36 -070047
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -070048 /* Used by oom implementation, do not set */
49 unsigned long totalpages;
50 struct task_struct *chosen;
51 unsigned long chosen_points;
yuzhoujianef8444e2018-12-28 00:36:07 -080052
53 /* Used to print the constraint info. */
54 enum oom_constraint constraint;
David Rientjes9cbb78b2012-07-31 16:43:44 -070055};
56
Johannes Weinerdc564012015-06-24 16:57:19 -070057extern struct mutex oom_lock;
58
David Rientjese1e12d22012-12-11 16:02:56 -080059static inline void set_current_oom_origin(void)
60{
Tetsuo Handac96fc2d2016-05-23 16:23:57 -070061 current->signal->oom_flag_origin = true;
David Rientjese1e12d22012-12-11 16:02:56 -080062}
63
64static inline void clear_current_oom_origin(void)
65{
Tetsuo Handac96fc2d2016-05-23 16:23:57 -070066 current->signal->oom_flag_origin = false;
David Rientjese1e12d22012-12-11 16:02:56 -080067}
68
69static inline bool oom_task_origin(const struct task_struct *p)
70{
Tetsuo Handac96fc2d2016-05-23 16:23:57 -070071 return p->signal->oom_flag_origin;
David Rientjese1e12d22012-12-11 16:02:56 -080072}
David Rientjes72788c32011-05-24 17:11:40 -070073
Michal Hocko862e3072016-10-07 16:58:57 -070074static inline bool tsk_is_oom_victim(struct task_struct * tsk)
75{
76 return tsk->signal->oom_mm;
77}
78
Michal Hocko6b31d592017-08-18 15:16:15 -070079/*
Michal Hocko4837fe32017-12-14 15:33:15 -080080 * Use this helper if tsk->mm != mm and the victim mm needs a special
81 * handling. This is guaranteed to stay true after once set.
82 */
83static inline bool mm_is_oom_victim(struct mm_struct *mm)
84{
85 return test_bit(MMF_OOM_VICTIM, &mm->flags);
86}
87
88/*
Michal Hocko6b31d592017-08-18 15:16:15 -070089 * Checks whether a page fault on the given mm is still reliable.
90 * This is no longer true if the oom reaper started to reap the
91 * address space which is reflected by MMF_UNSTABLE flag set in
92 * the mm. At that moment any !shared mapping would lose the content
93 * and could cause a memory corruption (zero pages instead of the
94 * original content).
95 *
96 * User should call this before establishing a page table entry for
97 * a !shared mapping and under the proper page table lock.
98 *
99 * Return 0 when the PF is safe VM_FAULT_SIGBUS otherwise.
100 */
Souptick Joarder2b740302018-08-23 17:01:36 -0700101static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
Michal Hocko6b31d592017-08-18 15:16:15 -0700102{
103 if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
104 return VM_FAULT_SIGBUS;
105 return 0;
106}
107
Michal Hocko93065ac2018-08-21 21:52:33 -0700108bool __oom_reap_task_mm(struct mm_struct *mm);
David Rientjes27ae3572018-05-11 16:02:04 -0700109
David Rientjesa7f638f2012-05-29 15:06:47 -0700110extern unsigned long oom_badness(struct task_struct *p,
David Rientjesa7f638f2012-05-29 15:06:47 -0700111 unsigned long totalpages);
Michal Hocko5695be12014-10-20 18:12:32 +0200112
David Rientjes6e0fc462015-09-08 15:00:36 -0700113extern bool out_of_memory(struct oom_control *oc);
Johannes Weiner16e95192015-06-24 16:57:07 -0700114
Tetsuo Handa38531202016-10-07 16:59:03 -0700115extern void exit_oom_victim(void);
Johannes Weiner16e95192015-06-24 16:57:07 -0700116
David Rientjes5a3135c22007-10-16 23:25:53 -0700117extern int register_oom_notifier(struct notifier_block *nb);
118extern int unregister_oom_notifier(struct notifier_block *nb);
119
Michal Hocko7d2e7a22016-10-07 16:59:00 -0700120extern bool oom_killer_disable(signed long timeout);
Michal Hockoc32b3cb2015-02-11 15:26:24 -0800121extern void oom_killer_enable(void);
David Rientjes8e4228e2010-08-09 17:18:56 -0700122
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -0700123extern struct task_struct *find_lock_task_mm(struct task_struct *p);
124
David Rientjes8e4228e2010-08-09 17:18:56 -0700125/* sysctls */
126extern int sysctl_oom_dump_tasks;
127extern int sysctl_oom_kill_allocating_task;
128extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */