/* SPDX-License-Identifier: GPL-2.0-or-later */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_VMALLOC,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_OOM_GROUP_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented on every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger periodic events, which is straightforward and cheaper than using
 * jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_NR_STAT];
	unsigned long events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_NR_STAT];
	unsigned long events_pending[NR_VM_EVENT_ITEMS];
};

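/*
 * Illustrative sketch of how state/state_prev/state_pending cooperate
 * during a stats flush (the real logic lives in mm/memcontrol.c; the
 * helper below and its name are made up for illustration). A flush
 * propagates only the delta accumulated since the previous flush:
 *
 *	static void flush_one_stat(struct memcg_vmstats_percpu *pcpu,
 *				   struct memcg_vmstats *stats,
 *				   struct memcg_vmstats *parent, int i)
 *	{
 *		long delta = pcpu->state[i] - pcpu->state_prev[i];
 *
 *		pcpu->state_prev[i] = pcpu->state[i];
 *		stats->state[i] += delta;		// local aggregate
 *		if (parent)				// picked up later by
 *			parent->state_pending[i] += delta; // the parent's flush
 *	}
 */
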
struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
	struct lruvec_stats lruvec_stats;

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct shrinker_info __rcu *shrinker_info;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to the threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

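/*
 * Illustrative sketch of why the spare array makes unregistration
 * infallible (the real code is the unregister path in mm/memcontrol.c):
 * removal never allocates memory. Surviving entries are copied into the
 * preallocated spare array, which is then published in place of the
 * primary one; the old primary becomes the new spare.
 *
 *	old = thresholds->primary;
 *	new = thresholds->spare;
 *	for (i = 0, j = 0; i < old->size; i++) {
 *		if (old->entries[i].eventfd != eventfd)
 *			new->entries[j++] = old->entries[i];
 *	}
 *	new->size = j;
 *	rcu_assign_pointer(thresholds->primary, new);
 *	thresholds->spare = old;	// reused by the next unregister
 */
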
#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember the four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * when it has to kill one of them?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when the task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;	/* list of inherited objcgs */
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to another cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: larger batches may be necessary on very big machines.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After initialization, objcg->memcg always points at a valid memcg,
 * but it can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

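/*
 * Typical lookup pattern (illustrative sketch): hold the RCU read lock
 * across obj_cgroup_memcg() and for as long as the returned memcg is
 * used, so that a concurrent reparenting cannot release it under the
 * caller:
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	// ... use memcg, e.g. read its counters ...
 *	rcu_read_unlock();
 */
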
/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios, ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect the
 * memcg associated with the kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}

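/*
 * Usage sketch (illustrative): take one of the stabilizers listed above
 * before reading the binding, e.g. the folio lock:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	// ... the folio<->memcg binding cannot change here ...
 *	folio_unlock(folio);
 */
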
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some types of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. Unlike page_memcg(), this function can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer, an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect the
 * memcg associated with the kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has the MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * No reclaim protection is applied to a targeted reclaim.
	 * We special-case targeted reclaim here because the
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

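/*
 * Usage sketch (illustrative; the real consumer is the reclaim code in
 * mm/vmscan.c): after mem_cgroup_calculate_protection() (declared below)
 * has computed the effective values, a reclaimer reads them to decide
 * how hard to scan a memcg:
 *
 *	unsigned long min, low;
 *
 *	mem_cgroup_calculate_protection(root, memcg);
 *	mem_cgroup_protection(root, memcg, &min, &low);
 *	if (page_counter_read(&memcg->memory) <= min)
 *		continue;	// fully protected, skip this memcg
 */
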
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}

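/*
 * Usage sketch (illustrative): charge a freshly allocated folio before
 * publishing it, backing out with a put if the charge fails; the folio
 * is uncharged again via mem_cgroup_uncharge() (below) at final release.
 *
 *	folio = folio_alloc(gfp, order);
 *	if (folio && mem_cgroup_charge(folio, mm, gfp)) {
 *		folio_put(folio);	// charge failed, back out
 *		folio = NULL;
 *	}
 */
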
int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->memcg_data being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

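/*
 * Hierarchy walk sketch (illustrative; the canonical pattern is described
 * at mem_cgroup_iter()'s definition in mm/memcontrol.c): visit every memcg
 * in the subtree rooted at @root, using mem_cgroup_iter_break() to drop
 * the reference on the current position when bailing out early.
 * done_early() is a made-up predicate.
 *
 *	struct mem_cgroup *memcg;
 *
 *	for (memcg = mem_cgroup_iter(root, NULL, NULL); memcg;
 *	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
 *		if (done_early(memcg)) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	}
 */
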
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = page_memcg(page);
	if (memcg)
		mod_memcg_state(memcg, idx, val);
	rcu_read_unlock();
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return READ_ONCE(memcg->vmstats.state[idx]);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(pn->lruvec_stats.state[idx]);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

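/*
 * Usage sketch (illustrative): charge paths report events at the point
 * where they occur, e.g. when a charge attempt hits the hard limit:
 *
 *	memcg_memory_event(memcg, MEMCG_MAX);
 *
 * The event then shows up in memory.events along the ancestry and in
 * memory.events.local of @memcg itself.
 */
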
Roman Gushchinfe6bdfc2018-06-14 15:28:05 -07001089static inline void memcg_memory_event_mm(struct mm_struct *mm,
1090 enum memcg_memory_event event)
1091{
1092 struct mem_cgroup *memcg;
1093
1094 if (mem_cgroup_disabled())
1095 return;
1096
1097 rcu_read_lock();
1098 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1099 if (likely(memcg))
1100 memcg_memory_event(memcg, event);
1101 rcu_read_unlock();
1102}
1103
Zhou Guanghuibe6c8982021-03-12 21:08:30 -08001104void split_page_memcg(struct page *head, unsigned int nr);
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08001105
Johannes Weiner2d146aa2021-04-29 22:56:26 -07001106unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1107 gfp_t gfp_mask,
1108 unsigned long *total_scanned);
1109
Andrew Mortonc255a452012-07-31 16:43:02 -07001110#else /* CONFIG_MEMCG */
Johannes Weiner23047a92016-03-15 14:57:16 -07001111
1112#define MEM_CGROUP_ID_SHIFT 0
1113#define MEM_CGROUP_ID_MAX 0
1114
Matthew Wilcox (Oracle)1b7e4462021-06-28 14:59:26 -04001115static inline struct mem_cgroup *folio_memcg(struct folio *folio)
1116{
1117 return NULL;
1118}
1119
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08001120static inline struct mem_cgroup *page_memcg(struct page *page)
1121{
1122 return NULL;
1123}
1124
Matthew Wilcox (Oracle)c5ce6192021-05-04 17:19:13 -04001125static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
Roman Gushchinbcfe06b2020-12-01 13:58:27 -08001126{
1127 WARN_ON_ONCE(!rcu_read_lock_held());
1128 return NULL;
1129}
1130
1131static inline struct mem_cgroup *page_memcg_check(struct page *page)
1132{
1133 return NULL;
1134}
1135
Matthew Wilcox (Oracle)1b7e4462021-06-28 14:59:26 -04001136static inline bool folio_memcg_kmem(struct folio *folio)
1137{
1138 return false;
1139}
1140
Roman Gushchin18b2db32020-12-01 13:58:30 -08001141static inline bool PageMemcgKmem(struct page *page)
1142{
1143 return false;
1144}
1145
Kirill Tkhaidfd2f102018-08-17 15:48:06 -07001146static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
1147{
1148 return true;
1149}
1150
Johannes Weiner23047a92016-03-15 14:57:16 -07001151static inline bool mem_cgroup_disabled(void)
1152{
1153 return true;
1154}
1155
Johannes Weinere27be242018-04-10 16:29:45 -07001156static inline void memcg_memory_event(struct mem_cgroup *memcg,
1157 enum memcg_memory_event event)
Johannes Weiner241994ed2015-02-11 15:26:06 -08001158{
1159}
1160
Roman Gushchinfe6bdfc2018-06-14 15:28:05 -07001161static inline void memcg_memory_event_mm(struct mm_struct *mm,
1162 enum memcg_memory_event event)
1163{
1164}
1165
Johannes Weinerf56ce412021-08-19 19:04:21 -07001166static inline void mem_cgroup_protection(struct mem_cgroup *root,
1167 struct mem_cgroup *memcg,
1168 unsigned long *min,
1169 unsigned long *low)
Chris Down9783aa92019-10-06 17:58:32 -07001170{
Johannes Weinerf56ce412021-08-19 19:04:21 -07001171 *min = *low = 0;
Chris Down9783aa92019-10-06 17:58:32 -07001172}
1173
Chris Down45c7f7e2020-08-06 23:22:05 -07001174static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
1175 struct mem_cgroup *memcg)
Johannes Weiner241994ed2015-02-11 15:26:06 -08001176{
Chris Down45c7f7e2020-08-06 23:22:05 -07001177}
1178
1179static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
1180{
1181 return false;
1182}
1183
1184static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
1185{
1186 return false;
Johannes Weiner241994ed2015-02-11 15:26:06 -08001187}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}
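
/*
 * Example (sketch of the canonical iteration pattern; the precise
 * semantics are documented above mem_cgroup_iter() in mm/memcontrol.c):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		...
 *	}
 *
 * A caller that breaks out of the loop early must call
 * mem_cgroup_iter_break(root, iter) to drop the reference held on the
 * current position.
 */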

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}
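
/*
 * Example (sketch): these helpers key the stat update off the address
 * of a kernel allocation; the workingset code counts its shadow nodes
 * roughly like this:
 *
 *	__inc_lruvec_kmem_state(node, WORKINGSET_NODES);
 *	...
 *	__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
 */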

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}
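
/*
 * Example (sketch, modeled on callers such as lru_note_cost()):
 * walking from a leaf lruvec up to the root of the hierarchy:
 *
 *	for (; lruvec; lruvec = parent_lruvec(lruvec)) {
 *		...
 *	}
 */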

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
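
/*
 * Example (sketch; lruvec_del_folio() stands in for whatever LRU
 * operation the caller actually needs): the folio_lruvec_lock*()
 * helpers pair with the unlock helpers above:
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *
 *	lruvec_del_folio(lruvec, folio);
 *	unlock_page_lruvec_irqrestore(lruvec, flags);
 */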

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
		struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't relock if the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't relock if the folio's lruvec is already locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}
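
/*
 * Example (sketch, modeled on batched callers such as release_pages()):
 * the relock helpers let a loop over many folios keep one lru_lock held
 * for as long as consecutive folios share the same lruvec:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(folio, &folios, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */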

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}
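
/*
 * Example (sketch; in practice this is called from the dirty accounting
 * path, e.g. folio_account_dirtied()): when dirtying a folio against a
 * writeback context:
 *
 *	mem_cgroup_track_foreign_dirty(folio, inode_to_wb(inode));
 */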

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
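
/*
 * Example (sketch, modeled on the TCP memory pressure checks): protocol
 * code combines the static key with the per-socket memcg pointer:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return true;
 */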

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}
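
/*
 * Example (sketch of the usual pairing; the page allocator does this
 * internally for __GFP_ACCOUNT allocations, error handling trimmed):
 *
 *	struct page *page = alloc_pages(gfp, order);
 *
 *	if (page && memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge_page(page, order);
 *	__free_pages(page, order);
 */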

/*
 * A helper for accessing memcg's kmemcg_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}
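
/*
 * Example (sketch; per_memcg_slots is a hypothetical array): callers
 * use the id to index per-memcg structures, with -1 meaning "no memcg":
 *
 *	int idx = memcg_cache_id(memcg);
 *
 *	if (idx >= 0)
 *		slot = &per_memcg_slots[idx];
 */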

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */