// SPDX-License-Identifier: GPL-2.0-or-later
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/vm_event_item.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include <linux/seq_buf.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char *const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET	128
#define SOFTLIMIT_EVENTS_TARGET		1024
#define NUMAINFO_EVENTS_TARGET		1024

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on the eventfd to send a notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or when the cgroup is removed. This callback must be
	 * set if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below needed to unregister event when
	 * userspace closes eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t lock; /* for from, to */
	struct mm_struct *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
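
/*
 * Illustrative sketch, not used at runtime: how cft->private packs a
 * res_type and an attribute into one value.  The attribute value 3
 * below is hypothetical:
 *
 *	unsigned long priv = MEMFILE_PRIVATE(_MEMSWAP, 3);
 *	// priv == (_MEMSWAP << 16) | 3
 *	enum res_type type = MEMFILE_TYPE(priv);	// == _MEMSWAP
 *	int attr = MEMFILE_ATTR(priv);			// == 3
 */
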
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting (see the sketch below).
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

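/*
 * Illustrative sketch, not a real call site: a walk that bails out of
 * the loop early must drop the reference held on the current position,
 * e.g. with a hypothetical stop_here() predicate:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (stop_here(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */
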
static inline bool should_force_charge(void)
{
	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
		(current->flags & PF_EXITING);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this is that the ida
 * works better in sparse environments, where we have a lot of memcgs but
 * only a few of them are kmem-limited: if, for instance, we have 200
 * memcgs and none but the 200th is kmem-limited, a cgroup-id-indexed
 * array would need 200 entries anyway.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

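/*
 * Illustrative sketch, not a real call site: code that walks a
 * per-memcg cache array brackets the walk with these helpers so that
 * memcg_nr_cache_ids cannot be resized underneath it:
 *
 *	int i;
 *
 *	memcg_get_cache_ids();
 *	for (i = 0; i < memcg_nr_cache_ids; i++)
 *		;	// inspect entry i; the id space is stable here
 *	memcg_put_cache_ids();
 */
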
/*
 * MIN_SIZE is different from 1, because we would like to avoid going through
 * the alloc/free process all the time. On a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't necessarily have to
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE	4
#define MEMCG_CACHES_MAX_SIZE	MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional on this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;

static int memcg_shrinker_map_size;
static DEFINE_MUTEX(memcg_shrinker_map_mutex);

static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
{
	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
}

static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
					 int size, int old_size)
{
	struct memcg_shrinker_map *new, *old;
	int nid;

	lockdep_assert_held(&memcg_shrinker_map_mutex);

	for_each_node(nid) {
		old = rcu_dereference_protected(
			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
		/* Not yet online memcg */
		if (!old)
			return 0;

		new = kvmalloc(sizeof(*new) + size, GFP_KERNEL);
		if (!new)
			return -ENOMEM;

		/* Set all old bits, clear all new bits */
		memset(new->map, (int)0xff, old_size);
		memset((void *)new->map + old_size, 0, size - old_size);

		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
	}

	return 0;
}

static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *pn;
	struct memcg_shrinker_map *map;
	int nid;

	if (mem_cgroup_is_root(memcg))
		return;

	for_each_node(nid) {
		pn = mem_cgroup_nodeinfo(memcg, nid);
		map = rcu_dereference_protected(pn->shrinker_map, true);
		if (map)
			kvfree(map);
		rcu_assign_pointer(pn->shrinker_map, NULL);
	}
}

static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	struct memcg_shrinker_map *map;
	int nid, size, ret = 0;

	if (mem_cgroup_is_root(memcg))
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	size = memcg_shrinker_map_size;
	for_each_node(nid) {
		map = kvzalloc(sizeof(*map) + size, GFP_KERNEL);
		if (!map) {
			memcg_free_shrinker_maps(memcg);
			ret = -ENOMEM;
			break;
		}
		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
	}
	mutex_unlock(&memcg_shrinker_map_mutex);

	return ret;
}

int memcg_expand_shrinker_maps(int new_id)
{
	int size, old_size, ret = 0;
	struct mem_cgroup *memcg;

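	/*
	 * Map size in bytes, rounded up to whole longs: e.g. with
	 * BITS_PER_LONG == 64, new_id == 100 needs DIV_ROUND_UP(101, 64)
	 * == 2 longs, i.e. 16 bytes.
	 */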
	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
	old_size = memcg_shrinker_map_size;
	if (size <= old_size)
		return 0;

	mutex_lock(&memcg_shrinker_map_mutex);
	if (!root_mem_cgroup)
		goto unlock;

	for_each_mem_cgroup(memcg) {
		if (mem_cgroup_is_root(memcg))
			continue;
		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
		if (ret)
			goto unlock;
	}
unlock:
	if (!ret)
		memcg_shrinker_map_size = size;
	mutex_unlock(&memcg_shrinker_map_mutex);
	return ret;
}

void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
		struct memcg_shrinker_map *map;

		rcu_read_lock();
		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
		/* Pairs with smp_mb() in shrink_slab() */
		smp_mb__before_atomic();
		set_bit(shrinker_id, map->map);
		rcu_read_unlock();
	}
}

#else /* CONFIG_MEMCG_KMEM */
static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
{
	return 0;
}
static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) { }
#endif /* CONFIG_MEMCG_KMEM */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned. The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it should only be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

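/*
 * Illustrative sketch, not a real call site: a procfs-style reader can
 * map a page to a cgroup inode number without pinning anything; a
 * result of 0 means "not charged to any cgroup":
 *
 *	ino_t ino = page_cgroup_ino(page);
 *	if (ino)
 *		;	// e.g. report ino via seq_printf()
 */
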
static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		}

		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/**
 * __mod_memcg_state - update cgroup memory statistics
 * @memcg: the memory cgroup
 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
 * @val: delta to add to the counter, can be negative
 */
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	__this_cpu_add(memcg->vmstats_local->stat[idx], val);

	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup *mi;

		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmstats[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
}

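/*
 * Illustrative sketch, not a real call site: charging one anonymous
 * page from a context where the per-cpu counters are safe to update
 * (the __-prefixed helpers assume that, e.g. interrupts disabled).
 * The delta stays in the per-cpu counter until it exceeds
 * MEMCG_CHARGE_BATCH, at which point it is folded into the atomic
 * counters of the memcg and all its ancestors:
 *
 *	__mod_memcg_state(memcg, MEMCG_RSS, 1);		// page mapped
 *	__mod_memcg_state(memcg, MEMCG_RSS, -1);	// page unmapped
 */
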
static struct mem_cgroup_per_node *
parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
{
	struct mem_cgroup *parent;

	parent = parent_mem_cgroup(pn->memcg);
	if (!parent)
		return NULL;
	return mem_cgroup_nodeinfo(parent, nid);
}

/**
 * __mod_lruvec_state - update lruvec memory statistics
 * @lruvec: the lruvec
 * @idx: the stat item
 * @val: delta to add to the counter, can be negative
 *
 * The lruvec is the intersection of the NUMA node and a cgroup. This
 * function updates all three counters that are affected by a
 * change of state at this level: per-node, per-cgroup, per-lruvec.
 */
729void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
730 int val)
731{
Johannes Weiner42a30032019-05-14 15:47:12 -0700732 pg_data_t *pgdat = lruvec_pgdat(lruvec);
Johannes Weinerdb9adbc2019-05-14 15:47:09 -0700733 struct mem_cgroup_per_node *pn;
Johannes Weiner42a30032019-05-14 15:47:12 -0700734 struct mem_cgroup *memcg;
Johannes Weinerdb9adbc2019-05-14 15:47:09 -0700735 long x;
736
737 /* Update node */
Johannes Weiner42a30032019-05-14 15:47:12 -0700738 __mod_node_page_state(pgdat, idx, val);
Johannes Weinerdb9adbc2019-05-14 15:47:09 -0700739
740 if (mem_cgroup_disabled())
741 return;
742
743 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
Johannes Weiner42a30032019-05-14 15:47:12 -0700744 memcg = pn->memcg;
Johannes Weinerdb9adbc2019-05-14 15:47:09 -0700745
746 /* Update memcg */
Johannes Weiner42a30032019-05-14 15:47:12 -0700747 __mod_memcg_state(memcg, idx, val);
Johannes Weinerdb9adbc2019-05-14 15:47:09 -0700748
749 /* Update lruvec */
Johannes Weiner815744d2019-06-13 15:55:46 -0700750 __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
751
Johannes Weinerdb9adbc2019-05-14 15:47:09 -0700752 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
753 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
Johannes Weiner42a30032019-05-14 15:47:12 -0700754 struct mem_cgroup_per_node *pi;
755
Johannes Weiner42a30032019-05-14 15:47:12 -0700756 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
757 atomic_long_add(x, &pi->lruvec_stat[idx]);
Johannes Weinerdb9adbc2019-05-14 15:47:09 -0700758 x = 0;
759 }
760 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
761}
762
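/*
 * Illustrative sketch, not a real call site: accounting one newly
 * mapped file page against its lruvec moves the node, memcg and
 * lruvec counters by +1 in a single call:
 *
 *	__mod_lruvec_state(lruvec, NR_FILE_MAPPED, 1);
 */
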
/**
 * __count_memcg_events - account VM events in a cgroup
 * @memcg: the memory cgroup
 * @idx: the event item
 * @count: the number of events that occurred
 */
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	__this_cpu_add(memcg->vmstats_local->events[idx], count);

	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		struct mem_cgroup *mi;

		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
			atomic_long_add(x, &mi->vmevents[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
}

static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
	return atomic_long_read(&memcg->vmevents[event]);
}

static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->events[event], cpu);
	return x;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
	else {
		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
		if (PageSwapBacked(page))
			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
	}

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
		return true;
	}
	return false;
}

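/*
 * Illustrative arithmetic for the wraparound-safe comparison in
 * mem_cgroup_event_ratelimit() above: with 32-bit longs,
 * val == 0x80000005 and next == 0x80000001 give
 * (long)(next - val) == (long)0xfffffffc == -4 < 0, so the target is
 * considered reached even though both values have wrapped.
 */
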
/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

/**
 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
 * @mm: mm from which memcg should be extracted. It can be NULL.
 *
 * Obtains a reference on mm->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned. However, if mem_cgroup is disabled, NULL is
 * returned.
 */
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_mm);

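/*
 * Illustrative sketch, not a real call site: the reference obtained
 * here is paired with css_put() when the caller is done:
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);
 *	if (memcg) {
 *		;	// operate on memcg
 *		css_put(&memcg->css);
 *	}
 */
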
/**
 * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
 * @page: page from which memcg should be extracted.
 *
 * Obtains a reference on page->memcg and returns it if successful. Otherwise
 * root_mem_cgroup is returned.
 */
struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (mem_cgroup_disabled())
		return NULL;

	rcu_read_lock();
	if (!memcg || !css_tryget_online(&memcg->css))
		memcg = root_mem_cgroup;
	rcu_read_unlock();
	return memcg;
}
EXPORT_SYMBOL(get_mem_cgroup_from_page);

/**
 * If current->active_memcg is non-NULL, do not fall back to current->mm->memcg.
 */
static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
{
	if (unlikely(current->active_memcg)) {
		struct mem_cgroup *memcg = root_mem_cgroup;

		rcu_read_lock();
		if (css_tryget_online(&current->active_memcg->css))
			memcg = current->active_memcg;
		rcu_read_unlock();
		return memcg;
	}
	return get_mem_cgroup_from_mm(current->mm);
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same node and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, 0, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

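/*
 * Illustrative sketch, not a real call site: counting the tasks in a
 * hierarchy with a hypothetical callback; returning non-zero from the
 * callback would abort the walk early:
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr);
 */
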
/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
Mel Gorman599d0c92016-07-28 15:45:31 -07001215 if (unlikely(lruvec->pgdat != pgdat))
1216 lruvec->pgdat = pgdat;
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001217 return lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001218}
1219
/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per node lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

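/*
 * Ordering illustration (hypothetical caller, for illustration only):
 * account before adding, and after removing, so that the WARN_ONCE
 * check above always sees a size consistent with the list contents.
 *
 *	mem_cgroup_update_lru_size(lruvec, lru, zid, nr);
 *	list_add(&page->lru, &lruvec->lists[lru]);
 *
 *	list_del(&page->lru);
 *	mem_cgroup_update_lru_size(lruvec, lru, zid, -nr);
 */
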
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.max);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.max);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

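/*
 * Worked example (hypothetical numbers): with memory.max = 1000 pages
 * and usage = 800, the memory margin is 200; if memsw.max = 1100 with
 * memsw usage = 1050, the swap-inclusive margin is only 50, so the
 * function returns min(200, 50) = 50 chargeable pages.
 */
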
/*
 * A routine for checking whether @memcg is under move_account() or not.
 *
 * It checks whether a cgroup is mc.from, mc.to, or in the hierarchy of
 * a moving cgroup. This is used for waiting out the high memory
 * pressure caused by a "move".
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

static char *memory_stat_format(struct mem_cgroup *memcg)
{
	struct seq_buf s;
	int i;

	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!s.buffer)
		return NULL;

	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */

	seq_buf_printf(&s, "anon %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_RSS) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_CACHE) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "kernel_stack %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) *
		       1024);
	seq_buf_printf(&s, "slab %llu\n",
		       (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
			     memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "sock %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_SOCK) *
		       PAGE_SIZE);

	seq_buf_printf(&s, "shmem %llu\n",
		       (u64)memcg_page_state(memcg, NR_SHMEM) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file_mapped %llu\n",
		       (u64)memcg_page_state(memcg, NR_FILE_MAPPED) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file_dirty %llu\n",
		       (u64)memcg_page_state(memcg, NR_FILE_DIRTY) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "file_writeback %llu\n",
		       (u64)memcg_page_state(memcg, NR_WRITEBACK) *
		       PAGE_SIZE);

	/*
	 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
	 * with the NR_ANON_THP vm counter, but right now it's a pain in the
	 * arse because it requires migrating the work out of rmap to a place
	 * where the page->mem_cgroup is set up and stable.
	 */
	seq_buf_printf(&s, "anon_thp %llu\n",
		       (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) *
		       PAGE_SIZE);

	for (i = 0; i < NR_LRU_LISTS; i++)
		seq_buf_printf(&s, "%s %llu\n", mem_cgroup_lru_names[i],
			       (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
			       PAGE_SIZE);

	seq_buf_printf(&s, "slab_reclaimable %llu\n",
		       (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
		       PAGE_SIZE);
	seq_buf_printf(&s, "slab_unreclaimable %llu\n",
		       (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
		       PAGE_SIZE);

	/* Accumulated memory events */

	seq_buf_printf(&s, "pgfault %lu\n", memcg_events(memcg, PGFAULT));
	seq_buf_printf(&s, "pgmajfault %lu\n", memcg_events(memcg, PGMAJFAULT));

	seq_buf_printf(&s, "workingset_refault %lu\n",
		       memcg_page_state(memcg, WORKINGSET_REFAULT));
	seq_buf_printf(&s, "workingset_activate %lu\n",
		       memcg_page_state(memcg, WORKINGSET_ACTIVATE));
	seq_buf_printf(&s, "workingset_nodereclaim %lu\n",
		       memcg_page_state(memcg, WORKINGSET_NODERECLAIM));

	seq_buf_printf(&s, "pgrefill %lu\n", memcg_events(memcg, PGREFILL));
	seq_buf_printf(&s, "pgscan %lu\n",
		       memcg_events(memcg, PGSCAN_KSWAPD) +
		       memcg_events(memcg, PGSCAN_DIRECT));
	seq_buf_printf(&s, "pgsteal %lu\n",
		       memcg_events(memcg, PGSTEAL_KSWAPD) +
		       memcg_events(memcg, PGSTEAL_DIRECT));
	seq_buf_printf(&s, "pgactivate %lu\n", memcg_events(memcg, PGACTIVATE));
	seq_buf_printf(&s, "pgdeactivate %lu\n", memcg_events(memcg, PGDEACTIVATE));
	seq_buf_printf(&s, "pglazyfree %lu\n", memcg_events(memcg, PGLAZYFREE));
	seq_buf_printf(&s, "pglazyfreed %lu\n", memcg_events(memcg, PGLAZYFREED));

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	seq_buf_printf(&s, "thp_fault_alloc %lu\n",
		       memcg_events(memcg, THP_FAULT_ALLOC));
	seq_buf_printf(&s, "thp_collapse_alloc %lu\n",
		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/* The above should easily fit into one page */
	WARN_ON_ONCE(seq_buf_has_overflowed(&s));

	return s.buffer;
}

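/*
 * The buffer built above is what cgroup2 exposes through memory.stat;
 * a trimmed, illustrative sample (values are made up):
 *
 *	anon 8986624
 *	file 349175808
 *	kernel_stack 73728
 *	slab 4206592
 *	...
 *	pgfault 103216
 *	pgmajfault 23
 */
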
#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_context: Print OOM information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
	rcu_read_lock();

	if (memcg) {
		pr_cont(",oom_memcg=");
		pr_cont_cgroup_path(memcg->css.cgroup);
	} else
		pr_cont(",global_oom");
	if (p) {
		pr_cont(",task_memcg=");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
 * memory controller.
 * @memcg: The memory cgroup that went over limit
 */
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
	char *buf;

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.max), memcg->memory.failcnt);
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->swap)),
			K((u64)memcg->swap.max), memcg->swap.failcnt);
	else {
		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->memsw)),
			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
			K((u64)page_counter_read(&memcg->kmem)),
			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
	}

	pr_info("Memory cgroup stats for ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(":");
	buf = memory_stat_format(memcg);
	if (!buf)
		return;
	pr_info("%s", buf);
	kfree(buf);
}

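/*
 * Illustrative dmesg output from the pr_info() calls above, for a
 * cgroup2 OOM (all numbers hypothetical):
 *
 *	memory: usage 1024000kB, limit 1024000kB, failcnt 75
 *	swap: usage 0kB, limit 9007199254740988kB, failcnt 0
 *	Memory cgroup stats for /workload: anon 1040384000 file 0 ...
 */
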
/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	unsigned long max;

	max = memcg->memory.max;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_max;
		unsigned long swap_max;

		memsw_max = memcg->memsw.max;
		swap_max = memcg->swap.max;
		swap_max = min(swap_max, (unsigned long)total_swap_pages);
		max = min(max + swap_max, memsw_max);
	}
	return max;
}

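/*
 * Worked example (hypothetical values, in pages): memory.max = 1000,
 * swap.max = unlimited, total_swap_pages = 500 and memsw.max = 1200
 * with nonzero swappiness give min(1000 + min(unlimited, 500), 1200)
 * = 1200. With swappiness == 0, the result is just memory.max = 1000.
 */
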
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	if (mutex_lock_killable(&oom_lock))
		return true;
	/*
	 * A few threads which were not waiting at mutex_lock_killable() can
	 * fail to bail out, so check again after acquiring oom_lock.
	 */
	ret = should_force_charge() || out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file-only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node. Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);

	if (lruvec_page_state(lruvec, NR_INACTIVE_FILE) ||
	    lruvec_page_state(lruvec, NR_ACTIVE_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (lruvec_page_state(lruvec, NR_INACTIVE_ANON) ||
	    lruvec_page_state(lruvec, NR_ACTIVE_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist. So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
	 * pagein/pageout changes since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Selecting a node where we start reclaim from. Because what we need is just
 * reducing the usage counter, starting from anywhere is OK. Considering
 * memory reclaim from the current node, there are pros and cons.
 *
 * Freeing memory from the current node means freeing memory from a node which
 * we'll use or we've used. So, it may make LRU bad. And if several threads
 * hit limits, it will see a contention on a node. But freeing from a remote
 * node means more costs for memory reclaim because of memory latency.
 *
 * Now, we use round-robin. A better algorithm is welcome.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
	 * last time it really checked all the LRUs due to rate limiting.
	 * Fallback to the current node in that case for simplicity.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

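/*
 * Round-robin illustration (hypothetical): with scan_nodes = {0,2,3}
 * and last_scanned_node = 2, successive calls return 3, then 0, then 2,
 * cycling through the nodes that still have reclaimable pages.
 */
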
static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive, so we
				 * don't reclaim too much, nor so little that
				 * we keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
					pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

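/*
 * Termination arithmetic (hypothetical numbers): if the root memcg is
 * 400 pages over its soft limit, the loop above stops once at least
 * excess >> 2 = 100 pages have been reclaimed after two full hierarchy
 * passes, or after MEM_CGROUP_MAX_RECLAIM_LOOPS passes in total.
 */
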
#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone else is running it, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give out a new lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to undo what we set up, up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * When a new child is created while the hierarchy is under oom,
	 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_entry_t wait;
};

static int memcg_oom_wake_function(wait_queue_entry_t *wait,
	unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM. This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

enum oom_status {
	OOM_SUCCESS,
	OOM_FAILED,
	OOM_ASYNC,
	OOM_SKIPPED
};

static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	enum oom_status ret;
	bool locked;

	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return OOM_SKIPPED;

	memcg_memory_event(memcg, MEMCG_OOM);

	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * cgroup1 allows disabling the OOM killer and waiting for outside
	 * handling until the charge can succeed; remember the context and put
	 * the task to sleep at the end of the page fault when all locks are
	 * released.
	 *
	 * On the other hand, the in-kernel OOM killer allows for async victim
	 * memory reclaim (oom_reaper), which means that we are not solely
	 * relying on the oom victim to make forward progress and we can
	 * invoke the oom killer here.
	 *
	 * Please note that mem_cgroup_out_of_memory might fail to find a
	 * victim and then we have to bail out from the charge path.
	 */
	if (memcg->oom_kill_disable) {
		if (!current->in_user_fault)
			return OOM_SKIPPED;
		css_get(&memcg->css);
		current->memcg_in_oom = memcg;
		current->memcg_oom_gfp_mask = mask;
		current->memcg_oom_order = order;

		return OOM_ASYNC;
	}

	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	mem_cgroup_unmark_under_oom(memcg);
	if (mem_cgroup_out_of_memory(memcg, mask, order))
		ret = OOM_SUCCESS;
	else
		ret = OOM_FAILED;

	if (locked)
		mem_cgroup_oom_unlock(memcg);

	return ret;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation. Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.entry);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges. Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

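/*
 * Call-site sketch (hypothetical; the real hooks live in the page
 * fault exit paths): once the fault has unwound its locks, a task
 * that recorded an OOM context in mem_cgroup_oom() finishes it here.
 *
 *	if (task_in_memcg_oom(current))
 *		mem_cgroup_oom_synchronize(fault_ret & VM_FAULT_OOM);
 */
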
/**
 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
 * @victim: task to be killed by the OOM killer
 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
 *
 * Returns a pointer to a memory cgroup, which has to be cleaned up
 * by killing all belonging OOM-killable tasks.
 *
 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
 */
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain)
{
	struct mem_cgroup *oom_group = NULL;
	struct mem_cgroup *memcg;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return NULL;

	if (!oom_domain)
		oom_domain = root_mem_cgroup;

	rcu_read_lock();

	memcg = mem_cgroup_from_task(victim);
	if (memcg == root_mem_cgroup)
		goto out;

	/*
	 * Traverse the memory cgroup hierarchy from the victim task's
	 * cgroup up to the OOMing cgroup (or root) to find the
	 * highest-level memory cgroup with oom.group set.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		if (memcg->oom_group)
			oom_group = memcg;

		if (memcg == oom_domain)
			break;
	}

	if (oom_group)
		css_get(&oom_group->css);
out:
	rcu_read_unlock();

	return oom_group;
}

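/*
 * Worked example (hypothetical hierarchy A/B/C): with memory.oom.group
 * set on A, the victim task in C, and an OOM domain of A, the walk
 * above visits C, then B, then A, and returns A - so the whole workload
 * under A is treated as an indivisible unit and killed together.
 */
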
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
	pr_info("Tasks in ");
	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont(" are going to be killed due to memory.oom.group set\n");
}

/**
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
 *
 * This function protects unlocked LRU pages from being moved to
 * another cgroup.
 *
 * It ensures lifetime of the returned memcg. Caller is responsible
 * for the lifetime of the page; __unlock_page_memcg() is available
 * when @page might get freed inside the locked section.
 */
struct mem_cgroup *lock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction. The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 *
	 * The RCU lock also protects the memcg from being freed when
	 * the page state that is going to change is the only thing
	 * preventing the page itself from being freed. E.g. writeback
	 * doesn't hold a page reference and relies on PG_writeback to
	 * keep off truncation, migration and so forth.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return NULL;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return NULL;

	if (atomic_read(&memcg->moving_account) <= 0)
		return memcg;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently. Track
	 * the task who has the lock for unlock_page_memcg().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return memcg;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * __unlock_page_memcg - unlock and unpin a memcg
 * @memcg: the memcg
 *
 * Unlock and unpin a memcg returned by lock_page_memcg().
 */
void __unlock_page_memcg(struct mem_cgroup *memcg)
{
	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	__unlock_page_memcg(page->mem_cgroup);
}
EXPORT_SYMBOL(unlock_page_memcg);

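/*
 * Usage sketch (hypothetical caller; cancel_dirty_page() does something
 * similar): the page flag and the matching memcg/lruvec counter are
 * updated as one unit with respect to charge migration.
 *
 *	lock_page_memcg(page);
 *	if (TestClearPageDirty(page))
 *		dec_lruvec_page_state(page, NR_FILE_DIRTY);
 *	unlock_page_memcg(page);
 */
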
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);

/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock. Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > MEMCG_CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

/*
 * Return the stocked charges cached in percpu to the counters and reset
 * the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	/*
	 * The only protection from memory hotplug vs. drain_stock races is
	 * that we always operate on local CPU stock here with IRQ disabled
	 */
	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

/*
 * Cache charges (nr_pages) to the local per-cpu area.
 * This will be consumed by the consume_stock() function, later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
		drain_stock(stock);

	local_irq_restore(flags);
}

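/*
 * Fast-path illustration (hypothetical sequence): when try_charge()
 * charges a MEMCG_CHARGE_BATCH batch on behalf of a single-page
 * request, the surplus is parked here via refill_stock(); the next
 * consume_stock() on this CPU then succeeds without touching the
 * shared page counters at all.
 */
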
/*
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
 * of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/*
	 * Notify other cpus that system-wide "drain" is running
	 * We do not care about races with the cpu hotplug because cpu down
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
			css_put(&memcg->css);
			continue;
		}
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
		css_put(&memcg->css);
	}
	put_cpu();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *memcg, *mi;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);

	for_each_mem_cgroup(memcg) {
		int i;

		for (i = 0; i < MEMCG_NR_STAT; i++) {
			int nid;
			long x;

			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
			if (x)
				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
					atomic_long_add(x, &mi->vmstats[i]);

			if (i >= NR_VM_NODE_STAT_ITEMS)
				continue;

			for_each_node(nid) {
				struct mem_cgroup_per_node *pn;

				pn = mem_cgroup_nodeinfo(memcg, nid);
				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
				if (x)
					do {
						atomic_long_add(x, &pn->lruvec_stat[i]);
					} while ((pn = parent_nodeinfo(pn, nid)));
			}
		}

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
			long x;

			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
			if (x)
				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
					atomic_long_add(x, &mi->vmevents[i]);
		}
	}

	return 0;
}

static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		memcg_memory_event(memcg, MEMCG_HIGH);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;
	enum oom_status oom_status;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage. Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(should_force_charge()))
		goto force;

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory. This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	memcg_memory_event(mem_over_limit, MEMCG_MAX);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages. Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * During task move, charge accounts can be doubly counted, so it's
	 * better to wait until the end of task_move if one is in progress.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_RETRY_MAYFAIL)
		goto nomem;

	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	/*
	 * keep retrying as long as the memcg oom killer is able to make
	 * a forward progress or bypass the charge if the oom killer
2461 * couldn't make any progress.
2462 */
2463 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
Jerome Marchand3608de02015-11-05 18:47:29 -08002464 get_order(nr_pages * PAGE_SIZE));
Michal Hocko29ef6802018-08-17 15:47:11 -07002465 switch (oom_status) {
2466 case OOM_SUCCESS:
2467 nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Michal Hocko29ef6802018-08-17 15:47:11 -07002468 goto retry;
2469 case OOM_FAILED:
2470 goto force;
2471 default:
2472 goto nomem;
2473 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002474nomem:
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002475 if (!(gfp_mask & __GFP_NOFAIL))
Johannes Weiner3168ecb2013-10-31 16:34:13 -07002476 return -ENOMEM;
Tejun Heo10d53c72015-11-05 18:46:17 -08002477force:
2478 /*
2479 * The allocation either can't fail or will lead to more memory
2480 * being freed very soon. Allow memory usage go over the limit
2481 * temporarily by force charging it.
2482 */
2483 page_counter_charge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002484 if (do_memsw_account())
Tejun Heo10d53c72015-11-05 18:46:17 -08002485 page_counter_charge(&memcg->memsw, nr_pages);
2486 css_get_many(&memcg->css, nr_pages);
2487
2488 return 0;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002489
2490done_restock:
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002491 css_get_many(&memcg->css, batch);
Johannes Weiner6539cc02014-08-06 16:05:42 -07002492 if (batch > nr_pages)
2493 refill_stock(memcg, batch - nr_pages);
Tejun Heob23afb92015-11-05 18:46:11 -08002494
Johannes Weiner241994ed2015-02-11 15:26:06 -08002495 /*
Tejun Heob23afb92015-11-05 18:46:11 -08002496 * If the hierarchy is above the normal consumption range, schedule
2497 * reclaim on returning to userland. We can perform reclaim here
Mel Gorman71baba42015-11-06 16:28:28 -08002498 * if __GFP_RECLAIM but let's always punt for simplicity and so that
Tejun Heob23afb92015-11-05 18:46:11 -08002499 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2500 * not recorded as it most likely matches current's and won't
2501 * change in the meantime. As high limit is checked again before
2502 * reclaim, the cost of mismatch is negligible.
Johannes Weiner241994ed2015-02-11 15:26:06 -08002503 */
2504 do {
Tejun Heob23afb92015-11-05 18:46:11 -08002505 if (page_counter_read(&memcg->memory) > memcg->high) {
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08002506 /* Don't bother a random interrupted task */
2507 if (in_interrupt()) {
2508 schedule_work(&memcg->high_work);
2509 break;
2510 }
Vladimir Davydov9516a182015-12-11 13:40:24 -08002511 current->memcg_nr_pages_over_high += batch;
Tejun Heob23afb92015-11-05 18:46:11 -08002512 set_notify_resume(current);
2513 break;
2514 }
Johannes Weiner241994ed2015-02-11 15:26:06 -08002515 } while ((memcg = parent_mem_cgroup(memcg)));
Tejun Heo10d53c72015-11-05 18:46:17 -08002516
2517 return 0;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002518}
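
/*
 * Note on batching: try_charge() charges MEMCG_CHARGE_BATCH pages at a
 * time and parks the surplus in the per-cpu stock (refill_stock()), so
 * that subsequent small charges are satisfied by consume_stock()
 * without touching the shared page counters at all; it only falls back
 * to charging the exact nr_pages when the bigger batch fails.
 */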

static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}

static void lock_page_lru(struct page *page, int *isolated)
{
	pg_data_t *pgdat = page_pgdat(page);

	spin_lock_irq(&pgdat->lru_lock);
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

static void unlock_page_lru(struct page *page, int isolated)
{
	pg_data_t *pgdat = page_pgdat(page);

	if (isolated) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(&pgdat->lru_lock);
}

static void commit_charge(struct page *page, struct mem_cgroup *memcg,
			  bool lrucare)
{
	int isolated;

	VM_BUG_ON_PAGE(page->mem_cgroup, page);

	/*
	 * In some cases (e.g. SwapCache and FUSE's splice_buf->radixtree),
	 * the page may already be on some other mem_cgroup's LRU. Take
	 * care of it.
	 */
	if (lrucare)
		lock_page_lru(page, &isolated);

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point:
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
	page->mem_cgroup = memcg;

	if (lrucare)
		unlock_page_lru(page, isolated);
}
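
/*
 * commit_charge() is the second half of the two-phase charging scheme.
 * A typical caller looks roughly like this (a sketch only; the page is
 * published after the charge can no longer fail):
 *
 *	if (try_charge(memcg, gfp_mask, nr_pages))
 *		return -ENOMEM;
 *	if (cannot_finish_setup()) {
 *		cancel_charge(memcg, nr_pages);
 *		return -EBUSY;
 *	}
 *	commit_charge(page, memcg, false);
 *
 * where cannot_finish_setup() stands in for whatever failure point the
 * caller has between charging and publishing the page, and @lrucare is
 * true only when the page may already sit on another cgroup's LRU.
 */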

#ifdef CONFIG_MEMCG_KMEM
static int memcg_alloc_cache_id(void)
{
	int id, size;
	int err;

	id = ida_simple_get(&memcg_cache_ida,
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;

	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
	down_write(&memcg_cache_ids_sem);

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	err = memcg_update_all_caches(size);
	if (!err)
		err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

	if (err) {
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
	ida_simple_remove(&memcg_cache_ida, id);
}
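
/*
 * Since the id space grows by doubling (size = 2 * (id + 1) above,
 * clamped to MEMCG_CACHES_MAX_SIZE), the expensive
 * memcg_update_all_caches()/memcg_update_all_list_lrus() resizes run
 * at most O(log(MEMCG_CACHES_MAX_SIZE)) times over the system's
 * lifetime, no matter how many cgroups come and go.
 */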

struct memcg_kmem_cache_create_work {
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

static void memcg_kmem_cache_create_func(struct work_struct *w)
{
	struct memcg_kmem_cache_create_work *cw =
		container_of(w, struct memcg_kmem_cache_create_work, work);
	struct mem_cgroup *memcg = cw->memcg;
	struct kmem_cache *cachep = cw->cachep;

	memcg_create_kmem_cache(memcg, cachep);

	css_put(&memcg->css);
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					     struct kmem_cache *cachep)
{
	struct memcg_kmem_cache_create_work *cw;

	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
	if (!cw)
		return;

	css_get(&memcg->css);

	cw->memcg = memcg;
	cw->cachep = cachep;
	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);

	queue_work(memcg_kmem_cache_wq, &cw->work);
}

static inline bool memcg_kmem_bypass(void)
{
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;
	return false;
}

/**
 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 *
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet, and we are the first user of it, we
 * create it asynchronously in a workqueue and let the current allocation
 * go through with the original cache.
 *
 * This function takes a reference to the cache it returns to assure it
 * won't get destroyed while we are working with it. Once the caller is
 * done with it, memcg_kmem_put_cache() must be called to release the
 * reference.
 */
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
{
	struct mem_cgroup *memcg;
	struct kmem_cache *memcg_cachep;
	int kmemcg_id;

	VM_BUG_ON(!is_root_cache(cachep));

	if (memcg_kmem_bypass())
		return cachep;

	memcg = get_mem_cgroup_from_current();
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
	if (kmemcg_id < 0)
		goto out;

	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
	if (likely(memcg_cachep))
		return memcg_cachep;

	/*
	 * If we are in a safe context (can wait, and not in interrupt
	 * context), we could be predictable and return right away.
	 * This would guarantee that the allocation being performed
	 * already belongs in the new cache.
	 *
	 * However, there are some clashes that can arise from locking.
	 * For instance, because we acquire the slab_mutex while doing
	 * memcg_create_kmem_cache, this means no further allocation
	 * could happen with the slab_mutex held. So it's better to
	 * defer everything.
	 */
	memcg_schedule_kmem_cache_create(memcg, cachep);
out:
	css_put(&memcg->css);
	return cachep;
}

/**
 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
 * @cachep: the cache returned by memcg_kmem_get_cache
 */
void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (!is_root_cache(cachep))
		css_put(&cachep->memcg_params.memcg->css);
}
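
/*
 * A usage sketch for the pair above, as seen from a slab allocation
 * path (assuming memcg_kmem_enabled() has already been checked):
 *
 *	s = memcg_kmem_get_cache(cachep);
 *	obj = kmem_cache_alloc(s, gfp);
 *	memcg_kmem_put_cache(s);
 *
 * When no per-memcg cache exists yet, @s is simply @cachep, the object
 * goes unaccounted this time, and the workqueue creates the per-memcg
 * cache for subsequent allocations.
 */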

/**
 * __memcg_kmem_charge_memcg: charge a kmem page
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 * @memcg: memory cgroup to charge
 *
 * Returns 0 on success, an error code on failure.
 */
int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			      struct mem_cgroup *memcg)
{
	unsigned int nr_pages = 1 << order;
	struct page_counter *counter;
	int ret;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret)
		return ret;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
		cancel_charge(memcg, nr_pages);
		return -ENOMEM;
	}

	page->mem_cgroup = memcg;

	return 0;
}

/**
 * __memcg_kmem_charge: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	struct mem_cgroup *memcg;
	int ret = 0;

	if (memcg_kmem_bypass())
		return 0;

	memcg = get_mem_cgroup_from_current();
	if (!mem_cgroup_is_root(memcg)) {
		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
		if (!ret)
			__SetPageKmemcg(page);
	}
	css_put(&memcg->css);
	return ret;
}
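
/*
 * __memcg_kmem_charge() is reached from the page allocator for
 * __GFP_ACCOUNT allocations; the double-underscore entry points are
 * only called after memcg_kmem_enabled() has been checked, which keeps
 * the cost of the disabled case down to a static branch.
 */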

/**
 * __memcg_kmem_uncharge_memcg: uncharge a kmem page
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void __memcg_kmem_uncharge_memcg(struct mem_cgroup *memcg,
				 unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);
}

/**
 * __memcg_kmem_uncharge: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
void __memcg_kmem_uncharge(struct page *page, int order)
{
	struct mem_cgroup *memcg = page->mem_cgroup;
	unsigned int nr_pages = 1 << order;

	if (!memcg)
		return;

	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
	__memcg_kmem_uncharge_memcg(memcg, nr_pages);
	page->mem_cgroup = NULL;

	/* slab pages do not have PageKmemcg flag set */
	if (PageKmemcg(page))
		__ClearPageKmemcg(page);

	css_put_many(&memcg->css, nr_pages);
}
#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", set them. We're under
 * pgdat->lru_lock and migration entries are set up in all page mappings.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	int i;

	if (mem_cgroup_disabled())
		return;

	for (i = 1; i < HPAGE_PMD_NR; i++)
		head[i].mem_cgroup = head->mem_cgroup;

	__mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
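
/*
 * Note that mem_cgroup_split_huge_fixup() only propagates ownership:
 * the MEMCG_RSS_HUGE adjustment records that these pages no longer
 * count as transparent-huge RSS, while the plain RSS count is
 * unaffected because the number of base pages does not change.
 */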

#ifdef CONFIG_MEMCG_SWAP
/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge()
 * for both memory and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
					struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mod_memcg_state(from, MEMCG_SWAP, -1);
		mod_memcg_state(to, MEMCG_SWAP, 1);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
					struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

static DEFINE_MUTEX(memcg_max_mutex);

static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
				 unsigned long max, bool memsw)
{
	bool enlarge = false;
	bool drained = false;
	int ret;
	bool limits_invariant;
	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_max_mutex);
		/*
		 * Make sure that the new limit (memsw or memory limit) doesn't
		 * break our basic invariant rule memory.max <= memsw.max.
		 */
		limits_invariant = memsw ? max >= memcg->memory.max :
					   max <= memcg->memsw.max;
		if (!limits_invariant) {
			mutex_unlock(&memcg_max_mutex);
			ret = -EINVAL;
			break;
		}
		if (max > counter->max)
			enlarge = true;
		ret = page_counter_set_max(counter, max);
		mutex_unlock(&memcg_max_mutex);

		if (!ret)
			break;

		if (!drained) {
			drain_all_stock(memcg);
			drained = true;
			continue;
		}

		if (!try_to_free_mem_cgroup_pages(memcg, 1,
						  GFP_KERNEL, !memsw)) {
			ret = -EBUSY;
			break;
		}
	} while (true);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}
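
/*
 * Shrinking below current usage works by iteration:
 * page_counter_set_max() refuses to go below the current usage, so the
 * loop above alternates between reclaiming a little and retrying the
 * counter update until the new limit fits, the user sends a signal, or
 * reclaim stops making progress (-EBUSY).
 */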

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_node *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_node *mctz;
	unsigned long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node(pgdat->node_id);

	/*
	 * Do not even bother to check the largest node if the root
	 * is empty. Do it lockless to prevent lock bouncing. Races
	 * are acceptable as soft limit is best effort anyway.
	 */
	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
		return 0;

	/*
	 * This loop can run a while, especially if mem_cgroups continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure.
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
		spin_lock_irq(&mctz->lock);
		__mem_cgroup_remove_exceeded(mz, mctz);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

		excess = soft_limit_excess(mz->memcg);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
		spin_unlock_irq(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
		    (next_mz == NULL ||
		     loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}
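
/*
 * Soft limit reclaim is best effort: the per-node tree only tracks the
 * cgroups with the largest excess, each visited cgroup is re-inserted
 * with its remaining excess, and when nothing was reclaimed the
 * next-largest offender is tried before giving up after
 * MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS iterations.
 */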

/*
 * Test whether @memcg has children, dead or alive.  Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
 */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
	bool ret;

	rcu_read_lock();
	ret = css_next_child(NULL, &memcg->css);
	rcu_read_unlock();
	return ret;
}

/*
 * Reclaims as many pages from the given memcg as possible.
 *
 * Caller is responsible for holding css reference for memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();

	drain_all_stock(memcg);

	/* try to free all pages in this cgroup */
	while (nr_retries && page_counter_read(&memcg->memory)) {
		int progress;

		if (signal_pending(current))
			return -EINTR;

		progress = try_to_free_mem_cgroup_pages(memcg, 1,
							GFP_KERNEL, true);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}
	}

	return 0;
}

static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
	return mem_cgroup_force_empty(memcg) ?: nbytes;
}

static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
{
	return mem_cgroup_from_css(css)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
{
	int retval = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);

	if (memcg->use_hierarchy == val)
		return 0;

	/*
	 * If parent's use_hierarchy is set, we can't make any modifications
	 * in the child subtrees. If it is unset, then the change can
	 * occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_mem is NULL, we allow value to be
	 * set if there are no children.
	 */
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
	    (val == 1 || val == 0)) {
		if (!memcg_has_children(memcg))
			memcg->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;

	return retval;
}

static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	unsigned long val;

	if (mem_cgroup_is_root(memcg)) {
		val = memcg_page_state(memcg, MEMCG_CACHE) +
			memcg_page_state(memcg, MEMCG_RSS);
		if (swap)
			val += memcg_page_state(memcg, MEMCG_SWAP);
	} else {
		if (!swap)
			val = page_counter_read(&memcg->memory);
		else
			val = page_counter_read(&memcg->memsw);
	}
	return val;
}
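
/*
 * The root cgroup is special-cased above because charges are not made
 * against its page counters (try_charge() bails out early for root),
 * so its usage has to be derived from the aggregated stat counters:
 * cache + rss, plus swap when requested.
 */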

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct page_counter *counter;

	switch (MEMFILE_TYPE(cft->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
		if (counter == &memcg->memsw)
			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)memcg->soft_limit * PAGE_SIZE;
	default:
		BUG();
	}
}

#ifdef CONFIG_MEMCG_KMEM
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	int memcg_id;

	if (cgroup_memory_nokmem)
		return 0;

	BUG_ON(memcg->kmemcg_id >= 0);
	BUG_ON(memcg->kmem_state);

	memcg_id = memcg_alloc_cache_id();
	if (memcg_id < 0)
		return memcg_id;

	static_branch_inc(&memcg_kmem_enabled_key);
	/*
	 * A memory cgroup is considered kmem-online as soon as it gets
	 * kmemcg_id. Setting the id after enabling static branching will
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
	memcg->kmemcg_id = memcg_id;
	memcg->kmem_state = KMEM_ONLINE;
	INIT_LIST_HEAD(&memcg->kmem_caches);

	return 0;
}

static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *parent, *child;
	int kmemcg_id;

	if (memcg->kmem_state != KMEM_ONLINE)
		return;
	/*
	 * Clear the online state before clearing memcg_caches array
	 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
	 * guarantees that no cache will be created for this cgroup
	 * after we are done (see memcg_create_kmem_cache()).
	 */
	memcg->kmem_state = KMEM_ALLOCATED;

	memcg_deactivate_kmem_caches(memcg);

	kmemcg_id = memcg->kmemcg_id;
	BUG_ON(kmemcg_id < 0);

	parent = parent_mem_cgroup(memcg);
	if (!parent)
		parent = root_mem_cgroup;

	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent. After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty. The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
	css_for_each_descendant_pre(css, &memcg->css) {
		child = mem_cgroup_from_css(css);
		BUG_ON(child->kmemcg_id != kmemcg_id);
		child->kmemcg_id = parent->kmemcg_id;
		if (!memcg->use_hierarchy)
			break;
	}
	rcu_read_unlock();

	memcg_drain_all_list_lrus(kmemcg_id, parent);

	memcg_free_cache_id(kmemcg_id);
}

static void memcg_free_kmem(struct mem_cgroup *memcg)
{
	/* css_alloc() failed, offlining didn't happen */
	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
		memcg_offline_kmem(memcg);

	if (memcg->kmem_state == KMEM_ALLOCATED) {
		memcg_destroy_kmem_caches(memcg);
		static_branch_dec(&memcg_kmem_enabled_key);
		WARN_ON(page_counter_read(&memcg->kmem));
	}
}
#else
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
	return 0;
}
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
}
static void memcg_free_kmem(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static int memcg_update_kmem_max(struct mem_cgroup *memcg,
				 unsigned long max)
{
	int ret;

	mutex_lock(&memcg_max_mutex);
	ret = page_counter_set_max(&memcg->kmem, max);
	mutex_unlock(&memcg_max_mutex);
	return ret;
}

static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
{
	int ret;

	mutex_lock(&memcg_max_mutex);

	ret = page_counter_set_max(&memcg->tcpmem, max);
	if (ret)
		goto out;

	if (!memcg->tcpmem_active) {
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See mem_cgroup_sk_alloc()
		 * for details, and note that we don't mark any socket as
		 * belonging to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in mem_cgroup_sk_alloc(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 */
		static_branch_inc(&memcg_sockets_enabled_key);
		memcg->tcpmem_active = true;
	}
out:
	mutex_unlock(&memcg_max_mutex);
	return ret;
}

/*
 * Write handler for the limit files, i.e. RES_LIMIT and RES_SOFT_LIMIT.
 */
static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
				char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long nr_pages;
	int ret;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		switch (MEMFILE_TYPE(of_cft(of)->private)) {
		case _MEM:
			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
			break;
		case _MEMSWAP:
			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
			break;
		case _KMEM:
			ret = memcg_update_kmem_max(memcg, nr_pages);
			break;
		case _TCP:
			ret = memcg_update_tcp_max(memcg, nr_pages);
			break;
		}
		break;
	case RES_SOFT_LIMIT:
		memcg->soft_limit = nr_pages;
		ret = 0;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
				size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	struct page_counter *counter;

	switch (MEMFILE_TYPE(of_cft(of)->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		BUG();
	}

	return nbytes;
}

static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
				       struct cftype *cft)
{
	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	if (val & ~MOVE_MASK)
		return -EINVAL;

	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then carry
	 * on with stale data. This means that changes to this value will only
	 * affect task migrations starting after the change.
	 */
	memcg->move_charge_at_immigrate = val;
	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003475
Ying Han406eb0c2011-05-26 16:25:37 -07003476#ifdef CONFIG_NUMA
Johannes Weiner113b7df2019-05-13 17:18:11 -07003477
3478#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3479#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3480#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
3481
3482static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3483 int nid, unsigned int lru_mask)
3484{
3485 struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
3486 unsigned long nr = 0;
3487 enum lru_list lru;
3488
3489 VM_BUG_ON((unsigned)nid >= nr_node_ids);
3490
3491 for_each_lru(lru) {
3492 if (!(BIT(lru) & lru_mask))
3493 continue;
Johannes Weiner205b20c2019-05-14 15:47:06 -07003494 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
Johannes Weiner113b7df2019-05-13 17:18:11 -07003495 }
3496 return nr;
3497}
3498
3499static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3500 unsigned int lru_mask)
3501{
3502 unsigned long nr = 0;
3503 enum lru_list lru;
3504
3505 for_each_lru(lru) {
3506 if (!(BIT(lru) & lru_mask))
3507 continue;
Johannes Weiner205b20c2019-05-14 15:47:06 -07003508 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
Johannes Weiner113b7df2019-05-13 17:18:11 -07003509 }
3510 return nr;
3511}
3512
Tejun Heo2da8ca82013-12-05 12:28:04 -05003513static int memcg_numa_stat_show(struct seq_file *m, void *v)
Ying Han406eb0c2011-05-26 16:25:37 -07003514{
Greg Thelen25485de2013-11-12 15:07:40 -08003515 struct numa_stat {
3516 const char *name;
3517 unsigned int lru_mask;
3518 };
3519
3520 static const struct numa_stat stats[] = {
3521 { "total", LRU_ALL },
3522 { "file", LRU_ALL_FILE },
3523 { "anon", LRU_ALL_ANON },
3524 { "unevictable", BIT(LRU_UNEVICTABLE) },
3525 };
3526 const struct numa_stat *stat;
Ying Han406eb0c2011-05-26 16:25:37 -07003527 int nid;
Greg Thelen25485de2013-11-12 15:07:40 -08003528 unsigned long nr;
Chris Downaa9694b2019-03-05 15:45:52 -08003529 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Ying Han406eb0c2011-05-26 16:25:37 -07003530
Greg Thelen25485de2013-11-12 15:07:40 -08003531 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3532 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3533 seq_printf(m, "%s=%lu", stat->name, nr);
3534 for_each_node_state(nid, N_MEMORY) {
3535 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3536 stat->lru_mask);
3537 seq_printf(m, " N%d=%lu", nid, nr);
3538 }
3539 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003540 }
Ying Han406eb0c2011-05-26 16:25:37 -07003541
Ying Han071aee12013-11-12 15:07:41 -08003542 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3543 struct mem_cgroup *iter;
Ying Han406eb0c2011-05-26 16:25:37 -07003544
Ying Han071aee12013-11-12 15:07:41 -08003545 nr = 0;
3546 for_each_mem_cgroup_tree(iter, memcg)
3547 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3548 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3549 for_each_node_state(nid, N_MEMORY) {
3550 nr = 0;
3551 for_each_mem_cgroup_tree(iter, memcg)
3552 nr += mem_cgroup_node_nr_lru_pages(
3553 iter, nid, stat->lru_mask);
3554 seq_printf(m, " N%d=%lu", nid, nr);
3555 }
3556 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003557 }
Ying Han406eb0c2011-05-26 16:25:37 -07003558
Ying Han406eb0c2011-05-26 16:25:37 -07003559 return 0;
3560}
3561#endif /* CONFIG_NUMA */
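/*
 * Hypothetical memory.numa_stat output on a two-node machine (values
 * invented for illustration):
 *
 *	total=6359 N0=3127 N1=3232
 *	file=5427 N0=2680 N1=2747
 *	anon=930 N0=446 N1=484
 *	unevictable=2 N0=1 N1=1
 *	hierarchical_total=7125 N0=3509 N1=3616
 *	...
 */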
3562
Johannes Weinerc8713d02019-07-11 20:55:59 -07003563static const unsigned int memcg1_stats[] = {
3564 MEMCG_CACHE,
3565 MEMCG_RSS,
3566 MEMCG_RSS_HUGE,
3567 NR_SHMEM,
3568 NR_FILE_MAPPED,
3569 NR_FILE_DIRTY,
3570 NR_WRITEBACK,
3571 MEMCG_SWAP,
3572};
3573
3574static const char *const memcg1_stat_names[] = {
3575 "cache",
3576 "rss",
3577 "rss_huge",
3578 "shmem",
3579 "mapped_file",
3580 "dirty",
3581 "writeback",
3582 "swap",
3583};
3584
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07003585/* Universal VM events cgroup1 shows, original sort order */
Greg Thelen8dd53fd2018-06-07 17:07:23 -07003586static const unsigned int memcg1_events[] = {
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07003587 PGPGIN,
3588 PGPGOUT,
3589 PGFAULT,
3590 PGMAJFAULT,
3591};
3592
3593static const char *const memcg1_event_names[] = {
3594 "pgpgin",
3595 "pgpgout",
3596 "pgfault",
3597 "pgmajfault",
3598};
3599
Tejun Heo2da8ca82013-12-05 12:28:04 -05003600static int memcg_stat_show(struct seq_file *m, void *v)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003601{
Chris Downaa9694b2019-03-05 15:45:52 -08003602 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003603 unsigned long memory, memsw;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003604 struct mem_cgroup *mi;
3605 unsigned int i;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003606
Johannes Weiner71cd3112017-05-03 14:55:13 -07003607 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
Rickard Strandqvist70bc0682014-12-12 16:56:41 -08003608 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3609
Johannes Weiner71cd3112017-05-03 14:55:13 -07003610 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3611 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003612 continue;
Johannes Weiner71cd3112017-05-03 14:55:13 -07003613 seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
Johannes Weiner205b20c2019-05-14 15:47:06 -07003614 memcg_page_state_local(memcg, memcg1_stats[i]) *
Johannes Weiner71cd3112017-05-03 14:55:13 -07003615 PAGE_SIZE);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003616 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08003617
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07003618 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3619 seq_printf(m, "%s %lu\n", memcg1_event_names[i],
Johannes Weiner205b20c2019-05-14 15:47:06 -07003620 memcg_events_local(memcg, memcg1_events[i]));
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003621
3622 for (i = 0; i < NR_LRU_LISTS; i++)
3623 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
Johannes Weiner205b20c2019-05-14 15:47:06 -07003624 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
Johannes Weiner21d89d12019-05-13 17:18:08 -07003625 PAGE_SIZE);
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003626
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003627 /* Hierarchical information */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003628 memory = memsw = PAGE_COUNTER_MAX;
3629 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
Roman Gushchinbbec2e12018-06-07 17:06:18 -07003630 memory = min(memory, mi->memory.max);
3631 memsw = min(memsw, mi->memsw.max);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08003632 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003633 seq_printf(m, "hierarchical_memory_limit %llu\n",
3634 (u64)memory * PAGE_SIZE);
Johannes Weiner7941d212016-01-14 15:21:23 -08003635 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003636 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3637 (u64)memsw * PAGE_SIZE);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003638
Shakeel Butt8de7ecc62018-08-21 21:53:17 -07003639 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
Johannes Weiner71cd3112017-05-03 14:55:13 -07003640 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003641 continue;
Shakeel Butt8de7ecc62018-08-21 21:53:17 -07003642 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
Yafang Shaodd923992019-07-11 20:52:11 -07003643 (u64)memcg_page_state(memcg, memcg1_stats[i]) *
3644 PAGE_SIZE);
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003645 }
3646
Shakeel Butt8de7ecc62018-08-21 21:53:17 -07003647 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3648 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i],
Yafang Shaodd923992019-07-11 20:52:11 -07003649 (u64)memcg_events(memcg, memcg1_events[i]));
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003650
Shakeel Butt8de7ecc62018-08-21 21:53:17 -07003651 for (i = 0; i < NR_LRU_LISTS; i++)
3652 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i],
Johannes Weiner42a30032019-05-14 15:47:12 -07003653 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
3654 PAGE_SIZE);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003655
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003656#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003657 {
Mel Gormanef8f2322016-07-28 15:46:05 -07003658 pg_data_t *pgdat;
3659 struct mem_cgroup_per_node *mz;
Hugh Dickins89abfab2012-05-29 15:06:53 -07003660 struct zone_reclaim_stat *rstat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003661 unsigned long recent_rotated[2] = {0, 0};
3662 unsigned long recent_scanned[2] = {0, 0};
3663
Mel Gormanef8f2322016-07-28 15:46:05 -07003664 for_each_online_pgdat(pgdat) {
3665 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3666 rstat = &mz->lruvec.reclaim_stat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003667
Mel Gormanef8f2322016-07-28 15:46:05 -07003668 recent_rotated[0] += rstat->recent_rotated[0];
3669 recent_rotated[1] += rstat->recent_rotated[1];
3670 recent_scanned[0] += rstat->recent_scanned[0];
3671 recent_scanned[1] += rstat->recent_scanned[1];
3672 }
Johannes Weiner78ccf5b2012-05-29 15:07:06 -07003673 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3674 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3675 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3676 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003677 }
3678#endif
3679
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003680 return 0;
3681}
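/*
 * Sketch of the resulting memory.stat layout (abridged, values
 * illustrative): local counters first ("cache 0", "rss 1048576", ...,
 * "pgpgin 42", per-LRU sizes), then the hierarchical_memory_limit /
 * hierarchical_memsw_limit lines, then the same counters prefixed
 * "total_" aggregated over the subtree, and finally the
 * recent_rotated/recent_scanned rows only under CONFIG_DEBUG_VM.
 */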
3682
Tejun Heo182446d2013-08-08 20:11:24 -04003683static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3684 struct cftype *cft)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003685{
Tejun Heo182446d2013-08-08 20:11:24 -04003686 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003687
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07003688 return mem_cgroup_swappiness(memcg);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003689}
3690
Tejun Heo182446d2013-08-08 20:11:24 -04003691static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3692 struct cftype *cft, u64 val)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003693{
Tejun Heo182446d2013-08-08 20:11:24 -04003694 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Li Zefan068b38c2009-01-15 13:51:26 -08003695
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003696 if (val > 100)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003697 return -EINVAL;
3698
Linus Torvalds14208b02014-06-09 15:03:33 -07003699 if (css->parent)
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003700 memcg->swappiness = val;
3701 else
3702 vm_swappiness = val;
Li Zefan068b38c2009-01-15 13:51:26 -08003703
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003704 return 0;
3705}
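/*
 * Usage note (illustrative): a write to the root group's
 * memory.swappiness lands in the global vm_swappiness, so
 * "echo 60 > /sys/fs/cgroup/memory/memory.swappiness" is equivalent to
 * "sysctl vm.swappiness=60", while the same write in a child group only
 * tunes reclaim for that group.
 */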
3706
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003707static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3708{
3709 struct mem_cgroup_threshold_ary *t;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003710 unsigned long usage;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003711 int i;
3712
3713 rcu_read_lock();
3714 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003715 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003716 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003717 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003718
3719 if (!t)
3720 goto unlock;
3721
Johannes Weinerce00a962014-09-05 08:43:57 -04003722 usage = mem_cgroup_usage(memcg, swap);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003723
3724 /*
Sha Zhengju748dad32012-05-29 15:06:57 -07003725 * current_threshold points to the threshold just below or equal to
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003726 * usage. If that no longer holds, a threshold was crossed after the
3727 * last call of __mem_cgroup_threshold().
3728 */
Phil Carmody5407a562010-05-26 14:42:42 -07003729 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003730
3731 /*
3732 * Iterate backward over the array of thresholds starting from
3733 * current_threshold and check if a threshold is crossed.
3734 * If none of the thresholds below usage is crossed, we read
3735 * only one element of the array here.
3736 */
3737 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3738 eventfd_signal(t->entries[i].eventfd, 1);
3739
3740 /* i = current_threshold + 1 */
3741 i++;
3742
3743 /*
3744 * Iterate forward over the array of thresholds starting from
3745 * current_threshold+1 and check if a threshold is crossed.
3746 * If none of the thresholds above usage is crossed, we read
3747 * only one element of the array here.
3748 */
3749 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3750 eventfd_signal(t->entries[i].eventfd, 1);
3751
3752 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07003753 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003754unlock:
3755 rcu_read_unlock();
3756}
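/*
 * Worked example (illustrative, an added annotation): with
 * entries[].threshold = {4M, 8M, 16M} and a previous usage of 9M,
 * current_threshold is 1 (the 8M slot). If usage drops to 3M, the
 * backward scan signals the 8M and 4M eventfds and current_threshold
 * ends up at -1; if usage instead grows to 20M, the forward scan
 * signals the 16M eventfd and current_threshold becomes 2.
 */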
3757
3758static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3759{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003760 while (memcg) {
3761 __mem_cgroup_threshold(memcg, false);
Johannes Weiner7941d212016-01-14 15:21:23 -08003762 if (do_memsw_account())
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003763 __mem_cgroup_threshold(memcg, true);
3764
3765 memcg = parent_mem_cgroup(memcg);
3766 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003767}
3768
3769static int compare_thresholds(const void *a, const void *b)
3770{
3771 const struct mem_cgroup_threshold *_a = a;
3772 const struct mem_cgroup_threshold *_b = b;
3773
Greg Thelen2bff24a2013-09-11 14:23:08 -07003774 if (_a->threshold > _b->threshold)
3775 return 1;
3776
3777 if (_a->threshold < _b->threshold)
3778 return -1;
3779
3780 return 0;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003781}
3782
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003783static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003784{
3785 struct mem_cgroup_eventfd_list *ev;
3786
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003787 spin_lock(&memcg_oom_lock);
3788
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003789 list_for_each_entry(ev, &memcg->oom_notify, list)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003790 eventfd_signal(ev->eventfd, 1);
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003791
3792 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003793 return 0;
3794}
3795
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003796static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003797{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003798 struct mem_cgroup *iter;
3799
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003800 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003801 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003802}
3803
Tejun Heo59b6f872013-11-22 18:20:43 -05003804static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003805 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003806{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003807 struct mem_cgroup_thresholds *thresholds;
3808 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003809 unsigned long threshold;
3810 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003811 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003812
Johannes Weiner650c5e52015-02-11 15:26:03 -08003813 ret = page_counter_memparse(args, "-1", &threshold);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003814 if (ret)
3815 return ret;
3816
3817 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003818
Johannes Weiner05b84302014-08-06 16:05:59 -07003819 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003820 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003821 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003822 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003823 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003824 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003825 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003826 BUG();
3827
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003828 /* Check if a threshold was crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003829 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003830 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3831
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003832 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003833
3834 /* Allocate memory for new array of thresholds */
Gustavo A. R. Silva67b80462019-03-05 15:44:05 -08003835 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003836 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003837 ret = -ENOMEM;
3838 goto unlock;
3839 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003840 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003841
3842 /* Copy thresholds (if any) to new array */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003843 if (thresholds->primary) {
3844 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003845 sizeof(struct mem_cgroup_threshold));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003846 }
3847
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003848 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003849 new->entries[size - 1].eventfd = eventfd;
3850 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003851
3852 /* Sort thresholds. Registering a new threshold isn't time-critical */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003853 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003854 compare_thresholds, NULL);
3855
3856 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003857 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003858 for (i = 0; i < size; i++) {
Sha Zhengju748dad32012-05-29 15:06:57 -07003859 if (new->entries[i].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003860 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003861 * new->current_threshold will not be used until
3862 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003863 * it here.
3864 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003865 ++new->current_threshold;
Sha Zhengju748dad32012-05-29 15:06:57 -07003866 } else
3867 break;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003868 }
3869
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003870 /* Free old spare buffer and save old primary buffer as spare */
3871 kfree(thresholds->spare);
3872 thresholds->spare = thresholds->primary;
3873
3874 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003875
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003876 /* Wait until no reader can still be using the old primary array */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003877 synchronize_rcu();
3878
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003879unlock:
3880 mutex_unlock(&memcg->thresholds_lock);
3881
3882 return ret;
3883}
3884
Tejun Heo59b6f872013-11-22 18:20:43 -05003885static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003886 struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003887{
Tejun Heo59b6f872013-11-22 18:20:43 -05003888 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003889}
3890
Tejun Heo59b6f872013-11-22 18:20:43 -05003891static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003892 struct eventfd_ctx *eventfd, const char *args)
3893{
Tejun Heo59b6f872013-11-22 18:20:43 -05003894 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003895}
3896
Tejun Heo59b6f872013-11-22 18:20:43 -05003897static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003898 struct eventfd_ctx *eventfd, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003899{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003900 struct mem_cgroup_thresholds *thresholds;
3901 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003902 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003903 int i, j, size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003904
3905 mutex_lock(&memcg->thresholds_lock);
Johannes Weiner05b84302014-08-06 16:05:59 -07003906
3907 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003908 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003909 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003910 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003911 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003912 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003913 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003914 BUG();
3915
Anton Vorontsov371528c2012-02-24 05:14:46 +04003916 if (!thresholds->primary)
3917 goto unlock;
3918
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003919 /* Check if a threshold crossed before removing */
3920 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3921
3922 /* Calculate the new number of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003923 size = 0;
3924 for (i = 0; i < thresholds->primary->size; i++) {
3925 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003926 size++;
3927 }
3928
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003929 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003930
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003931 /* Set thresholds array to NULL if we don't have thresholds */
3932 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003933 kfree(new);
3934 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003935 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003936 }
3937
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003938 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003939
3940 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003941 new->current_threshold = -1;
3942 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3943 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003944 continue;
3945
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003946 new->entries[j] = thresholds->primary->entries[i];
Sha Zhengju748dad32012-05-29 15:06:57 -07003947 if (new->entries[j].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003948 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003949 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003950 * until rcu_assign_pointer(), so it's safe to increment
3951 * it here.
3952 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003953 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003954 }
3955 j++;
3956 }
3957
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003958swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003959 /* Swap primary and spare array */
3960 thresholds->spare = thresholds->primary;
Sha Zhengju8c757762012-05-10 13:01:45 -07003961
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003962 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003963
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003964 /* Wait until no reader can still be using the old primary array */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003965 synchronize_rcu();
Martijn Coenen6611d8d2016-01-15 16:57:49 -08003966
3967 /* If all events are unregistered, free the spare array */
3968 if (!new) {
3969 kfree(thresholds->spare);
3970 thresholds->spare = NULL;
3971 }
Anton Vorontsov371528c2012-02-24 05:14:46 +04003972unlock:
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003973 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003974}
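/*
 * Design note (added annotation): the threshold arrays use a classic
 * RCU double-buffer. Readers dereference ->primary locklessly under
 * rcu_read_lock(); writers build the replacement in ->spare (or a fresh
 * allocation on registration), publish it with rcu_assign_pointer(),
 * wait out readers with synchronize_rcu(), and keep the old primary as
 * the next spare. The cost is one extra array; the payoff is that the
 * signal-delivery path in __mem_cgroup_threshold() never takes a lock.
 */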
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003975
Tejun Heo59b6f872013-11-22 18:20:43 -05003976static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003977 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003978{
Tejun Heo59b6f872013-11-22 18:20:43 -05003979 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003980}
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003981
Tejun Heo59b6f872013-11-22 18:20:43 -05003982static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003983 struct eventfd_ctx *eventfd)
3984{
Tejun Heo59b6f872013-11-22 18:20:43 -05003985 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003986}
3987
Tejun Heo59b6f872013-11-22 18:20:43 -05003988static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003989 struct eventfd_ctx *eventfd, const char *args)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003990{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003991 struct mem_cgroup_eventfd_list *event;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003992
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003993 event = kmalloc(sizeof(*event), GFP_KERNEL);
3994 if (!event)
3995 return -ENOMEM;
3996
Michal Hocko1af8efe2011-07-26 16:08:24 -07003997 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003998
3999 event->eventfd = eventfd;
4000 list_add(&event->list, &memcg->oom_notify);
4001
4002 /* already in OOM? */
Tejun Heoc2b42d32015-06-24 16:58:23 -07004003 if (memcg->under_oom)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004004 eventfd_signal(eventfd, 1);
Michal Hocko1af8efe2011-07-26 16:08:24 -07004005 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004006
4007 return 0;
4008}
4009
Tejun Heo59b6f872013-11-22 18:20:43 -05004010static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05004011 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004012{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004013 struct mem_cgroup_eventfd_list *ev, *tmp;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004014
Michal Hocko1af8efe2011-07-26 16:08:24 -07004015 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004016
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004017 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004018 if (ev->eventfd == eventfd) {
4019 list_del(&ev->list);
4020 kfree(ev);
4021 }
4022 }
4023
Michal Hocko1af8efe2011-07-26 16:08:24 -07004024 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004025}
4026
Tejun Heo2da8ca82013-12-05 12:28:04 -05004027static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004028{
Chris Downaa9694b2019-03-05 15:45:52 -08004029 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004030
Tejun Heo791badb2013-12-05 12:28:02 -05004031 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
Tejun Heoc2b42d32015-06-24 16:58:23 -07004032 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
Roman Gushchinfe6bdfc2018-06-14 15:28:05 -07004033 seq_printf(sf, "oom_kill %lu\n",
4034 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004035 return 0;
4036}
4037
Tejun Heo182446d2013-08-08 20:11:24 -04004038static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004039 struct cftype *cft, u64 val)
4040{
Tejun Heo182446d2013-08-08 20:11:24 -04004041 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004042
4043 /* cannot be set on the root cgroup, and only 0 and 1 are allowed */
Linus Torvalds14208b02014-06-09 15:03:33 -07004044 if (!css->parent || !((val == 0) || (val == 1)))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004045 return -EINVAL;
4046
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004047 memcg->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07004048 if (!val)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004049 memcg_oom_recover(memcg);
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07004050
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004051 return 0;
4052}
4053
Tejun Heo52ebea72015-05-22 17:13:37 -04004054#ifdef CONFIG_CGROUP_WRITEBACK
4055
Tejun Heo841710a2015-05-22 18:23:33 -04004056static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4057{
4058 return wb_domain_init(&memcg->cgwb_domain, gfp);
4059}
4060
4061static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4062{
4063 wb_domain_exit(&memcg->cgwb_domain);
4064}
4065
Tejun Heo2529bb32015-05-22 18:23:34 -04004066static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4067{
4068 wb_domain_size_changed(&memcg->cgwb_domain);
4069}
4070
Tejun Heo841710a2015-05-22 18:23:33 -04004071struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4072{
4073 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4074
4075 if (!memcg->css.parent)
4076 return NULL;
4077
4078 return &memcg->cgwb_domain;
4079}
4080
Greg Thelen0b3d6e62019-04-05 18:39:18 -07004081/*
4082 * idx can be of type enum memcg_stat_item or node_stat_item.
4083 * Keep in sync with memcg_exact_page().
4084 */
4085static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4086{
Chris Down871789d2019-05-14 15:46:57 -07004087 long x = atomic_long_read(&memcg->vmstats[idx]);
Greg Thelen0b3d6e62019-04-05 18:39:18 -07004088 int cpu;
4089
4090 for_each_online_cpu(cpu)
Chris Down871789d2019-05-14 15:46:57 -07004091 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
Greg Thelen0b3d6e62019-04-05 18:39:18 -07004092 if (x < 0)
4093 x = 0;
4094 return x;
4095}
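/*
 * Illustrative note: memcg->vmstats[idx] only sees batched flushes, so
 * the exact value is that atomic plus whatever each CPU still holds in
 * its unflushed per-cpu delta. For example, on two CPUs:
 *
 *	atomic = 100, cpu0 delta = +3, cpu1 delta = -1  =>  exact = 102
 *
 * The clamp to 0 covers transient negative sums caused by concurrent
 * updates racing with the per-cpu reads.
 */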
4096
Tejun Heoc2aa7232015-05-22 18:23:35 -04004097/**
4098 * mem_cgroup_wb_stats - retrieve writeback-related stats from its memcg
4099 * @wb: bdi_writeback in question
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004100 * @pfilepages: out parameter for number of file pages
4101 * @pheadroom: out parameter for number of allocatable pages according to memcg
Tejun Heoc2aa7232015-05-22 18:23:35 -04004102 * @pdirty: out parameter for number of dirty pages
4103 * @pwriteback: out parameter for number of pages under writeback
4104 *
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004105 * Determine the numbers of file, headroom, dirty, and writeback pages in
4106 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4107 * is a bit more involved.
Tejun Heoc2aa7232015-05-22 18:23:35 -04004108 *
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004109 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4110 * headroom is calculated as the lowest headroom of itself and the
4111 * ancestors. Note that this doesn't consider the actual amount of
4112 * available memory in the system. The caller should further cap
4113 * *@pheadroom accordingly.
Tejun Heoc2aa7232015-05-22 18:23:35 -04004114 */
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004115void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4116 unsigned long *pheadroom, unsigned long *pdirty,
4117 unsigned long *pwriteback)
Tejun Heoc2aa7232015-05-22 18:23:35 -04004118{
4119 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4120 struct mem_cgroup *parent;
Tejun Heoc2aa7232015-05-22 18:23:35 -04004121
Greg Thelen0b3d6e62019-04-05 18:39:18 -07004122 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
Tejun Heoc2aa7232015-05-22 18:23:35 -04004123
4124 /* this should eventually include NR_UNSTABLE_NFS */
Greg Thelen0b3d6e62019-04-05 18:39:18 -07004125 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
Johannes Weiner21d89d12019-05-13 17:18:08 -07004126 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4127 memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004128 *pheadroom = PAGE_COUNTER_MAX;
Tejun Heoc2aa7232015-05-22 18:23:35 -04004129
Tejun Heoc2aa7232015-05-22 18:23:35 -04004130 while ((parent = parent_mem_cgroup(memcg))) {
Roman Gushchinbbec2e12018-06-07 17:06:18 -07004131 unsigned long ceiling = min(memcg->memory.max, memcg->high);
Tejun Heoc2aa7232015-05-22 18:23:35 -04004132 unsigned long used = page_counter_read(&memcg->memory);
4133
Tejun Heoc5edf9c2015-09-29 13:04:26 -04004134 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
Tejun Heoc2aa7232015-05-22 18:23:35 -04004135 memcg = parent;
4136 }
Tejun Heoc2aa7232015-05-22 18:23:35 -04004137}
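/*
 * Worked example (numbers invented): a memcg with max = 1G, high = 512M
 * and used = 300M has ceiling = min(1G, 512M) = 512M and a local
 * headroom of 512M - 300M = 212M. If an ancestor's own
 * "min(max, high) - used" is only 100M, *pheadroom comes out as 100M,
 * since the walk above takes the minimum over the whole ancestry.
 */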
4138
Tejun Heo841710a2015-05-22 18:23:33 -04004139#else /* CONFIG_CGROUP_WRITEBACK */
4140
4141static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4142{
4143 return 0;
4144}
4145
4146static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4147{
4148}
4149
Tejun Heo2529bb32015-05-22 18:23:34 -04004150static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4151{
4152}
4153
Tejun Heo52ebea72015-05-22 17:13:37 -04004154#endif /* CONFIG_CGROUP_WRITEBACK */
4155
Tejun Heo79bd9812013-11-22 18:20:42 -05004156/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05004157 * DO NOT USE IN NEW FILES.
4158 *
4159 * "cgroup.event_control" implementation.
4160 *
4161 * This is way over-engineered. It tries to support fully configurable
4162 * events for each user. Such a level of flexibility is completely
4163 * unnecessary, especially in light of the planned unified hierarchy.
4164 *
4165 * Please deprecate this and replace with something simpler if at all
4166 * possible.
4167 */
4168
4169/*
Tejun Heo79bd9812013-11-22 18:20:42 -05004170 * Unregister event and free resources.
4171 *
4172 * Gets called from workqueue.
4173 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05004174static void memcg_event_remove(struct work_struct *work)
Tejun Heo79bd9812013-11-22 18:20:42 -05004175{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004176 struct mem_cgroup_event *event =
4177 container_of(work, struct mem_cgroup_event, remove);
Tejun Heo59b6f872013-11-22 18:20:43 -05004178 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004179
4180 remove_wait_queue(event->wqh, &event->wait);
4181
Tejun Heo59b6f872013-11-22 18:20:43 -05004182 event->unregister_event(memcg, event->eventfd);
Tejun Heo79bd9812013-11-22 18:20:42 -05004183
4184 /* Notify userspace the event is going away. */
4185 eventfd_signal(event->eventfd, 1);
4186
4187 eventfd_ctx_put(event->eventfd);
4188 kfree(event);
Tejun Heo59b6f872013-11-22 18:20:43 -05004189 css_put(&memcg->css);
Tejun Heo79bd9812013-11-22 18:20:42 -05004190}
4191
4192/*
Linus Torvaldsa9a08842018-02-11 14:34:03 -08004193 * Gets called on EPOLLHUP on eventfd when user closes it.
Tejun Heo79bd9812013-11-22 18:20:42 -05004194 *
4195 * Called with wqh->lock held and interrupts disabled.
4196 */
Ingo Molnarac6424b2017-06-20 12:06:13 +02004197static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
Tejun Heo3bc942f2013-11-22 18:20:44 -05004198 int sync, void *key)
Tejun Heo79bd9812013-11-22 18:20:42 -05004199{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004200 struct mem_cgroup_event *event =
4201 container_of(wait, struct mem_cgroup_event, wait);
Tejun Heo59b6f872013-11-22 18:20:43 -05004202 struct mem_cgroup *memcg = event->memcg;
Al Viro3ad6f932017-07-03 20:14:56 -04004203 __poll_t flags = key_to_poll(key);
Tejun Heo79bd9812013-11-22 18:20:42 -05004204
Linus Torvaldsa9a08842018-02-11 14:34:03 -08004205 if (flags & EPOLLHUP) {
Tejun Heo79bd9812013-11-22 18:20:42 -05004206 /*
4207 * If the event has been detached at cgroup removal, we
4208 * can simply return knowing the other side will cleanup
4209 * for us.
4210 *
4211 * We can't race against event freeing since the other
4212 * side will require wqh->lock via remove_wait_queue(),
4213 * which we hold.
4214 */
Tejun Heofba94802013-11-22 18:20:43 -05004215 spin_lock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004216 if (!list_empty(&event->list)) {
4217 list_del_init(&event->list);
4218 /*
4219 * We are in atomic context, but cgroup_event_remove()
4220 * may sleep, so we have to call it in workqueue.
4221 */
4222 schedule_work(&event->remove);
4223 }
Tejun Heofba94802013-11-22 18:20:43 -05004224 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004225 }
4226
4227 return 0;
4228}
4229
Tejun Heo3bc942f2013-11-22 18:20:44 -05004230static void memcg_event_ptable_queue_proc(struct file *file,
Tejun Heo79bd9812013-11-22 18:20:42 -05004231 wait_queue_head_t *wqh, poll_table *pt)
4232{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004233 struct mem_cgroup_event *event =
4234 container_of(pt, struct mem_cgroup_event, pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05004235
4236 event->wqh = wqh;
4237 add_wait_queue(wqh, &event->wait);
4238}
4239
4240/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05004241 * DO NOT USE IN NEW FILES.
4242 *
Tejun Heo79bd9812013-11-22 18:20:42 -05004243 * Parse input and register new cgroup event handler.
4244 *
4245 * Input must be in the format '<event_fd> <control_fd> <args>'.
4246 * Interpretation of args is defined by control file implementation.
4247 */
Tejun Heo451af502014-05-13 12:16:21 -04004248static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4249 char *buf, size_t nbytes, loff_t off)
Tejun Heo79bd9812013-11-22 18:20:42 -05004250{
Tejun Heo451af502014-05-13 12:16:21 -04004251 struct cgroup_subsys_state *css = of_css(of);
Tejun Heofba94802013-11-22 18:20:43 -05004252 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004253 struct mem_cgroup_event *event;
Tejun Heo79bd9812013-11-22 18:20:42 -05004254 struct cgroup_subsys_state *cfile_css;
4255 unsigned int efd, cfd;
4256 struct fd efile;
4257 struct fd cfile;
Tejun Heofba94802013-11-22 18:20:43 -05004258 const char *name;
Tejun Heo79bd9812013-11-22 18:20:42 -05004259 char *endp;
4260 int ret;
4261
Tejun Heo451af502014-05-13 12:16:21 -04004262 buf = strstrip(buf);
4263
4264 efd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05004265 if (*endp != ' ')
4266 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04004267 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05004268
Tejun Heo451af502014-05-13 12:16:21 -04004269 cfd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05004270 if ((*endp != ' ') && (*endp != '\0'))
4271 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04004272 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05004273
4274 event = kzalloc(sizeof(*event), GFP_KERNEL);
4275 if (!event)
4276 return -ENOMEM;
4277
Tejun Heo59b6f872013-11-22 18:20:43 -05004278 event->memcg = memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004279 INIT_LIST_HEAD(&event->list);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004280 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4281 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4282 INIT_WORK(&event->remove, memcg_event_remove);
Tejun Heo79bd9812013-11-22 18:20:42 -05004283
4284 efile = fdget(efd);
4285 if (!efile.file) {
4286 ret = -EBADF;
4287 goto out_kfree;
4288 }
4289
4290 event->eventfd = eventfd_ctx_fileget(efile.file);
4291 if (IS_ERR(event->eventfd)) {
4292 ret = PTR_ERR(event->eventfd);
4293 goto out_put_efile;
4294 }
4295
4296 cfile = fdget(cfd);
4297 if (!cfile.file) {
4298 ret = -EBADF;
4299 goto out_put_eventfd;
4300 }
4301
4302 /* the process needs read permission on the control file */
4303 /* AV: shouldn't we check that it's been opened for read instead? */
4304 ret = inode_permission(file_inode(cfile.file), MAY_READ);
4305 if (ret < 0)
4306 goto out_put_cfile;
4307
Tejun Heo79bd9812013-11-22 18:20:42 -05004308 /*
Tejun Heofba94802013-11-22 18:20:43 -05004309 * Determine the event callbacks and set them in @event. This used
4310 * to be done via struct cftype but cgroup core no longer knows
4311 * about these events. The following is crude but the whole thing
4312 * is for compatibility anyway.
Tejun Heo3bc942f2013-11-22 18:20:44 -05004313 *
4314 * DO NOT ADD NEW FILES.
Tejun Heofba94802013-11-22 18:20:43 -05004315 */
Al Virob5830432014-10-31 01:22:04 -04004316 name = cfile.file->f_path.dentry->d_name.name;
Tejun Heofba94802013-11-22 18:20:43 -05004317
4318 if (!strcmp(name, "memory.usage_in_bytes")) {
4319 event->register_event = mem_cgroup_usage_register_event;
4320 event->unregister_event = mem_cgroup_usage_unregister_event;
4321 } else if (!strcmp(name, "memory.oom_control")) {
4322 event->register_event = mem_cgroup_oom_register_event;
4323 event->unregister_event = mem_cgroup_oom_unregister_event;
4324 } else if (!strcmp(name, "memory.pressure_level")) {
4325 event->register_event = vmpressure_register_event;
4326 event->unregister_event = vmpressure_unregister_event;
4327 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
Tejun Heo347c4a82013-11-22 18:20:43 -05004328 event->register_event = memsw_cgroup_usage_register_event;
4329 event->unregister_event = memsw_cgroup_usage_unregister_event;
Tejun Heofba94802013-11-22 18:20:43 -05004330 } else {
4331 ret = -EINVAL;
4332 goto out_put_cfile;
4333 }
4334
4335 /*
Tejun Heob5557c42013-11-22 18:20:42 -05004336 * Verify @cfile should belong to @css. Also, remaining events are
4337 * automatically removed on cgroup destruction but the removal is
4338 * asynchronous, so take an extra ref on @css.
Tejun Heo79bd9812013-11-22 18:20:42 -05004339 */
Al Virob5830432014-10-31 01:22:04 -04004340 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
Tejun Heoec903c02014-05-13 12:11:01 -04004341 &memory_cgrp_subsys);
Tejun Heo79bd9812013-11-22 18:20:42 -05004342 ret = -EINVAL;
Tejun Heo5a17f542014-02-11 11:52:47 -05004343 if (IS_ERR(cfile_css))
Tejun Heo79bd9812013-11-22 18:20:42 -05004344 goto out_put_cfile;
Tejun Heo5a17f542014-02-11 11:52:47 -05004345 if (cfile_css != css) {
4346 css_put(cfile_css);
4347 goto out_put_cfile;
4348 }
Tejun Heo79bd9812013-11-22 18:20:42 -05004349
Tejun Heo451af502014-05-13 12:16:21 -04004350 ret = event->register_event(memcg, event->eventfd, buf);
Tejun Heo79bd9812013-11-22 18:20:42 -05004351 if (ret)
4352 goto out_put_css;
4353
Christoph Hellwig9965ed172018-03-05 07:26:05 -08004354 vfs_poll(efile.file, &event->pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05004355
Tejun Heofba94802013-11-22 18:20:43 -05004356 spin_lock(&memcg->event_list_lock);
4357 list_add(&event->list, &memcg->event_list);
4358 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004359
4360 fdput(cfile);
4361 fdput(efile);
4362
Tejun Heo451af502014-05-13 12:16:21 -04004363 return nbytes;
Tejun Heo79bd9812013-11-22 18:20:42 -05004364
4365out_put_css:
Tejun Heob5557c42013-11-22 18:20:42 -05004366 css_put(css);
Tejun Heo79bd9812013-11-22 18:20:42 -05004367out_put_cfile:
4368 fdput(cfile);
4369out_put_eventfd:
4370 eventfd_ctx_put(event->eventfd);
4371out_put_efile:
4372 fdput(efile);
4373out_kfree:
4374 kfree(event);
4375
4376 return ret;
4377}
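#if 0	/* Illustrative userspace sketch, not kernel code: it registers a
	 * usage threshold through the legacy interface above. The cgroup
	 * path is a made-up example and error handling is elided. */
#include <sys/eventfd.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, 0);		/* fd the kernel will signal */
	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
		       O_RDONLY);		/* control file being watched */
	int ctl = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
		       O_WRONLY);
	char buf[64];
	uint64_t ticks;

	/* "<event_fd> <control_fd> <args>": arm at 100 MiB of usage */
	snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd, 100ULL << 20);
	write(ctl, buf, strlen(buf));

	read(efd, &ticks, sizeof(ticks));	/* blocks until crossed */
	printf("threshold crossed %llu time(s)\n",
	       (unsigned long long)ticks);
	return 0;
}
#endif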
4378
Johannes Weiner241994ed2015-02-11 15:26:06 -08004379static struct cftype mem_cgroup_legacy_files[] = {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004380 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004381 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004382 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004383 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004384 },
4385 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004386 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004387 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004388 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004389 .read_u64 = mem_cgroup_read_u64,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004390 },
4391 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004392 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004393 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004394 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004395 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004396 },
4397 {
Balbir Singh296c81d2009-09-23 15:56:36 -07004398 .name = "soft_limit_in_bytes",
4399 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004400 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004401 .read_u64 = mem_cgroup_read_u64,
Balbir Singh296c81d2009-09-23 15:56:36 -07004402 },
4403 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004404 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004405 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04004406 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004407 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004408 },
Balbir Singh8697d332008-02-07 00:13:59 -08004409 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004410 .name = "stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004411 .seq_show = memcg_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004412 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004413 {
4414 .name = "force_empty",
Tejun Heo6770c642014-05-13 12:16:21 -04004415 .write = mem_cgroup_force_empty_write,
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004416 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08004417 {
4418 .name = "use_hierarchy",
4419 .write_u64 = mem_cgroup_hierarchy_write,
4420 .read_u64 = mem_cgroup_hierarchy_read,
4421 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004422 {
Tejun Heo3bc942f2013-11-22 18:20:44 -05004423 .name = "cgroup.event_control", /* XXX: for compat */
Tejun Heo451af502014-05-13 12:16:21 -04004424 .write = memcg_write_event_control,
Tejun Heo7dbdb192015-09-18 17:54:23 -04004425 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
Tejun Heo79bd9812013-11-22 18:20:42 -05004426 },
4427 {
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004428 .name = "swappiness",
4429 .read_u64 = mem_cgroup_swappiness_read,
4430 .write_u64 = mem_cgroup_swappiness_write,
4431 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004432 {
4433 .name = "move_charge_at_immigrate",
4434 .read_u64 = mem_cgroup_move_charge_read,
4435 .write_u64 = mem_cgroup_move_charge_write,
4436 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004437 {
4438 .name = "oom_control",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004439 .seq_show = mem_cgroup_oom_control_read,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004440 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004441 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4442 },
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004443 {
4444 .name = "pressure_level",
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004445 },
Ying Han406eb0c2011-05-26 16:25:37 -07004446#ifdef CONFIG_NUMA
4447 {
4448 .name = "numa_stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004449 .seq_show = memcg_numa_stat_show,
Ying Han406eb0c2011-05-26 16:25:37 -07004450 },
4451#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08004452 {
4453 .name = "kmem.limit_in_bytes",
4454 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004455 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004456 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004457 },
4458 {
4459 .name = "kmem.usage_in_bytes",
4460 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004461 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004462 },
4463 {
4464 .name = "kmem.failcnt",
4465 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04004466 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004467 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004468 },
4469 {
4470 .name = "kmem.max_usage_in_bytes",
4471 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004472 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004473 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004474 },
Yang Shi5b365772017-11-15 17:32:03 -08004475#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
Glauber Costa749c5412012-12-18 14:23:01 -08004476 {
4477 .name = "kmem.slabinfo",
Tejun Heobc2791f2017-02-22 15:41:21 -08004478 .seq_start = memcg_slab_start,
4479 .seq_next = memcg_slab_next,
4480 .seq_stop = memcg_slab_stop,
Vladimir Davydovb0475012014-12-10 15:44:19 -08004481 .seq_show = memcg_slab_show,
Glauber Costa749c5412012-12-18 14:23:01 -08004482 },
4483#endif
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08004484 {
4485 .name = "kmem.tcp.limit_in_bytes",
4486 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4487 .write = mem_cgroup_write,
4488 .read_u64 = mem_cgroup_read_u64,
4489 },
4490 {
4491 .name = "kmem.tcp.usage_in_bytes",
4492 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4493 .read_u64 = mem_cgroup_read_u64,
4494 },
4495 {
4496 .name = "kmem.tcp.failcnt",
4497 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4498 .write = mem_cgroup_reset,
4499 .read_u64 = mem_cgroup_read_u64,
4500 },
4501 {
4502 .name = "kmem.tcp.max_usage_in_bytes",
4503 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4504 .write = mem_cgroup_reset,
4505 .read_u64 = mem_cgroup_read_u64,
4506 },
Tejun Heo6bc10342012-04-01 12:09:55 -07004507 { }, /* terminate */
Tejun Heoaf36f902012-04-01 12:09:55 -07004508};
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004509
Johannes Weiner73f576c2016-07-20 15:44:57 -07004510/*
4511 * Private memory cgroup IDR
4512 *
4513 * Swap-out records and page cache shadow entries need to store memcg
4514 * references in constrained space, so we maintain an ID space that is
4515 * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
4516 * memory-controlled cgroups to 64k.
4517 *
4518 * However, there usually are many references to the offline CSS after
4519 * the cgroup has been destroyed, such as page cache or reclaimable
4520 * slab objects, that don't need to hang on to the ID. We want to keep
4521 * those dead CSS from occupying IDs, or we might quickly exhaust the
4522 * relatively small ID space and prevent the creation of new cgroups
4523 * even when there are far fewer than 64k cgroups - possibly none.
4524 *
4525 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4526 * be freed and recycled when it's no longer needed, which is usually
4527 * when the CSS is offlined.
4528 *
4529 * The only exception to that are records of swapped out tmpfs/shmem
4530 * pages that need to be attributed to live ancestors on swapin. But
4531 * those references are manageable from userspace.
4532 */
4533
4534static DEFINE_IDR(mem_cgroup_idr);
4535
Kirill Tkhai7e97de02018-08-02 15:36:01 -07004536static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4537{
4538 if (memcg->id.id > 0) {
4539 idr_remove(&mem_cgroup_idr, memcg->id.id);
4540 memcg->id.id = 0;
4541 }
4542}
4543
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004544static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
Johannes Weiner73f576c2016-07-20 15:44:57 -07004545{
Kirill Tkhai1c2d4792018-10-26 15:09:28 -07004546 refcount_add(n, &memcg->id.ref);
Johannes Weiner73f576c2016-07-20 15:44:57 -07004547}
4548
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004549static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
Johannes Weiner73f576c2016-07-20 15:44:57 -07004550{
Kirill Tkhai1c2d4792018-10-26 15:09:28 -07004551 if (refcount_sub_and_test(n, &memcg->id.ref)) {
Kirill Tkhai7e97de02018-08-02 15:36:01 -07004552 mem_cgroup_id_remove(memcg);
Johannes Weiner73f576c2016-07-20 15:44:57 -07004553
4554 /* Memcg ID pins CSS */
4555 css_put(&memcg->css);
4556 }
4557}
4558
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004559static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4560{
4561 mem_cgroup_id_get_many(memcg, 1);
4562}
4563
4564static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4565{
4566 mem_cgroup_id_put_many(memcg, 1);
4567}
4568
Johannes Weiner73f576c2016-07-20 15:44:57 -07004569/**
4570 * mem_cgroup_from_id - look up a memcg from a memcg id
4571 * @id: the memcg id to look up
4572 *
4573 * Caller must hold rcu_read_lock().
4574 */
4575struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4576{
4577 WARN_ON_ONCE(!rcu_read_lock_held());
4578 return idr_find(&mem_cgroup_idr, id);
4579}
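/*
 * Typical lookup pattern (sketch; css_tryget_online() is the usual way
 * to promote such a bare id into a usable reference):
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */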
4580
Mel Gormanef8f2322016-07-28 15:46:05 -07004581static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004582{
4583 struct mem_cgroup_per_node *pn;
Mel Gormanef8f2322016-07-28 15:46:05 -07004584 int tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004585 /*
4586 * This routine is called for every possible node.
4587 * But it's a BUG to call kmalloc() against an offline node.
4588 *
4589 * TODO: this routine can waste a lot of memory for nodes which will
4590 * never be onlined. It would be better to use a memory hotplug
4591 * callback function.
4592 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004593 if (!node_state(node, N_NORMAL_MEMORY))
4594 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08004595 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004596 if (!pn)
4597 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004598
Johannes Weiner815744d2019-06-13 15:55:46 -07004599 pn->lruvec_stat_local = alloc_percpu(struct lruvec_stat);
4600 if (!pn->lruvec_stat_local) {
4601 kfree(pn);
4602 return 1;
4603 }
4604
Johannes Weinera983b5e2018-01-31 16:16:45 -08004605 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4606 if (!pn->lruvec_stat_cpu) {
Johannes Weiner815744d2019-06-13 15:55:46 -07004607 free_percpu(pn->lruvec_stat_local);
Johannes Weiner00f3ca22017-07-06 15:40:52 -07004608 kfree(pn);
4609 return 1;
4610 }
4611
Mel Gormanef8f2322016-07-28 15:46:05 -07004612 lruvec_init(&pn->lruvec);
4613 pn->usage_in_excess = 0;
4614 pn->on_tree = false;
4615 pn->memcg = memcg;
4616
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004617 memcg->nodeinfo[node] = pn;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004618 return 0;
4619}
4620
Mel Gormanef8f2322016-07-28 15:46:05 -07004621static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004622{
Johannes Weiner00f3ca22017-07-06 15:40:52 -07004623 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
4624
Michal Hocko4eaf4312018-04-10 16:29:52 -07004625 if (!pn)
4626 return;
4627
Johannes Weinera983b5e2018-01-31 16:16:45 -08004628 free_percpu(pn->lruvec_stat_cpu);
Johannes Weiner815744d2019-06-13 15:55:46 -07004629 free_percpu(pn->lruvec_stat_local);
Johannes Weiner00f3ca22017-07-06 15:40:52 -07004630 kfree(pn);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004631}
4632
Tahsin Erdogan40e952f2017-03-09 16:17:26 -08004633static void __mem_cgroup_free(struct mem_cgroup *memcg)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004634{
4635 int node;
4636
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004637 for_each_node(node)
Mel Gormanef8f2322016-07-28 15:46:05 -07004638 free_mem_cgroup_per_node_info(memcg, node);
Chris Down871789d2019-05-14 15:46:57 -07004639 free_percpu(memcg->vmstats_percpu);
Johannes Weiner815744d2019-06-13 15:55:46 -07004640 free_percpu(memcg->vmstats_local);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004641 kfree(memcg);
4642}
4643
Tahsin Erdogan40e952f2017-03-09 16:17:26 -08004644static void mem_cgroup_free(struct mem_cgroup *memcg)
4645{
4646 memcg_wb_domain_exit(memcg);
4647 __mem_cgroup_free(memcg);
4648}
4649
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004650static struct mem_cgroup *mem_cgroup_alloc(void)
4651{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004652 struct mem_cgroup *memcg;
Alexey Dobriyanb9726c22019-03-05 15:48:26 -08004653 unsigned int size;
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004654 int node;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004655
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004656 size = sizeof(struct mem_cgroup);
4657 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004658
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004659 memcg = kzalloc(size, GFP_KERNEL);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004660 if (!memcg)
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07004661 return NULL;
4662
Johannes Weiner73f576c2016-07-20 15:44:57 -07004663 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4664 1, MEM_CGROUP_ID_MAX,
4665 GFP_KERNEL);
4666 if (memcg->id.id < 0)
4667 goto fail;
4668
Johannes Weiner815744d2019-06-13 15:55:46 -07004669 memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
4670 if (!memcg->vmstats_local)
4671 goto fail;
4672
Chris Down871789d2019-05-14 15:46:57 -07004673 memcg->vmstats_percpu = alloc_percpu(struct memcg_vmstats_percpu);
4674 if (!memcg->vmstats_percpu)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004675 goto fail;
Pavel Emelianov78fb7462008-02-07 00:13:51 -08004676
Bob Liu3ed28fa2012-01-12 17:19:04 -08004677 for_each_node(node)
Mel Gormanef8f2322016-07-28 15:46:05 -07004678 if (alloc_mem_cgroup_per_node_info(memcg, node))
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004679 goto fail;
Balbir Singhf64c3f52009-09-23 15:56:37 -07004680
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004681 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4682 goto fail;
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004683
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004684 INIT_WORK(&memcg->high_work, high_work_func);
Glauber Costad142e3e2013-02-22 16:34:52 -08004685 memcg->last_scanned_node = MAX_NUMNODES;
4686 INIT_LIST_HEAD(&memcg->oom_notify);
Glauber Costad142e3e2013-02-22 16:34:52 -08004687 mutex_init(&memcg->thresholds_lock);
4688 spin_lock_init(&memcg->move_lock);
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004689 vmpressure_init(&memcg->vmpressure);
Tejun Heofba94802013-11-22 18:20:43 -05004690 INIT_LIST_HEAD(&memcg->event_list);
4691 spin_lock_init(&memcg->event_list_lock);
Johannes Weinerd886f4e2016-01-20 15:02:47 -08004692 memcg->socket_pressure = jiffies;
Kirill Tkhai84c07d12018-08-17 15:47:25 -07004693#ifdef CONFIG_MEMCG_KMEM
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004694 memcg->kmemcg_id = -1;
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004695#endif
Tejun Heo52ebea72015-05-22 17:13:37 -04004696#ifdef CONFIG_CGROUP_WRITEBACK
4697 INIT_LIST_HEAD(&memcg->cgwb_list);
4698#endif
Johannes Weiner73f576c2016-07-20 15:44:57 -07004699 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004700 return memcg;
4701fail:
Kirill Tkhai7e97de02018-08-02 15:36:01 -07004702 mem_cgroup_id_remove(memcg);
Tahsin Erdogan40e952f2017-03-09 16:17:26 -08004703 __mem_cgroup_free(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004704 return NULL;
Glauber Costad142e3e2013-02-22 16:34:52 -08004705}
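/*
 * Illustrative sketch (editor's addition): mem_cgroup_alloc() publishes the
 * memcg ID in two phases -- idr_alloc(..., NULL, ...) reserves the ID early
 * so error paths can release it, and idr_replace() at the very end makes the
 * fully initialized memcg visible to lookups. A minimal form of the idiom,
 * where my_idr, MAX_ID and init_obj() are hypothetical:
 *
 *	int id = idr_alloc(&my_idr, NULL, 1, MAX_ID, GFP_KERNEL);
 *	if (id < 0)
 *		return NULL;
 *	obj = init_obj(id);
 *	if (!obj) {
 *		idr_remove(&my_idr, id);	// nobody ever saw this slot
 *		return NULL;
 *	}
 *	idr_replace(&my_idr, obj, id);		// now lookups can find obj
 */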
4706
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004707static struct cgroup_subsys_state * __ref
4708mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
Glauber Costad142e3e2013-02-22 16:34:52 -08004709{
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004710 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4711 struct mem_cgroup *memcg;
4712 long error = -ENOMEM;
Glauber Costad142e3e2013-02-22 16:34:52 -08004713
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004714 memcg = mem_cgroup_alloc();
4715 if (!memcg)
4716 return ERR_PTR(error);
Li Zefan4219b2d2013-09-23 16:56:29 +08004717
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004718 memcg->high = PAGE_COUNTER_MAX;
4719 memcg->soft_limit = PAGE_COUNTER_MAX;
4720 if (parent) {
4721 memcg->swappiness = mem_cgroup_swappiness(parent);
4722 memcg->oom_kill_disable = parent->oom_kill_disable;
4723 }
4724 if (parent && parent->use_hierarchy) {
4725 memcg->use_hierarchy = true;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004726 page_counter_init(&memcg->memory, &parent->memory);
Vladimir Davydov37e84352016-01-20 15:02:56 -08004727 page_counter_init(&memcg->swap, &parent->swap);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004728 page_counter_init(&memcg->memsw, &parent->memsw);
4729 page_counter_init(&memcg->kmem, &parent->kmem);
Johannes Weiner0db15292016-01-20 15:02:50 -08004730 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004731 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004732 page_counter_init(&memcg->memory, NULL);
Vladimir Davydov37e84352016-01-20 15:02:56 -08004733 page_counter_init(&memcg->swap, NULL);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004734 page_counter_init(&memcg->memsw, NULL);
4735 page_counter_init(&memcg->kmem, NULL);
Johannes Weiner0db15292016-01-20 15:02:50 -08004736 page_counter_init(&memcg->tcpmem, NULL);
Tejun Heo8c7f6ed2012-09-13 12:20:58 -07004737 /*
4738 * Deeper hierarchy with use_hierarchy == false doesn't make
4739 * much sense, so let the cgroup subsystem know about this
4740 * unfortunate state in our controller.
4741 */
Glauber Costad142e3e2013-02-22 16:34:52 -08004742 if (parent != root_mem_cgroup)
Tejun Heo073219e2014-02-08 10:36:58 -05004743 memory_cgrp_subsys.broken_hierarchy = true;
Balbir Singh18f59ea2009-01-07 18:08:07 -08004744 }
Vladimir Davydovd6441632014-01-23 15:53:09 -08004745
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004746 /* The following stuff does not apply to the root */
4747 if (!parent) {
4748 root_mem_cgroup = memcg;
4749 return &memcg->css;
4750 }
4751
Vladimir Davydovb313aee2016-03-17 14:18:27 -07004752 error = memcg_online_kmem(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004753 if (error)
4754 goto fail;
Johannes Weiner127424c2016-01-20 15:02:32 -08004755
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004756 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
Johannes Weineref129472016-01-14 15:21:34 -08004757 static_branch_inc(&memcg_sockets_enabled_key);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004758
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004759 return &memcg->css;
4760fail:
Kirill Tkhai7e97de02018-08-02 15:36:01 -07004761 mem_cgroup_id_remove(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004762 mem_cgroup_free(memcg);
Tejun Heoea3a9642016-06-24 14:49:58 -07004763 return ERR_PTR(-ENOMEM);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004764}
4765
Johannes Weiner73f576c2016-07-20 15:44:57 -07004766static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004767{
Vladimir Davydov58fa2a52016-10-07 16:57:29 -07004768 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4769
Kirill Tkhai0a4465d2018-08-17 15:47:37 -07004770 /*
4771 * A memcg must be visible for memcg_expand_shrinker_maps()
4772 * by the time the maps are allocated. So, we allocate maps
4773 * here, when for_each_mem_cgroup() can't skip it.
4774 */
4775 if (memcg_alloc_shrinker_maps(memcg)) {
4776 mem_cgroup_id_remove(memcg);
4777 return -ENOMEM;
4778 }
4779
Johannes Weiner73f576c2016-07-20 15:44:57 -07004780 /* Online state pins memcg ID, memcg ID pins CSS */
Kirill Tkhai1c2d4792018-10-26 15:09:28 -07004781 refcount_set(&memcg->id.ref, 1);
Johannes Weiner73f576c2016-07-20 15:44:57 -07004782 css_get(css);
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07004783 return 0;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004784}
4785
Tejun Heoeb954192013-08-08 20:11:23 -04004786static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004787{
Tejun Heoeb954192013-08-08 20:11:23 -04004788 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004789 struct mem_cgroup_event *event, *tmp;
Tejun Heo79bd9812013-11-22 18:20:42 -05004790
4791 /*
4792 * Unregister events and notify userspace.
4793 * Notify userspace about cgroup removal only after rmdir of the cgroup
4794 * directory, to avoid a race between userspace and kernelspace.
4795 */
Tejun Heofba94802013-11-22 18:20:43 -05004796 spin_lock(&memcg->event_list_lock);
4797 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
Tejun Heo79bd9812013-11-22 18:20:42 -05004798 list_del_init(&event->list);
4799 schedule_work(&event->remove);
4800 }
Tejun Heofba94802013-11-22 18:20:43 -05004801 spin_unlock(&memcg->event_list_lock);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07004802
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07004803 page_counter_set_min(&memcg->memory, 0);
Roman Gushchin23067152018-06-07 17:06:22 -07004804 page_counter_set_low(&memcg->memory, 0);
Roman Gushchin63677c742017-09-06 16:21:47 -07004805
Johannes Weiner567e9ab2016-01-20 15:02:24 -08004806 memcg_offline_kmem(memcg);
Tejun Heo52ebea72015-05-22 17:13:37 -04004807 wb_memcg_offline(memcg);
Johannes Weiner73f576c2016-07-20 15:44:57 -07004808
Roman Gushchin591edfb2018-10-26 15:03:23 -07004809 drain_all_stock(memcg);
4810
Johannes Weiner73f576c2016-07-20 15:44:57 -07004811 mem_cgroup_id_put(memcg);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004812}
4813
Vladimir Davydov6df38682015-12-29 14:54:10 -08004814static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4815{
4816 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4817
4818 invalidate_reclaim_iterators(memcg);
4819}
4820
Tejun Heoeb954192013-08-08 20:11:23 -04004821static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004822{
Tejun Heoeb954192013-08-08 20:11:23 -04004823 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimurac268e992009-01-15 13:51:13 -08004824
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004825 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
Johannes Weineref129472016-01-14 15:21:34 -08004826 static_branch_dec(&memcg_sockets_enabled_key);
Johannes Weiner3893e302016-01-20 15:02:29 -08004827
Johannes Weiner0db15292016-01-20 15:02:50 -08004828 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08004829 static_branch_dec(&memcg_sockets_enabled_key);
Johannes Weiner3893e302016-01-20 15:02:29 -08004830
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004831 vmpressure_cleanup(&memcg->vmpressure);
4832 cancel_work_sync(&memcg->high_work);
4833 mem_cgroup_remove_from_trees(memcg);
Kirill Tkhai0a4465d2018-08-17 15:47:37 -07004834 memcg_free_shrinker_maps(memcg);
Johannes Weinerd886f4e2016-01-20 15:02:47 -08004835 memcg_free_kmem(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004836 mem_cgroup_free(memcg);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004837}
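/*
 * Editor's note: taken together, the callbacks above implement the memcg side
 * of the css lifecycle: css_alloc() builds the structure and reserves its ID,
 * css_online() publishes the ID reference (pinning the css), css_offline()
 * tears down user-visible state and drops that ID reference, css_released()
 * invalidates cached reclaim iterators, and css_free() releases the remaining
 * memory once all references are gone.
 */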
4838
Tejun Heo1ced9532014-07-08 18:02:57 -04004839/**
4840 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4841 * @css: the target css
4842 *
4843 * Reset the states of the mem_cgroup associated with @css. This is
4844 * invoked when the userland requests disabling on the default hierarchy
4845 * but the memcg is pinned through dependency. The memcg should stop
4846 * applying policies and should revert to the vanilla state as it may be
4847 * made visible again.
4848 *
4849 * The current implementation only resets the essential configurations.
4850 * This needs to be expanded to cover all the visible parts.
4851 */
4852static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4853{
4854 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4855
Roman Gushchinbbec2e12018-06-07 17:06:18 -07004856 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
4857 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
4858 page_counter_set_max(&memcg->memsw, PAGE_COUNTER_MAX);
4859 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
4860 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07004861 page_counter_set_min(&memcg->memory, 0);
Roman Gushchin23067152018-06-07 17:06:22 -07004862 page_counter_set_low(&memcg->memory, 0);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004863 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004864 memcg->soft_limit = PAGE_COUNTER_MAX;
Tejun Heo2529bb32015-05-22 18:23:34 -04004865 memcg_wb_domain_size_changed(memcg);
Tejun Heo1ced9532014-07-08 18:02:57 -04004866}
4867
Daisuke Nishimura02491442010-03-10 15:22:17 -08004868#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004869/* Handlers for move charge at task migration. */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004870static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004871{
Johannes Weiner05b84302014-08-06 16:05:59 -07004872 int ret;
Johannes Weiner9476db92014-08-06 16:05:55 -07004873
Mel Gormand0164ad2015-11-06 16:28:21 -08004874 /* Try a single bulk charge without reclaim first, kswapd may wake */
4875 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
Johannes Weiner9476db92014-08-06 16:05:55 -07004876 if (!ret) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004877 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004878 return ret;
4879 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004880
David Rientjes36745342017-01-24 15:18:10 -08004881 /* Try charges one by one with reclaim, but do not retry */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004882 while (count--) {
David Rientjes36745342017-01-24 15:18:10 -08004883 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004884 if (ret)
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004885 return ret;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004886 mc.precharge++;
Johannes Weiner9476db92014-08-06 16:05:55 -07004887 cond_resched();
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004888 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004889 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004890}
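/*
 * Illustrative sketch (editor's addition): the precharge above is "bulk
 * first, then one page at a time" -- a single optimistic batch charge with
 * direct reclaim disallowed, then a slow path that may reclaim but never
 * retries. The generic pattern, with try_bulk()/try_one() as hypothetical
 * stand-ins returning 0 on success:
 *
 *	if (!try_bulk(count))
 *		return 0;			// fast path charged everything
 *	while (count--) {
 *		int err = try_one();		// may reclaim, no retries
 *		if (err)
 *			return err;
 *		cond_resched();
 *	}
 *	return 0;
 */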
4891
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004892union mc_target {
4893 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004894 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004895};
4896
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004897enum mc_target_type {
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004898 MC_TARGET_NONE = 0,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004899 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08004900 MC_TARGET_SWAP,
Jérôme Glissec733a822017-09-08 16:11:54 -07004901 MC_TARGET_DEVICE,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004902};
4903
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004904static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4905 unsigned long addr, pte_t ptent)
4906{
Jérôme Glissec733a822017-09-08 16:11:54 -07004907 struct page *page = _vm_normal_page(vma, addr, ptent, true);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004908
4909 if (!page || !page_mapped(page))
4910 return NULL;
4911 if (PageAnon(page)) {
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004912 if (!(mc.flags & MOVE_ANON))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004913 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004914 } else {
4915 if (!(mc.flags & MOVE_FILE))
4916 return NULL;
4917 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004918 if (!get_page_unless_zero(page))
4919 return NULL;
4920
4921 return page;
4922}
4923
Jérôme Glissec733a822017-09-08 16:11:54 -07004924#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004925static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
Li RongQing48406ef2016-07-26 15:22:14 -07004926 pte_t ptent, swp_entry_t *entry)
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004927{
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004928 struct page *page = NULL;
4929 swp_entry_t ent = pte_to_swp_entry(ptent);
4930
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004931 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004932 return NULL;
Jérôme Glissec733a822017-09-08 16:11:54 -07004933
4934 /*
4935 * Handle MEMORY_DEVICE_PRIVATE pages, which are ZONE_DEVICE pages
4936 * belonging to a device; because they are not accessible by the CPU,
4937 * they are stored as special swap entries in the CPU page table.
4938 */
4939 if (is_device_private_entry(ent)) {
4940 page = device_private_entry_to_page(ent);
4941 /*
4942 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
4943 * a refcount of 1 when free (unlike a normal page)
4944 */
4945 if (!page_ref_add_unless(page, 1, 1))
4946 return NULL;
4947 return page;
4948 }
4949
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004950 /*
4951 * Because lookup_swap_cache() updates some statistics counters,
4952 * we call find_get_page() with swapper_space directly.
4953 */
Huang Yingf6ab1f72016-10-07 17:00:21 -07004954 page = find_get_page(swap_address_space(ent), swp_offset(ent));
Johannes Weiner7941d212016-01-14 15:21:23 -08004955 if (do_memsw_account())
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004956 entry->val = ent.val;
4957
4958 return page;
4959}
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004960#else
4961static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
Li RongQing48406ef2016-07-26 15:22:14 -07004962 pte_t ptent, swp_entry_t *entry)
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004963{
4964 return NULL;
4965}
4966#endif
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004967
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004968static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4969 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4970{
4971 struct page *page = NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004972 struct address_space *mapping;
4973 pgoff_t pgoff;
4974
4975 if (!vma->vm_file) /* anonymous vma */
4976 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004977 if (!(mc.flags & MOVE_FILE))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004978 return NULL;
4979
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004980 mapping = vma->vm_file->f_mapping;
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004981 pgoff = linear_page_index(vma, addr);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004982
4983 /* page is moved even if it's not RSS of this task (page-faulted). */
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004984#ifdef CONFIG_SWAP
4985 /* shmem/tmpfs may report page out on swap: account for that too. */
Johannes Weiner139b6a62014-05-06 12:50:05 -07004986 if (shmem_mapping(mapping)) {
4987 page = find_get_entry(mapping, pgoff);
Matthew Wilcox3159f942017-11-03 13:30:42 -04004988 if (xa_is_value(page)) {
Johannes Weiner139b6a62014-05-06 12:50:05 -07004989 swp_entry_t swp = radix_to_swp_entry(page);
Johannes Weiner7941d212016-01-14 15:21:23 -08004990 if (do_memsw_account())
Johannes Weiner139b6a62014-05-06 12:50:05 -07004991 *entry = swp;
Huang Yingf6ab1f72016-10-07 17:00:21 -07004992 page = find_get_page(swap_address_space(swp),
4993 swp_offset(swp));
Johannes Weiner139b6a62014-05-06 12:50:05 -07004994 }
4995 } else
4996 page = find_get_page(mapping, pgoff);
4997#else
4998 page = find_get_page(mapping, pgoff);
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004999#endif
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005000 return page;
5001}
5002
Chen Gangb1b0dea2015-04-14 15:47:35 -07005003/**
5004 * mem_cgroup_move_account - move account of the page
5005 * @page: the page
Li RongQing25843c22016-07-26 15:26:56 -07005006 * @compound: charge the page as compound or small page
Chen Gangb1b0dea2015-04-14 15:47:35 -07005007 * @from: mem_cgroup which the page is moved from.
5008 * @to: mem_cgroup which the page is moved to. @from != @to.
5009 *
Kirill A. Shutemov3ac808f2016-01-15 16:53:07 -08005010 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
Chen Gangb1b0dea2015-04-14 15:47:35 -07005011 *
5012 * This function doesn't do "charge" to the new cgroup and doesn't do
5013 * "uncharge" from the old cgroup.
5014 */
5015static int mem_cgroup_move_account(struct page *page,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005016 bool compound,
Chen Gangb1b0dea2015-04-14 15:47:35 -07005017 struct mem_cgroup *from,
5018 struct mem_cgroup *to)
5019{
5020 unsigned long flags;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005021 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Chen Gangb1b0dea2015-04-14 15:47:35 -07005022 int ret;
Greg Thelenc4843a72015-05-22 17:13:16 -04005023 bool anon;
Chen Gangb1b0dea2015-04-14 15:47:35 -07005024
5025 VM_BUG_ON(from == to);
5026 VM_BUG_ON_PAGE(PageLRU(page), page);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005027 VM_BUG_ON(compound && !PageTransHuge(page));
Chen Gangb1b0dea2015-04-14 15:47:35 -07005028
5029 /*
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005030 * Prevent mem_cgroup_migrate() from looking at
Hugh Dickins45637ba2015-11-05 18:49:40 -08005031 * page->mem_cgroup of its source page while we change it.
Chen Gangb1b0dea2015-04-14 15:47:35 -07005032 */
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005033 ret = -EBUSY;
Chen Gangb1b0dea2015-04-14 15:47:35 -07005034 if (!trylock_page(page))
5035 goto out;
5036
5037 ret = -EINVAL;
5038 if (page->mem_cgroup != from)
5039 goto out_unlock;
5040
Greg Thelenc4843a72015-05-22 17:13:16 -04005041 anon = PageAnon(page);
5042
Chen Gangb1b0dea2015-04-14 15:47:35 -07005043 spin_lock_irqsave(&from->move_lock, flags);
5044
Greg Thelenc4843a72015-05-22 17:13:16 -04005045 if (!anon && page_mapped(page)) {
Johannes Weinerc9019e92018-01-31 16:16:37 -08005046 __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
5047 __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07005048 }
5049
Greg Thelenc4843a72015-05-22 17:13:16 -04005050 /*
5051 * move_lock was grabbed above and the caller set from->moving_account, so
Johannes Weinerccda7f42017-05-03 14:55:16 -07005052 * mod_memcg_page_state will serialize updates to PageDirty.
Greg Thelenc4843a72015-05-22 17:13:16 -04005053 * So mapping should be stable for dirty pages.
5054 */
5055 if (!anon && PageDirty(page)) {
5056 struct address_space *mapping = page_mapping(page);
5057
5058 if (mapping_cap_account_dirty(mapping)) {
Johannes Weinerc9019e92018-01-31 16:16:37 -08005059 __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
5060 __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
Greg Thelenc4843a72015-05-22 17:13:16 -04005061 }
5062 }
5063
Chen Gangb1b0dea2015-04-14 15:47:35 -07005064 if (PageWriteback(page)) {
Johannes Weinerc9019e92018-01-31 16:16:37 -08005065 __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
5066 __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07005067 }
5068
5069 /*
5070 * It is safe to change page->mem_cgroup here because the page
5071 * is referenced, charged, and isolated - we can't race with
5072 * uncharging, charging, migration, or LRU putback.
5073 */
5074
5075 /* caller should have done css_get */
5076 page->mem_cgroup = to;
5077 spin_unlock_irqrestore(&from->move_lock, flags);
5078
5079 ret = 0;
5080
5081 local_irq_disable();
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005082 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07005083 memcg_check_events(to, page);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005084 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07005085 memcg_check_events(from, page);
5086 local_irq_enable();
5087out_unlock:
5088 unlock_page(page);
5089out:
5090 return ret;
5091}
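/*
 * Illustrative sketch (editor's addition): mem_cgroup_move_account() requires
 * the page to be off the LRU, so the canonical caller sequence (mirrored by
 * the charge-moving page walker later in this file) is:
 *
 *	if (!isolate_lru_page(page)) {
 *		if (!mem_cgroup_move_account(page, false, from, to)) {
 *			// page->mem_cgroup and the mapped/dirty/writeback
 *			// counters now belong to "to"
 *		}
 *		putback_lru_page(page);
 *	}
 */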
5092
Li RongQing7cf78062016-05-27 14:27:46 -07005093/**
5094 * get_mctgt_type - get target type of moving charge
5095 * @vma: the vma to which the pte to be checked belongs
5096 * @addr: the address corresponding to the pte to be checked
5097 * @ptent: the pte to be checked
5098 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5099 *
5100 * Returns
5101 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
5102 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5103 * move charge. If @target is not NULL, the page is stored in target->page
5104 * with an extra refcount taken (callers should handle it).
5105 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5106 * target for charge migration. If @target is not NULL, the entry is stored
5107 * in target->ent.
Jérôme Glissedf6ad692017-09-08 16:12:24 -07005108 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PUBLIC
5109 * or MEMORY_DEVICE_PRIVATE (so a ZONE_DEVICE page and thus not on the LRU).
5110 * For now such a page is charged like a regular page would be, as for all
5111 * intents and purposes it is just special memory taking the place of a
5112 * regular page.
Jérôme Glissec733a822017-09-08 16:11:54 -07005113 *
5114 * See Documentation/vm/hmm.txt and include/linux/hmm.h
Li RongQing7cf78062016-05-27 14:27:46 -07005115 *
5116 * Called with pte lock held.
5117 */
5118
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005119static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005120 unsigned long addr, pte_t ptent, union mc_target *target)
5121{
Daisuke Nishimura02491442010-03-10 15:22:17 -08005122 struct page *page = NULL;
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005123 enum mc_target_type ret = MC_TARGET_NONE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005124 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005125
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005126 if (pte_present(ptent))
5127 page = mc_handle_present_pte(vma, addr, ptent);
5128 else if (is_swap_pte(ptent))
Li RongQing48406ef2016-07-26 15:22:14 -07005129 page = mc_handle_swap_pte(vma, ptent, &ent);
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08005130 else if (pte_none(ptent))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07005131 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07005132
5133 if (!page && !ent.val)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005134 return ret;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005135 if (page) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08005136 /*
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005137 * Do only a loose check w/o serialization.
Johannes Weiner1306a852014-12-10 15:44:52 -08005138 * mem_cgroup_move_account() checks whether the page is valid
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005139 * or not under LRU exclusion.
Daisuke Nishimura02491442010-03-10 15:22:17 -08005140 */
Johannes Weiner1306a852014-12-10 15:44:52 -08005141 if (page->mem_cgroup == mc.from) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08005142 ret = MC_TARGET_PAGE;
Jérôme Glissedf6ad692017-09-08 16:12:24 -07005143 if (is_device_private_page(page) ||
5144 is_device_public_page(page))
Jérôme Glissec733a822017-09-08 16:11:54 -07005145 ret = MC_TARGET_DEVICE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005146 if (target)
5147 target->page = page;
5148 }
5149 if (!ret || !target)
5150 put_page(page);
5151 }
Huang Ying3e14a572017-09-06 16:22:37 -07005152 /*
5153 * There is a swap entry and the page doesn't exist or isn't charged.
5154 * But we cannot move a tail-page in a THP.
5155 */
5156 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
Li Zefan34c00c32013-09-23 16:56:01 +08005157 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07005158 ret = MC_TARGET_SWAP;
5159 if (target)
5160 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005161 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005162 return ret;
5163}
5164
Naoya Horiguchi12724852012-03-21 16:34:28 -07005165#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5166/*
Huang Yingd6810d72017-09-06 16:22:45 -07005167 * We don't consider PMD-mapped swapping or file-mapped pages because THP does
5168 * not support them for now.
Naoya Horiguchi12724852012-03-21 16:34:28 -07005169 * Caller should make sure that pmd_trans_huge(pmd) is true.
5170 */
5171static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5172 unsigned long addr, pmd_t pmd, union mc_target *target)
5173{
5174 struct page *page = NULL;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005175 enum mc_target_type ret = MC_TARGET_NONE;
5176
Zi Yan84c3fc42017-09-08 16:11:01 -07005177 if (unlikely(is_swap_pmd(pmd))) {
5178 VM_BUG_ON(thp_migration_supported() &&
5179 !is_pmd_migration_entry(pmd));
5180 return ret;
5181 }
Naoya Horiguchi12724852012-03-21 16:34:28 -07005182 page = pmd_page(pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08005183 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005184 if (!(mc.flags & MOVE_ANON))
Naoya Horiguchi12724852012-03-21 16:34:28 -07005185 return ret;
Johannes Weiner1306a852014-12-10 15:44:52 -08005186 if (page->mem_cgroup == mc.from) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005187 ret = MC_TARGET_PAGE;
5188 if (target) {
5189 get_page(page);
5190 target->page = page;
5191 }
5192 }
5193 return ret;
5194}
5195#else
5196static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5197 unsigned long addr, pmd_t pmd, union mc_target *target)
5198{
5199 return MC_TARGET_NONE;
5200}
5201#endif
5202
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005203static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5204 unsigned long addr, unsigned long end,
5205 struct mm_walk *walk)
5206{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005207 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005208 pte_t *pte;
5209 spinlock_t *ptl;
5210
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08005211 ptl = pmd_trans_huge_lock(pmd, vma);
5212 if (ptl) {
Jérôme Glissec733a822017-09-08 16:11:54 -07005213 /*
5214 * Note there cannot be MC_TARGET_DEVICE for now, as we do not
5215 * support transparent huge pages with MEMORY_DEVICE_PUBLIC or
5216 * MEMORY_DEVICE_PRIVATE, but this might change.
5217 */
Naoya Horiguchi12724852012-03-21 16:34:28 -07005218 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5219 mc.precharge += HPAGE_PMD_NR;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005220 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07005221 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005222 }
Dave Hansen03319322011-03-22 16:32:56 -07005223
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07005224 if (pmd_trans_unstable(pmd))
5225 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005226 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5227 for (; addr != end; pte++, addr += PAGE_SIZE)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005228 if (get_mctgt_type(vma, addr, *pte, NULL))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005229 mc.precharge++; /* increment precharge temporarily */
5230 pte_unmap_unlock(pte - 1, ptl);
5231 cond_resched();
5232
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005233 return 0;
5234}
5235
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005236static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5237{
5238 unsigned long precharge;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005239
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005240 struct mm_walk mem_cgroup_count_precharge_walk = {
5241 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5242 .mm = mm,
5243 };
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005244 down_read(&mm->mmap_sem);
James Morse0247f3f2016-10-07 17:00:12 -07005245 walk_page_range(0, mm->highest_vm_end,
5246 &mem_cgroup_count_precharge_walk);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005247 up_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005248
5249 precharge = mc.precharge;
5250 mc.precharge = 0;
5251
5252 return precharge;
5253}
5254
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005255static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5256{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005257 unsigned long precharge = mem_cgroup_count_precharge(mm);
5258
5259 VM_BUG_ON(mc.moving_task);
5260 mc.moving_task = current;
5261 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005262}
5263
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005264/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5265static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005266{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005267 struct mem_cgroup *from = mc.from;
5268 struct mem_cgroup *to = mc.to;
5269
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005270 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005271 if (mc.precharge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005272 cancel_charge(mc.to, mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005273 mc.precharge = 0;
5274 }
5275 /*
5276 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5277 * we must uncharge here.
5278 */
5279 if (mc.moved_charge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005280 cancel_charge(mc.from, mc.moved_charge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005281 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005282 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005283 /* we must fixup refcnts and charges */
5284 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005285 /* uncharge swap account from the old cgroup */
Johannes Weinerce00a962014-09-05 08:43:57 -04005286 if (!mem_cgroup_is_root(mc.from))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005287 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005288
Vladimir Davydov615d66c2016-08-11 15:33:03 -07005289 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5290
Johannes Weiner05b84302014-08-06 16:05:59 -07005291 /*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005292 * we charged both to->memory and to->memsw, so we
5293 * should uncharge to->memory.
Johannes Weiner05b84302014-08-06 16:05:59 -07005294 */
Johannes Weinerce00a962014-09-05 08:43:57 -04005295 if (!mem_cgroup_is_root(mc.to))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005296 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005297
Vladimir Davydov615d66c2016-08-11 15:33:03 -07005298 mem_cgroup_id_get_many(mc.to, mc.moved_swap);
5299 css_put_many(&mc.to->css, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005300
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005301 mc.moved_swap = 0;
5302 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005303 memcg_oom_recover(from);
5304 memcg_oom_recover(to);
5305 wake_up_all(&mc.waitq);
5306}
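/*
 * Editor's note: the moved_swap fixup above is deliberately asymmetric. Each
 * precharge charged both to->memory and to->memsw, but a moved swap entry
 * consumes swap only, so to->memory is uncharged while to->memsw keeps its
 * charge; on the source side only from->memsw is released.
 */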
5307
5308static void mem_cgroup_clear_mc(void)
5309{
Tejun Heo264a0ae2016-04-21 19:09:02 -04005310 struct mm_struct *mm = mc.mm;
5311
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005312 /*
5313 * we must clear moving_task before waking up waiters at the end of
5314 * task migration.
5315 */
5316 mc.moving_task = NULL;
5317 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005318 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005319 mc.from = NULL;
5320 mc.to = NULL;
Tejun Heo264a0ae2016-04-21 19:09:02 -04005321 mc.mm = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005322 spin_unlock(&mc.lock);
Tejun Heo264a0ae2016-04-21 19:09:02 -04005323
5324 mmput(mm);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005325}
5326
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005327static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005328{
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005329 struct cgroup_subsys_state *css;
Ross Zwislereed67d72015-12-23 14:53:27 -07005330 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
Tejun Heo9f2115f2015-09-08 15:01:10 -07005331 struct mem_cgroup *from;
Tejun Heo4530edd2015-09-11 15:00:19 -04005332 struct task_struct *leader, *p;
Tejun Heo9f2115f2015-09-08 15:01:10 -07005333 struct mm_struct *mm;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005334 unsigned long move_flags;
Tejun Heo9f2115f2015-09-08 15:01:10 -07005335 int ret = 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005336
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005337 /* charge immigration isn't supported on the default hierarchy */
5338 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Tejun Heo9f2115f2015-09-08 15:01:10 -07005339 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005340
Tejun Heo4530edd2015-09-11 15:00:19 -04005341 /*
5342 * Multi-process migrations only happen on the default hierarchy
5343 * where charge immigration is not used. Perform charge
5344 * immigration if @tset contains a leader and whine if there are
5345 * multiple.
5346 */
5347 p = NULL;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005348 cgroup_taskset_for_each_leader(leader, css, tset) {
Tejun Heo4530edd2015-09-11 15:00:19 -04005349 WARN_ON_ONCE(p);
5350 p = leader;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005351 memcg = mem_cgroup_from_css(css);
Tejun Heo4530edd2015-09-11 15:00:19 -04005352 }
5353 if (!p)
5354 return 0;
5355
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005356 /*
5357 * We are now committed to this value, whatever it is. Changes in this
5358 * tunable will only affect upcoming migrations, not the current one.
5359 * So we need to save it, and keep it going.
5360 */
5361 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5362 if (!move_flags)
5363 return 0;
5364
Tejun Heo9f2115f2015-09-08 15:01:10 -07005365 from = mem_cgroup_from_task(p);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005366
Tejun Heo9f2115f2015-09-08 15:01:10 -07005367 VM_BUG_ON(from == memcg);
Johannes Weiner247b1442014-12-10 15:44:11 -08005368
Tejun Heo9f2115f2015-09-08 15:01:10 -07005369 mm = get_task_mm(p);
5370 if (!mm)
5371 return 0;
5372 /* We move charges only when we move an owner of the mm */
5373 if (mm->owner == p) {
5374 VM_BUG_ON(mc.from);
5375 VM_BUG_ON(mc.to);
5376 VM_BUG_ON(mc.precharge);
5377 VM_BUG_ON(mc.moved_charge);
5378 VM_BUG_ON(mc.moved_swap);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005379
Tejun Heo9f2115f2015-09-08 15:01:10 -07005380 spin_lock(&mc.lock);
Tejun Heo264a0ae2016-04-21 19:09:02 -04005381 mc.mm = mm;
Tejun Heo9f2115f2015-09-08 15:01:10 -07005382 mc.from = from;
5383 mc.to = memcg;
5384 mc.flags = move_flags;
5385 spin_unlock(&mc.lock);
5386 /* We set mc.moving_task later */
5387
5388 ret = mem_cgroup_precharge_mc(mm);
5389 if (ret)
5390 mem_cgroup_clear_mc();
Tejun Heo264a0ae2016-04-21 19:09:02 -04005391 } else {
5392 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005393 }
5394 return ret;
5395}
5396
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005397static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005398{
Johannes Weiner4e2f2452014-12-10 15:44:08 -08005399 if (mc.to)
5400 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005401}
5402
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005403static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5404 unsigned long addr, unsigned long end,
5405 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005406{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005407 int ret = 0;
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005408 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005409 pte_t *pte;
5410 spinlock_t *ptl;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005411 enum mc_target_type target_type;
5412 union mc_target target;
5413 struct page *page;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005414
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08005415 ptl = pmd_trans_huge_lock(pmd, vma);
5416 if (ptl) {
Hugh Dickins62ade862012-05-18 11:28:34 -07005417 if (mc.precharge < HPAGE_PMD_NR) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005418 spin_unlock(ptl);
Naoya Horiguchi12724852012-03-21 16:34:28 -07005419 return 0;
5420 }
5421 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5422 if (target_type == MC_TARGET_PAGE) {
5423 page = target.page;
5424 if (!isolate_lru_page(page)) {
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005425 if (!mem_cgroup_move_account(page, true,
Johannes Weiner1306a852014-12-10 15:44:52 -08005426 mc.from, mc.to)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005427 mc.precharge -= HPAGE_PMD_NR;
5428 mc.moved_charge += HPAGE_PMD_NR;
5429 }
5430 putback_lru_page(page);
5431 }
5432 put_page(page);
Jérôme Glissec733a822017-09-08 16:11:54 -07005433 } else if (target_type == MC_TARGET_DEVICE) {
5434 page = target.page;
5435 if (!mem_cgroup_move_account(page, true,
5436 mc.from, mc.to)) {
5437 mc.precharge -= HPAGE_PMD_NR;
5438 mc.moved_charge += HPAGE_PMD_NR;
5439 }
5440 put_page(page);
Naoya Horiguchi12724852012-03-21 16:34:28 -07005441 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005442 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07005443 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005444 }
5445
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07005446 if (pmd_trans_unstable(pmd))
5447 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005448retry:
5449 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5450 for (; addr != end; addr += PAGE_SIZE) {
5451 pte_t ptent = *(pte++);
Jérôme Glissec733a822017-09-08 16:11:54 -07005452 bool device = false;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005453 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005454
5455 if (!mc.precharge)
5456 break;
5457
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005458 switch (get_mctgt_type(vma, addr, ptent, &target)) {
Jérôme Glissec733a822017-09-08 16:11:54 -07005459 case MC_TARGET_DEVICE:
5460 device = true;
5461 /* fall through */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005462 case MC_TARGET_PAGE:
5463 page = target.page;
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08005464 /*
5465 * We can have a part of the split pmd here. Moving it
5466 * can be done but it would be too convoluted so simply
5467 * ignore such a partial THP and keep it in the original
5468 * memcg. There should be somebody mapping the head.
5469 */
5470 if (PageTransCompound(page))
5471 goto put;
Jérôme Glissec733a822017-09-08 16:11:54 -07005472 if (!device && isolate_lru_page(page))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005473 goto put;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005474 if (!mem_cgroup_move_account(page, false,
5475 mc.from, mc.to)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005476 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005477 /* we uncharge from mc.from later. */
5478 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005479 }
Jérôme Glissec733a822017-09-08 16:11:54 -07005480 if (!device)
5481 putback_lru_page(page);
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005482put: /* get_mctgt_type() gets the page */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005483 put_page(page);
5484 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005485 case MC_TARGET_SWAP:
5486 ent = target.ent;
Hugh Dickinse91cbb42012-05-29 15:06:51 -07005487 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08005488 mc.precharge--;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005489 /* we fixup refcnts and charges later. */
5490 mc.moved_swap++;
5491 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08005492 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005493 default:
5494 break;
5495 }
5496 }
5497 pte_unmap_unlock(pte - 1, ptl);
5498 cond_resched();
5499
5500 if (addr != end) {
5501 /*
5502 * We have consumed all precharges we got in can_attach().
5503 * We try charge one by one, but don't do any additional
5504 * charges to mc.to if we have failed in charge once in attach()
5505 * phase.
5506 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005507 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005508 if (!ret)
5509 goto retry;
5510 }
5511
5512 return ret;
5513}
5514
Tejun Heo264a0ae2016-04-21 19:09:02 -04005515static void mem_cgroup_move_charge(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005516{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005517 struct mm_walk mem_cgroup_move_charge_walk = {
5518 .pmd_entry = mem_cgroup_move_charge_pte_range,
Tejun Heo264a0ae2016-04-21 19:09:02 -04005519 .mm = mc.mm,
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005520 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005521
5522 lru_add_drain_all();
Johannes Weiner312722c2014-12-10 15:44:25 -08005523 /*
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07005524 * Signal lock_page_memcg() to take the memcg's move_lock
5525 * while we're moving its pages to another memcg. Then wait
5526 * for already started RCU-only updates to finish.
Johannes Weiner312722c2014-12-10 15:44:25 -08005527 */
5528 atomic_inc(&mc.from->moving_account);
5529 synchronize_rcu();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005530retry:
Tejun Heo264a0ae2016-04-21 19:09:02 -04005531 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005532 /*
5533 * Someone who is holding the mmap_sem might be waiting in the
5534 * waitq. So we cancel all extra charges, wake up all waiters,
5535 * and retry. Because we cancel precharges, we might not be able
5536 * to move enough charges, but moving charge is a best-effort
5537 * feature anyway, so it wouldn't be a big problem.
5538 */
5539 __mem_cgroup_clear_mc();
5540 cond_resched();
5541 goto retry;
5542 }
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005543 /*
5544 * When we have consumed all precharges and failed to do an
5545 * additional charge, the page walk just aborts.
5546 */
James Morse0247f3f2016-10-07 17:00:12 -07005547 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
5548
Tejun Heo264a0ae2016-04-21 19:09:02 -04005549 up_read(&mc.mm->mmap_sem);
Johannes Weiner312722c2014-12-10 15:44:25 -08005550 atomic_dec(&mc.from->moving_account);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005551}
5552
Tejun Heo264a0ae2016-04-21 19:09:02 -04005553static void mem_cgroup_move_task(void)
Balbir Singh67e465a2008-02-07 00:13:54 -08005554{
Tejun Heo264a0ae2016-04-21 19:09:02 -04005555 if (mc.to) {
5556 mem_cgroup_move_charge();
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005557 mem_cgroup_clear_mc();
Tejun Heo264a0ae2016-04-21 19:09:02 -04005558 }
Balbir Singh67e465a2008-02-07 00:13:54 -08005559}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005560#else /* !CONFIG_MMU */
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005561static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005562{
5563 return 0;
5564}
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005565static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005566{
5567}
Tejun Heo264a0ae2016-04-21 19:09:02 -04005568static void mem_cgroup_move_task(void)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005569{
5570}
5571#endif
Balbir Singh67e465a2008-02-07 00:13:54 -08005572
Tejun Heof00baae2013-04-15 13:41:15 -07005573/*
5574 * Cgroup retains root cgroups across [un]mount cycles making it necessary
Tejun Heoaa6ec292014-07-09 10:08:08 -04005575 * to verify whether we're attached to the default hierarchy on each mount
5576 * attempt.
Tejun Heof00baae2013-04-15 13:41:15 -07005577 */
Tejun Heoeb954192013-08-08 20:11:23 -04005578static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
Tejun Heof00baae2013-04-15 13:41:15 -07005579{
5580 /*
Tejun Heoaa6ec292014-07-09 10:08:08 -04005581 * use_hierarchy is forced on the default hierarchy. cgroup core
Tejun Heof00baae2013-04-15 13:41:15 -07005582 * guarantees that @root doesn't have any children, so turning it
5583 * on for the root memcg is enough.
5584 */
Tejun Heo9e10a132015-09-18 11:56:28 -04005585 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Vladimir Davydov7feee5902015-03-12 16:26:19 -07005586 root_mem_cgroup->use_hierarchy = true;
5587 else
5588 root_mem_cgroup->use_hierarchy = false;
Tejun Heof00baae2013-04-15 13:41:15 -07005589}
5590
Chris Down677dc972019-03-05 15:45:55 -08005591static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
5592{
5593 if (value == PAGE_COUNTER_MAX)
5594 seq_puts(m, "max\n");
5595 else
5596 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
5597
5598 return 0;
5599}
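/*
 * Editor's note, with a worked example: this helper is why the v2 tunables
 * read back as "max" rather than a huge number. PAGE_COUNTER_MAX prints
 * "max\n"; a counter value of 256 pages with a 4 KiB PAGE_SIZE prints
 * "1048576\n" (256 * 4096 bytes).
 */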
5600
Johannes Weiner241994ed2015-02-11 15:26:06 -08005601static u64 memory_current_read(struct cgroup_subsys_state *css,
5602 struct cftype *cft)
5603{
Johannes Weinerf5fc3c5d2015-11-05 18:50:23 -08005604 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5605
5606 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005607}
5608
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005609static int memory_min_show(struct seq_file *m, void *v)
5610{
Chris Down677dc972019-03-05 15:45:55 -08005611 return seq_puts_memcg_tunable(m,
5612 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005613}
5614
5615static ssize_t memory_min_write(struct kernfs_open_file *of,
5616 char *buf, size_t nbytes, loff_t off)
5617{
5618 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5619 unsigned long min;
5620 int err;
5621
5622 buf = strstrip(buf);
5623 err = page_counter_memparse(buf, "max", &min);
5624 if (err)
5625 return err;
5626
5627 page_counter_set_min(&memcg->memory, min);
5628
5629 return nbytes;
5630}
5631
Johannes Weiner241994ed2015-02-11 15:26:06 -08005632static int memory_low_show(struct seq_file *m, void *v)
5633{
Chris Down677dc972019-03-05 15:45:55 -08005634 return seq_puts_memcg_tunable(m,
5635 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
Johannes Weiner241994ed2015-02-11 15:26:06 -08005636}
5637
5638static ssize_t memory_low_write(struct kernfs_open_file *of,
5639 char *buf, size_t nbytes, loff_t off)
5640{
5641 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5642 unsigned long low;
5643 int err;
5644
5645 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005646 err = page_counter_memparse(buf, "max", &low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005647 if (err)
5648 return err;
5649
Roman Gushchin23067152018-06-07 17:06:22 -07005650 page_counter_set_low(&memcg->memory, low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005651
5652 return nbytes;
5653}
5654
5655static int memory_high_show(struct seq_file *m, void *v)
5656{
Chris Down677dc972019-03-05 15:45:55 -08005657 return seq_puts_memcg_tunable(m, READ_ONCE(mem_cgroup_from_seq(m)->high));
Johannes Weiner241994ed2015-02-11 15:26:06 -08005658}
5659
5660static ssize_t memory_high_write(struct kernfs_open_file *of,
5661 char *buf, size_t nbytes, loff_t off)
5662{
5663 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner588083b2016-03-17 14:20:25 -07005664 unsigned long nr_pages;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005665 unsigned long high;
5666 int err;
5667
5668 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005669 err = page_counter_memparse(buf, "max", &high);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005670 if (err)
5671 return err;
5672
5673 memcg->high = high;
5674
Johannes Weiner588083b2016-03-17 14:20:25 -07005675 nr_pages = page_counter_read(&memcg->memory);
5676 if (nr_pages > high)
5677 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5678 GFP_KERNEL, true);
5679
Tejun Heo2529bb32015-05-22 18:23:34 -04005680 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005681 return nbytes;
5682}
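/*
 * Illustrative sketch (editor's addition; hypothetical userspace code, not
 * part of the kernel -- needs <fcntl.h> and <unistd.h>): memory.high accepts
 * a byte count or the literal "max", and the handler above immediately tries
 * to reclaim down toward the new target. On a v2 hierarchy, assuming a cgroup
 * directory named "mygroup":
 *
 *	int fd = open("/sys/fs/cgroup/mygroup/memory.high", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1073741824", 10);	// 1 GiB; "max" clears the cap
 *		close(fd);
 *	}
 */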
5683
5684static int memory_max_show(struct seq_file *m, void *v)
5685{
Chris Down677dc972019-03-05 15:45:55 -08005686 return seq_puts_memcg_tunable(m,
5687 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
Johannes Weiner241994ed2015-02-11 15:26:06 -08005688}
5689
5690static ssize_t memory_max_write(struct kernfs_open_file *of,
5691 char *buf, size_t nbytes, loff_t off)
5692{
5693 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07005694 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5695 bool drained = false;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005696 unsigned long max;
5697 int err;
5698
5699 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005700 err = page_counter_memparse(buf, "max", &max);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005701 if (err)
5702 return err;
5703
Roman Gushchinbbec2e12018-06-07 17:06:18 -07005704 xchg(&memcg->memory.max, max);
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07005705
5706 for (;;) {
5707 unsigned long nr_pages = page_counter_read(&memcg->memory);
5708
5709 if (nr_pages <= max)
5710 break;
5711
5712 if (signal_pending(current)) {
5713 err = -EINTR;
5714 break;
5715 }
5716
5717 if (!drained) {
5718 drain_all_stock(memcg);
5719 drained = true;
5720 continue;
5721 }
5722
5723 if (nr_reclaims) {
5724 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5725 GFP_KERNEL, true))
5726 nr_reclaims--;
5727 continue;
5728 }
5729
Johannes Weinere27be242018-04-10 16:29:45 -07005730 memcg_memory_event(memcg, MEMCG_OOM);
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07005731 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5732 break;
5733 }
Johannes Weiner241994ed2015-02-11 15:26:06 -08005734
Tejun Heo2529bb32015-05-22 18:23:34 -04005735 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005736 return nbytes;
5737}
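/*
 * Editor's note: in contrast to memory.high, the memory.max write path above
 * enforces the new limit before returning -- it drains the per-cpu charge
 * stocks, retries direct reclaim up to MEM_CGROUP_RECLAIM_RETRIES times, and
 * as a last resort records MEMCG_OOM and invokes the cgroup OOM killer until
 * usage fits or the writer is interrupted by a signal.
 */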
5738
Shakeel Butt1e577f92019-07-11 20:55:55 -07005739static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
5740{
5741 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
5742 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
5743 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
5744 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
5745 seq_printf(m, "oom_kill %lu\n",
5746 atomic_long_read(&events[MEMCG_OOM_KILL]));
5747}
5748
Johannes Weiner241994ed2015-02-11 15:26:06 -08005749static int memory_events_show(struct seq_file *m, void *v)
5750{
Chris Downaa9694b2019-03-05 15:45:52 -08005751 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005752
Shakeel Butt1e577f92019-07-11 20:55:55 -07005753 __memory_events_show(m, memcg->memory_events);
5754 return 0;
5755}
Johannes Weiner241994ed2015-02-11 15:26:06 -08005756
Shakeel Butt1e577f92019-07-11 20:55:55 -07005757static int memory_events_local_show(struct seq_file *m, void *v)
5758{
5759 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
5760
5761 __memory_events_show(m, memcg->memory_events_local);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005762 return 0;
5763}
5764
Johannes Weiner587d9f72016-01-20 15:03:19 -08005765static int memory_stat_show(struct seq_file *m, void *v)
5766{
Chris Downaa9694b2019-03-05 15:45:52 -08005767 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Johannes Weinerc8713d02019-07-11 20:55:59 -07005768 char *buf;
Johannes Weiner587d9f72016-01-20 15:03:19 -08005769
Johannes Weinerc8713d02019-07-11 20:55:59 -07005770 buf = memory_stat_format(memcg);
5771 if (!buf)
5772 return -ENOMEM;
5773 seq_puts(m, buf);
5774 kfree(buf);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005775 return 0;
5776}
5777
Roman Gushchin3d8b38e2018-08-21 21:53:54 -07005778static int memory_oom_group_show(struct seq_file *m, void *v)
5779{
Chris Downaa9694b2019-03-05 15:45:52 -08005780 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Roman Gushchin3d8b38e2018-08-21 21:53:54 -07005781
5782 seq_printf(m, "%d\n", memcg->oom_group);
5783
5784 return 0;
5785}
5786
5787static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
5788 char *buf, size_t nbytes, loff_t off)
5789{
5790 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5791 int ret, oom_group;
5792
5793 buf = strstrip(buf);
5794 if (!buf)
5795 return -EINVAL;
5796
5797 ret = kstrtoint(buf, 0, &oom_group);
5798 if (ret)
5799 return ret;
5800
5801 if (oom_group != 0 && oom_group != 1)
5802 return -EINVAL;
5803
5804 memcg->oom_group = oom_group;
5805
5806 return nbytes;
5807}
5808
Johannes Weiner241994ed2015-02-11 15:26:06 -08005809static struct cftype memory_files[] = {
5810 {
5811 .name = "current",
Johannes Weinerf5fc3c5d2015-11-05 18:50:23 -08005812 .flags = CFTYPE_NOT_ON_ROOT,
Johannes Weiner241994ed2015-02-11 15:26:06 -08005813 .read_u64 = memory_current_read,
5814 },
5815 {
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005816 .name = "min",
5817 .flags = CFTYPE_NOT_ON_ROOT,
5818 .seq_show = memory_min_show,
5819 .write = memory_min_write,
5820 },
5821 {
Johannes Weiner241994ed2015-02-11 15:26:06 -08005822 .name = "low",
5823 .flags = CFTYPE_NOT_ON_ROOT,
5824 .seq_show = memory_low_show,
5825 .write = memory_low_write,
5826 },
5827 {
5828 .name = "high",
5829 .flags = CFTYPE_NOT_ON_ROOT,
5830 .seq_show = memory_high_show,
5831 .write = memory_high_write,
5832 },
5833 {
5834 .name = "max",
5835 .flags = CFTYPE_NOT_ON_ROOT,
5836 .seq_show = memory_max_show,
5837 .write = memory_max_write,
5838 },
5839 {
5840 .name = "events",
5841 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo472912a2015-09-18 18:01:59 -04005842 .file_offset = offsetof(struct mem_cgroup, events_file),
Johannes Weiner241994ed2015-02-11 15:26:06 -08005843 .seq_show = memory_events_show,
5844 },
Johannes Weiner587d9f72016-01-20 15:03:19 -08005845 {
Shakeel Butt1e577f92019-07-11 20:55:55 -07005846 .name = "events.local",
5847 .flags = CFTYPE_NOT_ON_ROOT,
5848 .file_offset = offsetof(struct mem_cgroup, events_local_file),
5849 .seq_show = memory_events_local_show,
5850 },
5851 {
Johannes Weiner587d9f72016-01-20 15:03:19 -08005852 .name = "stat",
5853 .flags = CFTYPE_NOT_ON_ROOT,
5854 .seq_show = memory_stat_show,
5855 },
Roman Gushchin3d8b38e2018-08-21 21:53:54 -07005856 {
5857 .name = "oom.group",
5858 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
5859 .seq_show = memory_oom_group_show,
5860 .write = memory_oom_group_write,
5861 },
Johannes Weiner241994ed2015-02-11 15:26:06 -08005862 { } /* terminate */
5863};
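
/*
 * Taken together, the table above defines the memory controller's
 * cgroup v2 interface: each entry appears as a "memory."-prefixed file
 * (memory.current, memory.min, memory.low, memory.high, memory.max,
 * memory.events, memory.events.local, memory.stat, memory.oom.group)
 * in every non-root cgroup directory. As an illustrative sketch, from
 * a shell one might do (the "mygroup" path is hypothetical and assumes
 * the default /sys/fs/cgroup mount):
 *
 *	cat /sys/fs/cgroup/mygroup/memory.current
 *	echo 1G > /sys/fs/cgroup/mygroup/memory.max
 */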
5864
Tejun Heo073219e2014-02-08 10:36:58 -05005865struct cgroup_subsys memory_cgrp_subsys = {
Tejun Heo92fb9742012-11-19 08:13:38 -08005866 .css_alloc = mem_cgroup_css_alloc,
Glauber Costad142e3e2013-02-22 16:34:52 -08005867 .css_online = mem_cgroup_css_online,
Tejun Heo92fb9742012-11-19 08:13:38 -08005868 .css_offline = mem_cgroup_css_offline,
Vladimir Davydov6df38682015-12-29 14:54:10 -08005869 .css_released = mem_cgroup_css_released,
Tejun Heo92fb9742012-11-19 08:13:38 -08005870 .css_free = mem_cgroup_css_free,
Tejun Heo1ced9532014-07-08 18:02:57 -04005871 .css_reset = mem_cgroup_css_reset,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005872 .can_attach = mem_cgroup_can_attach,
5873 .cancel_attach = mem_cgroup_cancel_attach,
Tejun Heo264a0ae2016-04-21 19:09:02 -04005874 .post_attach = mem_cgroup_move_task,
Tejun Heof00baae2013-04-15 13:41:15 -07005875 .bind = mem_cgroup_bind,
Johannes Weiner241994ed2015-02-11 15:26:06 -08005876 .dfl_cftypes = memory_files,
5877 .legacy_cftypes = mem_cgroup_legacy_files,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005878 .early_init = 0,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005879};
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005880
Johannes Weiner241994ed2015-02-11 15:26:06 -08005881/**
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005882 * mem_cgroup_protected - check if memory consumption is in the normal range
Sean Christopherson34c81052017-07-10 15:48:05 -07005883 * @root: the top ancestor of the sub-tree being checked
Johannes Weiner241994ed2015-02-11 15:26:06 -08005884 * @memcg: the memory cgroup to check
5885 *
Roman Gushchin23067152018-06-07 17:06:22 -07005886 * WARNING: This function is not stateless! It can only be used as part
5887 * of a top-down tree iteration, not for isolated queries.
Sean Christopherson34c81052017-07-10 15:48:05 -07005888 *
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005889 * Returns one of the following:
5890 * MEMCG_PROT_NONE: cgroup memory is not protected
5891 * MEMCG_PROT_LOW: cgroup memory is protected as long as there is
5892 * an unprotected supply of reclaimable memory from other cgroups.
5893 * MEMCG_PROT_MIN: cgroup memory is protected
Sean Christopherson34c81052017-07-10 15:48:05 -07005894 *
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005895 * @root is exclusive; it is never protected when looked at directly
Sean Christopherson34c81052017-07-10 15:48:05 -07005896 *
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005897 * To provide proper hierarchical behavior, effective memory.min/low values
5898 * are used. Below is a description of how effective memory.low is calculated.
5899 * The effective memory.min value is calculated in the same way.
Sean Christopherson34c81052017-07-10 15:48:05 -07005900 *
Roman Gushchin23067152018-06-07 17:06:22 -07005901 * Effective memory.low is always less than or equal to the original memory.low.
5902 * If there is no memory.low overcommitment (which is always true for
5903 * top-level memory cgroups), these two values are equal.
5904 * Otherwise, it's a part of the parent's effective memory.low,
5905 * calculated as the cgroup's memory.low usage divided by the sum of the
5906 * siblings' memory.low usages, where memory.low usage is the size of
5907 * actually protected memory.
Sean Christopherson34c81052017-07-10 15:48:05 -07005908 *
Roman Gushchin23067152018-06-07 17:06:22 -07005909 * low_usage
5910 * elow = min( memory.low, parent->elow * ------------------ ),
5911 * siblings_low_usage
Sean Christopherson34c81052017-07-10 15:48:05 -07005912 *
Roman Gushchin23067152018-06-07 17:06:22 -07005913 * | memory.current, if memory.current < memory.low
5914 * low_usage = |
Qian Cai82ede7e2019-03-05 15:49:53 -08005915 * | 0, otherwise.
Sean Christopherson34c81052017-07-10 15:48:05 -07005916 *
Roman Gushchin23067152018-06-07 17:06:22 -07005917 *
5918 * This definition of effective memory.low provides the expected
5919 * hierarchical behavior: the parent's memory.low value limits its
5920 * children, unprotected memory is reclaimed first, and cgroups
5921 * which are not using their guarantee do not affect the actual memory
5922 * distribution.
5923 *
5924 * For example, if there are memcgs A, A/B, A/C, A/D and A/E:
5925 *
5926 * A A/memory.low = 2G, A/memory.current = 6G
5927 * //\\
5928 * BC DE B/memory.low = 3G B/memory.current = 2G
5929 * C/memory.low = 1G C/memory.current = 2G
5930 * D/memory.low = 0 D/memory.current = 2G
5931 * E/memory.low = 10G E/memory.current = 0
5932 *
5933 * and the memory pressure is applied, the following memory distribution
5934 * is expected (approximately):
5935 *
5936 * A/memory.current = 2G
5937 *
5938 * B/memory.current = 1.3G
5939 * C/memory.current = 0.6G
5940 * D/memory.current = 0
5941 * E/memory.current = 0
5942 *
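 * (A rough worked example of the numbers above, assuming every
 * sibling's low_usage is computed as min(memory.current, memory.low),
 * as in the elow calculation below: B's low_usage = min(2G, 3G) = 2G,
 * C's = min(2G, 1G) = 1G, D's and E's are 0, so siblings_low_usage = 3G.
 * Then elow(B) = min(3G, 2G * 2G/3G) ~= 1.3G and
 * elow(C) = min(1G, 2G * 1G/3G) ~= 0.6G, which reproduces the
 * distribution shown above.)
 *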
5943 * These calculations require constant tracking of the actual low usages
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005944 * (see propagate_protected_usage()), as well as recursive calculation of
5945 * effective memory.low values. But as reclaim calls mem_cgroup_protected()
Roman Gushchin23067152018-06-07 17:06:22 -07005946 * for each memory cgroup top-down,
5947 * it's possible to optimize this part and cache the calculated elow
5948 * for the next use. This part is intentionally racy, but that's fine,
5949 * as memory.low is a best-effort mechanism.
Johannes Weiner241994ed2015-02-11 15:26:06 -08005950 */
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005951enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
5952 struct mem_cgroup *memcg)
Johannes Weiner241994ed2015-02-11 15:26:06 -08005953{
Roman Gushchin23067152018-06-07 17:06:22 -07005954 struct mem_cgroup *parent;
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005955 unsigned long emin, parent_emin;
5956 unsigned long elow, parent_elow;
5957 unsigned long usage;
Roman Gushchin23067152018-06-07 17:06:22 -07005958
Johannes Weiner241994ed2015-02-11 15:26:06 -08005959 if (mem_cgroup_disabled())
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005960 return MEMCG_PROT_NONE;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005961
Sean Christopherson34c81052017-07-10 15:48:05 -07005962 if (!root)
5963 root = root_mem_cgroup;
5964 if (memcg == root)
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005965 return MEMCG_PROT_NONE;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005966
Roman Gushchin23067152018-06-07 17:06:22 -07005967 usage = page_counter_read(&memcg->memory);
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005968 if (!usage)
5969 return MEMCG_PROT_NONE;
Sean Christopherson34c81052017-07-10 15:48:05 -07005970
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005971 emin = memcg->memory.min;
5972 elow = memcg->memory.low;
5973
5974 parent = parent_mem_cgroup(memcg);
Roman Gushchindf2a4192018-06-14 15:26:17 -07005975 /* No parent means a non-hierarchical mode on v1 memcg */
5976 if (!parent)
5977 return MEMCG_PROT_NONE;
5978
Roman Gushchin23067152018-06-07 17:06:22 -07005979 if (parent == root)
5980 goto exit;
5981
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005982 parent_emin = READ_ONCE(parent->memory.emin);
5983 emin = min(emin, parent_emin);
5984 if (emin && parent_emin) {
5985 unsigned long min_usage, siblings_min_usage;
5986
5987 min_usage = min(usage, memcg->memory.min);
5988 siblings_min_usage = atomic_long_read(
5989 &parent->memory.children_min_usage);
5990
5991 if (min_usage && siblings_min_usage)
5992 emin = min(emin, parent_emin * min_usage /
5993 siblings_min_usage);
5994 }
5995
Roman Gushchin23067152018-06-07 17:06:22 -07005996 parent_elow = READ_ONCE(parent->memory.elow);
5997 elow = min(elow, parent_elow);
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07005998 if (elow && parent_elow) {
5999 unsigned long low_usage, siblings_low_usage;
Roman Gushchin23067152018-06-07 17:06:22 -07006000
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006001 low_usage = min(usage, memcg->memory.low);
6002 siblings_low_usage = atomic_long_read(
6003 &parent->memory.children_low_usage);
Roman Gushchin23067152018-06-07 17:06:22 -07006004
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006005 if (low_usage && siblings_low_usage)
6006 elow = min(elow, parent_elow * low_usage /
6007 siblings_low_usage);
6008 }
Roman Gushchin23067152018-06-07 17:06:22 -07006009
Roman Gushchin23067152018-06-07 17:06:22 -07006010exit:
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006011 memcg->memory.emin = emin;
Roman Gushchin23067152018-06-07 17:06:22 -07006012 memcg->memory.elow = elow;
Roman Gushchinbf8d5d52018-06-07 17:07:46 -07006013
6014 if (usage <= emin)
6015 return MEMCG_PROT_MIN;
6016 else if (usage <= elow)
6017 return MEMCG_PROT_LOW;
6018 else
6019 return MEMCG_PROT_NONE;
Johannes Weiner241994ed2015-02-11 15:26:06 -08006020}
6021
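/*
 * For context, an illustrative (and possibly simplified) sketch of how
 * the reclaim path in mm/vmscan.c's shrink_node() is expected to
 * consume the return value; the exact code there may differ:
 *
 *	switch (mem_cgroup_protected(root, memcg)) {
 *	case MEMCG_PROT_MIN:
 *		continue;	// hard protection: never reclaim
 *	case MEMCG_PROT_LOW:
 *		if (!sc->memcg_low_reclaim) {
 *			sc->memcg_low_skipped = 1;	// skip for now
 *			continue;
 *		}
 *		memcg_memory_event(memcg, MEMCG_LOW);
 *		break;
 *	case MEMCG_PROT_NONE:
 *		break;
 *	}
 */
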
Johannes Weiner00501b52014-08-08 14:19:20 -07006022/**
6023 * mem_cgroup_try_charge - try charging a page
6024 * @page: page to charge
6025 * @mm: mm context of the victim
6026 * @gfp_mask: reclaim mode
6027 * @memcgp: charged memcg return
Li RongQing25843c22016-07-26 15:26:56 -07006028 * @compound: whether to charge the page as a compound or a small page
Johannes Weiner00501b52014-08-08 14:19:20 -07006029 *
6030 * Try to charge @page to the memcg that @mm belongs to, reclaiming
6031 * pages according to @gfp_mask if necessary.
6032 *
6033 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
6034 * Otherwise, an error code is returned.
6035 *
6036 * After page->mapping has been set up, the caller must finalize the
6037 * charge with mem_cgroup_commit_charge(), or abort the transaction
6038 * with mem_cgroup_cancel_charge() if page instantiation fails.
6039 */
6040int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08006041 gfp_t gfp_mask, struct mem_cgroup **memcgp,
6042 bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07006043{
6044 struct mem_cgroup *memcg = NULL;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08006045 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07006046 int ret = 0;
6047
6048 if (mem_cgroup_disabled())
6049 goto out;
6050
6051 if (PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07006052 /*
6053 * Every swap fault against a single page tries to charge the
6054 * page, bail as early as possible. shmem_unuse() encounters
6055 * already charged pages, too. The USED bit is protected by
6056 * the page lock, which serializes swap cache removal, which
6057 * in turn serializes uncharging.
6058 */
Vladimir Davydove993d902015-09-09 15:35:35 -07006059 VM_BUG_ON_PAGE(!PageLocked(page), page);
Huang Yingabe28952017-09-06 16:22:41 -07006060 if (compound_head(page)->mem_cgroup)
Johannes Weiner00501b52014-08-08 14:19:20 -07006061 goto out;
Vladimir Davydove993d902015-09-09 15:35:35 -07006062
Vladimir Davydov37e84352016-01-20 15:02:56 -08006063 if (do_swap_account) {
Vladimir Davydove993d902015-09-09 15:35:35 -07006064 swp_entry_t ent = { .val = page_private(page), };
6065 unsigned short id = lookup_swap_cgroup_id(ent);
6066
6067 rcu_read_lock();
6068 memcg = mem_cgroup_from_id(id);
6069 if (memcg && !css_tryget_online(&memcg->css))
6070 memcg = NULL;
6071 rcu_read_unlock();
6072 }
Johannes Weiner00501b52014-08-08 14:19:20 -07006073 }
6074
Johannes Weiner00501b52014-08-08 14:19:20 -07006075 if (!memcg)
6076 memcg = get_mem_cgroup_from_mm(mm);
6077
6078 ret = try_charge(memcg, gfp_mask, nr_pages);
6079
6080 css_put(&memcg->css);
Johannes Weiner00501b52014-08-08 14:19:20 -07006081out:
6082 *memcgp = memcg;
6083 return ret;
6084}
6085
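/*
 * A minimal sketch of the try/commit/cancel protocol documented above,
 * as a hypothetical caller might use it; example_instantiate_page()
 * is made up for illustration and stands in for setting up
 * page->mapping and the page tables:
 *
 *	struct mem_cgroup *memcg;
 *	int err;
 *
 *	err = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false);
 *	if (err)
 *		return err;
 *	err = example_instantiate_page(page);
 *	if (err) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return err;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *	return 0;
 */
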
Tejun Heo2cf85582018-07-03 11:14:56 -04006086int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
6087 gfp_t gfp_mask, struct mem_cgroup **memcgp,
6088 bool compound)
6089{
6090 struct mem_cgroup *memcg;
6091 int ret;
6092
6093 ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
6094 memcg = *memcgp;
6095 mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
6096 return ret;
6097}
6098
Johannes Weiner00501b52014-08-08 14:19:20 -07006099/**
6100 * mem_cgroup_commit_charge - commit a page charge
6101 * @page: page to charge
6102 * @memcg: memcg to charge the page to
6103 * @lrucare: page might be on LRU already
Li RongQing25843c22016-07-26 15:26:56 -07006104 * @compound: whether to charge the page as a compound or a small page
Johannes Weiner00501b52014-08-08 14:19:20 -07006105 *
6106 * Finalize a charge transaction started by mem_cgroup_try_charge(),
6107 * after page->mapping has been set up. This must happen atomically
6108 * as part of the page instantiation, i.e. under the page table lock
6109 * for anonymous pages, under the page lock for page and swap cache.
6110 *
6111 * In addition, the page must not be on the LRU during the commit, to
6112 * prevent racing with task migration. If it might be, use @lrucare.
6113 *
6114 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
6115 */
6116void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08006117 bool lrucare, bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07006118{
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08006119 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07006120
6121 VM_BUG_ON_PAGE(!page->mapping, page);
6122 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
6123
6124 if (mem_cgroup_disabled())
6125 return;
6126 /*
6127 * Swap faults will attempt to charge the same page multiple
6128 * times. But reuse_swap_page() might have removed the page
6129 * from swapcache already, so we can't check PageSwapCache().
6130 */
6131 if (!memcg)
6132 return;
6133
Johannes Weiner6abb5a82014-08-08 14:19:33 -07006134 commit_charge(page, memcg, lrucare);
6135
Johannes Weiner6abb5a82014-08-08 14:19:33 -07006136 local_irq_disable();
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08006137 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07006138 memcg_check_events(memcg, page);
6139 local_irq_enable();
Johannes Weiner00501b52014-08-08 14:19:20 -07006140
Johannes Weiner7941d212016-01-14 15:21:23 -08006141 if (do_memsw_account() && PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07006142 swp_entry_t entry = { .val = page_private(page) };
6143 /*
6144 * The swap entry might not get freed for a long time,
6145 * let's not wait for it. The page already received a
6146 * memory+swap charge, drop the swap entry duplicate.
6147 */
Huang Ying38d8b4e2017-07-06 15:37:18 -07006148 mem_cgroup_uncharge_swap(entry, nr_pages);
Johannes Weiner00501b52014-08-08 14:19:20 -07006149 }
6150}
6151
6152/**
6153 * mem_cgroup_cancel_charge - cancel a page charge
6154 * @page: page to charge
6155 * @memcg: memcg to charge the page to
Li RongQing25843c22016-07-26 15:26:56 -07006156 * @compound: whether the page was charged as a compound or a small page
Johannes Weiner00501b52014-08-08 14:19:20 -07006157 *
6158 * Cancel a charge transaction started by mem_cgroup_try_charge().
6159 */
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08006160void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
6161 bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07006162{
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08006163 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07006164
6165 if (mem_cgroup_disabled())
6166 return;
6167 /*
6168 * Swap faults will attempt to charge the same page multiple
6169 * times. But reuse_swap_page() might have removed the page
6170 * from swapcache already, so we can't check PageSwapCache().
6171 */
6172 if (!memcg)
6173 return;
6174
Johannes Weiner00501b52014-08-08 14:19:20 -07006175 cancel_charge(memcg, nr_pages);
6176}
6177
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006178struct uncharge_gather {
6179 struct mem_cgroup *memcg;
6180 unsigned long pgpgout;
6181 unsigned long nr_anon;
6182 unsigned long nr_file;
6183 unsigned long nr_kmem;
6184 unsigned long nr_huge;
6185 unsigned long nr_shmem;
6186 struct page *dummy_page;
6187};
6188
6189static inline void uncharge_gather_clear(struct uncharge_gather *ug)
Johannes Weiner747db952014-08-08 14:19:24 -07006190{
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006191 memset(ug, 0, sizeof(*ug));
6192}
6193
6194static void uncharge_batch(const struct uncharge_gather *ug)
6195{
6196 unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
Johannes Weiner747db952014-08-08 14:19:24 -07006197 unsigned long flags;
6198
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006199 if (!mem_cgroup_is_root(ug->memcg)) {
6200 page_counter_uncharge(&ug->memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08006201 if (do_memsw_account())
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006202 page_counter_uncharge(&ug->memcg->memsw, nr_pages);
6203 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6204 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6205 memcg_oom_recover(ug->memcg);
Johannes Weinerce00a962014-09-05 08:43:57 -04006206 }
Johannes Weiner747db952014-08-08 14:19:24 -07006207
6208 local_irq_save(flags);
Johannes Weinerc9019e92018-01-31 16:16:37 -08006209 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
6210 __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
6211 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
6212 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
6213 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
Chris Down871789d2019-05-14 15:46:57 -07006214 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006215 memcg_check_events(ug->memcg, ug->dummy_page);
Johannes Weiner747db952014-08-08 14:19:24 -07006216 local_irq_restore(flags);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08006217
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006218 if (!mem_cgroup_is_root(ug->memcg))
6219 css_put_many(&ug->memcg->css, nr_pages);
6220}
6221
6222static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6223{
6224 VM_BUG_ON_PAGE(PageLRU(page), page);
Jérôme Glisse3f2eb022017-10-03 16:14:57 -07006225 VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
6226 !PageHWPoison(page), page);
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006227
6228 if (!page->mem_cgroup)
6229 return;
6230
6231 /*
6232 * Nobody should be changing or seriously looking at
6233 * page->mem_cgroup at this point, we have fully
6234 * exclusive access to the page.
6235 */
6236
6237 if (ug->memcg != page->mem_cgroup) {
6238 if (ug->memcg) {
6239 uncharge_batch(ug);
6240 uncharge_gather_clear(ug);
6241 }
6242 ug->memcg = page->mem_cgroup;
6243 }
6244
6245 if (!PageKmemcg(page)) {
6246 unsigned int nr_pages = 1;
6247
6248 if (PageTransHuge(page)) {
6249 nr_pages <<= compound_order(page);
6250 ug->nr_huge += nr_pages;
6251 }
6252 if (PageAnon(page))
6253 ug->nr_anon += nr_pages;
6254 else {
6255 ug->nr_file += nr_pages;
6256 if (PageSwapBacked(page))
6257 ug->nr_shmem += nr_pages;
6258 }
6259 ug->pgpgout++;
6260 } else {
6261 ug->nr_kmem += 1 << compound_order(page);
6262 __ClearPageKmemcg(page);
6263 }
6264
6265 ug->dummy_page = page;
6266 page->mem_cgroup = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07006267}
6268
6269static void uncharge_list(struct list_head *page_list)
6270{
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006271 struct uncharge_gather ug;
Johannes Weiner747db952014-08-08 14:19:24 -07006272 struct list_head *next;
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006273
6274 uncharge_gather_clear(&ug);
Johannes Weiner747db952014-08-08 14:19:24 -07006275
Johannes Weiner8b592652016-03-17 14:20:31 -07006276 /*
6277 * Note that the list can be a single page->lru; hence the
6278 * do-while loop instead of a simple list_for_each_entry().
6279 */
Johannes Weiner747db952014-08-08 14:19:24 -07006280 next = page_list->next;
6281 do {
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006282 struct page *page;
6283
Johannes Weiner747db952014-08-08 14:19:24 -07006284 page = list_entry(next, struct page, lru);
6285 next = page->lru.next;
6286
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006287 uncharge_page(page, &ug);
Johannes Weiner747db952014-08-08 14:19:24 -07006288 } while (next != page_list);
6289
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006290 if (ug.memcg)
6291 uncharge_batch(&ug);
Johannes Weiner747db952014-08-08 14:19:24 -07006292}
6293
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006294/**
6295 * mem_cgroup_uncharge - uncharge a page
6296 * @page: page to uncharge
6297 *
6298 * Uncharge a page previously charged with mem_cgroup_try_charge() and
6299 * mem_cgroup_commit_charge().
6300 */
6301void mem_cgroup_uncharge(struct page *page)
6302{
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006303 struct uncharge_gather ug;
6304
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006305 if (mem_cgroup_disabled())
6306 return;
6307
Johannes Weiner747db952014-08-08 14:19:24 -07006308 /* Don't touch page->lru of any random page, pre-check: */
Johannes Weiner1306a852014-12-10 15:44:52 -08006309 if (!page->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006310 return;
6311
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07006312 uncharge_gather_clear(&ug);
6313 uncharge_page(page, &ug);
6314 uncharge_batch(&ug);
Johannes Weiner747db952014-08-08 14:19:24 -07006315}
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006316
Johannes Weiner747db952014-08-08 14:19:24 -07006317/**
6318 * mem_cgroup_uncharge_list - uncharge a list of pages
6319 * @page_list: list of pages to uncharge
6320 *
6321 * Uncharge a list of pages previously charged with
6322 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
6323 */
6324void mem_cgroup_uncharge_list(struct list_head *page_list)
6325{
6326 if (mem_cgroup_disabled())
6327 return;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006328
Johannes Weiner747db952014-08-08 14:19:24 -07006329 if (!list_empty(page_list))
6330 uncharge_list(page_list);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006331}
6332
6333/**
Johannes Weiner6a93ca82016-03-15 14:57:19 -07006334 * mem_cgroup_migrate - charge a page's replacement
6335 * @oldpage: currently circulating page
6336 * @newpage: replacement page
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006337 *
Johannes Weiner6a93ca82016-03-15 14:57:19 -07006338 * Charge @newpage as a replacement page for @oldpage. @oldpage will
6339 * be uncharged upon free.
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006340 *
6341 * Both pages must be locked, @newpage->mapping must be set up.
6342 */
Johannes Weiner6a93ca82016-03-15 14:57:19 -07006343void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006344{
Johannes Weiner29833312014-12-10 15:44:02 -08006345 struct mem_cgroup *memcg;
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08006346 unsigned int nr_pages;
6347 bool compound;
Tejun Heod93c4132016-06-24 14:49:54 -07006348 unsigned long flags;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006349
6350 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6351 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006352 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07006353 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6354 newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006355
6356 if (mem_cgroup_disabled())
6357 return;
6358
6359 /* Page cache replacement: new page already charged? */
Johannes Weiner1306a852014-12-10 15:44:52 -08006360 if (newpage->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006361 return;
6362
Hugh Dickins45637ba2015-11-05 18:49:40 -08006363 /* Swapcache readahead pages can get replaced before being charged */
Johannes Weiner1306a852014-12-10 15:44:52 -08006364 memcg = oldpage->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08006365 if (!memcg)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006366 return;
6367
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08006368 /* Force-charge the new page. The old one will be freed soon */
6369 compound = PageTransHuge(newpage);
6370 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
6371
6372 page_counter_charge(&memcg->memory, nr_pages);
6373 if (do_memsw_account())
6374 page_counter_charge(&memcg->memsw, nr_pages);
6375 css_get_many(&memcg->css, nr_pages);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006376
Johannes Weiner9cf76662016-03-15 14:57:58 -07006377 commit_charge(newpage, memcg, false);
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08006378
Tejun Heod93c4132016-06-24 14:49:54 -07006379 local_irq_save(flags);
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08006380 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
6381 memcg_check_events(memcg, newpage);
Tejun Heod93c4132016-06-24 14:49:54 -07006382 local_irq_restore(flags);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07006383}
6384
Johannes Weineref129472016-01-14 15:21:34 -08006385DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
Johannes Weiner11092082016-01-14 15:21:26 -08006386EXPORT_SYMBOL(memcg_sockets_enabled_key);
6387
Johannes Weiner2d758072016-10-07 17:00:58 -07006388void mem_cgroup_sk_alloc(struct sock *sk)
Johannes Weiner11092082016-01-14 15:21:26 -08006389{
6390 struct mem_cgroup *memcg;
6391
Johannes Weiner2d758072016-10-07 17:00:58 -07006392 if (!mem_cgroup_sockets_enabled)
6393 return;
6394
Roman Gushchinedbe69e2018-02-02 15:26:57 +00006395 /*
6396 * Socket cloning can throw us here with sk_memcg already
6397 * filled. It won't however, necessarily happen from
6398 * process context. So the test for root memcg given
6399 * the current task's memcg won't help us in this case.
6400 *
6401 * Respecting the original socket's memcg is a better
6402 * decision in this case.
6403 */
6404 if (sk->sk_memcg) {
6405 css_get(&sk->sk_memcg->css);
6406 return;
6407 }
6408
Johannes Weiner11092082016-01-14 15:21:26 -08006409 rcu_read_lock();
6410 memcg = mem_cgroup_from_task(current);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006411 if (memcg == root_mem_cgroup)
6412 goto out;
Johannes Weiner0db15292016-01-20 15:02:50 -08006413 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006414 goto out;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006415 if (css_tryget_online(&memcg->css))
Johannes Weiner11092082016-01-14 15:21:26 -08006416 sk->sk_memcg = memcg;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006417out:
Johannes Weiner11092082016-01-14 15:21:26 -08006418 rcu_read_unlock();
6419}
Johannes Weiner11092082016-01-14 15:21:26 -08006420
Johannes Weiner2d758072016-10-07 17:00:58 -07006421void mem_cgroup_sk_free(struct sock *sk)
Johannes Weiner11092082016-01-14 15:21:26 -08006422{
Johannes Weiner2d758072016-10-07 17:00:58 -07006423 if (sk->sk_memcg)
6424 css_put(&sk->sk_memcg->css);
Johannes Weiner11092082016-01-14 15:21:26 -08006425}
6426
6427/**
6428 * mem_cgroup_charge_skmem - charge socket memory
6429 * @memcg: memcg to charge
6430 * @nr_pages: number of pages to charge
6431 *
6432 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6433 * @memcg's configured limit, %false if the charge had to be forced.
6434 */
6435bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6436{
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006437 gfp_t gfp_mask = GFP_KERNEL;
Johannes Weiner11092082016-01-14 15:21:26 -08006438
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006439 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Johannes Weiner0db15292016-01-20 15:02:50 -08006440 struct page_counter *fail;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006441
Johannes Weiner0db15292016-01-20 15:02:50 -08006442 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6443 memcg->tcpmem_pressure = 0;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006444 return true;
6445 }
Johannes Weiner0db15292016-01-20 15:02:50 -08006446 page_counter_charge(&memcg->tcpmem, nr_pages);
6447 memcg->tcpmem_pressure = 1;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006448 return false;
Johannes Weiner11092082016-01-14 15:21:26 -08006449 }
Johannes Weinerd886f4e2016-01-20 15:02:47 -08006450
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006451 /* Don't block in the packet receive path */
6452 if (in_softirq())
6453 gfp_mask = GFP_NOWAIT;
6454
Johannes Weinerc9019e92018-01-31 16:16:37 -08006455 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
Johannes Weinerb2807f02016-01-20 15:03:22 -08006456
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006457 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
6458 return true;
6459
6460 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
Johannes Weiner11092082016-01-14 15:21:26 -08006461 return false;
6462}
6463
6464/**
6465 * mem_cgroup_uncharge_skmem - uncharge socket memory
Mike Rapoportb7701a52018-02-06 15:42:13 -08006466 * @memcg: memcg to uncharge
6467 * @nr_pages: number of pages to uncharge
Johannes Weiner11092082016-01-14 15:21:26 -08006468 */
6469void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6470{
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006471 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Johannes Weiner0db15292016-01-20 15:02:50 -08006472 page_counter_uncharge(&memcg->tcpmem, nr_pages);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006473 return;
6474 }
Johannes Weinerd886f4e2016-01-20 15:02:47 -08006475
Johannes Weinerc9019e92018-01-31 16:16:37 -08006476 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
Johannes Weinerb2807f02016-01-20 15:03:22 -08006477
Roman Gushchin475d0482017-09-08 16:13:09 -07006478 refill_stock(memcg, nr_pages);
Johannes Weiner11092082016-01-14 15:21:26 -08006479}
6480
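/*
 * An illustrative pairing of the two helpers above, loosely modeled on
 * how the socket core accounts buffer memory (the surrounding checks
 * are a sketch; the real callers live in net/core/sock.c and the
 * protocol code):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages)) {
 *		// charge was forced past the limit: the caller should
 *		// treat this as memory pressure and throttle the socket
 *	}
 *	...
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */
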
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006481static int __init cgroup_memory(char *s)
6482{
6483 char *token;
6484
6485 while ((token = strsep(&s, ",")) != NULL) {
6486 if (!*token)
6487 continue;
6488 if (!strcmp(token, "nosocket"))
6489 cgroup_memory_nosocket = true;
Vladimir Davydov04823c82016-01-20 15:02:38 -08006490 if (!strcmp(token, "nokmem"))
6491 cgroup_memory_nokmem = true;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08006492 }
6493 return 0;
6494}
6495__setup("cgroup.memory=", cgroup_memory);
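
/*
 * Example kernel command line usage (the two tokens may be given alone
 * or combined):
 *
 *	cgroup.memory=nosocket,nokmem
 */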
Johannes Weiner11092082016-01-14 15:21:26 -08006496
Michal Hocko2d110852013-02-22 16:34:43 -08006497/*
Michal Hocko10813122013-02-22 16:35:41 -08006498 * subsys_initcall() for the memory controller.
6499 *
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01006500 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
6501 * context because of lock dependencies (cgroup_lock -> cpu hotplug), but
6502 * basically everything that doesn't depend on a specific mem_cgroup structure
6503 * should be initialized from here.
Michal Hocko2d110852013-02-22 16:34:43 -08006504 */
6505static int __init mem_cgroup_init(void)
6506{
Johannes Weiner95a045f2015-02-11 15:26:33 -08006507 int cpu, node;
6508
Kirill Tkhai84c07d12018-08-17 15:47:25 -07006509#ifdef CONFIG_MEMCG_KMEM
Vladimir Davydov13583c32016-12-12 16:41:29 -08006510 /*
6511 * Kmem cache creation is mostly done with the slab_mutex held,
Tejun Heo17cc4df2017-02-22 15:41:36 -08006512 * so use a workqueue with limited concurrency to avoid stalling
6513 * all worker threads in case lots of cgroups are created and
6514 * destroyed simultaneously.
Vladimir Davydov13583c32016-12-12 16:41:29 -08006515 */
Tejun Heo17cc4df2017-02-22 15:41:36 -08006516 memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
6517 BUG_ON(!memcg_kmem_cache_wq);
Vladimir Davydov13583c32016-12-12 16:41:29 -08006518#endif
6519
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01006520 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
6521 memcg_hotplug_cpu_dead);
Johannes Weiner95a045f2015-02-11 15:26:33 -08006522
6523 for_each_possible_cpu(cpu)
6524 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
6525 drain_local_stock);
6526
6527 for_each_node(node) {
6528 struct mem_cgroup_tree_per_node *rtpn;
Johannes Weiner95a045f2015-02-11 15:26:33 -08006529
6530 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
6531 node_online(node) ? node : NUMA_NO_NODE);
6532
Mel Gormanef8f2322016-07-28 15:46:05 -07006533 rtpn->rb_root = RB_ROOT;
Davidlohr Buesofa90b2f2017-09-08 16:15:21 -07006534 rtpn->rb_rightmost = NULL;
Mel Gormanef8f2322016-07-28 15:46:05 -07006535 spin_lock_init(&rtpn->lock);
Johannes Weiner95a045f2015-02-11 15:26:33 -08006536 soft_limit_tree.rb_tree_per_node[node] = rtpn;
6537 }
6538
Michal Hocko2d110852013-02-22 16:34:43 -08006539 return 0;
6540}
6541subsys_initcall(mem_cgroup_init);
Johannes Weiner21afa382015-02-11 15:26:36 -08006542
6543#ifdef CONFIG_MEMCG_SWAP
Arnd Bergmann358c07f2016-08-25 15:17:08 -07006544static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
6545{
Kirill Tkhai1c2d4792018-10-26 15:09:28 -07006546 while (!refcount_inc_not_zero(&memcg->id.ref)) {
Arnd Bergmann358c07f2016-08-25 15:17:08 -07006547 /*
6548 * The root cgroup cannot be destroyed, so its refcount must
6549 * always be >= 1.
6550 */
6551 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
6552 VM_BUG_ON(1);
6553 break;
6554 }
6555 memcg = parent_mem_cgroup(memcg);
6556 if (!memcg)
6557 memcg = root_mem_cgroup;
6558 }
6559 return memcg;
6560}
6561
Johannes Weiner21afa382015-02-11 15:26:36 -08006562/**
6563 * mem_cgroup_swapout - transfer a memsw charge to swap
6564 * @page: page whose memsw charge to transfer
6565 * @entry: swap entry to move the charge to
6566 *
6567 * Transfer the memsw charge of @page to @entry.
6568 */
6569void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
6570{
Vladimir Davydov1f47b612016-08-11 15:33:00 -07006571 struct mem_cgroup *memcg, *swap_memcg;
Huang Yingd6810d72017-09-06 16:22:45 -07006572 unsigned int nr_entries;
Johannes Weiner21afa382015-02-11 15:26:36 -08006573 unsigned short oldid;
6574
6575 VM_BUG_ON_PAGE(PageLRU(page), page);
6576 VM_BUG_ON_PAGE(page_count(page), page);
6577
Johannes Weiner7941d212016-01-14 15:21:23 -08006578 if (!do_memsw_account())
Johannes Weiner21afa382015-02-11 15:26:36 -08006579 return;
6580
6581 memcg = page->mem_cgroup;
6582
6583 /* Readahead page, never charged */
6584 if (!memcg)
6585 return;
6586
Vladimir Davydov1f47b612016-08-11 15:33:00 -07006587 /*
6588 * In case the memcg owning these pages has been offlined and doesn't
6589 * have an ID allocated to it anymore, charge the closest online
6590 * ancestor for the swap instead and transfer the memory+swap charge.
6591 */
6592 swap_memcg = mem_cgroup_id_get_online(memcg);
Huang Yingd6810d72017-09-06 16:22:45 -07006593 nr_entries = hpage_nr_pages(page);
6594 /* Get references for the tail pages, too */
6595 if (nr_entries > 1)
6596 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
6597 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
6598 nr_entries);
Johannes Weiner21afa382015-02-11 15:26:36 -08006599 VM_BUG_ON_PAGE(oldid, page);
Johannes Weinerc9019e92018-01-31 16:16:37 -08006600 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
Johannes Weiner21afa382015-02-11 15:26:36 -08006601
6602 page->mem_cgroup = NULL;
6603
6604 if (!mem_cgroup_is_root(memcg))
Huang Yingd6810d72017-09-06 16:22:45 -07006605 page_counter_uncharge(&memcg->memory, nr_entries);
Johannes Weiner21afa382015-02-11 15:26:36 -08006606
Vladimir Davydov1f47b612016-08-11 15:33:00 -07006607 if (memcg != swap_memcg) {
6608 if (!mem_cgroup_is_root(swap_memcg))
Huang Yingd6810d72017-09-06 16:22:45 -07006609 page_counter_charge(&swap_memcg->memsw, nr_entries);
6610 page_counter_uncharge(&memcg->memsw, nr_entries);
Vladimir Davydov1f47b612016-08-11 15:33:00 -07006611 }
6612
Sebastian Andrzej Siewiorce9ce662015-09-04 15:47:50 -07006613 /*
6614 * Interrupts should be disabled here because the caller holds the
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07006615 * i_pages lock, which is taken with interrupts off. Keeping
Sebastian Andrzej Siewiorce9ce662015-09-04 15:47:50 -07006616 * interrupts disabled matters because it is the
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07006617 * only synchronisation we have for updating the per-CPU variables.
Sebastian Andrzej Siewiorce9ce662015-09-04 15:47:50 -07006618 */
6619 VM_BUG_ON(!irqs_disabled());
Huang Yingd6810d72017-09-06 16:22:45 -07006620 mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
6621 -nr_entries);
Johannes Weiner21afa382015-02-11 15:26:36 -08006622 memcg_check_events(memcg, page);
Johannes Weiner73f576c2016-07-20 15:44:57 -07006623
6624 if (!mem_cgroup_is_root(memcg))
Shakeel Buttd08afa12017-11-29 16:11:15 -08006625 css_put_many(&memcg->css, nr_entries);
Johannes Weiner21afa382015-02-11 15:26:36 -08006626}
6627
Huang Ying38d8b4e2017-07-06 15:37:18 -07006628/**
6629 * mem_cgroup_try_charge_swap - try charging swap space for a page
Vladimir Davydov37e84352016-01-20 15:02:56 -08006630 * @page: page being added to swap
6631 * @entry: swap entry to charge
6632 *
Huang Ying38d8b4e2017-07-06 15:37:18 -07006633 * Try to charge @page's memcg for the swap space at @entry.
Vladimir Davydov37e84352016-01-20 15:02:56 -08006634 *
6635 * Returns 0 on success, -ENOMEM on failure.
6636 */
6637int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
6638{
Huang Ying38d8b4e2017-07-06 15:37:18 -07006639 unsigned int nr_pages = hpage_nr_pages(page);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006640 struct page_counter *counter;
Huang Ying38d8b4e2017-07-06 15:37:18 -07006641 struct mem_cgroup *memcg;
Vladimir Davydov37e84352016-01-20 15:02:56 -08006642 unsigned short oldid;
6643
6644 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
6645 return 0;
6646
6647 memcg = page->mem_cgroup;
6648
6649 /* Readahead page, never charged */
6650 if (!memcg)
6651 return 0;
6652
Tejun Heof3a53a32018-06-07 17:05:35 -07006653 if (!entry.val) {
6654 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
Tejun Heobb98f2c2018-06-07 17:05:31 -07006655 return 0;
Tejun Heof3a53a32018-06-07 17:05:35 -07006656 }
Tejun Heobb98f2c2018-06-07 17:05:31 -07006657
Vladimir Davydov1f47b612016-08-11 15:33:00 -07006658 memcg = mem_cgroup_id_get_online(memcg);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006659
Vladimir Davydov1f47b612016-08-11 15:33:00 -07006660 if (!mem_cgroup_is_root(memcg) &&
Huang Ying38d8b4e2017-07-06 15:37:18 -07006661 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
Tejun Heof3a53a32018-06-07 17:05:35 -07006662 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
6663 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
Vladimir Davydov1f47b612016-08-11 15:33:00 -07006664 mem_cgroup_id_put(memcg);
6665 return -ENOMEM;
6666 }
6667
Huang Ying38d8b4e2017-07-06 15:37:18 -07006668 /* Get references for the tail pages, too */
6669 if (nr_pages > 1)
6670 mem_cgroup_id_get_many(memcg, nr_pages - 1);
6671 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006672 VM_BUG_ON_PAGE(oldid, page);
Johannes Weinerc9019e92018-01-31 16:16:37 -08006673 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006674
Vladimir Davydov37e84352016-01-20 15:02:56 -08006675 return 0;
6676}
6677
Johannes Weiner21afa382015-02-11 15:26:36 -08006678/**
Huang Ying38d8b4e2017-07-06 15:37:18 -07006679 * mem_cgroup_uncharge_swap - uncharge swap space
Johannes Weiner21afa382015-02-11 15:26:36 -08006680 * @entry: swap entry to uncharge
Huang Ying38d8b4e2017-07-06 15:37:18 -07006681 * @nr_pages: the amount of swap space to uncharge
Johannes Weiner21afa382015-02-11 15:26:36 -08006682 */
Huang Ying38d8b4e2017-07-06 15:37:18 -07006683void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
Johannes Weiner21afa382015-02-11 15:26:36 -08006684{
6685 struct mem_cgroup *memcg;
6686 unsigned short id;
6687
Vladimir Davydov37e84352016-01-20 15:02:56 -08006688 if (!do_swap_account)
Johannes Weiner21afa382015-02-11 15:26:36 -08006689 return;
6690
Huang Ying38d8b4e2017-07-06 15:37:18 -07006691 id = swap_cgroup_record(entry, 0, nr_pages);
Johannes Weiner21afa382015-02-11 15:26:36 -08006692 rcu_read_lock();
Vladimir Davydovadbe4272015-04-15 16:13:00 -07006693 memcg = mem_cgroup_from_id(id);
Johannes Weiner21afa382015-02-11 15:26:36 -08006694 if (memcg) {
Vladimir Davydov37e84352016-01-20 15:02:56 -08006695 if (!mem_cgroup_is_root(memcg)) {
6696 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Huang Ying38d8b4e2017-07-06 15:37:18 -07006697 page_counter_uncharge(&memcg->swap, nr_pages);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006698 else
Huang Ying38d8b4e2017-07-06 15:37:18 -07006699 page_counter_uncharge(&memcg->memsw, nr_pages);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006700 }
Johannes Weinerc9019e92018-01-31 16:16:37 -08006701 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
Huang Ying38d8b4e2017-07-06 15:37:18 -07006702 mem_cgroup_id_put_many(memcg, nr_pages);
Johannes Weiner21afa382015-02-11 15:26:36 -08006703 }
6704 rcu_read_unlock();
6705}
6706
Vladimir Davydovd8b38432016-01-20 15:03:07 -08006707long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
6708{
6709 long nr_swap_pages = get_nr_swap_pages();
6710
6711 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6712 return nr_swap_pages;
6713 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6714 nr_swap_pages = min_t(long, nr_swap_pages,
Roman Gushchinbbec2e12018-06-07 17:06:18 -07006715 READ_ONCE(memcg->swap.max) -
Vladimir Davydovd8b38432016-01-20 15:03:07 -08006716 page_counter_read(&memcg->swap));
6717 return nr_swap_pages;
6718}
6719
Vladimir Davydov5ccc5ab2016-01-20 15:03:10 -08006720bool mem_cgroup_swap_full(struct page *page)
6721{
6722 struct mem_cgroup *memcg;
6723
6724 VM_BUG_ON_PAGE(!PageLocked(page), page);
6725
6726 if (vm_swap_full())
6727 return true;
6728 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6729 return false;
6730
6731 memcg = page->mem_cgroup;
6732 if (!memcg)
6733 return false;
6734
6735 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
Roman Gushchinbbec2e12018-06-07 17:06:18 -07006736 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.max)
Vladimir Davydov5ccc5ab2016-01-20 15:03:10 -08006737 return true;
6738
6739 return false;
6740}
6741
Johannes Weiner21afa382015-02-11 15:26:36 -08006742/* for remembering the boot option */
6743#ifdef CONFIG_MEMCG_SWAP_ENABLED
6744static int really_do_swap_account __initdata = 1;
6745#else
6746static int really_do_swap_account __initdata;
6747#endif
6748
6749static int __init enable_swap_account(char *s)
6750{
6751 if (!strcmp(s, "1"))
6752 really_do_swap_account = 1;
6753 else if (!strcmp(s, "0"))
6754 really_do_swap_account = 0;
6755 return 1;
6756}
6757__setup("swapaccount=", enable_swap_account);
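
/*
 * Example kernel command line usage: "swapaccount=0" disables swap
 * accounting even when CONFIG_MEMCG_SWAP_ENABLED defaults it on;
 * "swapaccount=1" enables it when the config default is off.
 */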
6758
Vladimir Davydov37e84352016-01-20 15:02:56 -08006759static u64 swap_current_read(struct cgroup_subsys_state *css,
6760 struct cftype *cft)
6761{
6762 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6763
6764 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
6765}
6766
6767static int swap_max_show(struct seq_file *m, void *v)
6768{
Chris Down677dc972019-03-05 15:45:55 -08006769 return seq_puts_memcg_tunable(m,
6770 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
Vladimir Davydov37e84352016-01-20 15:02:56 -08006771}
6772
6773static ssize_t swap_max_write(struct kernfs_open_file *of,
6774 char *buf, size_t nbytes, loff_t off)
6775{
6776 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6777 unsigned long max;
6778 int err;
6779
6780 buf = strstrip(buf);
6781 err = page_counter_memparse(buf, "max", &max);
6782 if (err)
6783 return err;
6784
Tejun Heobe091022018-06-07 17:09:21 -07006785 xchg(&memcg->swap.max, max);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006786
6787 return nbytes;
6788}
6789
Tejun Heof3a53a32018-06-07 17:05:35 -07006790static int swap_events_show(struct seq_file *m, void *v)
6791{
Chris Downaa9694b2019-03-05 15:45:52 -08006792 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
Tejun Heof3a53a32018-06-07 17:05:35 -07006793
6794 seq_printf(m, "max %lu\n",
6795 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
6796 seq_printf(m, "fail %lu\n",
6797 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
6798
6799 return 0;
6800}
6801
Vladimir Davydov37e84352016-01-20 15:02:56 -08006802static struct cftype swap_files[] = {
6803 {
6804 .name = "swap.current",
6805 .flags = CFTYPE_NOT_ON_ROOT,
6806 .read_u64 = swap_current_read,
6807 },
6808 {
6809 .name = "swap.max",
6810 .flags = CFTYPE_NOT_ON_ROOT,
6811 .seq_show = swap_max_show,
6812 .write = swap_max_write,
6813 },
Tejun Heof3a53a32018-06-07 17:05:35 -07006814 {
6815 .name = "swap.events",
6816 .flags = CFTYPE_NOT_ON_ROOT,
6817 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
6818 .seq_show = swap_events_show,
6819 },
Vladimir Davydov37e84352016-01-20 15:02:56 -08006820 { } /* terminate */
6821};
6822
Johannes Weiner21afa382015-02-11 15:26:36 -08006823static struct cftype memsw_cgroup_files[] = {
6824 {
6825 .name = "memsw.usage_in_bytes",
6826 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6827 .read_u64 = mem_cgroup_read_u64,
6828 },
6829 {
6830 .name = "memsw.max_usage_in_bytes",
6831 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6832 .write = mem_cgroup_reset,
6833 .read_u64 = mem_cgroup_read_u64,
6834 },
6835 {
6836 .name = "memsw.limit_in_bytes",
6837 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6838 .write = mem_cgroup_write,
6839 .read_u64 = mem_cgroup_read_u64,
6840 },
6841 {
6842 .name = "memsw.failcnt",
6843 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6844 .write = mem_cgroup_reset,
6845 .read_u64 = mem_cgroup_read_u64,
6846 },
6847 { }, /* terminate */
6848};
6849
6850static int __init mem_cgroup_swap_init(void)
6851{
6852 if (!mem_cgroup_disabled() && really_do_swap_account) {
6853 do_swap_account = 1;
Vladimir Davydov37e84352016-01-20 15:02:56 -08006854 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
6855 swap_files));
Johannes Weiner21afa382015-02-11 15:26:36 -08006856 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6857 memsw_cgroup_files));
6858 }
6859 return 0;
6860}
6861subsys_initcall(mem_cgroup_swap_init);
6862
6863#endif /* CONFIG_MEMCG_SWAP */