/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <linux/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char *const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	struct rb_node *rb_rightmost;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal.  This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

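/*
 * Illustrative sketch only, not part of the original file: how a cftype's
 * ->private value is packed and unpacked with the MEMFILE_* helpers above.
 * The attribute constant RES_LIMIT used here is assumed to be defined
 * elsewhere; the snippet is compiled out.
 */
#if 0
static void memfile_encoding_example(void)
{
	/* Resource type in the high 16 bits, attribute in the low 16. */
	unsigned long priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);

	WARN_ON(MEMFILE_TYPE(priv) != _MEMSWAP);	/* decode the type */
	WARN_ON(MEMFILE_ATTR(priv) != RES_LIMIT);	/* decode the attribute */
}
#endif
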
/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;
	bool rightmost = true;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess) {
			p = &(*p)->rb_left;
			rightmost = false;
		}

		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}

	if (rightmost)
		mctz->rb_rightmost = &mz->tree_node;

	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;

	if (&mz->tree_node == mctz->rb_rightmost)
		mctz->rb_rightmost = rb_prev(&mz->tree_node);

	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	if (!mctz)
		return;
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		if (mctz)
			mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	if (!mctz->rb_rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(mctz->rb_rightmost,
		      struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
				      int event)
{
	return atomic_long_read(&memcg->events[event]);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__mod_memcg_state(memcg, MEMCG_RSS, nr_pages);
	else {
		__mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
		if (PageSwapBacked(page))
			__mod_memcg_state(memcg, NR_SHMEM, nr_pages);
	}

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__mod_memcg_state(memcg, MEMCG_RSS_HUGE, nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__count_memcg_events(memcg, PGPGIN, 1);
	else {
		__count_memcg_events(memcg, PGPGOUT, 1);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat_cpu->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
	unsigned long nr = 0;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		nr += mem_cgroup_get_lru_size(lruvec, lru);
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat_cpu->nr_page_events);
	next = __this_cpu_read(memcg->stat_cpu->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)(next - val) < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat_cpu->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

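/*
 * Illustrative sketch only, not part of the original file: the reference
 * counting contract of the iteration constructs above. The helper below is
 * hypothetical; a full walk needs no manual css_put(), while breaking out
 * early must go through mem_cgroup_iter_break(). Compiled out.
 */
#if 0
static unsigned long example_count_subtree(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;
	unsigned long nr = 0;

	for_each_mem_cgroup_tree(iter, root) {
		nr++;
		if (nr >= 1000) {
			/* early exit: drop the reference held on 'iter' */
			mem_cgroup_iter_break(root, iter);
			break;
		}
	}
	return nr;
}
#endif
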
/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task. If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, 0, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

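/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * caller of mem_cgroup_scan_tasks(). A callback returning non-zero stops
 * the walk and that value is propagated back to the caller. Compiled out.
 */
#if 0
static int example_count_task(struct task_struct *task, void *arg)
{
	unsigned long *nr = arg;

	(*nr)++;
	return 0;		/* keep iterating over every task */
}

static unsigned long example_nr_tasks(struct mem_cgroup *memcg)
{
	unsigned long nr = 0;

	/* must not be called for the root memory cgroup */
	mem_cgroup_scan_tasks(memcg, example_count_task, &nr);
	return nr;
}
#endif
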
/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @zid: zone id of the accounted pages
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = &mz->lru_zone_size[zid][lru];

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0,
		"%s(%p, %d, %d): lru_size %ld\n",
		__func__, lruvec, lru, nr_pages, size)) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

/*
 * A routine for checking whether "mem" is under move_account() or not.
 *
 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of a
 * moving cgroup. This is used for waiting at high memory pressure
 * caused by "move".
 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001060static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001061{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001062 struct mem_cgroup *from;
1063 struct mem_cgroup *to;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001064 bool ret = false;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001065 /*
1066 * Unlike task_move routines, we access mc.to, mc.from not under
1067 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1068 */
1069 spin_lock(&mc.lock);
1070 from = mc.from;
1071 to = mc.to;
1072 if (!from)
1073 goto unlock;
Michal Hocko3e920412011-07-26 16:08:29 -07001074
Johannes Weiner2314b422014-12-10 15:44:33 -08001075 ret = mem_cgroup_is_descendant(from, memcg) ||
1076 mem_cgroup_is_descendant(to, memcg);
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001077unlock:
1078 spin_unlock(&mc.lock);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001079 return ret;
1080}
1081
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001082static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001083{
1084 if (mc.moving_task && current != mc.moving_task) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001085 if (mem_cgroup_under_move(memcg)) {
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001086 DEFINE_WAIT(wait);
1087 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1088 /* moving charge context might have finished. */
1089 if (mc.moving_task)
1090 schedule();
1091 finish_wait(&mc.waitq, &wait);
1092 return true;
1093 }
1094 }
1095 return false;
1096}
1097
Johannes Weiner71cd3112017-05-03 14:55:13 -07001098unsigned int memcg1_stats[] = {
1099 MEMCG_CACHE,
1100 MEMCG_RSS,
1101 MEMCG_RSS_HUGE,
1102 NR_SHMEM,
1103 NR_FILE_MAPPED,
1104 NR_FILE_DIRTY,
1105 NR_WRITEBACK,
1106 MEMCG_SWAP,
1107};
1108
1109static const char *const memcg1_stat_names[] = {
1110 "cache",
1111 "rss",
1112 "rss_huge",
1113 "shmem",
1114 "mapped_file",
1115 "dirty",
1116 "writeback",
1117 "swap",
1118};
1119
Sha Zhengju58cf1882013-02-22 16:32:05 -08001120#define K(x) ((x) << (PAGE_SHIFT-10))
Balbir Singhe2224322009-04-02 16:57:39 -07001121/**
Sha Zhengju58cf1882013-02-22 16:32:05 -08001122 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
Balbir Singhe2224322009-04-02 16:57:39 -07001123 * @memcg: The memory cgroup that went over limit
1124 * @p: Task that is going to be killed
1125 *
1126 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1127 * enabled
1128 */
1129void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1130{
Sha Zhengju58cf1882013-02-22 16:32:05 -08001131 struct mem_cgroup *iter;
1132 unsigned int i;
Balbir Singhe2224322009-04-02 16:57:39 -07001133
Balbir Singhe2224322009-04-02 16:57:39 -07001134 rcu_read_lock();
1135
Balasubramani Vivekanandan2415b9f2015-04-14 15:48:18 -07001136 if (p) {
1137 pr_info("Task in ");
1138 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1139 pr_cont(" killed as a result of limit of ");
1140 } else {
1141 pr_info("Memory limit reached of cgroup ");
1142 }
1143
Tejun Heoe61734c2014-02-12 09:29:50 -05001144 pr_cont_cgroup_path(memcg->css.cgroup);
Greg Thelen0346dad2015-01-26 12:58:38 -08001145 pr_cont("\n");
Balbir Singhe2224322009-04-02 16:57:39 -07001146
Balbir Singhe2224322009-04-02 16:57:39 -07001147 rcu_read_unlock();
1148
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001149 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1150 K((u64)page_counter_read(&memcg->memory)),
1151 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1152 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1153 K((u64)page_counter_read(&memcg->memsw)),
1154 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1155 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1156 K((u64)page_counter_read(&memcg->kmem)),
1157 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001158
1159 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heoe61734c2014-02-12 09:29:50 -05001160 pr_info("Memory cgroup stats for ");
1161 pr_cont_cgroup_path(iter->css.cgroup);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001162 pr_cont(":");
1163
Johannes Weiner71cd3112017-05-03 14:55:13 -07001164 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
1165 if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
Sha Zhengju58cf1882013-02-22 16:32:05 -08001166 continue;
Johannes Weiner71cd3112017-05-03 14:55:13 -07001167 pr_cont(" %s:%luKB", memcg1_stat_names[i],
Johannes Weinerccda7f42017-05-03 14:55:16 -07001168 K(memcg_page_state(iter, memcg1_stats[i])));
Sha Zhengju58cf1882013-02-22 16:32:05 -08001169 }
1170
1171 for (i = 0; i < NR_LRU_LISTS; i++)
1172 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1173 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1174
1175 pr_cont("\n");
1176 }
Balbir Singhe2224322009-04-02 16:57:39 -07001177}
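
/*
 * Example output shape (made-up numbers), assembled from the pr_info()
 * format strings above:
 *
 *	memory: usage 102400kB, limit 524288kB, failcnt 0
 *	memory+swap: usage 102400kB, limit 524288kB, failcnt 0
 *	kmem: usage 2048kB, limit 524288kB, failcnt 0
 */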
1178
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001179/*
1180 * This function returns the number of memcgs in the hierarchy tree. Returns
1181 * 1 (the memcg itself) if there are no children.
1182 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001183static int mem_cgroup_count_children(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001184{
1185 int num = 0;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001186 struct mem_cgroup *iter;
1187
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001188 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001189 num++;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001190 return num;
1191}
1192
Balbir Singh6d61ef42009-01-07 18:08:06 -08001193/*
David Rientjesa63d83f2010-08-09 17:19:46 -07001194 * Return the memory (and swap, if configured) limit for a memcg.
1195 */
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001196unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
David Rientjesa63d83f2010-08-09 17:19:46 -07001197{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001198 unsigned long limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001199
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001200 limit = memcg->memory.limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001201 if (mem_cgroup_swappiness(memcg)) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001202 unsigned long memsw_limit;
Vladimir Davydov37e84352016-01-20 15:02:56 -08001203 unsigned long swap_limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001204
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001205 memsw_limit = memcg->memsw.limit;
Vladimir Davydov37e84352016-01-20 15:02:56 -08001206 swap_limit = memcg->swap.limit;
1207 swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1208 limit = min(limit + swap_limit, memsw_limit);
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001209 }
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001210 return limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001211}
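
/*
 * Worked example (hypothetical numbers): with memory.limit = 1000 pages,
 * swap.limit = 400 pages (and at least that much swap present),
 * memsw.limit = 1200 pages and a non-zero swappiness, the effective
 * limit is min(1000 + 400, 1200) = 1200 pages.
 */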
1212
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07001213static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
David Rientjes19965462012-12-11 16:00:26 -08001214 int order)
David Rientjes9cbb78b2012-07-31 16:43:44 -07001215{
David Rientjes6e0fc462015-09-08 15:00:36 -07001216 struct oom_control oc = {
1217 .zonelist = NULL,
1218 .nodemask = NULL,
Vladimir Davydov2a966b72016-07-26 15:22:33 -07001219 .memcg = memcg,
David Rientjes6e0fc462015-09-08 15:00:36 -07001220 .gfp_mask = gfp_mask,
1221 .order = order,
David Rientjes6e0fc462015-09-08 15:00:36 -07001222 };
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001223 bool ret;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001224
Johannes Weinerdc564012015-06-24 16:57:19 -07001225 mutex_lock(&oom_lock);
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001226 ret = out_of_memory(&oc);
Johannes Weinerdc564012015-06-24 16:57:19 -07001227 mutex_unlock(&oom_lock);
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001228 return ret;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001229}
1230
Michele Curtiae6e71d2014-12-12 16:56:35 -08001231#if MAX_NUMNODES > 1
1232
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001233/**
1234 * test_mem_cgroup_node_reclaimable
Wanpeng Lidad75572012-06-20 12:53:01 -07001235 * @memcg: the target memcg
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001236 * @nid: the node ID to be checked.
1237 * @noswap: specify true here if the user wants file-only information.
1238 *
1239 * This function returns whether the specified memcg contains any
1240 * reclaimable pages on a node. Returns true if there are any reclaimable
1241 * pages in the node.
1242 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001243static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001244 int nid, bool noswap)
1245{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001246 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001247 return true;
1248 if (noswap || !total_swap_pages)
1249 return false;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001250 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001251 return true;
1252 return false;
1253
1254}
Ying Han889976d2011-05-26 16:25:33 -07001255
1256/*
1257 * Always updating the nodemask is not very good - even if we have an empty
1258 * list or the wrong list here, we can start from some node and traverse all
1259 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1260 *
1261 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001262static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001263{
1264 int nid;
KAMEZAWA Hiroyuki453a9bf32011-07-08 15:39:43 -07001265 /*
1266 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1267 * pagein/pageout changes since the last update.
1268 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001269 if (!atomic_read(&memcg->numainfo_events))
KAMEZAWA Hiroyuki453a9bf32011-07-08 15:39:43 -07001270 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001271 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
Ying Han889976d2011-05-26 16:25:33 -07001272 return;
1273
Ying Han889976d2011-05-26 16:25:33 -07001274 /* make a nodemask where this memcg uses memory from */
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001275 memcg->scan_nodes = node_states[N_MEMORY];
Ying Han889976d2011-05-26 16:25:33 -07001276
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001277 for_each_node_mask(nid, node_states[N_MEMORY]) {
Ying Han889976d2011-05-26 16:25:33 -07001278
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001279 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1280 node_clear(nid, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001281 }
KAMEZAWA Hiroyuki453a9bf32011-07-08 15:39:43 -07001282
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001283 atomic_set(&memcg->numainfo_events, 0);
1284 atomic_set(&memcg->numainfo_updating, 0);
Ying Han889976d2011-05-26 16:25:33 -07001285}
1286
1287/*
1288 * Select a node to start reclaim from. Because all we need is just
1289 * to reduce the usage counter, starting from anywhere is OK. Considering
1290 * memory reclaim from the current node, there are pros and cons.
1291 *
1292 * Freeing memory from the current node means freeing memory from a node which
1293 * we'll use or have used, so it may degrade the LRU. And if several threads
1294 * hit their limits, they will contend on one node. But freeing from a remote
1295 * node costs more for memory reclaim because of memory latency.
1296 *
1297 * For now, we use round-robin. A better algorithm is welcome.
1298 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001299int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001300{
1301 int node;
1302
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001303 mem_cgroup_may_update_nodemask(memcg);
1304 node = memcg->last_scanned_node;
Ying Han889976d2011-05-26 16:25:33 -07001305
Andrew Morton0edaf862016-05-19 17:10:58 -07001306 node = next_node_in(node, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001307 /*
Michal Hockofda3d692016-05-19 17:11:34 -07001308 * mem_cgroup_may_update_nodemask might have seen no reclaimable pages
1309 * last time it really checked all the LRUs due to rate limiting.
1310 * Fallback to the current node in that case for simplicity.
Ying Han889976d2011-05-26 16:25:33 -07001311 */
1312 if (unlikely(node == MAX_NUMNODES))
1313 node = numa_node_id();
1314
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001315 memcg->last_scanned_node = node;
Ying Han889976d2011-05-26 16:25:33 -07001316 return node;
1317}
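
/*
 * Round-robin example (hypothetical): with scan_nodes = { 0, 2 } and
 * last_scanned_node == 0, next_node_in() above picks node 2, and the
 * following call wraps back to node 0. An empty mask yields
 * MAX_NUMNODES, in which case we fall back to numa_node_id().
 */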
Ying Han889976d2011-05-26 16:25:33 -07001318#else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001319int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001320{
1321 return 0;
1322}
1323#endif
1324
Andrew Morton0608f432013-09-24 15:27:41 -07001325static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
Mel Gormanef8f2322016-07-28 15:46:05 -07001326 pg_data_t *pgdat,
Andrew Morton0608f432013-09-24 15:27:41 -07001327 gfp_t gfp_mask,
1328 unsigned long *total_scanned)
Balbir Singh6d61ef42009-01-07 18:08:06 -08001329{
Andrew Morton0608f432013-09-24 15:27:41 -07001330 struct mem_cgroup *victim = NULL;
1331 int total = 0;
1332 int loop = 0;
1333 unsigned long excess;
1334 unsigned long nr_scanned;
1335 struct mem_cgroup_reclaim_cookie reclaim = {
Mel Gormanef8f2322016-07-28 15:46:05 -07001336 .pgdat = pgdat,
Andrew Morton0608f432013-09-24 15:27:41 -07001337 .priority = 0,
1338 };
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001339
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001340 excess = soft_limit_excess(root_memcg);
Balbir Singh6d61ef42009-01-07 18:08:06 -08001341
Andrew Morton0608f432013-09-24 15:27:41 -07001342 while (1) {
1343 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1344 if (!victim) {
1345 loop++;
1346 if (loop >= 2) {
1347 /*
1348 * If we have not been able to reclaim
1349 * anything, it might be because there are
1350 * no reclaimable pages under this hierarchy
1351 */
1352 if (!total)
1353 break;
1354 /*
1355 * We want to do more targeted reclaim.
1356 * excess >> 2 is not so large that we
1357 * reclaim too much, nor so small that we keep
1358 * coming back to reclaim from this cgroup
1359 */
1360 if (total >= (excess >> 2) ||
1361 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1362 break;
1363 }
1364 continue;
1365 }
Mel Gormana9dd0a82016-07-28 15:46:02 -07001366 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
Mel Gormanef8f2322016-07-28 15:46:05 -07001367 pgdat, &nr_scanned);
Andrew Morton0608f432013-09-24 15:27:41 -07001368 *total_scanned += nr_scanned;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001369 if (!soft_limit_excess(root_memcg))
Andrew Morton0608f432013-09-24 15:27:41 -07001370 break;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001371 }
Andrew Morton0608f432013-09-24 15:27:41 -07001372 mem_cgroup_iter_break(root_memcg, victim);
1373 return total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001374}
1375
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001376#ifdef CONFIG_LOCKDEP
1377static struct lockdep_map memcg_oom_lock_dep_map = {
1378 .name = "memcg_oom_lock",
1379};
1380#endif
1381
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001382static DEFINE_SPINLOCK(memcg_oom_lock);
1383
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001384/*
1385 * Check OOM-Killer is already running under our hierarchy.
1386 * If someone is running, return false.
1387 */
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001388static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001389{
Michal Hocko79dfdac2011-07-26 16:08:23 -07001390 struct mem_cgroup *iter, *failed = NULL;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001391
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001392 spin_lock(&memcg_oom_lock);
1393
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001394 for_each_mem_cgroup_tree(iter, memcg) {
Johannes Weiner23751be2011-08-25 15:59:16 -07001395 if (iter->oom_lock) {
Michal Hocko79dfdac2011-07-26 16:08:23 -07001396 /*
1397 * this subtree of our hierarchy is already locked
1398 * so we cannot take the lock.
1399 */
Michal Hocko79dfdac2011-07-26 16:08:23 -07001400 failed = iter;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001401 mem_cgroup_iter_break(memcg, iter);
1402 break;
Johannes Weiner23751be2011-08-25 15:59:16 -07001403 } else
1404 iter->oom_lock = true;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001405 }
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001406
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001407 if (failed) {
1408 /*
1409 * OK, we failed to lock the whole subtree so we have
1410 * to clean up what we already set up, up to the failing cgroup
1411 */
1412 for_each_mem_cgroup_tree(iter, memcg) {
1413 if (iter == failed) {
1414 mem_cgroup_iter_break(memcg, iter);
1415 break;
1416 }
1417 iter->oom_lock = false;
Michal Hocko79dfdac2011-07-26 16:08:23 -07001418 }
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001419 } else
1420 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001421
1422 spin_unlock(&memcg_oom_lock);
1423
1424 return !failed;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001425}
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001426
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001427static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001428{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001429 struct mem_cgroup *iter;
1430
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001431 spin_lock(&memcg_oom_lock);
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001432 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001433 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001434 iter->oom_lock = false;
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001435 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001436}
1437
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001438static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001439{
1440 struct mem_cgroup *iter;
1441
Tejun Heoc2b42d32015-06-24 16:58:23 -07001442 spin_lock(&memcg_oom_lock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001443 for_each_mem_cgroup_tree(iter, memcg)
Tejun Heoc2b42d32015-06-24 16:58:23 -07001444 iter->under_oom++;
1445 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001446}
1447
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001448static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001449{
1450 struct mem_cgroup *iter;
1451
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001452 /*
1453 * When a new child is created while the hierarchy is under oom,
Tejun Heoc2b42d32015-06-24 16:58:23 -07001454 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001455 */
Tejun Heoc2b42d32015-06-24 16:58:23 -07001456 spin_lock(&memcg_oom_lock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001457 for_each_mem_cgroup_tree(iter, memcg)
Tejun Heoc2b42d32015-06-24 16:58:23 -07001458 if (iter->under_oom > 0)
1459 iter->under_oom--;
1460 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001461}
1462
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001463static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1464
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001465struct oom_wait_info {
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001466 struct mem_cgroup *memcg;
Ingo Molnarac6424b2017-06-20 12:06:13 +02001467 wait_queue_entry_t wait;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001468};
1469
Ingo Molnarac6424b2017-06-20 12:06:13 +02001470static int memcg_oom_wake_function(wait_queue_entry_t *wait,
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001471 unsigned mode, int sync, void *arg)
1472{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001473 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1474 struct mem_cgroup *oom_wait_memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001475 struct oom_wait_info *oom_wait_info;
1476
1477 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001478 oom_wait_memcg = oom_wait_info->memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001479
Johannes Weiner2314b422014-12-10 15:44:33 -08001480 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1481 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001482 return 0;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001483 return autoremove_wake_function(wait, mode, sync, arg);
1484}
1485
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001486static void memcg_oom_recover(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001487{
Tejun Heoc2b42d32015-06-24 16:58:23 -07001488 /*
1489 * For the following lockless ->under_oom test, the only required
1490 * guarantee is that it must see the state asserted by an OOM when
1491 * this function is called as a result of userland actions
1492 * triggered by the notification of the OOM. This is trivially
1493 * achieved by invoking mem_cgroup_mark_under_oom() before
1494 * triggering notification.
1495 */
1496 if (memcg && memcg->under_oom)
Tejun Heof4b90b702015-06-24 16:58:21 -07001497 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001498}
1499
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001500static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001501{
Andrew Mortond0db7af2016-06-08 15:33:47 -07001502 if (!current->memcg_may_oom)
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001503 return;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001504 /*
Johannes Weiner49426422013-10-16 13:46:59 -07001505 * We are in the middle of the charge context here, so we
1506 * don't want to block when potentially sitting on a callstack
1507 * that holds all kinds of filesystem and mm locks.
1508 *
1509 * Also, the caller may handle a failed allocation gracefully
1510 * (like optional page cache readahead) and so an OOM killer
1511 * invocation might not even be necessary.
1512 *
1513 * That's why we don't do anything here except remember the
1514 * OOM context and then deal with it at the end of the page
1515 * fault when the stack is unwound, the locks are released,
1516 * and when we know whether the fault was overall successful.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001517 */
Johannes Weiner49426422013-10-16 13:46:59 -07001518 css_get(&memcg->css);
Tejun Heo626ebc42015-11-05 18:46:09 -08001519 current->memcg_in_oom = memcg;
1520 current->memcg_oom_gfp_mask = mask;
1521 current->memcg_oom_order = order;
Johannes Weiner49426422013-10-16 13:46:59 -07001522}
1523
1524/**
1525 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1526 * @handle: actually kill/wait or just clean up the OOM state
1527 *
1528 * This has to be called at the end of a page fault if the memcg OOM
1529 * handler was enabled.
1530 *
1531 * Memcg supports userspace OOM handling where failed allocations must
1532 * sleep on a waitqueue until the userspace task resolves the
1533 * situation. Sleeping directly in the charge context with all kinds
1534 * of locks held is not a good idea, instead we remember an OOM state
1535 * in the task and mem_cgroup_oom_synchronize() has to be called at
1536 * the end of the page fault to complete the OOM handling.
1537 *
1538 * Returns %true if an ongoing memcg OOM situation was detected and
1539 * completed, %false otherwise.
1540 */
1541bool mem_cgroup_oom_synchronize(bool handle)
1542{
Tejun Heo626ebc42015-11-05 18:46:09 -08001543 struct mem_cgroup *memcg = current->memcg_in_oom;
Johannes Weiner49426422013-10-16 13:46:59 -07001544 struct oom_wait_info owait;
1545 bool locked;
1546
1547 /* OOM is global, do not handle */
1548 if (!memcg)
1549 return false;
1550
Vladimir Davydov7c5f64f2016-10-07 16:57:23 -07001551 if (!handle)
Johannes Weiner49426422013-10-16 13:46:59 -07001552 goto cleanup;
1553
1554 owait.memcg = memcg;
1555 owait.wait.flags = 0;
1556 owait.wait.func = memcg_oom_wake_function;
1557 owait.wait.private = current;
Ingo Molnar2055da92017-06-20 12:06:46 +02001558 INIT_LIST_HEAD(&owait.wait.entry);
Johannes Weiner49426422013-10-16 13:46:59 -07001559
1560 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001561 mem_cgroup_mark_under_oom(memcg);
1562
1563 locked = mem_cgroup_oom_trylock(memcg);
1564
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001565 if (locked)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001566 mem_cgroup_oom_notify(memcg);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001567
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001568 if (locked && !memcg->oom_kill_disable) {
1569 mem_cgroup_unmark_under_oom(memcg);
Johannes Weiner49426422013-10-16 13:46:59 -07001570 finish_wait(&memcg_oom_waitq, &owait.wait);
Tejun Heo626ebc42015-11-05 18:46:09 -08001571 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1572 current->memcg_oom_order);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001573 } else {
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001574 schedule();
Johannes Weiner49426422013-10-16 13:46:59 -07001575 mem_cgroup_unmark_under_oom(memcg);
1576 finish_wait(&memcg_oom_waitq, &owait.wait);
1577 }
1578
1579 if (locked) {
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001580 mem_cgroup_oom_unlock(memcg);
1581 /*
1582 * There is no guarantee that an OOM-lock contender
1583 * sees the wakeups triggered by the OOM kill
1584 * uncharges. Wake any sleepers explicitly.
1585 */
1586 memcg_oom_recover(memcg);
1587 }
Johannes Weiner49426422013-10-16 13:46:59 -07001588cleanup:
Tejun Heo626ebc42015-11-05 18:46:09 -08001589 current->memcg_in_oom = NULL;
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001590 css_put(&memcg->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001591 return true;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001592}
1593
Johannes Weinerd7365e72014-10-29 14:50:48 -07001594/**
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001595 * lock_page_memcg - lock a page->mem_cgroup binding
1596 * @page: the page
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001597 *
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001598 * This function protects unlocked LRU pages from being moved to
Johannes Weiner739f79f2017-08-18 15:15:48 -07001599 * another cgroup.
1600 *
1601 * It ensures lifetime of the returned memcg. Caller is responsible
1602 * for the lifetime of the page; __unlock_page_memcg() is available
1603 * when @page might get freed inside the locked section.
Balbir Singhd69b0422009-06-17 16:26:34 -07001604 */
Johannes Weiner739f79f2017-08-18 15:15:48 -07001605struct mem_cgroup *lock_page_memcg(struct page *page)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001606{
1607 struct mem_cgroup *memcg;
Johannes Weiner6de22612015-02-11 15:25:01 -08001608 unsigned long flags;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001609
Johannes Weiner6de22612015-02-11 15:25:01 -08001610 /*
1611 * The RCU lock is held throughout the transaction. The fast
1612 * path can get away without acquiring the memcg->move_lock
1613 * because page moving starts with an RCU grace period.
Johannes Weiner739f79f2017-08-18 15:15:48 -07001614 *
1615 * The RCU lock also protects the memcg from being freed when
1616 * the page state that is going to change is the only thing
1617 * preventing the page itself from being freed. E.g. writeback
1618 * doesn't hold a page reference and relies on PG_writeback to
1619 * keep off truncation, migration and so forth.
1620 */
Johannes Weinerd7365e72014-10-29 14:50:48 -07001621 rcu_read_lock();
1622
1623 if (mem_cgroup_disabled())
Johannes Weiner739f79f2017-08-18 15:15:48 -07001624 return NULL;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001625again:
Johannes Weiner1306a852014-12-10 15:44:52 -08001626 memcg = page->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08001627 if (unlikely(!memcg))
Johannes Weiner739f79f2017-08-18 15:15:48 -07001628 return NULL;
Johannes Weinerd7365e72014-10-29 14:50:48 -07001629
Qiang Huangbdcbb652014-06-04 16:08:21 -07001630 if (atomic_read(&memcg->moving_account) <= 0)
Johannes Weiner739f79f2017-08-18 15:15:48 -07001631 return memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001632
Johannes Weiner6de22612015-02-11 15:25:01 -08001633 spin_lock_irqsave(&memcg->move_lock, flags);
Johannes Weiner1306a852014-12-10 15:44:52 -08001634 if (memcg != page->mem_cgroup) {
Johannes Weiner6de22612015-02-11 15:25:01 -08001635 spin_unlock_irqrestore(&memcg->move_lock, flags);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001636 goto again;
1637 }
Johannes Weiner6de22612015-02-11 15:25:01 -08001638
1639 /*
1640 * When charge migration first begins, we can have locked and
1641 * unlocked page stat updates happening concurrently. Track
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001642 * the task that holds the lock for unlock_page_memcg().
Johannes Weiner6de22612015-02-11 15:25:01 -08001643 */
1644 memcg->move_lock_task = current;
1645 memcg->move_lock_flags = flags;
Johannes Weinerd7365e72014-10-29 14:50:48 -07001646
Johannes Weiner739f79f2017-08-18 15:15:48 -07001647 return memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001648}
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001649EXPORT_SYMBOL(lock_page_memcg);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001650
Johannes Weinerd7365e72014-10-29 14:50:48 -07001651/**
Johannes Weiner739f79f2017-08-18 15:15:48 -07001652 * __unlock_page_memcg - unlock and unpin a memcg
1653 * @memcg: the memcg
1654 *
1655 * Unlock and unpin a memcg returned by lock_page_memcg().
Johannes Weinerd7365e72014-10-29 14:50:48 -07001656 */
Johannes Weiner739f79f2017-08-18 15:15:48 -07001657void __unlock_page_memcg(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001658{
Johannes Weiner6de22612015-02-11 15:25:01 -08001659 if (memcg && memcg->move_lock_task == current) {
1660 unsigned long flags = memcg->move_lock_flags;
1661
1662 memcg->move_lock_task = NULL;
1663 memcg->move_lock_flags = 0;
1664
1665 spin_unlock_irqrestore(&memcg->move_lock, flags);
1666 }
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001667
Johannes Weinerd7365e72014-10-29 14:50:48 -07001668 rcu_read_unlock();
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001669}
Johannes Weiner739f79f2017-08-18 15:15:48 -07001670
1671/**
1672 * unlock_page_memcg - unlock a page->mem_cgroup binding
1673 * @page: the page
1674 */
1675void unlock_page_memcg(struct page *page)
1676{
1677 __unlock_page_memcg(page->mem_cgroup);
1678}
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07001679EXPORT_SYMBOL(unlock_page_memcg);
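
/*
 * Typical usage sketch: callers bracket updates that rely on a stable
 * page->mem_cgroup binding, e.g.
 *
 *	memcg = lock_page_memcg(page);
 *	... update page state and per-memcg statistics ...
 *	unlock_page_memcg(page);
 *
 * (or __unlock_page_memcg(memcg) if the page may be freed meanwhile).
 */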
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001680
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001681struct memcg_stock_pcp {
1682 struct mem_cgroup *cached; /* this never be root cgroup */
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001683 unsigned int nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001684 struct work_struct work;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001685 unsigned long flags;
Kirill A. Shutemova0db00f2012-05-29 15:06:56 -07001686#define FLUSHING_CACHED_CHARGE 0
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001687};
1688static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
Michal Hocko9f50fad2011-08-09 11:56:26 +02001689static DEFINE_MUTEX(percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001690
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001691/**
1692 * consume_stock: Try to consume stocked charge on this cpu.
1693 * @memcg: memcg to consume from.
1694 * @nr_pages: how many pages to charge.
1695 *
1696 * The charges will only happen if @memcg matches the current cpu's memcg
1697 * stock, and at least @nr_pages are available in that stock. Failure to
1698 * service an allocation will refill the stock.
1699 *
1700 * returns true if successful, false otherwise.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001701 */
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001702static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001703{
1704 struct memcg_stock_pcp *stock;
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001705 unsigned long flags;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001706 bool ret = false;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001707
Johannes Weinera983b5e2018-01-31 16:16:45 -08001708 if (nr_pages > MEMCG_CHARGE_BATCH)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001709 return ret;
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001710
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001711 local_irq_save(flags);
1712
1713 stock = this_cpu_ptr(&memcg_stock);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001714 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
Suleiman Souhlala0956d52012-12-18 14:21:36 -08001715 stock->nr_pages -= nr_pages;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001716 ret = true;
1717 }
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001718
1719 local_irq_restore(flags);
1720
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001721 return ret;
1722}
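
/*
 * Sketch of the charge fast path in try_charge() below (names as
 * defined in this file): a charge first tries the per-cpu stock and
 * only falls back to the page counters when that fails:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;
 *	// slow path: page_counter_try_charge(), reclaim, refill_stock()
 */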
1723
1724/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001725 * Returns the cached charges to the page counters and resets the cached information.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001726 */
1727static void drain_stock(struct memcg_stock_pcp *stock)
1728{
1729 struct mem_cgroup *old = stock->cached;
1730
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001731 if (stock->nr_pages) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001732 page_counter_uncharge(&old->memory, stock->nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08001733 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001734 page_counter_uncharge(&old->memsw, stock->nr_pages);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08001735 css_put_many(&old->css, stock->nr_pages);
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001736 stock->nr_pages = 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001737 }
1738 stock->cached = NULL;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001739}
1740
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001741static void drain_local_stock(struct work_struct *dummy)
1742{
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001743 struct memcg_stock_pcp *stock;
1744 unsigned long flags;
1745
Michal Hocko72f01842017-10-03 16:14:53 -07001746 /*
1747 * The only protection from memory hotplug vs. drain_stock races is
1748 * that we always operate on the local CPU stock here with IRQs disabled.
1749 */
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001750 local_irq_save(flags);
1751
1752 stock = this_cpu_ptr(&memcg_stock);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001753 drain_stock(stock);
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001754 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001755
1756 local_irq_restore(flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001757}
1758
1759/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001760 * Cache charges (@nr_pages) in the local per-cpu area.
Greg Thelen320cc512010-03-15 15:27:28 +01001761 * They will be consumed by the consume_stock() function later.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001762 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001763static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001764{
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001765 struct memcg_stock_pcp *stock;
1766 unsigned long flags;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001767
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001768 local_irq_save(flags);
1769
1770 stock = this_cpu_ptr(&memcg_stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001771 if (stock->cached != memcg) { /* reset if necessary */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001772 drain_stock(stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001773 stock->cached = memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001774 }
Johannes Weiner11c9ea42011-03-23 16:42:34 -07001775 stock->nr_pages += nr_pages;
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001776
Johannes Weinera983b5e2018-01-31 16:16:45 -08001777 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
Roman Gushchin475d0482017-09-08 16:13:09 -07001778 drain_stock(stock);
1779
Johannes Weinerdb2ba40c2016-09-19 14:44:36 -07001780 local_irq_restore(flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001781}
1782
1783/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001784 * Drains all per-CPU charge caches for given root_memcg resp. subtree
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001785 * of the hierarchy under it.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001786 */
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001787static void drain_all_stock(struct mem_cgroup *root_memcg)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001788{
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001789 int cpu, curcpu;
Michal Hockod38144b2011-07-26 16:08:28 -07001790
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001791	/* If someone's already draining, avoid running more workers. */
1792 if (!mutex_trylock(&percpu_charge_mutex))
1793 return;
Michal Hocko72f01842017-10-03 16:14:53 -07001794 /*
1795 * Notify other CPUs that a system-wide "drain" is running.
1796 * We do not care about races with CPU hotplug because CPU-down
1797 * as well as workers from this path always operate on the local
1798 * per-cpu data. CPU-up doesn't touch memcg_stock at all.
1799 */
Johannes Weiner5af12d02011-08-25 15:59:07 -07001800 curcpu = get_cpu();
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001801 for_each_online_cpu(cpu) {
1802 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001803 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001804
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001805 memcg = stock->cached;
Michal Hocko72f01842017-10-03 16:14:53 -07001806 if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css))
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07001807 continue;
Michal Hocko72f01842017-10-03 16:14:53 -07001808 if (!mem_cgroup_is_descendant(memcg, root_memcg)) {
1809 css_put(&memcg->css);
Michal Hocko3e920412011-07-26 16:08:29 -07001810 continue;
Michal Hocko72f01842017-10-03 16:14:53 -07001811 }
Michal Hockod1a05b62011-07-26 16:08:27 -07001812 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1813 if (cpu == curcpu)
1814 drain_local_stock(&stock->work);
1815 else
1816 schedule_work_on(cpu, &stock->work);
1817 }
Michal Hocko72f01842017-10-03 16:14:53 -07001818 css_put(&memcg->css);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001819 }
Johannes Weiner5af12d02011-08-25 15:59:07 -07001820 put_cpu();
Michal Hocko9f50fad2011-08-09 11:56:26 +02001821 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001822}
1823
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01001824static int memcg_hotplug_cpu_dead(unsigned int cpu)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001825{
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001826 struct memcg_stock_pcp *stock;
Johannes Weinera983b5e2018-01-31 16:16:45 -08001827 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001828
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001829 stock = &per_cpu(memcg_stock, cpu);
1830 drain_stock(stock);
Johannes Weinera983b5e2018-01-31 16:16:45 -08001831
1832 for_each_mem_cgroup(memcg) {
1833 int i;
1834
1835 for (i = 0; i < MEMCG_NR_STAT; i++) {
1836 int nid;
1837 long x;
1838
1839 x = this_cpu_xchg(memcg->stat_cpu->count[i], 0);
1840 if (x)
1841 atomic_long_add(x, &memcg->stat[i]);
1842
1843 if (i >= NR_VM_NODE_STAT_ITEMS)
1844 continue;
1845
1846 for_each_node(nid) {
1847 struct mem_cgroup_per_node *pn;
1848
1849 pn = mem_cgroup_nodeinfo(memcg, nid);
1850 x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
1851 if (x)
1852 atomic_long_add(x, &pn->lruvec_stat[i]);
1853 }
1854 }
1855
1856 for (i = 0; i < MEMCG_NR_EVENTS; i++) {
1857 long x;
1858
1859 x = this_cpu_xchg(memcg->stat_cpu->events[i], 0);
1860 if (x)
1861 atomic_long_add(x, &memcg->events[i]);
1862 }
1863 }
1864
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01001865 return 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001866}
1867
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001868static void reclaim_high(struct mem_cgroup *memcg,
1869 unsigned int nr_pages,
1870 gfp_t gfp_mask)
1871{
1872 do {
1873 if (page_counter_read(&memcg->memory) <= memcg->high)
1874 continue;
Johannes Weiner31176c72017-05-03 14:55:07 -07001875 mem_cgroup_event(memcg, MEMCG_HIGH);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001876 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1877 } while ((memcg = parent_mem_cgroup(memcg)));
1878}
1879
1880static void high_work_func(struct work_struct *work)
1881{
1882 struct mem_cgroup *memcg;
1883
1884 memcg = container_of(work, struct mem_cgroup, high_work);
Johannes Weinera983b5e2018-01-31 16:16:45 -08001885 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001886}
1887
Tejun Heob23afb92015-11-05 18:46:11 -08001888/*
1889 * Scheduled by try_charge() to be executed from the userland return path
1890 * and reclaims memory over the high limit.
1891 */
1892void mem_cgroup_handle_over_high(void)
1893{
1894 unsigned int nr_pages = current->memcg_nr_pages_over_high;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001895 struct mem_cgroup *memcg;
Tejun Heob23afb92015-11-05 18:46:11 -08001896
1897 if (likely(!nr_pages))
1898 return;
1899
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08001900 memcg = get_mem_cgroup_from_mm(current->mm);
1901 reclaim_high(memcg, nr_pages, GFP_KERNEL);
Tejun Heob23afb92015-11-05 18:46:11 -08001902 css_put(&memcg->css);
1903 current->memcg_nr_pages_over_high = 0;
1904}
1905
Johannes Weiner00501b52014-08-08 14:19:20 -07001906static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1907 unsigned int nr_pages)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001908{
Johannes Weinera983b5e2018-01-31 16:16:45 -08001909 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
Johannes Weiner9b130612014-08-06 16:05:51 -07001910 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001911 struct mem_cgroup *mem_over_limit;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001912 struct page_counter *counter;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001913 unsigned long nr_reclaimed;
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001914 bool may_swap = true;
1915 bool drained = false;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001916
Johannes Weinerce00a962014-09-05 08:43:57 -04001917 if (mem_cgroup_is_root(memcg))
Tejun Heo10d53c72015-11-05 18:46:17 -08001918 return 0;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001919retry:
Michal Hockob6b6cc72014-04-07 15:37:44 -07001920 if (consume_stock(memcg, nr_pages))
Tejun Heo10d53c72015-11-05 18:46:17 -08001921 return 0;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001922
Johannes Weiner7941d212016-01-14 15:21:23 -08001923 if (!do_memsw_account() ||
Johannes Weiner6071ca52015-11-05 18:50:26 -08001924 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1925 if (page_counter_try_charge(&memcg->memory, batch, &counter))
Johannes Weiner6539cc02014-08-06 16:05:42 -07001926 goto done_restock;
Johannes Weiner7941d212016-01-14 15:21:23 -08001927 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001928 page_counter_uncharge(&memcg->memsw, batch);
1929 mem_over_limit = mem_cgroup_from_counter(counter, memory);
Johannes Weiner3fbe7242014-10-09 15:28:54 -07001930 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001931 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001932 may_swap = false;
Johannes Weiner3fbe7242014-10-09 15:28:54 -07001933 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08001934
Johannes Weiner6539cc02014-08-06 16:05:42 -07001935 if (batch > nr_pages) {
1936 batch = nr_pages;
1937 goto retry;
1938 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001939
Johannes Weiner06b078f2014-08-06 16:05:44 -07001940 /*
1941 * Unlike in global OOM situations, memcg is not in a physical
1942 * memory shortage. Allow dying and OOM-killed tasks to
1943 * bypass the last charges so that they can exit quickly and
1944 * free their memory.
1945 */
Michal Hockoda99ecf2017-09-06 16:24:53 -07001946 if (unlikely(tsk_is_oom_victim(current) ||
Johannes Weiner06b078f2014-08-06 16:05:44 -07001947 fatal_signal_pending(current) ||
1948 current->flags & PF_EXITING))
Tejun Heo10d53c72015-11-05 18:46:17 -08001949 goto force;
Johannes Weiner06b078f2014-08-06 16:05:44 -07001950
Johannes Weiner89a28482016-10-27 17:46:56 -07001951 /*
1952 * Prevent unbounded recursion when reclaim operations need to
1953 * allocate memory. This might exceed the limits temporarily,
1954 * but we prefer facilitating memory reclaim and getting back
1955 * under the limit over triggering OOM kills in these cases.
1956 */
1957 if (unlikely(current->flags & PF_MEMALLOC))
1958 goto force;
1959
Johannes Weiner06b078f2014-08-06 16:05:44 -07001960 if (unlikely(task_in_memcg_oom(current)))
1961 goto nomem;
1962
Mel Gormand0164ad2015-11-06 16:28:21 -08001963 if (!gfpflags_allow_blocking(gfp_mask))
Johannes Weiner6539cc02014-08-06 16:05:42 -07001964 goto nomem;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001965
Johannes Weiner31176c72017-05-03 14:55:07 -07001966 mem_cgroup_event(mem_over_limit, MEMCG_MAX);
Johannes Weiner241994ed2015-02-11 15:26:06 -08001967
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001968 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1969 gfp_mask, may_swap);
Johannes Weiner6539cc02014-08-06 16:05:42 -07001970
Johannes Weiner61e02c72014-08-06 16:08:16 -07001971 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
Johannes Weiner6539cc02014-08-06 16:05:42 -07001972 goto retry;
Johannes Weiner28c34c22014-08-06 16:05:47 -07001973
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001974 if (!drained) {
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08001975 drain_all_stock(mem_over_limit);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07001976 drained = true;
1977 goto retry;
1978 }
1979
Johannes Weiner28c34c22014-08-06 16:05:47 -07001980 if (gfp_mask & __GFP_NORETRY)
1981 goto nomem;
Johannes Weiner6539cc02014-08-06 16:05:42 -07001982 /*
1983 * Even though the limit is exceeded at this point, reclaim
1984 * may have been able to free some pages. Retry the charge
1985 * before killing the task.
1986 *
1987 * Only for regular pages, though: huge pages are rather
1988 * unlikely to succeed so close to the limit, and we fall back
1989 * to regular pages anyway in case of failure.
1990 */
Johannes Weiner61e02c72014-08-06 16:08:16 -07001991 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
Johannes Weiner6539cc02014-08-06 16:05:42 -07001992 goto retry;
1993 /*
1994 * During task move, charges can be double-counted. So it's
1995 * better to wait until the end of task_move if one is in progress.
1996 */
1997 if (mem_cgroup_wait_acct_move(mem_over_limit))
1998 goto retry;
1999
Johannes Weiner9b130612014-08-06 16:05:51 -07002000 if (nr_retries--)
2001 goto retry;
2002
Johannes Weiner06b078f2014-08-06 16:05:44 -07002003 if (gfp_mask & __GFP_NOFAIL)
Tejun Heo10d53c72015-11-05 18:46:17 -08002004 goto force;
Johannes Weiner06b078f2014-08-06 16:05:44 -07002005
Johannes Weiner6539cc02014-08-06 16:05:42 -07002006 if (fatal_signal_pending(current))
Tejun Heo10d53c72015-11-05 18:46:17 -08002007 goto force;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002008
Johannes Weiner31176c72017-05-03 14:55:07 -07002009 mem_cgroup_event(mem_over_limit, MEMCG_OOM);
Johannes Weiner241994ed2015-02-11 15:26:06 -08002010
Jerome Marchand3608de02015-11-05 18:47:29 -08002011 mem_cgroup_oom(mem_over_limit, gfp_mask,
2012 get_order(nr_pages * PAGE_SIZE));
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002013nomem:
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002014 if (!(gfp_mask & __GFP_NOFAIL))
Johannes Weiner3168ecb2013-10-31 16:34:13 -07002015 return -ENOMEM;
Tejun Heo10d53c72015-11-05 18:46:17 -08002016force:
2017 /*
2018 * The allocation either can't fail or will lead to more memory
2019 * being freed very soon. Allow memory usage go over the limit
2020 * temporarily by force charging it.
2021 */
2022 page_counter_charge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002023 if (do_memsw_account())
Tejun Heo10d53c72015-11-05 18:46:17 -08002024 page_counter_charge(&memcg->memsw, nr_pages);
2025 css_get_many(&memcg->css, nr_pages);
2026
2027 return 0;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002028
2029done_restock:
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002030 css_get_many(&memcg->css, batch);
Johannes Weiner6539cc02014-08-06 16:05:42 -07002031 if (batch > nr_pages)
2032 refill_stock(memcg, batch - nr_pages);
Tejun Heob23afb92015-11-05 18:46:11 -08002033
Johannes Weiner241994ed2015-02-11 15:26:06 -08002034 /*
Tejun Heob23afb92015-11-05 18:46:11 -08002035 * If the hierarchy is above the normal consumption range, schedule
2036 * reclaim on returning to userland. We can perform reclaim here
Mel Gorman71baba42015-11-06 16:28:28 -08002037 * if __GFP_RECLAIM but let's always punt for simplicity and so that
Tejun Heob23afb92015-11-05 18:46:11 -08002038 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2039 * not recorded as it most likely matches current's and won't
2040 * change in the meantime. As high limit is checked again before
2041 * reclaim, the cost of mismatch is negligible.
Johannes Weiner241994ed2015-02-11 15:26:06 -08002042 */
2043 do {
Tejun Heob23afb92015-11-05 18:46:11 -08002044 if (page_counter_read(&memcg->memory) > memcg->high) {
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08002045 /* Don't bother a random interrupted task */
2046 if (in_interrupt()) {
2047 schedule_work(&memcg->high_work);
2048 break;
2049 }
Vladimir Davydov9516a182015-12-11 13:40:24 -08002050 current->memcg_nr_pages_over_high += batch;
Tejun Heob23afb92015-11-05 18:46:11 -08002051 set_notify_resume(current);
2052 break;
2053 }
Johannes Weiner241994ed2015-02-11 15:26:06 -08002054 } while ((memcg = parent_mem_cgroup(memcg)));
Tejun Heo10d53c72015-11-05 18:46:17 -08002055
2056 return 0;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002057}
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002058
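/*
 * Batching example (illustrative): a single-page charge above uses
 * batch = max(MEMCG_CHARGE_BATCH, 1) pages against the counters; on
 * success the unused batch - 1 pages are handed to refill_stock(), so
 * subsequent charges on this CPU can be served by consume_stock().
 */
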
Johannes Weiner00501b52014-08-08 14:19:20 -07002059static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002060{
Johannes Weinerce00a962014-09-05 08:43:57 -04002061 if (mem_cgroup_is_root(memcg))
2062 return;
2063
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002064 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002065 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002066 page_counter_uncharge(&memcg->memsw, nr_pages);
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002067
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002068 css_put_many(&memcg->css, nr_pages);
KAMEZAWA Hiroyukid01dd172012-05-29 15:07:03 -07002069}
2070
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002071static void lock_page_lru(struct page *page, int *isolated)
2072{
2073 struct zone *zone = page_zone(page);
2074
Mel Gormana52633d2016-07-28 15:45:28 -07002075 spin_lock_irq(zone_lru_lock(zone));
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002076 if (PageLRU(page)) {
2077 struct lruvec *lruvec;
2078
Mel Gorman599d0c92016-07-28 15:45:31 -07002079 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002080 ClearPageLRU(page);
2081 del_page_from_lru_list(page, lruvec, page_lru(page));
2082 *isolated = 1;
2083 } else
2084 *isolated = 0;
2085}
2086
2087static void unlock_page_lru(struct page *page, int isolated)
2088{
2089 struct zone *zone = page_zone(page);
2090
2091 if (isolated) {
2092 struct lruvec *lruvec;
2093
Mel Gorman599d0c92016-07-28 15:45:31 -07002094 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002095 VM_BUG_ON_PAGE(PageLRU(page), page);
2096 SetPageLRU(page);
2097 add_page_to_lru_list(page, lruvec, page_lru(page));
2098 }
Mel Gormana52633d2016-07-28 15:45:28 -07002099 spin_unlock_irq(zone_lru_lock(zone));
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002100}
2101
Johannes Weiner00501b52014-08-08 14:19:20 -07002102static void commit_charge(struct page *page, struct mem_cgroup *memcg,
Johannes Weiner6abb5a82014-08-08 14:19:33 -07002103 bool lrucare)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002104{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002105 int isolated;
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002106
Johannes Weiner1306a852014-12-10 15:44:52 -08002107 VM_BUG_ON_PAGE(page->mem_cgroup, page);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002108
2109 /*
2110 * In some cases, such as SwapCache and FUSE (splice_buf->radixtree), the page
2111 * may already be on some other mem_cgroup's LRU. Take care of it.
2112 */
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002113 if (lrucare)
2114 lock_page_lru(page, &isolated);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002115
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002116 /*
2117 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08002118 * page->mem_cgroup at this point:
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002119 *
2120 * - the page is uncharged
2121 *
2122 * - the page is off-LRU
2123 *
2124 * - an anonymous fault has exclusive page access, except for
2125 * a locked page table
2126 *
2127 * - a page cache insertion, a swapin fault, or a migration
2128 * have the page locked
2129 */
Johannes Weiner1306a852014-12-10 15:44:52 -08002130 page->mem_cgroup = memcg;
Hugh Dickins3be912772008-02-07 00:14:19 -08002131
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002132 if (lrucare)
2133 unlock_page_lru(page, isolated);
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002134}
2135
Johannes Weiner127424c2016-01-20 15:02:32 -08002136#ifndef CONFIG_SLOB
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002137static int memcg_alloc_cache_id(void)
Glauber Costa55007d82012-12-18 14:22:38 -08002138{
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002139 int id, size;
2140 int err;
Glauber Costa55007d82012-12-18 14:22:38 -08002141
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002142 id = ida_simple_get(&memcg_cache_ida,
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002143 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2144 if (id < 0)
2145 return id;
2146
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002147 if (id < memcg_nr_cache_ids)
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002148 return id;
2149
2150 /*
2151 * There's no space for the new id in memcg_caches arrays,
2152 * so we have to grow them.
2153 */
Vladimir Davydov05257a12015-02-12 14:59:01 -08002154 down_write(&memcg_cache_ids_sem);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002155
2156 size = 2 * (id + 1);
Glauber Costa55007d82012-12-18 14:22:38 -08002157 if (size < MEMCG_CACHES_MIN_SIZE)
2158 size = MEMCG_CACHES_MIN_SIZE;
2159 else if (size > MEMCG_CACHES_MAX_SIZE)
2160 size = MEMCG_CACHES_MAX_SIZE;
2161
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002162 err = memcg_update_all_caches(size);
Vladimir Davydov05257a12015-02-12 14:59:01 -08002163 if (!err)
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002164 err = memcg_update_all_list_lrus(size);
2165 if (!err)
Vladimir Davydov05257a12015-02-12 14:59:01 -08002166 memcg_nr_cache_ids = size;
2167
2168 up_write(&memcg_cache_ids_sem);
2169
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002170 if (err) {
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002171 ida_simple_remove(&memcg_cache_ida, id);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002172 return err;
2173 }
2174 return id;
2175}
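
/*
 * Growth example (hypothetical): if id 40 is allocated while
 * memcg_nr_cache_ids is 32, the per-memcg cache arrays are resized to
 * 2 * (40 + 1) = 82 entries, clamped to the
 * [MEMCG_CACHES_MIN_SIZE, MEMCG_CACHES_MAX_SIZE] range.
 */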
2176
2177static void memcg_free_cache_id(int id)
2178{
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002179 ida_simple_remove(&memcg_cache_ida, id);
Glauber Costa55007d82012-12-18 14:22:38 -08002180}
2181
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002182struct memcg_kmem_cache_create_work {
Vladimir Davydov5722d092014-04-07 15:39:24 -07002183 struct mem_cgroup *memcg;
2184 struct kmem_cache *cachep;
2185 struct work_struct work;
2186};
2187
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002188static void memcg_kmem_cache_create_func(struct work_struct *w)
Glauber Costad7f25f82012-12-18 14:22:40 -08002189{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002190 struct memcg_kmem_cache_create_work *cw =
2191 container_of(w, struct memcg_kmem_cache_create_work, work);
Vladimir Davydov5722d092014-04-07 15:39:24 -07002192 struct mem_cgroup *memcg = cw->memcg;
2193 struct kmem_cache *cachep = cw->cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002194
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002195 memcg_create_kmem_cache(memcg, cachep);
Vladimir Davydovbd673142014-06-04 16:07:40 -07002196
Vladimir Davydov5722d092014-04-07 15:39:24 -07002197 css_put(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002198 kfree(cw);
2199}
2200
2201/*
2202 * Enqueue the creation of a per-memcg kmem_cache.
Glauber Costad7f25f82012-12-18 14:22:40 -08002203 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002204static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2205 struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002206{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002207 struct memcg_kmem_cache_create_work *cw;
Glauber Costad7f25f82012-12-18 14:22:40 -08002208
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002209 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002210 if (!cw)
Glauber Costad7f25f82012-12-18 14:22:40 -08002211 return;
Vladimir Davydov8135be52014-12-12 16:56:38 -08002212
2213 css_get(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002214
2215 cw->memcg = memcg;
2216 cw->cachep = cachep;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002217 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
Glauber Costad7f25f82012-12-18 14:22:40 -08002218
Tejun Heo17cc4df2017-02-22 15:41:36 -08002219 queue_work(memcg_kmem_cache_wq, &cw->work);
Glauber Costad7f25f82012-12-18 14:22:40 -08002220}
2221
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002222static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2223 struct kmem_cache *cachep)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002224{
2225 /*
2226 * We need to stop accounting when we kmalloc, because if the
2227 * corresponding kmalloc cache is not yet created, the first allocation
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002228 * in __memcg_schedule_kmem_cache_create will recurse.
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002229 *
2230 * However, it is better to enclose the whole function. Depending on
2231 * the debugging options enabled, INIT_WORK(), for instance, can
2232 * trigger an allocation. This too, will make us recurse. Because at
2233 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2234 * the safest choice is to do it like this, wrapping the whole function.
2235 */
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002236 current->memcg_kmem_skip_account = 1;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002237 __memcg_schedule_kmem_cache_create(memcg, cachep);
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002238 current->memcg_kmem_skip_account = 0;
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002239}
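/*
 * Illustrative call chain for the recursion the skip_account guard above
 * prevents (a sketch of the scenario described in the comment, not a
 * trace that can occur with the guard in place):
 *
 *	memcg_kmem_get_cache(cachep)
 *	  memcg_schedule_kmem_cache_create(memcg, cachep)
 *	    kmalloc(sizeof(*cw), GFP_NOWAIT)
 *	      memcg_kmem_get_cache(<kmalloc cache>)	<- recursion
 *
 * With current->memcg_kmem_skip_account set, the nested call returns
 * the root cache immediately instead of scheduling another creation.
 */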
Vladimir Davydovc67a8a62014-06-04 16:07:39 -07002240
Vladimir Davydov45264772016-07-26 15:24:21 -07002241static inline bool memcg_kmem_bypass(void)
2242{
2243 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
2244 return true;
2245 return false;
2246}
2247
2248/**
2249 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
2250 * @cachep: the original global kmem cache
2251 *
Glauber Costad7f25f82012-12-18 14:22:40 -08002252 * Return the kmem_cache we're supposed to use for a slab allocation.
2253 * We try to use the current memcg's version of the cache.
2254 *
Vladimir Davydov45264772016-07-26 15:24:21 -07002255 * If the cache does not exist yet and we are the first user of it, we
2256 * create it asynchronously in a workqueue and let the current allocation
2257 * go through with the original cache.
Glauber Costad7f25f82012-12-18 14:22:40 -08002258 *
Vladimir Davydov45264772016-07-26 15:24:21 -07002259 * This function takes a reference to the cache it returns to ensure it
2260 * won't get destroyed while we are working with it. Once the caller is
2261 * done with it, memcg_kmem_put_cache() must be called to release the
2262 * reference.
Glauber Costad7f25f82012-12-18 14:22:40 -08002263 */
Vladimir Davydov45264772016-07-26 15:24:21 -07002264struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002265{
2266 struct mem_cgroup *memcg;
Vladimir Davydov959c8962014-01-23 15:52:59 -08002267 struct kmem_cache *memcg_cachep;
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002268 int kmemcg_id;
Glauber Costad7f25f82012-12-18 14:22:40 -08002269
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002270 VM_BUG_ON(!is_root_cache(cachep));
Glauber Costad7f25f82012-12-18 14:22:40 -08002271
Vladimir Davydov45264772016-07-26 15:24:21 -07002272 if (memcg_kmem_bypass())
Vladimir Davydov230e9fc2016-01-14 15:18:15 -08002273 return cachep;
2274
Vladimir Davydov9d100c52014-12-12 16:54:53 -08002275 if (current->memcg_kmem_skip_account)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002276 return cachep;
2277
Vladimir Davydov8135be52014-12-12 16:56:38 -08002278 memcg = get_mem_cgroup_from_mm(current->mm);
Jason Low4db0c3c2015-04-15 16:14:08 -07002279 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002280 if (kmemcg_id < 0)
Li Zefanca0dde92013-04-29 15:08:57 -07002281 goto out;
Glauber Costad7f25f82012-12-18 14:22:40 -08002282
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002283 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002284 if (likely(memcg_cachep))
2285 return memcg_cachep;
Li Zefanca0dde92013-04-29 15:08:57 -07002286
2287 /*
2288 * If we are in a safe context (can wait, and not in interrupt
2289 * context), we could be predictable and return right away.
2290 * This would guarantee that the allocation being performed
2291 * already belongs in the new cache.
2292 *
2293 * However, there are some clashes that can arise from locking.
2294 * For instance, because we acquire the slab_mutex while doing
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002295 * memcg_create_kmem_cache, this means no further allocation
2296 * could happen with the slab_mutex held. So it's better to
2297 * defer everything.
Li Zefanca0dde92013-04-29 15:08:57 -07002298 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002299 memcg_schedule_kmem_cache_create(memcg, cachep);
Li Zefanca0dde92013-04-29 15:08:57 -07002300out:
Vladimir Davydov8135be52014-12-12 16:56:38 -08002301 css_put(&memcg->css);
Li Zefanca0dde92013-04-29 15:08:57 -07002302 return cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002303}
Glauber Costad7f25f82012-12-18 14:22:40 -08002304
Vladimir Davydov45264772016-07-26 15:24:21 -07002305/**
2306 * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache
2307 * @cachep: the cache returned by memcg_kmem_get_cache
2308 */
2309void memcg_kmem_put_cache(struct kmem_cache *cachep)
Vladimir Davydov8135be52014-12-12 16:56:38 -08002310{
2311 if (!is_root_cache(cachep))
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002312 css_put(&cachep->memcg_params.memcg->css);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002313}
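/*
 * Minimal usage sketch (illustrative only; the real call sites live in
 * the slab allocators, not in this file):
 *
 *	struct kmem_cache *s;
 *
 *	s = memcg_kmem_get_cache(cachep);
 *	objp = kmem_cache_alloc(s, flags);
 *	memcg_kmem_put_cache(s);
 *
 * memcg_kmem_get_cache() may return @cachep itself; the get/put pairing
 * is required either way, and the put is a no-op for root caches.
 */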
2314
Vladimir Davydov45264772016-07-26 15:24:21 -07002315/**
2316 * memcg_kmem_charge: charge a kmem page
2317 * @page: page to charge
2318 * @gfp: reclaim mode
2319 * @order: allocation order
2320 * @memcg: memory cgroup to charge
2321 *
2322 * Returns 0 on success, an error code on failure.
2323 */
2324int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2325 struct mem_cgroup *memcg)
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002326{
2327 unsigned int nr_pages = 1 << order;
2328 struct page_counter *counter;
Johannes Weiner6071ca52015-11-05 18:50:26 -08002329 int ret;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002330
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002331 ret = try_charge(memcg, gfp, nr_pages);
Johannes Weiner52c29b02016-01-20 15:02:35 -08002332 if (ret)
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002333 return ret;
Johannes Weiner52c29b02016-01-20 15:02:35 -08002334
2335 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2336 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2337 cancel_charge(memcg, nr_pages);
2338 return -ENOMEM;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002339 }
2340
2341 page->mem_cgroup = memcg;
2342
2343 return 0;
2344}
2345
Vladimir Davydov45264772016-07-26 15:24:21 -07002346/**
2347 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
2348 * @page: page to charge
2349 * @gfp: reclaim mode
2350 * @order: allocation order
2351 *
2352 * Returns 0 on success, an error code on failure.
2353 */
2354int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002355{
2356 struct mem_cgroup *memcg;
Vladimir Davydovfcff7d72016-03-17 14:17:29 -07002357 int ret = 0;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002358
Vladimir Davydov45264772016-07-26 15:24:21 -07002359 if (memcg_kmem_bypass())
2360 return 0;
2361
Johannes Weinerdf381972014-04-07 15:37:43 -07002362 memcg = get_mem_cgroup_from_mm(current->mm);
Vladimir Davydovc4159a72016-08-08 23:03:12 +03002363 if (!mem_cgroup_is_root(memcg)) {
Vladimir Davydov45264772016-07-26 15:24:21 -07002364 ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
Vladimir Davydovc4159a72016-08-08 23:03:12 +03002365 if (!ret)
2366 __SetPageKmemcg(page);
2367 }
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002368 css_put(&memcg->css);
Vladimir Davydovd05e83a2015-11-05 18:48:59 -08002369 return ret;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002370}
Vladimir Davydov45264772016-07-26 15:24:21 -07002371/**
2372 * memcg_kmem_uncharge: uncharge a kmem page
2373 * @page: page to uncharge
2374 * @order: allocation order
2375 */
2376void memcg_kmem_uncharge(struct page *page, int order)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002377{
Johannes Weiner1306a852014-12-10 15:44:52 -08002378 struct mem_cgroup *memcg = page->mem_cgroup;
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002379 unsigned int nr_pages = 1 << order;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002380
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002381 if (!memcg)
2382 return;
2383
Sasha Levin309381fea2014-01-23 15:52:54 -08002384 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
Johannes Weiner29833312014-12-10 15:44:02 -08002385
Johannes Weiner52c29b02016-01-20 15:02:35 -08002386 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2387 page_counter_uncharge(&memcg->kmem, nr_pages);
2388
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002389 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08002390 if (do_memsw_account())
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002391 page_counter_uncharge(&memcg->memsw, nr_pages);
2392
Johannes Weiner1306a852014-12-10 15:44:52 -08002393 page->mem_cgroup = NULL;
Vladimir Davydovc4159a72016-08-08 23:03:12 +03002394
2395 /* slab pages do not have PageKmemcg flag set */
2396 if (PageKmemcg(page))
2397 __ClearPageKmemcg(page);
2398
Vladimir Davydovf3ccb2c42015-11-05 18:49:01 -08002399 css_put_many(&memcg->css, nr_pages);
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002400}
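/*
 * Rough pairing sketch (illustrative only; the real callers are the page
 * and slab allocators, for __GFP_ACCOUNT / SLAB_ACCOUNT allocations):
 *
 *	if (memcg_kmem_enabled() && memcg_kmem_charge(page, gfp, order))
 *		goto out_free_page;
 *	...
 *	memcg_kmem_uncharge(page, order);
 *
 * memcg_kmem_charge() records the owning memcg in page->mem_cgroup, so
 * the later memcg_kmem_uncharge() needs only the page and the order.
 * The label above is hypothetical and only marks the failure path.
 */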
Johannes Weiner127424c2016-01-20 15:02:32 -08002401#endif /* !CONFIG_SLOB */
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002402
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002403#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2404
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002405/*
2406 * Because tail pages are not marked as "used", set it. We're under
Mel Gormana52633d2016-07-28 15:45:28 -07002407 * zone_lru_lock and migration entries are set up in all page mappings.
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002408 */
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002409void mem_cgroup_split_huge_fixup(struct page *head)
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002410{
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002411 int i;
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002412
KAMEZAWA Hiroyuki3d37c4a2011-01-25 15:07:28 -08002413 if (mem_cgroup_disabled())
2414 return;
David Rientjesb070e652013-05-07 16:18:09 -07002415
Johannes Weiner29833312014-12-10 15:44:02 -08002416 for (i = 1; i < HPAGE_PMD_NR; i++)
Johannes Weiner1306a852014-12-10 15:44:52 -08002417 head[i].mem_cgroup = head->mem_cgroup;
Michal Hockob9982f82014-12-10 15:43:51 -08002418
Johannes Weinerc9019e92018-01-31 16:16:37 -08002419 __mod_memcg_state(head->mem_cgroup, MEMCG_RSS_HUGE, -HPAGE_PMD_NR);
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002420}
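/*
 * Worked example (assuming 4kB base pages and 2MB THP, i.e.
 * HPAGE_PMD_NR == 512): splitting one huge page copies head->mem_cgroup
 * into the 511 tail pages and subtracts 512 from the memcg's
 * MEMCG_RSS_HUGE counter, since the range is no longer mapped by a
 * single PMD.
 */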
Hugh Dickins12d27102012-01-12 17:19:52 -08002421#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002422
Andrew Mortonc255a452012-07-31 16:43:02 -07002423#ifdef CONFIG_MEMCG_SWAP
Daisuke Nishimura02491442010-03-10 15:22:17 -08002424/**
2425 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2426 * @entry: swap entry to be moved
2427 * @from: mem_cgroup which the entry is moved from
2428 * @to: mem_cgroup which the entry is moved to
2429 *
2430 * It succeeds only when the swap_cgroup's record for this entry is the same
2431 * as the mem_cgroup's id of @from.
2432 *
2433 * Returns 0 on success, -EINVAL on failure.
2434 *
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002435 * The caller must have charged to @to, IOW, called page_counter_charge() for
Daisuke Nishimura02491442010-03-10 15:22:17 -08002436 * both res and memsw, and called css_get().
2437 */
2438static int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07002439 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002440{
2441 unsigned short old_id, new_id;
2442
Li Zefan34c00c32013-09-23 16:56:01 +08002443 old_id = mem_cgroup_id(from);
2444 new_id = mem_cgroup_id(to);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002445
2446 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
Johannes Weinerc9019e92018-01-31 16:16:37 -08002447 mod_memcg_state(from, MEMCG_SWAP, -1);
2448 mod_memcg_state(to, MEMCG_SWAP, 1);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002449 return 0;
2450 }
2451 return -EINVAL;
2452}
2453#else
2454static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07002455 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002456{
2457 return -EINVAL;
2458}
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002459#endif
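/*
 * Worked example for the CONFIG_MEMCG_SWAP variant above (ids made up):
 * if the swap_cgroup record for @entry still holds id 5 == @from, then
 * swap_cgroup_cmpxchg(entry, 5, 7) returns 5, the record now says
 * 7 == @to, and one unit of MEMCG_SWAP accounting moves from @from to
 * @to.  If someone re-owned the entry first, the cmpxchg returns some
 * other id, nothing is changed, and -EINVAL tells the caller to undo its
 * precharge on @to.
 */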
2460
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002461static DEFINE_MUTEX(memcg_limit_mutex);
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07002462
KOSAKI Motohirod38d2a72009-01-06 14:39:44 -08002463static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
Yu Zhaoc054a782018-01-31 16:20:02 -08002464 unsigned long limit, bool memsw)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002465{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002466 unsigned long curusage;
2467 unsigned long oldusage;
2468 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002469 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002470 int ret;
Yu Zhaoc054a782018-01-31 16:20:02 -08002471 bool limits_invariant;
2472 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002473
2474 /*
2475 * To keep hierarchical_reclaim simple, how long we should retry
2476 * depends on the caller. We set our retry count to be a function
2477 * of the number of children we should visit in this loop.
2478 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002479 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2480 mem_cgroup_count_children(memcg);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002481
Yu Zhaoc054a782018-01-31 16:20:02 -08002482 oldusage = page_counter_read(counter);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002483
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002484 do {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002485 if (signal_pending(current)) {
2486 ret = -EINTR;
2487 break;
2488 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002489
2490 mutex_lock(&memcg_limit_mutex);
Yu Zhaoc054a782018-01-31 16:20:02 -08002491 /*
2492 * Make sure that the new limit (memsw or memory limit) doesn't
2493 * break our basic invariant rule memory.limit <= memsw.limit.
2494 */
2495 limits_invariant = memsw ? limit >= memcg->memory.limit :
2496 limit <= memcg->memsw.limit;
2497 if (!limits_invariant) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002498 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002499 ret = -EINVAL;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002500 break;
2501 }
Yu Zhaoc054a782018-01-31 16:20:02 -08002502 if (limit > counter->limit)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002503 enlarge = true;
Yu Zhaoc054a782018-01-31 16:20:02 -08002504 ret = page_counter_limit(counter, limit);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002505 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002506
2507 if (!ret)
2508 break;
2509
Yu Zhaoc054a782018-01-31 16:20:02 -08002510 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, !memsw);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002511
Yu Zhaoc054a782018-01-31 16:20:02 -08002512 curusage = page_counter_read(counter);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002513 /* Usage is reduced ? */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002514 if (curusage >= oldusage)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002515 retry_count--;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002516 else
2517 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002518 } while (retry_count);
2519
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002520 if (!ret && enlarge)
2521 memcg_oom_recover(memcg);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002522
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002523 return ret;
2524}
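/*
 * Worked example for the loop above (numbers are illustrative): lowering
 * memory.limit_in_bytes from 1G to 512M while usage sits at 800M first
 * checks the invariant (the new memory limit must not exceed
 * memsw.limit), then asks the page counter to take the new limit, which
 * only succeeds once usage is at or below it; until then the loop keeps
 * calling try_to_free_mem_cgroup_pages() and retrying, and if the retry
 * budget based on the number of children runs out, the last error from
 * page_counter_limit() is returned to userspace.
 */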
2525
Mel Gormanef8f2322016-07-28 15:46:05 -07002526unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
Andrew Morton0608f432013-09-24 15:27:41 -07002527 gfp_t gfp_mask,
2528 unsigned long *total_scanned)
2529{
2530 unsigned long nr_reclaimed = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07002531 struct mem_cgroup_per_node *mz, *next_mz = NULL;
Andrew Morton0608f432013-09-24 15:27:41 -07002532 unsigned long reclaimed;
2533 int loop = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07002534 struct mem_cgroup_tree_per_node *mctz;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002535 unsigned long excess;
Andrew Morton0608f432013-09-24 15:27:41 -07002536 unsigned long nr_scanned;
2537
2538 if (order > 0)
2539 return 0;
2540
Mel Gormanef8f2322016-07-28 15:46:05 -07002541 mctz = soft_limit_tree_node(pgdat->node_id);
Michal Hockod6507ff2016-08-02 14:02:37 -07002542
2543 /*
2544 * Do not even bother to check the largest node if the root
2545 * is empty. Do it lockless to prevent lock bouncing. Races
2546 * are acceptable as soft limit is best effort anyway.
2547 */
Laurent Dufourbfc72282017-03-09 16:17:06 -08002548 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
Michal Hockod6507ff2016-08-02 14:02:37 -07002549 return 0;
2550
Andrew Morton0608f432013-09-24 15:27:41 -07002551 /*
2552 * This loop can run for a while, especially if mem_cgroups continuously
2553 * keep exceeding their soft limit and putting the system under
2554 * pressure.
2555 */
2556 do {
2557 if (next_mz)
2558 mz = next_mz;
2559 else
2560 mz = mem_cgroup_largest_soft_limit_node(mctz);
2561 if (!mz)
2562 break;
2563
2564 nr_scanned = 0;
Mel Gormanef8f2322016-07-28 15:46:05 -07002565 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
Andrew Morton0608f432013-09-24 15:27:41 -07002566 gfp_mask, &nr_scanned);
2567 nr_reclaimed += reclaimed;
2568 *total_scanned += nr_scanned;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002569 spin_lock_irq(&mctz->lock);
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08002570 __mem_cgroup_remove_exceeded(mz, mctz);
Andrew Morton0608f432013-09-24 15:27:41 -07002571
2572 /*
2573 * If we failed to reclaim anything from this memory cgroup
2574 * it is time to move on to the next cgroup
2575 */
2576 next_mz = NULL;
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08002577 if (!reclaimed)
2578 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2579
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002580 excess = soft_limit_excess(mz->memcg);
Andrew Morton0608f432013-09-24 15:27:41 -07002581 /*
2582 * One school of thought says that we should not add
2583 * back the node to the tree if reclaim returns 0.
2584 * But our reclaim could return 0 simply because, due
2585 * to priority, we are exposing a smaller subset of
2586 * memory to reclaim from. Consider this as a longer
2587 * term TODO.
2588 */
2589 /* If excess == 0, no tree ops */
Johannes Weinercf2c8122014-06-06 14:38:21 -07002590 __mem_cgroup_insert_exceeded(mz, mctz, excess);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002591 spin_unlock_irq(&mctz->lock);
Andrew Morton0608f432013-09-24 15:27:41 -07002592 css_put(&mz->memcg->css);
2593 loop++;
2594 /*
2595 * Could not reclaim anything and there are no more
2596 * mem cgroups to try or we seem to be looping without
2597 * reclaiming anything.
2598 */
2599 if (!nr_reclaimed &&
2600 (next_mz == NULL ||
2601 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2602 break;
2603 } while (!nr_reclaimed);
2604 if (next_mz)
2605 css_put(&next_mz->memcg->css);
2606 return nr_reclaimed;
2607}
2608
Tejun Heoea280e72014-05-16 13:22:48 -04002609/*
2610 * Test whether @memcg has children, dead or alive. Note that this
2611 * function doesn't care whether @memcg has use_hierarchy enabled and
2612 * returns %true if there are child csses according to the cgroup
2613 * hierarchy. Testing use_hierarchy is the caller's responsibility.
2614 */
Glauber Costab5f99b52013-02-22 16:34:53 -08002615static inline bool memcg_has_children(struct mem_cgroup *memcg)
2616{
Tejun Heoea280e72014-05-16 13:22:48 -04002617 bool ret;
2618
Tejun Heoea280e72014-05-16 13:22:48 -04002619 rcu_read_lock();
2620 ret = css_next_child(NULL, &memcg->css);
2621 rcu_read_unlock();
2622 return ret;
Glauber Costab5f99b52013-02-22 16:34:53 -08002623}
2624
2625/*
Greg Thelen51038172016-05-20 16:58:18 -07002626 * Reclaims as many pages from the given memcg as possible.
Michal Hockoc26251f2012-10-26 13:37:28 +02002627 *
2628 * Caller is responsible for holding css reference for memcg.
2629 */
2630static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2631{
2632 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Michal Hockoc26251f2012-10-26 13:37:28 +02002633
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002634 /* we call try-to-free pages for make this cgroup empty */
2635 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002636 /* try to free all pages in this cgroup */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002637 while (nr_retries && page_counter_read(&memcg->memory)) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002638 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002639
Michal Hockoc26251f2012-10-26 13:37:28 +02002640 if (signal_pending(current))
2641 return -EINTR;
2642
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002643 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2644 GFP_KERNEL, true);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002645 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002646 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002647 /* maybe some writeback is necessary */
Jens Axboe8aa7e842009-07-09 14:52:32 +02002648 congestion_wait(BLK_RW_ASYNC, HZ/10);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002649 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002650
2651 }
Michal Hockoab5196c2012-10-26 13:37:32 +02002652
2653 return 0;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08002654}
2655
Tejun Heo6770c642014-05-13 12:16:21 -04002656static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2657 char *buf, size_t nbytes,
2658 loff_t off)
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002659{
Tejun Heo6770c642014-05-13 12:16:21 -04002660 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Michal Hockoc26251f2012-10-26 13:37:28 +02002661
Michal Hockod8423012012-10-26 13:37:29 +02002662 if (mem_cgroup_is_root(memcg))
2663 return -EINVAL;
Tejun Heo6770c642014-05-13 12:16:21 -04002664 return mem_cgroup_force_empty(memcg) ?: nbytes;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08002665}
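/*
 * Userspace side of the above (cgroup v1): writing anything to
 * memory.force_empty, e.g.
 *
 *	echo 0 > /sys/fs/cgroup/memory/<group>/memory.force_empty
 *
 * ends up in mem_cgroup_force_empty_write() and tries to reclaim every
 * page charged to the group; the write is rejected with -EINVAL on the
 * root cgroup.  The path above is only the conventional mount point.
 */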
2666
Tejun Heo182446d2013-08-08 20:11:24 -04002667static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2668 struct cftype *cft)
Balbir Singh18f59ea2009-01-07 18:08:07 -08002669{
Tejun Heo182446d2013-08-08 20:11:24 -04002670 return mem_cgroup_from_css(css)->use_hierarchy;
Balbir Singh18f59ea2009-01-07 18:08:07 -08002671}
2672
Tejun Heo182446d2013-08-08 20:11:24 -04002673static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2674 struct cftype *cft, u64 val)
Balbir Singh18f59ea2009-01-07 18:08:07 -08002675{
2676 int retval = 0;
Tejun Heo182446d2013-08-08 20:11:24 -04002677 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04002678 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08002679
Glauber Costa567fb432012-07-31 16:43:07 -07002680 if (memcg->use_hierarchy == val)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002681 return 0;
Glauber Costa567fb432012-07-31 16:43:07 -07002682
Balbir Singh18f59ea2009-01-07 18:08:07 -08002683 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002684 * If parent's use_hierarchy is set, we can't make any modifications
Balbir Singh18f59ea2009-01-07 18:08:07 -08002685 * in the child subtrees. If it is unset, then the change can
2686 * occur, provided the current cgroup has no children.
2687 *
2688 * For the root cgroup, parent_memcg is NULL, so we allow the value to be
2689 * set if there are no children.
2690 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002691 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
Balbir Singh18f59ea2009-01-07 18:08:07 -08002692 (val == 1 || val == 0)) {
Tejun Heoea280e72014-05-16 13:22:48 -04002693 if (!memcg_has_children(memcg))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002694 memcg->use_hierarchy = val;
Balbir Singh18f59ea2009-01-07 18:08:07 -08002695 else
2696 retval = -EBUSY;
2697 } else
2698 retval = -EINVAL;
Glauber Costa567fb432012-07-31 16:43:07 -07002699
Balbir Singh18f59ea2009-01-07 18:08:07 -08002700 return retval;
2701}
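/*
 * Example of the rules above (cgroup v1, assuming the parent group does
 * not itself have use_hierarchy set):
 *
 *	echo 1 > /sys/fs/cgroup/memory/A/memory.use_hierarchy
 *
 * succeeds only while A has no children; once a child A/B exists the
 * write fails with -EBUSY, and if the parent's use_hierarchy is already
 * set the write is rejected with -EINVAL instead.
 */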
2702
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002703static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
Johannes Weinerce00a962014-09-05 08:43:57 -04002704{
2705 struct mem_cgroup *iter;
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002706 int i;
Johannes Weinerce00a962014-09-05 08:43:57 -04002707
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002708 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
Johannes Weinerce00a962014-09-05 08:43:57 -04002709
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002710 for_each_mem_cgroup_tree(iter, memcg) {
2711 for (i = 0; i < MEMCG_NR_STAT; i++)
Johannes Weinerccda7f42017-05-03 14:55:16 -07002712 stat[i] += memcg_page_state(iter, i);
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002713 }
Johannes Weinerce00a962014-09-05 08:43:57 -04002714}
2715
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002716static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
Johannes Weiner587d9f72016-01-20 15:03:19 -08002717{
2718 struct mem_cgroup *iter;
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002719 int i;
Johannes Weiner587d9f72016-01-20 15:03:19 -08002720
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002721 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
Johannes Weiner587d9f72016-01-20 15:03:19 -08002722
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002723 for_each_mem_cgroup_tree(iter, memcg) {
2724 for (i = 0; i < MEMCG_NR_EVENTS; i++)
Johannes Weinerccda7f42017-05-03 14:55:16 -07002725 events[i] += memcg_sum_events(iter, i);
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002726 }
Johannes Weiner587d9f72016-01-20 15:03:19 -08002727}
2728
Andrew Morton6f646152015-11-06 16:28:58 -08002729static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
Johannes Weinerce00a962014-09-05 08:43:57 -04002730{
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002731 unsigned long val = 0;
Johannes Weinerce00a962014-09-05 08:43:57 -04002732
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002733 if (mem_cgroup_is_root(memcg)) {
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002734 struct mem_cgroup *iter;
2735
2736 for_each_mem_cgroup_tree(iter, memcg) {
Johannes Weinerccda7f42017-05-03 14:55:16 -07002737 val += memcg_page_state(iter, MEMCG_CACHE);
2738 val += memcg_page_state(iter, MEMCG_RSS);
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002739 if (swap)
Johannes Weinerccda7f42017-05-03 14:55:16 -07002740 val += memcg_page_state(iter, MEMCG_SWAP);
Vladimir Davydov72b54e72016-03-17 14:17:32 -07002741 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002742 } else {
Johannes Weinerce00a962014-09-05 08:43:57 -04002743 if (!swap)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002744 val = page_counter_read(&memcg->memory);
Johannes Weinerce00a962014-09-05 08:43:57 -04002745 else
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002746 val = page_counter_read(&memcg->memsw);
Johannes Weinerce00a962014-09-05 08:43:57 -04002747 }
Michal Hockoc12176d2015-11-05 18:50:29 -08002748 return val;
Johannes Weinerce00a962014-09-05 08:43:57 -04002749}
2750
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002751enum {
2752 RES_USAGE,
2753 RES_LIMIT,
2754 RES_MAX_USAGE,
2755 RES_FAILCNT,
2756 RES_SOFT_LIMIT,
2757};
Johannes Weinerce00a962014-09-05 08:43:57 -04002758
Tejun Heo791badb2013-12-05 12:28:02 -05002759static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
Johannes Weiner05b84302014-08-06 16:05:59 -07002760 struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08002761{
Tejun Heo182446d2013-08-08 20:11:24 -04002762 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002763 struct page_counter *counter;
Tejun Heoaf36f902012-04-01 12:09:55 -07002764
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002765 switch (MEMFILE_TYPE(cft->private)) {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002766 case _MEM:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002767 counter = &memcg->memory;
Glauber Costa510fc4e2012-12-18 14:21:47 -08002768 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002769 case _MEMSWAP:
2770 counter = &memcg->memsw;
2771 break;
2772 case _KMEM:
2773 counter = &memcg->kmem;
2774 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002775 case _TCP:
Johannes Weiner0db15292016-01-20 15:02:50 -08002776 counter = &memcg->tcpmem;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002777 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002778 default:
2779 BUG();
2780 }
2781
2782 switch (MEMFILE_ATTR(cft->private)) {
2783 case RES_USAGE:
2784 if (counter == &memcg->memory)
Michal Hockoc12176d2015-11-05 18:50:29 -08002785 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002786 if (counter == &memcg->memsw)
Michal Hockoc12176d2015-11-05 18:50:29 -08002787 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002788 return (u64)page_counter_read(counter) * PAGE_SIZE;
2789 case RES_LIMIT:
2790 return (u64)counter->limit * PAGE_SIZE;
2791 case RES_MAX_USAGE:
2792 return (u64)counter->watermark * PAGE_SIZE;
2793 case RES_FAILCNT:
2794 return counter->failcnt;
2795 case RES_SOFT_LIMIT:
2796 return (u64)memcg->soft_limit * PAGE_SIZE;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002797 default:
2798 BUG();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002799 }
Balbir Singh8cdea7c2008-02-07 00:13:50 -08002800}
Glauber Costa510fc4e2012-12-18 14:21:47 -08002801
Johannes Weiner127424c2016-01-20 15:02:32 -08002802#ifndef CONFIG_SLOB
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002803static int memcg_online_kmem(struct mem_cgroup *memcg)
Vladimir Davydovd6441632014-01-23 15:53:09 -08002804{
Vladimir Davydovd6441632014-01-23 15:53:09 -08002805 int memcg_id;
2806
Vladimir Davydovb313aee2016-03-17 14:18:27 -07002807 if (cgroup_memory_nokmem)
2808 return 0;
2809
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002810 BUG_ON(memcg->kmemcg_id >= 0);
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002811 BUG_ON(memcg->kmem_state);
Vladimir Davydovd6441632014-01-23 15:53:09 -08002812
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002813 memcg_id = memcg_alloc_cache_id();
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002814 if (memcg_id < 0)
2815 return memcg_id;
Vladimir Davydovd6441632014-01-23 15:53:09 -08002816
Johannes Weineref129472016-01-14 15:21:34 -08002817 static_branch_inc(&memcg_kmem_enabled_key);
Vladimir Davydovd6441632014-01-23 15:53:09 -08002818 /*
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002819 * A memory cgroup is considered kmem-online as soon as it gets
Vladimir Davydov900a38f2014-12-12 16:55:10 -08002820 * kmemcg_id. Setting the id after enabling static branching will
Vladimir Davydovd6441632014-01-23 15:53:09 -08002821 * guarantee no one starts accounting before all call sites are
2822 * patched.
2823 */
Vladimir Davydov900a38f2014-12-12 16:55:10 -08002824 memcg->kmemcg_id = memcg_id;
Johannes Weiner567e9ab2016-01-20 15:02:24 -08002825 memcg->kmem_state = KMEM_ONLINE;
Tejun Heobc2791f2017-02-22 15:41:21 -08002826 INIT_LIST_HEAD(&memcg->kmem_caches);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002827
2828 return 0;
Vladimir Davydovd6441632014-01-23 15:53:09 -08002829}
2830
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002831static void memcg_offline_kmem(struct mem_cgroup *memcg)
2832{
2833 struct cgroup_subsys_state *css;
2834 struct mem_cgroup *parent, *child;
2835 int kmemcg_id;
2836
2837 if (memcg->kmem_state != KMEM_ONLINE)
2838 return;
2839 /*
2840 * Clear the online state before clearing memcg_caches array
2841 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2842 * guarantees that no cache will be created for this cgroup
2843 * after we are done (see memcg_create_kmem_cache()).
2844 */
2845 memcg->kmem_state = KMEM_ALLOCATED;
2846
2847 memcg_deactivate_kmem_caches(memcg);
2848
2849 kmemcg_id = memcg->kmemcg_id;
2850 BUG_ON(kmemcg_id < 0);
2851
2852 parent = parent_mem_cgroup(memcg);
2853 if (!parent)
2854 parent = root_mem_cgroup;
2855
2856 /*
2857 * Change kmemcg_id of this cgroup and all its descendants to the
2858 * parent's id, and then move all entries from this cgroup's list_lrus
2859 * to those of the parent. After we have finished, all list_lrus
2860 * corresponding to this cgroup are guaranteed to remain empty. The
2861 * ordering is imposed by list_lru_node->lock taken by
2862 * memcg_drain_all_list_lrus().
2863 */
Tejun Heo3a06bb72016-06-03 14:55:44 -07002864 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002865 css_for_each_descendant_pre(css, &memcg->css) {
2866 child = mem_cgroup_from_css(css);
2867 BUG_ON(child->kmemcg_id != kmemcg_id);
2868 child->kmemcg_id = parent->kmemcg_id;
2869 if (!memcg->use_hierarchy)
2870 break;
2871 }
Tejun Heo3a06bb72016-06-03 14:55:44 -07002872 rcu_read_unlock();
2873
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002874 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2875
2876 memcg_free_cache_id(kmemcg_id);
2877}
2878
2879static void memcg_free_kmem(struct mem_cgroup *memcg)
2880{
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002881 /* css_alloc() failed, offlining didn't happen */
2882 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2883 memcg_offline_kmem(memcg);
2884
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002885 if (memcg->kmem_state == KMEM_ALLOCATED) {
2886 memcg_destroy_kmem_caches(memcg);
2887 static_branch_dec(&memcg_kmem_enabled_key);
2888 WARN_ON(page_counter_read(&memcg->kmem));
2889 }
Johannes Weiner8e0a8912016-01-20 15:02:26 -08002890}
Vladimir Davydovd6441632014-01-23 15:53:09 -08002891#else
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08002892static int memcg_online_kmem(struct mem_cgroup *memcg)
Johannes Weiner127424c2016-01-20 15:02:32 -08002893{
2894 return 0;
2895}
2896static void memcg_offline_kmem(struct mem_cgroup *memcg)
2897{
2898}
2899static void memcg_free_kmem(struct mem_cgroup *memcg)
2900{
2901}
2902#endif /* !CONFIG_SLOB */
2903
Johannes Weiner127424c2016-01-20 15:02:32 -08002904static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2905 unsigned long limit)
2906{
Vladimir Davydovb313aee2016-03-17 14:18:27 -07002907 int ret;
Johannes Weiner127424c2016-01-20 15:02:32 -08002908
2909 mutex_lock(&memcg_limit_mutex);
Johannes Weiner127424c2016-01-20 15:02:32 -08002910 ret = page_counter_limit(&memcg->kmem, limit);
Johannes Weiner127424c2016-01-20 15:02:32 -08002911 mutex_unlock(&memcg_limit_mutex);
2912 return ret;
2913}
Glauber Costa510fc4e2012-12-18 14:21:47 -08002914
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002915static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2916{
2917 int ret;
2918
2919 mutex_lock(&memcg_limit_mutex);
2920
Johannes Weiner0db15292016-01-20 15:02:50 -08002921 ret = page_counter_limit(&memcg->tcpmem, limit);
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002922 if (ret)
2923 goto out;
2924
Johannes Weiner0db15292016-01-20 15:02:50 -08002925 if (!memcg->tcpmem_active) {
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002926 /*
2927 * The active flag needs to be written after the static_key
2928 * update. This is what guarantees that the socket activation
Johannes Weiner2d758072016-10-07 17:00:58 -07002929 * function is the last one to run. See mem_cgroup_sk_alloc()
2930 * for details, and note that we don't mark any socket as
2931 * belonging to this memcg until that flag is up.
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002932 *
2933 * We need to do this, because static_keys will span multiple
2934 * sites, but we can't control their order. If we mark a socket
2935 * as accounted, but the accounting functions are not patched in
2936 * yet, we'll lose accounting.
2937 *
Johannes Weiner2d758072016-10-07 17:00:58 -07002938 * We never race with the readers in mem_cgroup_sk_alloc(),
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002939 * because when this value change, the code to process it is not
2940 * patched in yet.
2941 */
2942 static_branch_inc(&memcg_sockets_enabled_key);
Johannes Weiner0db15292016-01-20 15:02:50 -08002943 memcg->tcpmem_active = true;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002944 }
2945out:
2946 mutex_unlock(&memcg_limit_mutex);
2947 return ret;
2948}
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002949
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002950/*
2951 * The user of this function is...
2952 * RES_LIMIT.
2953 */
Tejun Heo451af502014-05-13 12:16:21 -04002954static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2955 char *buf, size_t nbytes, loff_t off)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08002956{
Tejun Heo451af502014-05-13 12:16:21 -04002957 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002958 unsigned long nr_pages;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002959 int ret;
2960
Tejun Heo451af502014-05-13 12:16:21 -04002961 buf = strstrip(buf);
Johannes Weiner650c5e52015-02-11 15:26:03 -08002962 ret = page_counter_memparse(buf, "-1", &nr_pages);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002963 if (ret)
2964 return ret;
Tejun Heoaf36f902012-04-01 12:09:55 -07002965
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002966 switch (MEMFILE_ATTR(of_cft(of)->private)) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002967 case RES_LIMIT:
Balbir Singh4b3bde42009-09-23 15:56:32 -07002968 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2969 ret = -EINVAL;
2970 break;
2971 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002972 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2973 case _MEM:
Yu Zhaoc054a782018-01-31 16:20:02 -08002974 ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002975 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002976 case _MEMSWAP:
Yu Zhaoc054a782018-01-31 16:20:02 -08002977 ret = mem_cgroup_resize_limit(memcg, nr_pages, true);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002978 break;
2979 case _KMEM:
2980 ret = memcg_update_kmem_limit(memcg, nr_pages);
2981 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08002982 case _TCP:
2983 ret = memcg_update_tcp_limit(memcg, nr_pages);
2984 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002985 }
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002986 break;
Balbir Singh296c81d2009-09-23 15:56:36 -07002987 case RES_SOFT_LIMIT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002988 memcg->soft_limit = nr_pages;
2989 ret = 0;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002990 break;
2991 }
Tejun Heo451af502014-05-13 12:16:21 -04002992 return ret ?: nbytes;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08002993}
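/*
 * Example (cgroup v1, assuming 4kB pages): a write like
 *
 *	echo 100M > memory.limit_in_bytes
 *
 * arrives here with buf == "100M"; page_counter_memparse() turns it into
 * nr_pages == 25600, and the _MEM/RES_LIMIT case hands that to
 * mem_cgroup_resize_limit().  The same handler also serves
 * memory.memsw.limit_in_bytes, memory.kmem.limit_in_bytes,
 * memory.kmem.tcp.limit_in_bytes and memory.soft_limit_in_bytes.
 */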
2994
Tejun Heo6770c642014-05-13 12:16:21 -04002995static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
2996 size_t nbytes, loff_t off)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07002997{
Tejun Heo6770c642014-05-13 12:16:21 -04002998 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002999 struct page_counter *counter;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003000
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003001 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3002 case _MEM:
3003 counter = &memcg->memory;
3004 break;
3005 case _MEMSWAP:
3006 counter = &memcg->memsw;
3007 break;
3008 case _KMEM:
3009 counter = &memcg->kmem;
3010 break;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003011 case _TCP:
Johannes Weiner0db15292016-01-20 15:02:50 -08003012 counter = &memcg->tcpmem;
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08003013 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003014 default:
3015 BUG();
3016 }
Tejun Heoaf36f902012-04-01 12:09:55 -07003017
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003018 switch (MEMFILE_ATTR(of_cft(of)->private)) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003019 case RES_MAX_USAGE:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003020 page_counter_reset_watermark(counter);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003021 break;
3022 case RES_FAILCNT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003023 counter->failcnt = 0;
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003024 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003025 default:
3026 BUG();
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003027 }
Balbir Singhf64c3f52009-09-23 15:56:37 -07003028
Tejun Heo6770c642014-05-13 12:16:21 -04003029 return nbytes;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003030}
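/*
 * Example (cgroup v1): these resets are driven by writes such as
 *
 *	echo 0 > memory.max_usage_in_bytes	(reset the watermark)
 *	echo 0 > memory.failcnt			(reset the failure count)
 *
 * and the equivalent memsw/kmem/kmem.tcp files; the written value is
 * ignored, only the file identity (MEMFILE_TYPE/MEMFILE_ATTR) matters.
 */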
3031
Tejun Heo182446d2013-08-08 20:11:24 -04003032static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003033 struct cftype *cft)
3034{
Tejun Heo182446d2013-08-08 20:11:24 -04003035 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003036}
3037
Daisuke Nishimura02491442010-03-10 15:22:17 -08003038#ifdef CONFIG_MMU
Tejun Heo182446d2013-08-08 20:11:24 -04003039static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003040 struct cftype *cft, u64 val)
3041{
Tejun Heo182446d2013-08-08 20:11:24 -04003042 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003043
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08003044 if (val & ~MOVE_MASK)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003045 return -EINVAL;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003046
Glauber Costaee5e8472013-02-22 16:34:50 -08003047 /*
3048 * No kind of locking is needed in here, because ->can_attach() will
3049 * check this value once in the beginning of the process, and then carry
3050 * on with stale data. This means that changes to this value will only
3051 * affect task migrations starting after the change.
3052 */
3053 memcg->move_charge_at_immigrate = val;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003054 return 0;
3055}
Daisuke Nishimura02491442010-03-10 15:22:17 -08003056#else
Tejun Heo182446d2013-08-08 20:11:24 -04003057static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura02491442010-03-10 15:22:17 -08003058 struct cftype *cft, u64 val)
3059{
3060 return -ENOSYS;
3061}
3062#endif
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003063
Ying Han406eb0c2011-05-26 16:25:37 -07003064#ifdef CONFIG_NUMA
Tejun Heo2da8ca82013-12-05 12:28:04 -05003065static int memcg_numa_stat_show(struct seq_file *m, void *v)
Ying Han406eb0c2011-05-26 16:25:37 -07003066{
Greg Thelen25485de2013-11-12 15:07:40 -08003067 struct numa_stat {
3068 const char *name;
3069 unsigned int lru_mask;
3070 };
3071
3072 static const struct numa_stat stats[] = {
3073 { "total", LRU_ALL },
3074 { "file", LRU_ALL_FILE },
3075 { "anon", LRU_ALL_ANON },
3076 { "unevictable", BIT(LRU_UNEVICTABLE) },
3077 };
3078 const struct numa_stat *stat;
Ying Han406eb0c2011-05-26 16:25:37 -07003079 int nid;
Greg Thelen25485de2013-11-12 15:07:40 -08003080 unsigned long nr;
Tejun Heo2da8ca82013-12-05 12:28:04 -05003081 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Ying Han406eb0c2011-05-26 16:25:37 -07003082
Greg Thelen25485de2013-11-12 15:07:40 -08003083 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3084 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3085 seq_printf(m, "%s=%lu", stat->name, nr);
3086 for_each_node_state(nid, N_MEMORY) {
3087 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3088 stat->lru_mask);
3089 seq_printf(m, " N%d=%lu", nid, nr);
3090 }
3091 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003092 }
Ying Han406eb0c2011-05-26 16:25:37 -07003093
Ying Han071aee12013-11-12 15:07:41 -08003094 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3095 struct mem_cgroup *iter;
Ying Han406eb0c2011-05-26 16:25:37 -07003096
Ying Han071aee12013-11-12 15:07:41 -08003097 nr = 0;
3098 for_each_mem_cgroup_tree(iter, memcg)
3099 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3100 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3101 for_each_node_state(nid, N_MEMORY) {
3102 nr = 0;
3103 for_each_mem_cgroup_tree(iter, memcg)
3104 nr += mem_cgroup_node_nr_lru_pages(
3105 iter, nid, stat->lru_mask);
3106 seq_printf(m, " N%d=%lu", nid, nr);
3107 }
3108 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003109 }
Ying Han406eb0c2011-05-26 16:25:37 -07003110
Ying Han406eb0c2011-05-26 16:25:37 -07003111 return 0;
3112}
3113#endif /* CONFIG_NUMA */
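/*
 * Shape of the memory.numa_stat output generated above (node ids and
 * counts, in pages, are illustrative):
 *
 *	total=2000 N0=1500 N1=500
 *	file=1200 N0=1000 N1=200
 *	anon=800 N0=500 N1=300
 *	unevictable=0 N0=0 N1=0
 *	hierarchical_total=... (the same four rows again, summed over
 *	the subtree with for_each_mem_cgroup_tree())
 */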
3114
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07003115/* Universal VM events cgroup1 shows, original sort order */
3116unsigned int memcg1_events[] = {
3117 PGPGIN,
3118 PGPGOUT,
3119 PGFAULT,
3120 PGMAJFAULT,
3121};
3122
3123static const char *const memcg1_event_names[] = {
3124 "pgpgin",
3125 "pgpgout",
3126 "pgfault",
3127 "pgmajfault",
3128};
3129
Tejun Heo2da8ca82013-12-05 12:28:04 -05003130static int memcg_stat_show(struct seq_file *m, void *v)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003131{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003132 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003133 unsigned long memory, memsw;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003134 struct mem_cgroup *mi;
3135 unsigned int i;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003136
Johannes Weiner71cd3112017-05-03 14:55:13 -07003137 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
Rickard Strandqvist70bc0682014-12-12 16:56:41 -08003138 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3139
Johannes Weiner71cd3112017-05-03 14:55:13 -07003140 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3141 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003142 continue;
Johannes Weiner71cd3112017-05-03 14:55:13 -07003143 seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
Johannes Weinerccda7f42017-05-03 14:55:16 -07003144 memcg_page_state(memcg, memcg1_stats[i]) *
Johannes Weiner71cd3112017-05-03 14:55:13 -07003145 PAGE_SIZE);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003146 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08003147
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07003148 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3149 seq_printf(m, "%s %lu\n", memcg1_event_names[i],
Johannes Weinerccda7f42017-05-03 14:55:16 -07003150 memcg_sum_events(memcg, memcg1_events[i]));
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003151
3152 for (i = 0; i < NR_LRU_LISTS; i++)
3153 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3154 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3155
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003156 /* Hierarchical information */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003157 memory = memsw = PAGE_COUNTER_MAX;
3158 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3159 memory = min(memory, mi->memory.limit);
3160 memsw = min(memsw, mi->memsw.limit);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08003161 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003162 seq_printf(m, "hierarchical_memory_limit %llu\n",
3163 (u64)memory * PAGE_SIZE);
Johannes Weiner7941d212016-01-14 15:21:23 -08003164 if (do_memsw_account())
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003165 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3166 (u64)memsw * PAGE_SIZE);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003167
Johannes Weiner71cd3112017-05-03 14:55:13 -07003168 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
Greg Thelen484ebb32015-10-01 15:37:05 -07003169 unsigned long long val = 0;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003170
Johannes Weiner71cd3112017-05-03 14:55:13 -07003171 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003172 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003173 for_each_mem_cgroup_tree(mi, memcg)
Johannes Weinerccda7f42017-05-03 14:55:16 -07003174 val += memcg_page_state(mi, memcg1_stats[i]) *
Johannes Weiner71cd3112017-05-03 14:55:13 -07003175 PAGE_SIZE;
3176 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003177 }
3178
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07003179 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) {
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003180 unsigned long long val = 0;
3181
3182 for_each_mem_cgroup_tree(mi, memcg)
Johannes Weinerccda7f42017-05-03 14:55:16 -07003183 val += memcg_sum_events(mi, memcg1_events[i]);
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07003184 seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003185 }
3186
3187 for (i = 0; i < NR_LRU_LISTS; i++) {
3188 unsigned long long val = 0;
3189
3190 for_each_mem_cgroup_tree(mi, memcg)
3191 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3192 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003193 }
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003194
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003195#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003196 {
Mel Gormanef8f2322016-07-28 15:46:05 -07003197 pg_data_t *pgdat;
3198 struct mem_cgroup_per_node *mz;
Hugh Dickins89abfab2012-05-29 15:06:53 -07003199 struct zone_reclaim_stat *rstat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003200 unsigned long recent_rotated[2] = {0, 0};
3201 unsigned long recent_scanned[2] = {0, 0};
3202
Mel Gormanef8f2322016-07-28 15:46:05 -07003203 for_each_online_pgdat(pgdat) {
3204 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3205 rstat = &mz->lruvec.reclaim_stat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003206
Mel Gormanef8f2322016-07-28 15:46:05 -07003207 recent_rotated[0] += rstat->recent_rotated[0];
3208 recent_rotated[1] += rstat->recent_rotated[1];
3209 recent_scanned[0] += rstat->recent_scanned[0];
3210 recent_scanned[1] += rstat->recent_scanned[1];
3211 }
Johannes Weiner78ccf5b2012-05-29 15:07:06 -07003212 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3213 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3214 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3215 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003216 }
3217#endif
3218
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003219 return 0;
3220}
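/*
 * Rough shape of the memory.stat output assembled above (names come from
 * memcg1_stat_names, memcg1_event_names and mem_cgroup_lru_names; values
 * are illustrative): first the local stats in bytes, e.g.
 *
 *	cache 1048576
 *	rss 4096
 *
 * then the event counters (pgpgin, pgpgout, ...), the per-LRU sizes,
 * hierarchical_memory_limit (plus the memsw variant when swap accounting
 * is on), the same stats prefixed with "total_" summed over the subtree,
 * and finally the recent_rotated/recent_scanned lines when
 * CONFIG_DEBUG_VM is enabled.
 */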
3221
Tejun Heo182446d2013-08-08 20:11:24 -04003222static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3223 struct cftype *cft)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003224{
Tejun Heo182446d2013-08-08 20:11:24 -04003225 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003226
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07003227 return mem_cgroup_swappiness(memcg);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003228}
3229
Tejun Heo182446d2013-08-08 20:11:24 -04003230static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3231 struct cftype *cft, u64 val)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003232{
Tejun Heo182446d2013-08-08 20:11:24 -04003233 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Li Zefan068b38c2009-01-15 13:51:26 -08003234
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003235 if (val > 100)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003236 return -EINVAL;
3237
Linus Torvalds14208b02014-06-09 15:03:33 -07003238 if (css->parent)
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003239 memcg->swappiness = val;
3240 else
3241 vm_swappiness = val;
Li Zefan068b38c2009-01-15 13:51:26 -08003242
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003243 return 0;
3244}
3245
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003246static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3247{
3248 struct mem_cgroup_threshold_ary *t;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003249 unsigned long usage;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003250 int i;
3251
3252 rcu_read_lock();
3253 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003254 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003255 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003256 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003257
3258 if (!t)
3259 goto unlock;
3260
Johannes Weinerce00a962014-09-05 08:43:57 -04003261 usage = mem_cgroup_usage(memcg, swap);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003262
3263 /*
Sha Zhengju748dad32012-05-29 15:06:57 -07003264 * current_threshold points to the threshold just below or equal to usage.
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003265 * If that is not the case, a threshold was crossed after the last
3266 * call of __mem_cgroup_threshold().
3267 */
Phil Carmody5407a562010-05-26 14:42:42 -07003268 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003269
3270 /*
3271 * Iterate backward over array of thresholds starting from
3272 * current_threshold and check if a threshold is crossed.
3273 * If none of thresholds below usage is crossed, we read
3274 * only one element of the array here.
3275 */
3276 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3277 eventfd_signal(t->entries[i].eventfd, 1);
3278
3279 /* i = current_threshold + 1 */
3280 i++;
3281
3282 /*
3283 * Iterate forward over array of thresholds starting from
3284 * current_threshold+1 and check if a threshold is crossed.
3285 * If none of thresholds above usage is crossed, we read
3286 * only one element of the array here.
3287 */
3288 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3289 eventfd_signal(t->entries[i].eventfd, 1);
3290
3291 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07003292 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003293unlock:
3294 rcu_read_unlock();
3295}
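/*
 * Worked example for the two walks above (threshold values made up):
 * with thresholds of 4M, 8M and 16M and current_threshold pointing at
 * 4M (last seen usage 6M), a rise to 10M runs only the forward walk and
 * signals the 8M eventfd; a later drop to 3M runs only the backward
 * walk and signals the 8M and 4M eventfds.  current_threshold ends up
 * at the highest threshold that is still <= usage, or -1 if there is
 * none.
 */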
3296
3297static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3298{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003299 while (memcg) {
3300 __mem_cgroup_threshold(memcg, false);
Johannes Weiner7941d212016-01-14 15:21:23 -08003301 if (do_memsw_account())
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003302 __mem_cgroup_threshold(memcg, true);
3303
3304 memcg = parent_mem_cgroup(memcg);
3305 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003306}
3307
3308static int compare_thresholds(const void *a, const void *b)
3309{
3310 const struct mem_cgroup_threshold *_a = a;
3311 const struct mem_cgroup_threshold *_b = b;
3312
Greg Thelen2bff24a2013-09-11 14:23:08 -07003313 if (_a->threshold > _b->threshold)
3314 return 1;
3315
3316 if (_a->threshold < _b->threshold)
3317 return -1;
3318
3319 return 0;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003320}
3321
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003322static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003323{
3324 struct mem_cgroup_eventfd_list *ev;
3325
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003326 spin_lock(&memcg_oom_lock);
3327
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003328 list_for_each_entry(ev, &memcg->oom_notify, list)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003329 eventfd_signal(ev->eventfd, 1);
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003330
3331 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003332 return 0;
3333}
3334
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003335static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003336{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003337 struct mem_cgroup *iter;
3338
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003339 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003340 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003341}
3342
Tejun Heo59b6f872013-11-22 18:20:43 -05003343static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003344 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003345{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003346 struct mem_cgroup_thresholds *thresholds;
3347 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003348 unsigned long threshold;
3349 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003350 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003351
Johannes Weiner650c5e52015-02-11 15:26:03 -08003352 ret = page_counter_memparse(args, "-1", &threshold);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003353 if (ret)
3354 return ret;
3355
3356 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003357
Johannes Weiner05b84302014-08-06 16:05:59 -07003358 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003359 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003360 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003361 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003362 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003363 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003364 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003365 BUG();
3366
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003367 /* Check if a threshold crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003368 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003369 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3370
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003371 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003372
3373 /* Allocate memory for new array of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003374 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003375 GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003376 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003377 ret = -ENOMEM;
3378 goto unlock;
3379 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003380 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003381
3382 /* Copy thresholds (if any) to new array */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003383 if (thresholds->primary) {
3384 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003385 sizeof(struct mem_cgroup_threshold));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003386 }
3387
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003388 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003389 new->entries[size - 1].eventfd = eventfd;
3390 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003391
3392	/* Sort thresholds. Registering a new threshold isn't time-critical */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003393 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003394 compare_thresholds, NULL);
3395
3396 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003397 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003398 for (i = 0; i < size; i++) {
Sha Zhengju748dad32012-05-29 15:06:57 -07003399 if (new->entries[i].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003400 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003401 * new->current_threshold will not be used until
3402 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003403 * it here.
3404 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003405 ++new->current_threshold;
Sha Zhengju748dad32012-05-29 15:06:57 -07003406 } else
3407 break;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003408 }
3409
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003410 /* Free old spare buffer and save old primary buffer as spare */
3411 kfree(thresholds->spare);
3412 thresholds->spare = thresholds->primary;
3413
3414 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003415
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003416 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003417 synchronize_rcu();
3418
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003419unlock:
3420 mutex_unlock(&memcg->thresholds_lock);
3421
3422 return ret;
3423}
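
/*
 * Worked registration example (values invented for illustration): primary
 * holds {4M, 16M} and usage is 10M, so current_threshold is 0.  Registering
 * an 8M threshold allocates a 3-entry array, copies the old entries,
 * appends 8M and sorts to {4M, 8M, 16M}; two entries are <= usage, so
 * new->current_threshold becomes 1.  The old primary array is parked in
 * ->spare, the new one is published with rcu_assign_pointer(), and
 * synchronize_rcu() waits out any __mem_cgroup_threshold() walker still
 * reading the old array.
 */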
3424
Tejun Heo59b6f872013-11-22 18:20:43 -05003425static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003426 struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003427{
Tejun Heo59b6f872013-11-22 18:20:43 -05003428 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003429}
3430
Tejun Heo59b6f872013-11-22 18:20:43 -05003431static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003432 struct eventfd_ctx *eventfd, const char *args)
3433{
Tejun Heo59b6f872013-11-22 18:20:43 -05003434 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003435}
3436
Tejun Heo59b6f872013-11-22 18:20:43 -05003437static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003438 struct eventfd_ctx *eventfd, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003439{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003440 struct mem_cgroup_thresholds *thresholds;
3441 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003442 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003443 int i, j, size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003444
3445 mutex_lock(&memcg->thresholds_lock);
Johannes Weiner05b84302014-08-06 16:05:59 -07003446
3447 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003448 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003449 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003450 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003451 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003452 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003453 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003454 BUG();
3455
Anton Vorontsov371528c2012-02-24 05:14:46 +04003456 if (!thresholds->primary)
3457 goto unlock;
3458
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003459 /* Check if a threshold crossed before removing */
3460 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3461
3462	/* Calculate the new number of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003463 size = 0;
3464 for (i = 0; i < thresholds->primary->size; i++) {
3465 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003466 size++;
3467 }
3468
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003469 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003470
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003471 /* Set thresholds array to NULL if we don't have thresholds */
3472 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003473 kfree(new);
3474 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003475 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003476 }
3477
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003478 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003479
3480 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003481 new->current_threshold = -1;
3482 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3483 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003484 continue;
3485
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003486 new->entries[j] = thresholds->primary->entries[i];
Sha Zhengju748dad32012-05-29 15:06:57 -07003487 if (new->entries[j].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003488 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003489 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003490 * until rcu_assign_pointer(), so it's safe to increment
3491 * it here.
3492 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003493 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003494 }
3495 j++;
3496 }
3497
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003498swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003499 /* Swap primary and spare array */
3500 thresholds->spare = thresholds->primary;
Sha Zhengju8c757762012-05-10 13:01:45 -07003501
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003502 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003503
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003504 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003505 synchronize_rcu();
Martijn Coenen6611d8d2016-01-15 16:57:49 -08003506
3507 /* If all events are unregistered, free the spare array */
3508 if (!new) {
3509 kfree(thresholds->spare);
3510 thresholds->spare = NULL;
3511 }
Anton Vorontsov371528c2012-02-24 05:14:46 +04003512unlock:
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003513 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003514}
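
/*
 * Unregistration counterpart of the example above (values still invented):
 * removing the 8M threshold reuses the array parked in ->spare as the new
 * 2-entry primary, copies {4M, 16M} across while skipping every entry whose
 * eventfd matches, recomputes current_threshold against the current usage,
 * and swaps the buffers under RCU.  If the removed entry was the last one,
 * the new primary is simply NULL and the spare buffer is freed as well.
 */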
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003515
Tejun Heo59b6f872013-11-22 18:20:43 -05003516static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003517 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003518{
Tejun Heo59b6f872013-11-22 18:20:43 -05003519 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003520}
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003521
Tejun Heo59b6f872013-11-22 18:20:43 -05003522static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003523 struct eventfd_ctx *eventfd)
3524{
Tejun Heo59b6f872013-11-22 18:20:43 -05003525 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003526}
3527
Tejun Heo59b6f872013-11-22 18:20:43 -05003528static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003529 struct eventfd_ctx *eventfd, const char *args)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003530{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003531 struct mem_cgroup_eventfd_list *event;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003532
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003533 event = kmalloc(sizeof(*event), GFP_KERNEL);
3534 if (!event)
3535 return -ENOMEM;
3536
Michal Hocko1af8efe2011-07-26 16:08:24 -07003537 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003538
3539 event->eventfd = eventfd;
3540 list_add(&event->list, &memcg->oom_notify);
3541
3542 /* already in OOM ? */
Tejun Heoc2b42d32015-06-24 16:58:23 -07003543 if (memcg->under_oom)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003544 eventfd_signal(eventfd, 1);
Michal Hocko1af8efe2011-07-26 16:08:24 -07003545 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003546
3547 return 0;
3548}
3549
Tejun Heo59b6f872013-11-22 18:20:43 -05003550static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003551 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003552{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003553 struct mem_cgroup_eventfd_list *ev, *tmp;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003554
Michal Hocko1af8efe2011-07-26 16:08:24 -07003555 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003556
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003557 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003558 if (ev->eventfd == eventfd) {
3559 list_del(&ev->list);
3560 kfree(ev);
3561 }
3562 }
3563
Michal Hocko1af8efe2011-07-26 16:08:24 -07003564 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003565}
3566
Tejun Heo2da8ca82013-12-05 12:28:04 -05003567static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003568{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003569 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003570
Tejun Heo791badb2013-12-05 12:28:02 -05003571 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
Tejun Heoc2b42d32015-06-24 16:58:23 -07003572 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
Konstantin Khlebnikov8e675f72017-07-06 15:40:28 -07003573 seq_printf(sf, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003574 return 0;
3575}
3576
Tejun Heo182446d2013-08-08 20:11:24 -04003577static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003578 struct cftype *cft, u64 val)
3579{
Tejun Heo182446d2013-08-08 20:11:24 -04003580 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003581
3582 /* cannot set to root cgroup and only 0 and 1 are allowed */
Linus Torvalds14208b02014-06-09 15:03:33 -07003583 if (!css->parent || !((val == 0) || (val == 1)))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003584 return -EINVAL;
3585
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003586 memcg->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07003587 if (!val)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003588 memcg_oom_recover(memcg);
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003589
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003590 return 0;
3591}
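
/*
 * Illustrative cgroup v1 semantics of the two handlers above (the values
 * shown are examples only): reading memory.oom_control yields
 *
 *	oom_kill_disable 0
 *	under_oom 0
 *	oom_kill 0
 *
 * while writing "1" sets oom_kill_disable, so tasks that hit the limit wait
 * under OOM for userspace (typically notified through an oom_control
 * eventfd) to free memory or raise the limit; writing "0" re-enables the
 * kernel OOM killer and wakes any such waiters via memcg_oom_recover().
 */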
3592
Tejun Heo52ebea72015-05-22 17:13:37 -04003593#ifdef CONFIG_CGROUP_WRITEBACK
3594
3595struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3596{
3597 return &memcg->cgwb_list;
3598}
3599
Tejun Heo841710a2015-05-22 18:23:33 -04003600static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3601{
3602 return wb_domain_init(&memcg->cgwb_domain, gfp);
3603}
3604
3605static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3606{
3607 wb_domain_exit(&memcg->cgwb_domain);
3608}
3609
Tejun Heo2529bb32015-05-22 18:23:34 -04003610static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3611{
3612 wb_domain_size_changed(&memcg->cgwb_domain);
3613}
3614
Tejun Heo841710a2015-05-22 18:23:33 -04003615struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3616{
3617 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3618
3619 if (!memcg->css.parent)
3620 return NULL;
3621
3622 return &memcg->cgwb_domain;
3623}
3624
Tejun Heoc2aa7232015-05-22 18:23:35 -04003625/**
3626 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3627 * @wb: bdi_writeback in question
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003628 * @pfilepages: out parameter for number of file pages
3629 * @pheadroom: out parameter for number of allocatable pages according to memcg
Tejun Heoc2aa7232015-05-22 18:23:35 -04003630 * @pdirty: out parameter for number of dirty pages
3631 * @pwriteback: out parameter for number of pages under writeback
3632 *
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003633 * Determine the numbers of file, headroom, dirty, and writeback pages in
3634 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3635 * is a bit more involved.
Tejun Heoc2aa7232015-05-22 18:23:35 -04003636 *
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003637 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3638 * headroom is calculated as the lowest headroom of itself and the
3639 * ancestors. Note that this doesn't consider the actual amount of
3640 * available memory in the system. The caller should further cap
3641 * *@pheadroom accordingly.
Tejun Heoc2aa7232015-05-22 18:23:35 -04003642 */
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003643void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3644 unsigned long *pheadroom, unsigned long *pdirty,
3645 unsigned long *pwriteback)
Tejun Heoc2aa7232015-05-22 18:23:35 -04003646{
3647 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3648 struct mem_cgroup *parent;
Tejun Heoc2aa7232015-05-22 18:23:35 -04003649
Johannes Weinerccda7f42017-05-03 14:55:16 -07003650 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
Tejun Heoc2aa7232015-05-22 18:23:35 -04003651
3652 /* this should eventually include NR_UNSTABLE_NFS */
Johannes Weinerccda7f42017-05-03 14:55:16 -07003653 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003654 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3655 (1 << LRU_ACTIVE_FILE));
3656 *pheadroom = PAGE_COUNTER_MAX;
Tejun Heoc2aa7232015-05-22 18:23:35 -04003657
Tejun Heoc2aa7232015-05-22 18:23:35 -04003658 while ((parent = parent_mem_cgroup(memcg))) {
3659 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3660 unsigned long used = page_counter_read(&memcg->memory);
3661
Tejun Heoc5edf9c2015-09-29 13:04:26 -04003662 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
Tejun Heoc2aa7232015-05-22 18:23:35 -04003663 memcg = parent;
3664 }
Tejun Heoc2aa7232015-05-22 18:23:35 -04003665}
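
/*
 * Worked headroom example (all numbers invented): a memcg with a 200MB
 * limit, a 150MB high boundary and 100MB in use has a local ceiling of
 * min(200MB, 150MB) = 150MB and thus 50MB of headroom.  If its (non-root)
 * parent has a 1024MB limit, no high boundary (PAGE_COUNTER_MAX) and 980MB
 * in use, the parent contributes only 44MB, so *pheadroom is reported as
 * min(50MB, 44MB) = 44MB.
 */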
3666
Tejun Heo841710a2015-05-22 18:23:33 -04003667#else /* CONFIG_CGROUP_WRITEBACK */
3668
3669static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3670{
3671 return 0;
3672}
3673
3674static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3675{
3676}
3677
Tejun Heo2529bb32015-05-22 18:23:34 -04003678static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3679{
3680}
3681
Tejun Heo52ebea72015-05-22 17:13:37 -04003682#endif /* CONFIG_CGROUP_WRITEBACK */
3683
Tejun Heo79bd9812013-11-22 18:20:42 -05003684/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05003685 * DO NOT USE IN NEW FILES.
3686 *
3687 * "cgroup.event_control" implementation.
3688 *
3689 * This is way over-engineered. It tries to support fully configurable
3690 * events for each user. Such a level of flexibility is completely
3691 * unnecessary, especially in light of the planned unified hierarchy.
3692 *
3693 * Please deprecate this and replace with something simpler if at all
3694 * possible.
3695 */
3696
3697/*
Tejun Heo79bd9812013-11-22 18:20:42 -05003698 * Unregister event and free resources.
3699 *
3700 * Gets called from workqueue.
3701 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05003702static void memcg_event_remove(struct work_struct *work)
Tejun Heo79bd9812013-11-22 18:20:42 -05003703{
Tejun Heo3bc942f2013-11-22 18:20:44 -05003704 struct mem_cgroup_event *event =
3705 container_of(work, struct mem_cgroup_event, remove);
Tejun Heo59b6f872013-11-22 18:20:43 -05003706 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05003707
3708 remove_wait_queue(event->wqh, &event->wait);
3709
Tejun Heo59b6f872013-11-22 18:20:43 -05003710 event->unregister_event(memcg, event->eventfd);
Tejun Heo79bd9812013-11-22 18:20:42 -05003711
3712 /* Notify userspace the event is going away. */
3713 eventfd_signal(event->eventfd, 1);
3714
3715 eventfd_ctx_put(event->eventfd);
3716 kfree(event);
Tejun Heo59b6f872013-11-22 18:20:43 -05003717 css_put(&memcg->css);
Tejun Heo79bd9812013-11-22 18:20:42 -05003718}
3719
3720/*
3721 * Gets called on POLLHUP on eventfd when user closes it.
3722 *
3723 * Called with wqh->lock held and interrupts disabled.
3724 */
Ingo Molnarac6424b2017-06-20 12:06:13 +02003725static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
Tejun Heo3bc942f2013-11-22 18:20:44 -05003726 int sync, void *key)
Tejun Heo79bd9812013-11-22 18:20:42 -05003727{
Tejun Heo3bc942f2013-11-22 18:20:44 -05003728 struct mem_cgroup_event *event =
3729 container_of(wait, struct mem_cgroup_event, wait);
Tejun Heo59b6f872013-11-22 18:20:43 -05003730 struct mem_cgroup *memcg = event->memcg;
Al Viro3ad6f932017-07-03 20:14:56 -04003731 __poll_t flags = key_to_poll(key);
Tejun Heo79bd9812013-11-22 18:20:42 -05003732
3733 if (flags & POLLHUP) {
3734 /*
3735 * If the event has been detached at cgroup removal, we
3736		 * can simply return knowing the other side will clean up
3737 * for us.
3738 *
3739 * We can't race against event freeing since the other
3740 * side will require wqh->lock via remove_wait_queue(),
3741 * which we hold.
3742 */
Tejun Heofba94802013-11-22 18:20:43 -05003743 spin_lock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05003744 if (!list_empty(&event->list)) {
3745 list_del_init(&event->list);
3746 /*
3747			 * We are in atomic context, but memcg_event_remove()
3748			 * may sleep, so we have to call it in a workqueue.
3749 */
3750 schedule_work(&event->remove);
3751 }
Tejun Heofba94802013-11-22 18:20:43 -05003752 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05003753 }
3754
3755 return 0;
3756}
3757
Tejun Heo3bc942f2013-11-22 18:20:44 -05003758static void memcg_event_ptable_queue_proc(struct file *file,
Tejun Heo79bd9812013-11-22 18:20:42 -05003759 wait_queue_head_t *wqh, poll_table *pt)
3760{
Tejun Heo3bc942f2013-11-22 18:20:44 -05003761 struct mem_cgroup_event *event =
3762 container_of(pt, struct mem_cgroup_event, pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05003763
3764 event->wqh = wqh;
3765 add_wait_queue(wqh, &event->wait);
3766}
3767
3768/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05003769 * DO NOT USE IN NEW FILES.
3770 *
Tejun Heo79bd9812013-11-22 18:20:42 -05003771 * Parse input and register new cgroup event handler.
3772 *
3773 * Input must be in format '<event_fd> <control_fd> <args>'.
3774 * Interpretation of args is defined by control file implementation.
3775 */
Tejun Heo451af502014-05-13 12:16:21 -04003776static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3777 char *buf, size_t nbytes, loff_t off)
Tejun Heo79bd9812013-11-22 18:20:42 -05003778{
Tejun Heo451af502014-05-13 12:16:21 -04003779 struct cgroup_subsys_state *css = of_css(of);
Tejun Heofba94802013-11-22 18:20:43 -05003780 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05003781 struct mem_cgroup_event *event;
Tejun Heo79bd9812013-11-22 18:20:42 -05003782 struct cgroup_subsys_state *cfile_css;
3783 unsigned int efd, cfd;
3784 struct fd efile;
3785 struct fd cfile;
Tejun Heofba94802013-11-22 18:20:43 -05003786 const char *name;
Tejun Heo79bd9812013-11-22 18:20:42 -05003787 char *endp;
3788 int ret;
3789
Tejun Heo451af502014-05-13 12:16:21 -04003790 buf = strstrip(buf);
3791
3792 efd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05003793 if (*endp != ' ')
3794 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04003795 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05003796
Tejun Heo451af502014-05-13 12:16:21 -04003797 cfd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05003798 if ((*endp != ' ') && (*endp != '\0'))
3799 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04003800 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05003801
3802 event = kzalloc(sizeof(*event), GFP_KERNEL);
3803 if (!event)
3804 return -ENOMEM;
3805
Tejun Heo59b6f872013-11-22 18:20:43 -05003806 event->memcg = memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05003807 INIT_LIST_HEAD(&event->list);
Tejun Heo3bc942f2013-11-22 18:20:44 -05003808 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3809 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3810 INIT_WORK(&event->remove, memcg_event_remove);
Tejun Heo79bd9812013-11-22 18:20:42 -05003811
3812 efile = fdget(efd);
3813 if (!efile.file) {
3814 ret = -EBADF;
3815 goto out_kfree;
3816 }
3817
3818 event->eventfd = eventfd_ctx_fileget(efile.file);
3819 if (IS_ERR(event->eventfd)) {
3820 ret = PTR_ERR(event->eventfd);
3821 goto out_put_efile;
3822 }
3823
3824 cfile = fdget(cfd);
3825 if (!cfile.file) {
3826 ret = -EBADF;
3827 goto out_put_eventfd;
3828 }
3829
3830	/* the process needs read permission on the control file */
3831 /* AV: shouldn't we check that it's been opened for read instead? */
3832 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3833 if (ret < 0)
3834 goto out_put_cfile;
3835
Tejun Heo79bd9812013-11-22 18:20:42 -05003836 /*
Tejun Heofba94802013-11-22 18:20:43 -05003837 * Determine the event callbacks and set them in @event. This used
3838 * to be done via struct cftype but cgroup core no longer knows
3839 * about these events. The following is crude but the whole thing
3840 * is for compatibility anyway.
Tejun Heo3bc942f2013-11-22 18:20:44 -05003841 *
3842 * DO NOT ADD NEW FILES.
Tejun Heofba94802013-11-22 18:20:43 -05003843 */
Al Virob5830432014-10-31 01:22:04 -04003844 name = cfile.file->f_path.dentry->d_name.name;
Tejun Heofba94802013-11-22 18:20:43 -05003845
3846 if (!strcmp(name, "memory.usage_in_bytes")) {
3847 event->register_event = mem_cgroup_usage_register_event;
3848 event->unregister_event = mem_cgroup_usage_unregister_event;
3849 } else if (!strcmp(name, "memory.oom_control")) {
3850 event->register_event = mem_cgroup_oom_register_event;
3851 event->unregister_event = mem_cgroup_oom_unregister_event;
3852 } else if (!strcmp(name, "memory.pressure_level")) {
3853 event->register_event = vmpressure_register_event;
3854 event->unregister_event = vmpressure_unregister_event;
3855 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
Tejun Heo347c4a82013-11-22 18:20:43 -05003856 event->register_event = memsw_cgroup_usage_register_event;
3857 event->unregister_event = memsw_cgroup_usage_unregister_event;
Tejun Heofba94802013-11-22 18:20:43 -05003858 } else {
3859 ret = -EINVAL;
3860 goto out_put_cfile;
3861 }
3862
3863 /*
Tejun Heob5557c42013-11-22 18:20:42 -05003864 * Verify @cfile should belong to @css. Also, remaining events are
3865 * automatically removed on cgroup destruction but the removal is
3866 * asynchronous, so take an extra ref on @css.
Tejun Heo79bd9812013-11-22 18:20:42 -05003867 */
Al Virob5830432014-10-31 01:22:04 -04003868 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
Tejun Heoec903c02014-05-13 12:11:01 -04003869 &memory_cgrp_subsys);
Tejun Heo79bd9812013-11-22 18:20:42 -05003870 ret = -EINVAL;
Tejun Heo5a17f542014-02-11 11:52:47 -05003871 if (IS_ERR(cfile_css))
Tejun Heo79bd9812013-11-22 18:20:42 -05003872 goto out_put_cfile;
Tejun Heo5a17f542014-02-11 11:52:47 -05003873 if (cfile_css != css) {
3874 css_put(cfile_css);
3875 goto out_put_cfile;
3876 }
Tejun Heo79bd9812013-11-22 18:20:42 -05003877
Tejun Heo451af502014-05-13 12:16:21 -04003878 ret = event->register_event(memcg, event->eventfd, buf);
Tejun Heo79bd9812013-11-22 18:20:42 -05003879 if (ret)
3880 goto out_put_css;
3881
3882 efile.file->f_op->poll(efile.file, &event->pt);
3883
Tejun Heofba94802013-11-22 18:20:43 -05003884 spin_lock(&memcg->event_list_lock);
3885 list_add(&event->list, &memcg->event_list);
3886 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05003887
3888 fdput(cfile);
3889 fdput(efile);
3890
Tejun Heo451af502014-05-13 12:16:21 -04003891 return nbytes;
Tejun Heo79bd9812013-11-22 18:20:42 -05003892
3893out_put_css:
Tejun Heob5557c42013-11-22 18:20:42 -05003894 css_put(css);
Tejun Heo79bd9812013-11-22 18:20:42 -05003895out_put_cfile:
3896 fdput(cfile);
3897out_put_eventfd:
3898 eventfd_ctx_put(event->eventfd);
3899out_put_efile:
3900 fdput(efile);
3901out_kfree:
3902 kfree(event);
3903
3904 return ret;
3905}
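
/*
 * Minimal userspace sketch of the registration protocol parsed above
 * (illustrative only: the cgroup path and the 100MB threshold are made-up
 * values, and error handling is omitted):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ctl = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *		       O_WRONLY);
 *	char buf[64];
 *	uint64_t ticks;
 *
 *	snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd, 100ULL << 20);
 *	write(ctl, buf, strlen(buf));
 *	read(efd, &ticks, sizeof(ticks));
 *
 * The read() blocks until the usage threshold is crossed, at which point
 * the eventfd registered through mem_cgroup_usage_register_event() is
 * signalled by __mem_cgroup_threshold().
 */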
3906
Johannes Weiner241994ed2015-02-11 15:26:06 -08003907static struct cftype mem_cgroup_legacy_files[] = {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003908 {
Balbir Singh0eea1032008-02-07 00:13:57 -08003909 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003910 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05003911 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003912 },
3913 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003914 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003915 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04003916 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05003917 .read_u64 = mem_cgroup_read_u64,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003918 },
3919 {
Balbir Singh0eea1032008-02-07 00:13:57 -08003920 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003921 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04003922 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05003923 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003924 },
3925 {
Balbir Singh296c81d2009-09-23 15:56:36 -07003926 .name = "soft_limit_in_bytes",
3927 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04003928 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05003929 .read_u64 = mem_cgroup_read_u64,
Balbir Singh296c81d2009-09-23 15:56:36 -07003930 },
3931 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003932 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003933 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04003934 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05003935 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003936 },
Balbir Singh8697d332008-02-07 00:13:59 -08003937 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003938 .name = "stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05003939 .seq_show = memcg_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003940 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003941 {
3942 .name = "force_empty",
Tejun Heo6770c642014-05-13 12:16:21 -04003943 .write = mem_cgroup_force_empty_write,
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003944 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08003945 {
3946 .name = "use_hierarchy",
3947 .write_u64 = mem_cgroup_hierarchy_write,
3948 .read_u64 = mem_cgroup_hierarchy_read,
3949 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003950 {
Tejun Heo3bc942f2013-11-22 18:20:44 -05003951 .name = "cgroup.event_control", /* XXX: for compat */
Tejun Heo451af502014-05-13 12:16:21 -04003952 .write = memcg_write_event_control,
Tejun Heo7dbdb192015-09-18 17:54:23 -04003953 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
Tejun Heo79bd9812013-11-22 18:20:42 -05003954 },
3955 {
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003956 .name = "swappiness",
3957 .read_u64 = mem_cgroup_swappiness_read,
3958 .write_u64 = mem_cgroup_swappiness_write,
3959 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003960 {
3961 .name = "move_charge_at_immigrate",
3962 .read_u64 = mem_cgroup_move_charge_read,
3963 .write_u64 = mem_cgroup_move_charge_write,
3964 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003965 {
3966 .name = "oom_control",
Tejun Heo2da8ca82013-12-05 12:28:04 -05003967 .seq_show = mem_cgroup_oom_control_read,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003968 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003969 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3970 },
Anton Vorontsov70ddf632013-04-29 15:08:31 -07003971 {
3972 .name = "pressure_level",
Anton Vorontsov70ddf632013-04-29 15:08:31 -07003973 },
Ying Han406eb0c2011-05-26 16:25:37 -07003974#ifdef CONFIG_NUMA
3975 {
3976 .name = "numa_stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05003977 .seq_show = memcg_numa_stat_show,
Ying Han406eb0c2011-05-26 16:25:37 -07003978 },
3979#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08003980 {
3981 .name = "kmem.limit_in_bytes",
3982 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04003983 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05003984 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08003985 },
3986 {
3987 .name = "kmem.usage_in_bytes",
3988 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05003989 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08003990 },
3991 {
3992 .name = "kmem.failcnt",
3993 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04003994 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05003995 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08003996 },
3997 {
3998 .name = "kmem.max_usage_in_bytes",
3999 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004000 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004001 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004002 },
Yang Shi5b365772017-11-15 17:32:03 -08004003#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
Glauber Costa749c5412012-12-18 14:23:01 -08004004 {
4005 .name = "kmem.slabinfo",
Tejun Heobc2791f2017-02-22 15:41:21 -08004006 .seq_start = memcg_slab_start,
4007 .seq_next = memcg_slab_next,
4008 .seq_stop = memcg_slab_stop,
Vladimir Davydovb0475012014-12-10 15:44:19 -08004009 .seq_show = memcg_slab_show,
Glauber Costa749c5412012-12-18 14:23:01 -08004010 },
4011#endif
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08004012 {
4013 .name = "kmem.tcp.limit_in_bytes",
4014 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4015 .write = mem_cgroup_write,
4016 .read_u64 = mem_cgroup_read_u64,
4017 },
4018 {
4019 .name = "kmem.tcp.usage_in_bytes",
4020 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4021 .read_u64 = mem_cgroup_read_u64,
4022 },
4023 {
4024 .name = "kmem.tcp.failcnt",
4025 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4026 .write = mem_cgroup_reset,
4027 .read_u64 = mem_cgroup_read_u64,
4028 },
4029 {
4030 .name = "kmem.tcp.max_usage_in_bytes",
4031 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4032 .write = mem_cgroup_reset,
4033 .read_u64 = mem_cgroup_read_u64,
4034 },
Tejun Heo6bc10342012-04-01 12:09:55 -07004035 { }, /* terminate */
Tejun Heoaf36f902012-04-01 12:09:55 -07004036};
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004037
Johannes Weiner73f576c2016-07-20 15:44:57 -07004038/*
4039 * Private memory cgroup IDR
4040 *
4041 * Swap-out records and page cache shadow entries need to store memcg
4042 * references in constrained space, so we maintain an ID space that is
4043 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4044 * memory-controlled cgroups to 64k.
4045 *
4046 * However, there usually are many references to the offline CSS after
4047 * the cgroup has been destroyed, such as page cache or reclaimable
4048 * slab objects, that don't need to hang on to the ID. We want to keep
4049 * those dead CSS from occupying IDs, or we might quickly exhaust the
4050 * relatively small ID space and prevent the creation of new cgroups
4051 * even when there are much fewer than 64k cgroups - possibly none.
4052 *
4053 * Maintain a private 16-bit ID space for memcg, and allow the ID to
4054 * be freed and recycled when it's no longer needed, which is usually
4055 * when the CSS is offlined.
4056 *
4057 * The only exception to that are records of swapped out tmpfs/shmem
4058 * pages that need to be attributed to live ancestors on swapin. But
4059 * those references are manageable from userspace.
4060 */
4061
4062static DEFINE_IDR(mem_cgroup_idr);
4063
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004064static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
Johannes Weiner73f576c2016-07-20 15:44:57 -07004065{
Vladimir Davydov58fa2a52016-10-07 16:57:29 -07004066 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004067 atomic_add(n, &memcg->id.ref);
Johannes Weiner73f576c2016-07-20 15:44:57 -07004068}
4069
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004070static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
Johannes Weiner73f576c2016-07-20 15:44:57 -07004071{
Vladimir Davydov58fa2a52016-10-07 16:57:29 -07004072 VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004073 if (atomic_sub_and_test(n, &memcg->id.ref)) {
Johannes Weiner73f576c2016-07-20 15:44:57 -07004074 idr_remove(&mem_cgroup_idr, memcg->id.id);
4075 memcg->id.id = 0;
4076
4077 /* Memcg ID pins CSS */
4078 css_put(&memcg->css);
4079 }
4080}
4081
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004082static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4083{
4084 mem_cgroup_id_get_many(memcg, 1);
4085}
4086
4087static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4088{
4089 mem_cgroup_id_put_many(memcg, 1);
4090}
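
/*
 * Refcounting sketch (not a literal call site): the swap-out path is the
 * main user of the _many variants.  When a THP is swapped out, something
 * like mem_cgroup_id_get_many(memcg, nr_entries - 1) keeps the ID (and
 * thus the offline CSS) pinned for every swap entry that records it, and
 * mem_cgroup_uncharge_swap() drops those references again through
 * mem_cgroup_id_put_many() as the entries are freed.
 */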
4091
Johannes Weiner73f576c2016-07-20 15:44:57 -07004092/**
4093 * mem_cgroup_from_id - look up a memcg from a memcg id
4094 * @id: the memcg id to look up
4095 *
4096 * Caller must hold rcu_read_lock().
4097 */
4098struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4099{
4100 WARN_ON_ONCE(!rcu_read_lock_held());
4101 return idr_find(&mem_cgroup_idr, id);
4102}
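
/*
 * Typical lookup pattern (a sketch, not copied from a call site; real users
 * such as swap records may instead rely on an ID reference taken with
 * mem_cgroup_id_get_many()): the returned pointer is only stabilized by
 * RCU, so a caller that wants to use the memcg after dropping the read
 * lock has to pin the css first:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */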
4103
Mel Gormanef8f2322016-07-28 15:46:05 -07004104static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004105{
4106 struct mem_cgroup_per_node *pn;
Mel Gormanef8f2322016-07-28 15:46:05 -07004107 int tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004108 /*
4109 * This routine is called against possible nodes.
4110	 * But it's a BUG to call kmalloc() against an offline node.
4111 *
4112	 * TODO: this routine can waste a lot of memory for nodes which will
4113	 * never be onlined. It's better to use a memory hotplug callback
4114 * function.
4115 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004116 if (!node_state(node, N_NORMAL_MEMORY))
4117 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08004118 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004119 if (!pn)
4120 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004121
Johannes Weinera983b5e2018-01-31 16:16:45 -08004122 pn->lruvec_stat_cpu = alloc_percpu(struct lruvec_stat);
4123 if (!pn->lruvec_stat_cpu) {
Johannes Weiner00f3ca22017-07-06 15:40:52 -07004124 kfree(pn);
4125 return 1;
4126 }
4127
Mel Gormanef8f2322016-07-28 15:46:05 -07004128 lruvec_init(&pn->lruvec);
4129 pn->usage_in_excess = 0;
4130 pn->on_tree = false;
4131 pn->memcg = memcg;
4132
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004133 memcg->nodeinfo[node] = pn;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004134 return 0;
4135}
4136
Mel Gormanef8f2322016-07-28 15:46:05 -07004137static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004138{
Johannes Weiner00f3ca22017-07-06 15:40:52 -07004139 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
4140
Johannes Weinera983b5e2018-01-31 16:16:45 -08004141 free_percpu(pn->lruvec_stat_cpu);
Johannes Weiner00f3ca22017-07-06 15:40:52 -07004142 kfree(pn);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004143}
4144
Tahsin Erdogan40e952f2017-03-09 16:17:26 -08004145static void __mem_cgroup_free(struct mem_cgroup *memcg)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004146{
4147 int node;
4148
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004149 for_each_node(node)
Mel Gormanef8f2322016-07-28 15:46:05 -07004150 free_mem_cgroup_per_node_info(memcg, node);
Johannes Weinera983b5e2018-01-31 16:16:45 -08004151 free_percpu(memcg->stat_cpu);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004152 kfree(memcg);
4153}
4154
Tahsin Erdogan40e952f2017-03-09 16:17:26 -08004155static void mem_cgroup_free(struct mem_cgroup *memcg)
4156{
4157 memcg_wb_domain_exit(memcg);
4158 __mem_cgroup_free(memcg);
4159}
4160
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004161static struct mem_cgroup *mem_cgroup_alloc(void)
4162{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004163 struct mem_cgroup *memcg;
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004164 size_t size;
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004165 int node;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004166
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004167 size = sizeof(struct mem_cgroup);
4168 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004169
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004170 memcg = kzalloc(size, GFP_KERNEL);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004171 if (!memcg)
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07004172 return NULL;
4173
Johannes Weiner73f576c2016-07-20 15:44:57 -07004174 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4175 1, MEM_CGROUP_ID_MAX,
4176 GFP_KERNEL);
4177 if (memcg->id.id < 0)
4178 goto fail;
4179
Johannes Weinera983b5e2018-01-31 16:16:45 -08004180 memcg->stat_cpu = alloc_percpu(struct mem_cgroup_stat_cpu);
4181 if (!memcg->stat_cpu)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004182 goto fail;
Pavel Emelianov78fb7462008-02-07 00:13:51 -08004183
Bob Liu3ed28fa2012-01-12 17:19:04 -08004184 for_each_node(node)
Mel Gormanef8f2322016-07-28 15:46:05 -07004185 if (alloc_mem_cgroup_per_node_info(memcg, node))
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004186 goto fail;
Balbir Singhf64c3f52009-09-23 15:56:37 -07004187
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004188 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4189 goto fail;
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004190
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004191 INIT_WORK(&memcg->high_work, high_work_func);
Glauber Costad142e3e2013-02-22 16:34:52 -08004192 memcg->last_scanned_node = MAX_NUMNODES;
4193 INIT_LIST_HEAD(&memcg->oom_notify);
Glauber Costad142e3e2013-02-22 16:34:52 -08004194 mutex_init(&memcg->thresholds_lock);
4195 spin_lock_init(&memcg->move_lock);
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004196 vmpressure_init(&memcg->vmpressure);
Tejun Heofba94802013-11-22 18:20:43 -05004197 INIT_LIST_HEAD(&memcg->event_list);
4198 spin_lock_init(&memcg->event_list_lock);
Johannes Weinerd886f4e2016-01-20 15:02:47 -08004199 memcg->socket_pressure = jiffies;
Johannes Weiner127424c2016-01-20 15:02:32 -08004200#ifndef CONFIG_SLOB
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004201 memcg->kmemcg_id = -1;
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004202#endif
Tejun Heo52ebea72015-05-22 17:13:37 -04004203#ifdef CONFIG_CGROUP_WRITEBACK
4204 INIT_LIST_HEAD(&memcg->cgwb_list);
4205#endif
Johannes Weiner73f576c2016-07-20 15:44:57 -07004206 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004207 return memcg;
4208fail:
Johannes Weiner73f576c2016-07-20 15:44:57 -07004209 if (memcg->id.id > 0)
4210 idr_remove(&mem_cgroup_idr, memcg->id.id);
Tahsin Erdogan40e952f2017-03-09 16:17:26 -08004211 __mem_cgroup_free(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004212 return NULL;
Glauber Costad142e3e2013-02-22 16:34:52 -08004213}
4214
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004215static struct cgroup_subsys_state * __ref
4216mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
Glauber Costad142e3e2013-02-22 16:34:52 -08004217{
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004218 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4219 struct mem_cgroup *memcg;
4220 long error = -ENOMEM;
Glauber Costad142e3e2013-02-22 16:34:52 -08004221
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004222 memcg = mem_cgroup_alloc();
4223 if (!memcg)
4224 return ERR_PTR(error);
Li Zefan4219b2d2013-09-23 16:56:29 +08004225
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004226 memcg->high = PAGE_COUNTER_MAX;
4227 memcg->soft_limit = PAGE_COUNTER_MAX;
4228 if (parent) {
4229 memcg->swappiness = mem_cgroup_swappiness(parent);
4230 memcg->oom_kill_disable = parent->oom_kill_disable;
4231 }
4232 if (parent && parent->use_hierarchy) {
4233 memcg->use_hierarchy = true;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004234 page_counter_init(&memcg->memory, &parent->memory);
Vladimir Davydov37e84352016-01-20 15:02:56 -08004235 page_counter_init(&memcg->swap, &parent->swap);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004236 page_counter_init(&memcg->memsw, &parent->memsw);
4237 page_counter_init(&memcg->kmem, &parent->kmem);
Johannes Weiner0db15292016-01-20 15:02:50 -08004238 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004239 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004240 page_counter_init(&memcg->memory, NULL);
Vladimir Davydov37e84352016-01-20 15:02:56 -08004241 page_counter_init(&memcg->swap, NULL);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004242 page_counter_init(&memcg->memsw, NULL);
4243 page_counter_init(&memcg->kmem, NULL);
Johannes Weiner0db15292016-01-20 15:02:50 -08004244 page_counter_init(&memcg->tcpmem, NULL);
Tejun Heo8c7f6ed2012-09-13 12:20:58 -07004245 /*
4246		 * Deeper hierarchy with use_hierarchy == false doesn't make
4247		 * much sense, so let the cgroup subsystem know about this
4248 * unfortunate state in our controller.
4249 */
Glauber Costad142e3e2013-02-22 16:34:52 -08004250 if (parent != root_mem_cgroup)
Tejun Heo073219e2014-02-08 10:36:58 -05004251 memory_cgrp_subsys.broken_hierarchy = true;
Balbir Singh18f59ea2009-01-07 18:08:07 -08004252 }
Vladimir Davydovd6441632014-01-23 15:53:09 -08004253
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004254 /* The following stuff does not apply to the root */
4255 if (!parent) {
4256 root_mem_cgroup = memcg;
4257 return &memcg->css;
4258 }
4259
Vladimir Davydovb313aee2016-03-17 14:18:27 -07004260 error = memcg_online_kmem(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004261 if (error)
4262 goto fail;
Johannes Weiner127424c2016-01-20 15:02:32 -08004263
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004264 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
Johannes Weineref129472016-01-14 15:21:34 -08004265 static_branch_inc(&memcg_sockets_enabled_key);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004266
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004267 return &memcg->css;
4268fail:
4269 mem_cgroup_free(memcg);
Tejun Heoea3a9642016-06-24 14:49:58 -07004270 return ERR_PTR(-ENOMEM);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004271}
4272
Johannes Weiner73f576c2016-07-20 15:44:57 -07004273static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004274{
Vladimir Davydov58fa2a52016-10-07 16:57:29 -07004275 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4276
Johannes Weiner73f576c2016-07-20 15:44:57 -07004277 /* Online state pins memcg ID, memcg ID pins CSS */
Vladimir Davydov58fa2a52016-10-07 16:57:29 -07004278 atomic_set(&memcg->id.ref, 1);
Johannes Weiner73f576c2016-07-20 15:44:57 -07004279 css_get(css);
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07004280 return 0;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004281}
4282
Tejun Heoeb954192013-08-08 20:11:23 -04004283static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004284{
Tejun Heoeb954192013-08-08 20:11:23 -04004285 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004286 struct mem_cgroup_event *event, *tmp;
Tejun Heo79bd9812013-11-22 18:20:42 -05004287
4288 /*
4289 * Unregister events and notify userspace.
4290	 * Notify userspace about cgroup removal only after rmdir of the cgroup
4291	 * directory to avoid a race between userspace and kernelspace.
4292 */
Tejun Heofba94802013-11-22 18:20:43 -05004293 spin_lock(&memcg->event_list_lock);
4294 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
Tejun Heo79bd9812013-11-22 18:20:42 -05004295 list_del_init(&event->list);
4296 schedule_work(&event->remove);
4297 }
Tejun Heofba94802013-11-22 18:20:43 -05004298 spin_unlock(&memcg->event_list_lock);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07004299
Roman Gushchin63677c742017-09-06 16:21:47 -07004300 memcg->low = 0;
4301
Johannes Weiner567e9ab2016-01-20 15:02:24 -08004302 memcg_offline_kmem(memcg);
Tejun Heo52ebea72015-05-22 17:13:37 -04004303 wb_memcg_offline(memcg);
Johannes Weiner73f576c2016-07-20 15:44:57 -07004304
4305 mem_cgroup_id_put(memcg);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004306}
4307
Vladimir Davydov6df38682015-12-29 14:54:10 -08004308static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4309{
4310 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4311
4312 invalidate_reclaim_iterators(memcg);
4313}
4314
Tejun Heoeb954192013-08-08 20:11:23 -04004315static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004316{
Tejun Heoeb954192013-08-08 20:11:23 -04004317 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimurac268e992009-01-15 13:51:13 -08004318
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08004319 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
Johannes Weineref129472016-01-14 15:21:34 -08004320 static_branch_dec(&memcg_sockets_enabled_key);
Johannes Weiner3893e302016-01-20 15:02:29 -08004321
Johannes Weiner0db15292016-01-20 15:02:50 -08004322 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
Vladimir Davydovd55f90b2016-01-20 15:02:44 -08004323 static_branch_dec(&memcg_sockets_enabled_key);
Johannes Weiner3893e302016-01-20 15:02:29 -08004324
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004325 vmpressure_cleanup(&memcg->vmpressure);
4326 cancel_work_sync(&memcg->high_work);
4327 mem_cgroup_remove_from_trees(memcg);
Johannes Weinerd886f4e2016-01-20 15:02:47 -08004328 memcg_free_kmem(memcg);
Johannes Weiner0b8f73e2016-01-20 15:02:53 -08004329 mem_cgroup_free(memcg);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004330}
4331
Tejun Heo1ced9532014-07-08 18:02:57 -04004332/**
4333 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4334 * @css: the target css
4335 *
4336 * Reset the states of the mem_cgroup associated with @css. This is
4337 * invoked when the userland requests disabling on the default hierarchy
4338 * but the memcg is pinned through dependency. The memcg should stop
4339 * applying policies and should revert to the vanilla state as it may be
4340 * made visible again.
4341 *
4342 * The current implementation only resets the essential configurations.
4343 * This needs to be expanded to cover all the visible parts.
4344 */
4345static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4346{
4347 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4348
Vladimir Davydovd334c9bc2016-03-17 14:19:38 -07004349 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4350 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4351 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4352 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4353 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004354 memcg->low = 0;
4355 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004356 memcg->soft_limit = PAGE_COUNTER_MAX;
Tejun Heo2529bb32015-05-22 18:23:34 -04004357 memcg_wb_domain_size_changed(memcg);
Tejun Heo1ced9532014-07-08 18:02:57 -04004358}
4359
Daisuke Nishimura02491442010-03-10 15:22:17 -08004360#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004361/* Handlers for move charge at task migration. */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004362static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004363{
Johannes Weiner05b84302014-08-06 16:05:59 -07004364 int ret;
Johannes Weiner9476db92014-08-06 16:05:55 -07004365
Mel Gormand0164ad2015-11-06 16:28:21 -08004366 /* Try a single bulk charge without reclaim first, kswapd may wake */
4367 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
Johannes Weiner9476db92014-08-06 16:05:55 -07004368 if (!ret) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004369 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004370 return ret;
4371 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004372
David Rientjes36745342017-01-24 15:18:10 -08004373 /* Try charges one by one with reclaim, but do not retry */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004374 while (count--) {
David Rientjes36745342017-01-24 15:18:10 -08004375 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004376 if (ret)
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004377 return ret;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004378 mc.precharge++;
Johannes Weiner9476db92014-08-06 16:05:55 -07004379 cond_resched();
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004380 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004381 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004382}
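
/*
 * Example with hypothetical numbers: precharging 512 pages first tries a
 * single try_charge() of 512 without direct reclaim (cheap; kswapd may be
 * woken).  If that bulk charge fails, it falls back to 512 one-page charges
 * that may reclaim but use __GFP_NORETRY, so a precharge that cannot make
 * progress fails quickly instead of retrying inside the charge path.
 */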
4383
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004384union mc_target {
4385 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004386 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004387};
4388
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004389enum mc_target_type {
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004390 MC_TARGET_NONE = 0,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004391 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08004392 MC_TARGET_SWAP,
Jérôme Glissec733a822017-09-08 16:11:54 -07004393 MC_TARGET_DEVICE,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004394};
4395
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004396static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4397 unsigned long addr, pte_t ptent)
4398{
Jérôme Glissec733a822017-09-08 16:11:54 -07004399 struct page *page = _vm_normal_page(vma, addr, ptent, true);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004400
4401 if (!page || !page_mapped(page))
4402 return NULL;
4403 if (PageAnon(page)) {
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004404 if (!(mc.flags & MOVE_ANON))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004405 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004406 } else {
4407 if (!(mc.flags & MOVE_FILE))
4408 return NULL;
4409 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004410 if (!get_page_unless_zero(page))
4411 return NULL;
4412
4413 return page;
4414}
4415
Jérôme Glissec733a822017-09-08 16:11:54 -07004416#if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004417static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
Li RongQing48406ef2016-07-26 15:22:14 -07004418 pte_t ptent, swp_entry_t *entry)
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004419{
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004420 struct page *page = NULL;
4421 swp_entry_t ent = pte_to_swp_entry(ptent);
4422
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004423 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004424 return NULL;
Jérôme Glissec733a822017-09-08 16:11:54 -07004425
4426 /*
4427	 * Handle MEMORY_DEVICE_PRIVATE entries: these are ZONE_DEVICE pages
4428	 * belonging to a device. Because they are not accessible by the CPU,
4429	 * they are stored as special swap entries in the CPU page table.
4430 */
4431 if (is_device_private_entry(ent)) {
4432 page = device_private_entry_to_page(ent);
4433 /*
4434		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
4435		 * a refcount of 1 when free (unlike a normal page).
4436 */
4437 if (!page_ref_add_unless(page, 1, 1))
4438 return NULL;
4439 return page;
4440 }
4441
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004442 /*
4443	 * Because lookup_swap_cache() updates some statistics counters,
4444 * we call find_get_page() with swapper_space directly.
4445 */
Huang Yingf6ab1f72016-10-07 17:00:21 -07004446 page = find_get_page(swap_address_space(ent), swp_offset(ent));
Johannes Weiner7941d212016-01-14 15:21:23 -08004447 if (do_memsw_account())
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004448 entry->val = ent.val;
4449
4450 return page;
4451}
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004452#else
4453static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
Li RongQing48406ef2016-07-26 15:22:14 -07004454 pte_t ptent, swp_entry_t *entry)
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004455{
4456 return NULL;
4457}
4458#endif
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004459
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004460static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4461 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4462{
4463 struct page *page = NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004464 struct address_space *mapping;
4465 pgoff_t pgoff;
4466
4467 if (!vma->vm_file) /* anonymous vma */
4468 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004469 if (!(mc.flags & MOVE_FILE))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004470 return NULL;
4471
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004472 mapping = vma->vm_file->f_mapping;
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004473 pgoff = linear_page_index(vma, addr);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004474
 4475	/* The page is moved even if it's not RSS of this task (page-faulted). */
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004476#ifdef CONFIG_SWAP
4477 /* shmem/tmpfs may report page out on swap: account for that too. */
Johannes Weiner139b6a62014-05-06 12:50:05 -07004478 if (shmem_mapping(mapping)) {
4479 page = find_get_entry(mapping, pgoff);
4480 if (radix_tree_exceptional_entry(page)) {
4481 swp_entry_t swp = radix_to_swp_entry(page);
Johannes Weiner7941d212016-01-14 15:21:23 -08004482 if (do_memsw_account())
Johannes Weiner139b6a62014-05-06 12:50:05 -07004483 *entry = swp;
Huang Yingf6ab1f72016-10-07 17:00:21 -07004484 page = find_get_page(swap_address_space(swp),
4485 swp_offset(swp));
Johannes Weiner139b6a62014-05-06 12:50:05 -07004486 }
4487 } else
4488 page = find_get_page(mapping, pgoff);
4489#else
4490 page = find_get_page(mapping, pgoff);
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004491#endif
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004492 return page;
4493}
4494
Chen Gangb1b0dea2015-04-14 15:47:35 -07004495/**
 4496 * mem_cgroup_move_account - move the memcg accounting of a page
4497 * @page: the page
Li RongQing25843c22016-07-26 15:26:56 -07004498 * @compound: charge the page as compound or small page
Chen Gangb1b0dea2015-04-14 15:47:35 -07004499 * @from: mem_cgroup which the page is moved from.
4500 * @to: mem_cgroup which the page is moved to. @from != @to.
4501 *
Kirill A. Shutemov3ac808f2016-01-15 16:53:07 -08004502 * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful).
Chen Gangb1b0dea2015-04-14 15:47:35 -07004503 *
 4504 * This function doesn't do "charge" to the new cgroup and doesn't do
 4505 * "uncharge" from the old cgroup.
4506 */
4507static int mem_cgroup_move_account(struct page *page,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004508 bool compound,
Chen Gangb1b0dea2015-04-14 15:47:35 -07004509 struct mem_cgroup *from,
4510 struct mem_cgroup *to)
4511{
4512 unsigned long flags;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004513 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004514 int ret;
Greg Thelenc4843a72015-05-22 17:13:16 -04004515 bool anon;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004516
4517 VM_BUG_ON(from == to);
4518 VM_BUG_ON_PAGE(PageLRU(page), page);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004519 VM_BUG_ON(compound && !PageTransHuge(page));
Chen Gangb1b0dea2015-04-14 15:47:35 -07004520
4521 /*
Johannes Weiner6a93ca82016-03-15 14:57:19 -07004522 * Prevent mem_cgroup_migrate() from looking at
Hugh Dickins45637ba2015-11-05 18:49:40 -08004523 * page->mem_cgroup of its source page while we change it.
Chen Gangb1b0dea2015-04-14 15:47:35 -07004524 */
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004525 ret = -EBUSY;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004526 if (!trylock_page(page))
4527 goto out;
4528
4529 ret = -EINVAL;
4530 if (page->mem_cgroup != from)
4531 goto out_unlock;
4532
Greg Thelenc4843a72015-05-22 17:13:16 -04004533 anon = PageAnon(page);
4534
Chen Gangb1b0dea2015-04-14 15:47:35 -07004535 spin_lock_irqsave(&from->move_lock, flags);
4536
Greg Thelenc4843a72015-05-22 17:13:16 -04004537 if (!anon && page_mapped(page)) {
Johannes Weinerc9019e92018-01-31 16:16:37 -08004538 __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
4539 __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07004540 }
4541
Greg Thelenc4843a72015-05-22 17:13:16 -04004542 /*
4543 * move_lock grabbed above and caller set from->moving_account, so
Johannes Weinerccda7f42017-05-03 14:55:16 -07004544 * mod_memcg_page_state will serialize updates to PageDirty.
Greg Thelenc4843a72015-05-22 17:13:16 -04004545 * So mapping should be stable for dirty pages.
4546 */
4547 if (!anon && PageDirty(page)) {
4548 struct address_space *mapping = page_mapping(page);
4549
4550 if (mapping_cap_account_dirty(mapping)) {
Johannes Weinerc9019e92018-01-31 16:16:37 -08004551 __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
4552 __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
Greg Thelenc4843a72015-05-22 17:13:16 -04004553 }
4554 }
4555
Chen Gangb1b0dea2015-04-14 15:47:35 -07004556 if (PageWriteback(page)) {
Johannes Weinerc9019e92018-01-31 16:16:37 -08004557 __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
4558 __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07004559 }
4560
4561 /*
4562 * It is safe to change page->mem_cgroup here because the page
4563 * is referenced, charged, and isolated - we can't race with
4564 * uncharging, charging, migration, or LRU putback.
4565 */
4566
4567 /* caller should have done css_get */
4568 page->mem_cgroup = to;
4569 spin_unlock_irqrestore(&from->move_lock, flags);
4570
4571 ret = 0;
4572
4573 local_irq_disable();
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004574 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07004575 memcg_check_events(to, page);
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004576 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
Chen Gangb1b0dea2015-04-14 15:47:35 -07004577 memcg_check_events(from, page);
4578 local_irq_enable();
4579out_unlock:
4580 unlock_page(page);
4581out:
4582 return ret;
4583}
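
/*
 * Illustrative sketch only, not compiled kernel code: the calling
 * protocol documented above. It assumes mc.from and mc.to are set up
 * as in the move_charge path and that the caller already holds a
 * reference on the page; the helper name move_one_page() is
 * hypothetical. The page is taken off the LRU first, moved, and then
 * put back, since mem_cgroup_move_account() does not re-add it.
 *
 *	static int move_one_page(struct page *page)
 *	{
 *		int ret = -EBUSY;
 *
 *		if (isolate_lru_page(page))
 *			return ret;
 *		ret = mem_cgroup_move_account(page, PageTransHuge(page),
 *					      mc.from, mc.to);
 *		putback_lru_page(page);
 *		return ret;
 *	}
 */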
4584
Li RongQing7cf78062016-05-27 14:27:46 -07004585/**
4586 * get_mctgt_type - get target type of moving charge
 4587 * @vma: the vma the pte to be checked belongs to
4588 * @addr: the address corresponding to the pte to be checked
4589 * @ptent: the pte to be checked
 4590 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4591 *
4592 * Returns
4593 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4594 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 4595 *     move charge. If @target is not NULL, the page is stored in target->page
 4596 *     with an extra refcount taken (callers should handle it).
4597 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4598 * target for charge migration. if @target is not NULL, the entry is stored
4599 * in target->ent.
Jérôme Glissedf6ad692017-09-08 16:12:24 -07004600 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but page is MEMORY_DEVICE_PUBLIC
4601 * or MEMORY_DEVICE_PRIVATE (so ZONE_DEVICE page and thus not on the lru).
 4602 *     For now such a page is charged like a regular page would be, as for all
 4603 *     intents and purposes it is just special memory taking the place of a
 4604 *     regular page.
Jérôme Glissec733a822017-09-08 16:11:54 -07004605 *
 4606 * See Documentation/vm/hmm.txt and include/linux/hmm.h
Li RongQing7cf78062016-05-27 14:27:46 -07004607 *
4608 * Called with pte lock held.
4609 */
4610
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004611static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004612 unsigned long addr, pte_t ptent, union mc_target *target)
4613{
Daisuke Nishimura02491442010-03-10 15:22:17 -08004614 struct page *page = NULL;
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004615 enum mc_target_type ret = MC_TARGET_NONE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004616 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004617
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004618 if (pte_present(ptent))
4619 page = mc_handle_present_pte(vma, addr, ptent);
4620 else if (is_swap_pte(ptent))
Li RongQing48406ef2016-07-26 15:22:14 -07004621 page = mc_handle_swap_pte(vma, ptent, &ent);
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004622 else if (pte_none(ptent))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004623 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004624
4625 if (!page && !ent.val)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004626 return ret;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004627 if (page) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004628 /*
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004629		 * Do only a loose check without serialization.
Johannes Weiner1306a852014-12-10 15:44:52 -08004630		 * mem_cgroup_move_account() checks whether the page is valid
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004631		 * or not under LRU exclusion.
Daisuke Nishimura02491442010-03-10 15:22:17 -08004632 */
Johannes Weiner1306a852014-12-10 15:44:52 -08004633 if (page->mem_cgroup == mc.from) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004634 ret = MC_TARGET_PAGE;
Jérôme Glissedf6ad692017-09-08 16:12:24 -07004635 if (is_device_private_page(page) ||
4636 is_device_public_page(page))
Jérôme Glissec733a822017-09-08 16:11:54 -07004637 ret = MC_TARGET_DEVICE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004638 if (target)
4639 target->page = page;
4640 }
4641 if (!ret || !target)
4642 put_page(page);
4643 }
Huang Ying3e14a572017-09-06 16:22:37 -07004644 /*
4645 * There is a swap entry and a page doesn't exist or isn't charged.
4646 * But we cannot move a tail-page in a THP.
4647 */
4648 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
Li Zefan34c00c32013-09-23 16:56:01 +08004649 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07004650 ret = MC_TARGET_SWAP;
4651 if (target)
4652 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004653 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004654 return ret;
4655}
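
/*
 * Illustrative sketch only, not compiled kernel code: how a caller
 * walking ptes might consume the return value, mirroring
 * mem_cgroup_move_charge_pte_range() further below. The function name
 * handle_one_pte() is hypothetical; note that MC_TARGET_PAGE and
 * MC_TARGET_DEVICE hand back a page with an extra refcount that the
 * caller must drop.
 *
 *	static void handle_one_pte(struct vm_area_struct *vma,
 *				   unsigned long addr, pte_t ptent)
 *	{
 *		union mc_target target;
 *
 *		switch (get_mctgt_type(vma, addr, ptent, &target)) {
 *		case MC_TARGET_PAGE:
 *		case MC_TARGET_DEVICE:
 *			put_page(target.page);
 *			break;
 *		case MC_TARGET_SWAP:
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */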
4656
Naoya Horiguchi12724852012-03-21 16:34:28 -07004657#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4658/*
Huang Yingd6810d72017-09-06 16:22:45 -07004659 * We don't consider PMD mapped swapping or file mapped pages because THP does
4660 * not support them for now.
Naoya Horiguchi12724852012-03-21 16:34:28 -07004661 * Caller should make sure that pmd_trans_huge(pmd) is true.
4662 */
4663static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4664 unsigned long addr, pmd_t pmd, union mc_target *target)
4665{
4666 struct page *page = NULL;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004667 enum mc_target_type ret = MC_TARGET_NONE;
4668
Zi Yan84c3fc42017-09-08 16:11:01 -07004669 if (unlikely(is_swap_pmd(pmd))) {
4670 VM_BUG_ON(thp_migration_supported() &&
4671 !is_pmd_migration_entry(pmd));
4672 return ret;
4673 }
Naoya Horiguchi12724852012-03-21 16:34:28 -07004674 page = pmd_page(pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08004675 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004676 if (!(mc.flags & MOVE_ANON))
Naoya Horiguchi12724852012-03-21 16:34:28 -07004677 return ret;
Johannes Weiner1306a852014-12-10 15:44:52 -08004678 if (page->mem_cgroup == mc.from) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004679 ret = MC_TARGET_PAGE;
4680 if (target) {
4681 get_page(page);
4682 target->page = page;
4683 }
4684 }
4685 return ret;
4686}
4687#else
4688static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4689 unsigned long addr, pmd_t pmd, union mc_target *target)
4690{
4691 return MC_TARGET_NONE;
4692}
4693#endif
4694
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004695static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4696 unsigned long addr, unsigned long end,
4697 struct mm_walk *walk)
4698{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004699 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004700 pte_t *pte;
4701 spinlock_t *ptl;
4702
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08004703 ptl = pmd_trans_huge_lock(pmd, vma);
4704 if (ptl) {
Jérôme Glissec733a822017-09-08 16:11:54 -07004705 /*
 4706		 * Note there cannot be MC_TARGET_DEVICE here for now, as we do not
 4707		 * support transparent huge pages with MEMORY_DEVICE_PUBLIC or
 4708		 * MEMORY_DEVICE_PRIVATE, but this might change.
4709 */
Naoya Horiguchi12724852012-03-21 16:34:28 -07004710 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4711 mc.precharge += HPAGE_PMD_NR;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004712 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07004713 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004714 }
Dave Hansen03319322011-03-22 16:32:56 -07004715
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07004716 if (pmd_trans_unstable(pmd))
4717 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004718 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4719 for (; addr != end; pte++, addr += PAGE_SIZE)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004720 if (get_mctgt_type(vma, addr, *pte, NULL))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004721 mc.precharge++; /* increment precharge temporarily */
4722 pte_unmap_unlock(pte - 1, ptl);
4723 cond_resched();
4724
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004725 return 0;
4726}
4727
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004728static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4729{
4730 unsigned long precharge;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004731
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004732 struct mm_walk mem_cgroup_count_precharge_walk = {
4733 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4734 .mm = mm,
4735 };
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004736 down_read(&mm->mmap_sem);
James Morse0247f3f2016-10-07 17:00:12 -07004737 walk_page_range(0, mm->highest_vm_end,
4738 &mem_cgroup_count_precharge_walk);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004739 up_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004740
4741 precharge = mc.precharge;
4742 mc.precharge = 0;
4743
4744 return precharge;
4745}
4746
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004747static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4748{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004749 unsigned long precharge = mem_cgroup_count_precharge(mm);
4750
4751 VM_BUG_ON(mc.moving_task);
4752 mc.moving_task = current;
4753 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004754}
4755
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004756/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4757static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004758{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004759 struct mem_cgroup *from = mc.from;
4760 struct mem_cgroup *to = mc.to;
4761
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004762 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004763 if (mc.precharge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004764 cancel_charge(mc.to, mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004765 mc.precharge = 0;
4766 }
4767 /*
4768 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4769 * we must uncharge here.
4770 */
4771 if (mc.moved_charge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004772 cancel_charge(mc.from, mc.moved_charge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004773 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004774 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004775 /* we must fixup refcnts and charges */
4776 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004777 /* uncharge swap account from the old cgroup */
Johannes Weinerce00a962014-09-05 08:43:57 -04004778 if (!mem_cgroup_is_root(mc.from))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004779 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004780
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004781 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
4782
Johannes Weiner05b84302014-08-06 16:05:59 -07004783 /*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004784 * we charged both to->memory and to->memsw, so we
4785 * should uncharge to->memory.
Johannes Weiner05b84302014-08-06 16:05:59 -07004786 */
Johannes Weinerce00a962014-09-05 08:43:57 -04004787 if (!mem_cgroup_is_root(mc.to))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004788 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004789
Vladimir Davydov615d66c2016-08-11 15:33:03 -07004790 mem_cgroup_id_get_many(mc.to, mc.moved_swap);
4791 css_put_many(&mc.to->css, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004792
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004793 mc.moved_swap = 0;
4794 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004795 memcg_oom_recover(from);
4796 memcg_oom_recover(to);
4797 wake_up_all(&mc.waitq);
4798}
4799
4800static void mem_cgroup_clear_mc(void)
4801{
Tejun Heo264a0ae2016-04-21 19:09:02 -04004802 struct mm_struct *mm = mc.mm;
4803
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004804 /*
4805 * we must clear moving_task before waking up waiters at the end of
4806 * task migration.
4807 */
4808 mc.moving_task = NULL;
4809 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004810 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004811 mc.from = NULL;
4812 mc.to = NULL;
Tejun Heo264a0ae2016-04-21 19:09:02 -04004813 mc.mm = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004814 spin_unlock(&mc.lock);
Tejun Heo264a0ae2016-04-21 19:09:02 -04004815
4816 mmput(mm);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004817}
4818
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004819static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004820{
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004821 struct cgroup_subsys_state *css;
Ross Zwislereed67d72015-12-23 14:53:27 -07004822 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
Tejun Heo9f2115f2015-09-08 15:01:10 -07004823 struct mem_cgroup *from;
Tejun Heo4530edd2015-09-11 15:00:19 -04004824 struct task_struct *leader, *p;
Tejun Heo9f2115f2015-09-08 15:01:10 -07004825 struct mm_struct *mm;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004826 unsigned long move_flags;
Tejun Heo9f2115f2015-09-08 15:01:10 -07004827 int ret = 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004828
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004829 /* charge immigration isn't supported on the default hierarchy */
4830 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Tejun Heo9f2115f2015-09-08 15:01:10 -07004831 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004832
Tejun Heo4530edd2015-09-11 15:00:19 -04004833 /*
4834 * Multi-process migrations only happen on the default hierarchy
4835 * where charge immigration is not used. Perform charge
4836 * immigration if @tset contains a leader and whine if there are
4837 * multiple.
4838 */
4839 p = NULL;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004840 cgroup_taskset_for_each_leader(leader, css, tset) {
Tejun Heo4530edd2015-09-11 15:00:19 -04004841 WARN_ON_ONCE(p);
4842 p = leader;
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004843 memcg = mem_cgroup_from_css(css);
Tejun Heo4530edd2015-09-11 15:00:19 -04004844 }
4845 if (!p)
4846 return 0;
4847
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004848 /*
 4849	 * We are now committed to this value whatever it is. Changes in this
4850 * tunable will only affect upcoming migrations, not the current one.
4851 * So we need to save it, and keep it going.
4852 */
4853 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4854 if (!move_flags)
4855 return 0;
4856
Tejun Heo9f2115f2015-09-08 15:01:10 -07004857 from = mem_cgroup_from_task(p);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004858
Tejun Heo9f2115f2015-09-08 15:01:10 -07004859 VM_BUG_ON(from == memcg);
Johannes Weiner247b1442014-12-10 15:44:11 -08004860
Tejun Heo9f2115f2015-09-08 15:01:10 -07004861 mm = get_task_mm(p);
4862 if (!mm)
4863 return 0;
 4864	/* We move charges only when we move an owner of the mm */
4865 if (mm->owner == p) {
4866 VM_BUG_ON(mc.from);
4867 VM_BUG_ON(mc.to);
4868 VM_BUG_ON(mc.precharge);
4869 VM_BUG_ON(mc.moved_charge);
4870 VM_BUG_ON(mc.moved_swap);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004871
Tejun Heo9f2115f2015-09-08 15:01:10 -07004872 spin_lock(&mc.lock);
Tejun Heo264a0ae2016-04-21 19:09:02 -04004873 mc.mm = mm;
Tejun Heo9f2115f2015-09-08 15:01:10 -07004874 mc.from = from;
4875 mc.to = memcg;
4876 mc.flags = move_flags;
4877 spin_unlock(&mc.lock);
4878 /* We set mc.moving_task later */
4879
4880 ret = mem_cgroup_precharge_mc(mm);
4881 if (ret)
4882 mem_cgroup_clear_mc();
Tejun Heo264a0ae2016-04-21 19:09:02 -04004883 } else {
4884 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004885 }
4886 return ret;
4887}
4888
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05004889static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004890{
Johannes Weiner4e2f2452014-12-10 15:44:08 -08004891 if (mc.to)
4892 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004893}
4894
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004895static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4896 unsigned long addr, unsigned long end,
4897 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004898{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004899 int ret = 0;
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08004900 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004901 pte_t *pte;
4902 spinlock_t *ptl;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004903 enum mc_target_type target_type;
4904 union mc_target target;
4905 struct page *page;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004906
Kirill A. Shutemovb6ec57f2016-01-21 16:40:25 -08004907 ptl = pmd_trans_huge_lock(pmd, vma);
4908 if (ptl) {
Hugh Dickins62ade862012-05-18 11:28:34 -07004909 if (mc.precharge < HPAGE_PMD_NR) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004910 spin_unlock(ptl);
Naoya Horiguchi12724852012-03-21 16:34:28 -07004911 return 0;
4912 }
4913 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4914 if (target_type == MC_TARGET_PAGE) {
4915 page = target.page;
4916 if (!isolate_lru_page(page)) {
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004917 if (!mem_cgroup_move_account(page, true,
Johannes Weiner1306a852014-12-10 15:44:52 -08004918 mc.from, mc.to)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004919 mc.precharge -= HPAGE_PMD_NR;
4920 mc.moved_charge += HPAGE_PMD_NR;
4921 }
4922 putback_lru_page(page);
4923 }
4924 put_page(page);
Jérôme Glissec733a822017-09-08 16:11:54 -07004925 } else if (target_type == MC_TARGET_DEVICE) {
4926 page = target.page;
4927 if (!mem_cgroup_move_account(page, true,
4928 mc.from, mc.to)) {
4929 mc.precharge -= HPAGE_PMD_NR;
4930 mc.moved_charge += HPAGE_PMD_NR;
4931 }
4932 put_page(page);
Naoya Horiguchi12724852012-03-21 16:34:28 -07004933 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08004934 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07004935 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004936 }
4937
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07004938 if (pmd_trans_unstable(pmd))
4939 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004940retry:
4941 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4942 for (; addr != end; addr += PAGE_SIZE) {
4943 pte_t ptent = *(pte++);
Jérôme Glissec733a822017-09-08 16:11:54 -07004944 bool device = false;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004945 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004946
4947 if (!mc.precharge)
4948 break;
4949
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004950 switch (get_mctgt_type(vma, addr, ptent, &target)) {
Jérôme Glissec733a822017-09-08 16:11:54 -07004951 case MC_TARGET_DEVICE:
4952 device = true;
4953 /* fall through */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004954 case MC_TARGET_PAGE:
4955 page = target.page;
Kirill A. Shutemov53f92632016-01-15 16:53:42 -08004956 /*
4957 * We can have a part of the split pmd here. Moving it
 4957			 * can be done, but it would be too convoluted, so simply
 4958			 * ignore such a partial THP and keep it in the original
 4959			 * memcg. There should be somebody mapping the head.
4961 */
4962 if (PageTransCompound(page))
4963 goto put;
Jérôme Glissec733a822017-09-08 16:11:54 -07004964 if (!device && isolate_lru_page(page))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004965 goto put;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08004966 if (!mem_cgroup_move_account(page, false,
4967 mc.from, mc.to)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004968 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004969 /* we uncharge from mc.from later. */
4970 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004971 }
Jérôme Glissec733a822017-09-08 16:11:54 -07004972 if (!device)
4973 putback_lru_page(page);
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004974put: /* get_mctgt_type() gets the page */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004975 put_page(page);
4976 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004977 case MC_TARGET_SWAP:
4978 ent = target.ent;
Hugh Dickinse91cbb42012-05-29 15:06:51 -07004979 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004980 mc.precharge--;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004981 /* we fixup refcnts and charges later. */
4982 mc.moved_swap++;
4983 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08004984 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004985 default:
4986 break;
4987 }
4988 }
4989 pte_unmap_unlock(pte - 1, ptl);
4990 cond_resched();
4991
4992 if (addr != end) {
4993 /*
4994 * We have consumed all precharges we got in can_attach().
 4995		 * We try to charge one by one, but don't do any additional
 4996		 * charges to mc.to if we have failed to charge once in the attach()
 4997		 * phase.
4998 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004999 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005000 if (!ret)
5001 goto retry;
5002 }
5003
5004 return ret;
5005}
5006
Tejun Heo264a0ae2016-04-21 19:09:02 -04005007static void mem_cgroup_move_charge(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005008{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005009 struct mm_walk mem_cgroup_move_charge_walk = {
5010 .pmd_entry = mem_cgroup_move_charge_pte_range,
Tejun Heo264a0ae2016-04-21 19:09:02 -04005011 .mm = mc.mm,
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005012 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005013
5014 lru_add_drain_all();
Johannes Weiner312722c2014-12-10 15:44:25 -08005015 /*
Johannes Weiner81f8c3a2016-03-15 14:57:04 -07005016 * Signal lock_page_memcg() to take the memcg's move_lock
5017 * while we're moving its pages to another memcg. Then wait
5018 * for already started RCU-only updates to finish.
Johannes Weiner312722c2014-12-10 15:44:25 -08005019 */
5020 atomic_inc(&mc.from->moving_account);
5021 synchronize_rcu();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005022retry:
Tejun Heo264a0ae2016-04-21 19:09:02 -04005023 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005024 /*
 5025		 * Someone who is holding the mmap_sem might be waiting in
5026 * waitq. So we cancel all extra charges, wake up all waiters,
5027 * and retry. Because we cancel precharges, we might not be able
5028 * to move enough charges, but moving charge is a best-effort
5029 * feature anyway, so it wouldn't be a big problem.
5030 */
5031 __mem_cgroup_clear_mc();
5032 cond_resched();
5033 goto retry;
5034 }
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005035 /*
5036 * When we have consumed all precharges and failed in doing
5037 * additional charge, the page walk just aborts.
5038 */
James Morse0247f3f2016-10-07 17:00:12 -07005039 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
5040
Tejun Heo264a0ae2016-04-21 19:09:02 -04005041 up_read(&mc.mm->mmap_sem);
Johannes Weiner312722c2014-12-10 15:44:25 -08005042 atomic_dec(&mc.from->moving_account);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005043}
5044
Tejun Heo264a0ae2016-04-21 19:09:02 -04005045static void mem_cgroup_move_task(void)
Balbir Singh67e465a2008-02-07 00:13:54 -08005046{
Tejun Heo264a0ae2016-04-21 19:09:02 -04005047 if (mc.to) {
5048 mem_cgroup_move_charge();
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005049 mem_cgroup_clear_mc();
Tejun Heo264a0ae2016-04-21 19:09:02 -04005050 }
Balbir Singh67e465a2008-02-07 00:13:54 -08005051}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005052#else /* !CONFIG_MMU */
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005053static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005054{
5055 return 0;
5056}
Tejun Heo1f7dd3e52015-12-03 10:18:21 -05005057static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005058{
5059}
Tejun Heo264a0ae2016-04-21 19:09:02 -04005060static void mem_cgroup_move_task(void)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005061{
5062}
5063#endif
Balbir Singh67e465a2008-02-07 00:13:54 -08005064
Tejun Heof00baae2013-04-15 13:41:15 -07005065/*
5066 * Cgroup retains root cgroups across [un]mount cycles making it necessary
Tejun Heoaa6ec292014-07-09 10:08:08 -04005067 * to verify whether we're attached to the default hierarchy on each mount
5068 * attempt.
Tejun Heof00baae2013-04-15 13:41:15 -07005069 */
Tejun Heoeb954192013-08-08 20:11:23 -04005070static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
Tejun Heof00baae2013-04-15 13:41:15 -07005071{
5072 /*
Tejun Heoaa6ec292014-07-09 10:08:08 -04005073 * use_hierarchy is forced on the default hierarchy. cgroup core
Tejun Heof00baae2013-04-15 13:41:15 -07005074 * guarantees that @root doesn't have any children, so turning it
5075 * on for the root memcg is enough.
5076 */
Tejun Heo9e10a132015-09-18 11:56:28 -04005077 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Vladimir Davydov7feee5902015-03-12 16:26:19 -07005078 root_mem_cgroup->use_hierarchy = true;
5079 else
5080 root_mem_cgroup->use_hierarchy = false;
Tejun Heof00baae2013-04-15 13:41:15 -07005081}
5082
Johannes Weiner241994ed2015-02-11 15:26:06 -08005083static u64 memory_current_read(struct cgroup_subsys_state *css,
5084 struct cftype *cft)
5085{
Johannes Weinerf5fc3c5d2015-11-05 18:50:23 -08005086 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5087
5088 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005089}
5090
5091static int memory_low_show(struct seq_file *m, void *v)
5092{
5093 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005094 unsigned long low = READ_ONCE(memcg->low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005095
5096 if (low == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005097 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005098 else
5099 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5100
5101 return 0;
5102}
5103
5104static ssize_t memory_low_write(struct kernfs_open_file *of,
5105 char *buf, size_t nbytes, loff_t off)
5106{
5107 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5108 unsigned long low;
5109 int err;
5110
5111 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005112 err = page_counter_memparse(buf, "max", &low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005113 if (err)
5114 return err;
5115
5116 memcg->low = low;
5117
5118 return nbytes;
5119}
5120
5121static int memory_high_show(struct seq_file *m, void *v)
5122{
5123 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005124 unsigned long high = READ_ONCE(memcg->high);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005125
5126 if (high == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005127 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005128 else
5129 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5130
5131 return 0;
5132}
5133
5134static ssize_t memory_high_write(struct kernfs_open_file *of,
5135 char *buf, size_t nbytes, loff_t off)
5136{
5137 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner588083b2016-03-17 14:20:25 -07005138 unsigned long nr_pages;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005139 unsigned long high;
5140 int err;
5141
5142 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005143 err = page_counter_memparse(buf, "max", &high);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005144 if (err)
5145 return err;
5146
5147 memcg->high = high;
5148
Johannes Weiner588083b2016-03-17 14:20:25 -07005149 nr_pages = page_counter_read(&memcg->memory);
5150 if (nr_pages > high)
5151 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5152 GFP_KERNEL, true);
5153
Tejun Heo2529bb32015-05-22 18:23:34 -04005154 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005155 return nbytes;
5156}
5157
5158static int memory_max_show(struct seq_file *m, void *v)
5159{
5160 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005161 unsigned long max = READ_ONCE(memcg->memory.limit);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005162
5163 if (max == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005164 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005165 else
5166 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5167
5168 return 0;
5169}
5170
5171static ssize_t memory_max_write(struct kernfs_open_file *of,
5172 char *buf, size_t nbytes, loff_t off)
5173{
5174 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07005175 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5176 bool drained = false;
Johannes Weiner241994ed2015-02-11 15:26:06 -08005177 unsigned long max;
5178 int err;
5179
5180 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005181 err = page_counter_memparse(buf, "max", &max);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005182 if (err)
5183 return err;
5184
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07005185 xchg(&memcg->memory.limit, max);
5186
5187 for (;;) {
5188 unsigned long nr_pages = page_counter_read(&memcg->memory);
5189
5190 if (nr_pages <= max)
5191 break;
5192
5193 if (signal_pending(current)) {
5194 err = -EINTR;
5195 break;
5196 }
5197
5198 if (!drained) {
5199 drain_all_stock(memcg);
5200 drained = true;
5201 continue;
5202 }
5203
5204 if (nr_reclaims) {
5205 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5206 GFP_KERNEL, true))
5207 nr_reclaims--;
5208 continue;
5209 }
5210
Johannes Weiner31176c72017-05-03 14:55:07 -07005211 mem_cgroup_event(memcg, MEMCG_OOM);
Johannes Weinerb6e6edc2016-03-17 14:20:28 -07005212 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5213 break;
5214 }
Johannes Weiner241994ed2015-02-11 15:26:06 -08005215
Tejun Heo2529bb32015-05-22 18:23:34 -04005216 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005217 return nbytes;
5218}
5219
5220static int memory_events_show(struct seq_file *m, void *v)
5221{
5222 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5223
Johannes Weinerccda7f42017-05-03 14:55:16 -07005224 seq_printf(m, "low %lu\n", memcg_sum_events(memcg, MEMCG_LOW));
5225 seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
5226 seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
5227 seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));
Konstantin Khlebnikov8e675f72017-07-06 15:40:28 -07005228 seq_printf(m, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
Johannes Weiner241994ed2015-02-11 15:26:06 -08005229
5230 return 0;
5231}
5232
Johannes Weiner587d9f72016-01-20 15:03:19 -08005233static int memory_stat_show(struct seq_file *m, void *v)
5234{
5235 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005236 unsigned long stat[MEMCG_NR_STAT];
5237 unsigned long events[MEMCG_NR_EVENTS];
Johannes Weiner587d9f72016-01-20 15:03:19 -08005238 int i;
5239
5240 /*
5241 * Provide statistics on the state of the memory subsystem as
5242 * well as cumulative event counters that show past behavior.
5243 *
5244 * This list is ordered following a combination of these gradients:
5245 * 1) generic big picture -> specifics and details
5246 * 2) reflecting userspace activity -> reflecting kernel heuristics
5247 *
5248 * Current memory state:
5249 */
5250
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005251 tree_stat(memcg, stat);
5252 tree_events(memcg, events);
5253
Johannes Weiner587d9f72016-01-20 15:03:19 -08005254 seq_printf(m, "anon %llu\n",
Johannes Weiner71cd3112017-05-03 14:55:13 -07005255 (u64)stat[MEMCG_RSS] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005256 seq_printf(m, "file %llu\n",
Johannes Weiner71cd3112017-05-03 14:55:13 -07005257 (u64)stat[MEMCG_CACHE] * PAGE_SIZE);
Vladimir Davydov12580e42016-03-17 14:17:38 -07005258 seq_printf(m, "kernel_stack %llu\n",
Andy Lutomirskiefdc9492016-07-28 15:48:17 -07005259 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
Vladimir Davydov27ee57c2016-03-17 14:17:35 -07005260 seq_printf(m, "slab %llu\n",
Johannes Weiner32049292017-07-06 15:40:46 -07005261 (u64)(stat[NR_SLAB_RECLAIMABLE] +
5262 stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
Johannes Weinerb2807f02016-01-20 15:03:22 -08005263 seq_printf(m, "sock %llu\n",
Vladimir Davydov72b54e72016-03-17 14:17:32 -07005264 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005265
Johannes Weiner9a4caf12017-05-03 14:52:45 -07005266 seq_printf(m, "shmem %llu\n",
Johannes Weiner71cd3112017-05-03 14:55:13 -07005267 (u64)stat[NR_SHMEM] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005268 seq_printf(m, "file_mapped %llu\n",
Johannes Weiner71cd3112017-05-03 14:55:13 -07005269 (u64)stat[NR_FILE_MAPPED] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005270 seq_printf(m, "file_dirty %llu\n",
Johannes Weiner71cd3112017-05-03 14:55:13 -07005271 (u64)stat[NR_FILE_DIRTY] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005272 seq_printf(m, "file_writeback %llu\n",
Johannes Weiner71cd3112017-05-03 14:55:13 -07005273 (u64)stat[NR_WRITEBACK] * PAGE_SIZE);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005274
5275 for (i = 0; i < NR_LRU_LISTS; i++) {
5276 struct mem_cgroup *mi;
5277 unsigned long val = 0;
5278
5279 for_each_mem_cgroup_tree(mi, memcg)
5280 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5281 seq_printf(m, "%s %llu\n",
5282 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5283 }
5284
Vladimir Davydov27ee57c2016-03-17 14:17:35 -07005285 seq_printf(m, "slab_reclaimable %llu\n",
Johannes Weiner32049292017-07-06 15:40:46 -07005286 (u64)stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
Vladimir Davydov27ee57c2016-03-17 14:17:35 -07005287 seq_printf(m, "slab_unreclaimable %llu\n",
Johannes Weiner32049292017-07-06 15:40:46 -07005288 (u64)stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
Vladimir Davydov27ee57c2016-03-17 14:17:35 -07005289
Johannes Weiner587d9f72016-01-20 15:03:19 -08005290 /* Accumulated memory events */
5291
Johannes Weinerdf0e53d2017-05-03 14:55:10 -07005292 seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
5293 seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
Johannes Weiner587d9f72016-01-20 15:03:19 -08005294
Roman Gushchin22621852017-07-06 15:40:25 -07005295 seq_printf(m, "pgrefill %lu\n", events[PGREFILL]);
5296 seq_printf(m, "pgscan %lu\n", events[PGSCAN_KSWAPD] +
5297 events[PGSCAN_DIRECT]);
5298 seq_printf(m, "pgsteal %lu\n", events[PGSTEAL_KSWAPD] +
5299 events[PGSTEAL_DIRECT]);
5300 seq_printf(m, "pgactivate %lu\n", events[PGACTIVATE]);
5301 seq_printf(m, "pgdeactivate %lu\n", events[PGDEACTIVATE]);
5302 seq_printf(m, "pglazyfree %lu\n", events[PGLAZYFREE]);
5303 seq_printf(m, "pglazyfreed %lu\n", events[PGLAZYFREED]);
5304
Johannes Weiner2a2e4882017-05-03 14:55:03 -07005305 seq_printf(m, "workingset_refault %lu\n",
Johannes Weiner71cd3112017-05-03 14:55:13 -07005306 stat[WORKINGSET_REFAULT]);
Johannes Weiner2a2e4882017-05-03 14:55:03 -07005307 seq_printf(m, "workingset_activate %lu\n",
Johannes Weiner71cd3112017-05-03 14:55:13 -07005308 stat[WORKINGSET_ACTIVATE]);
Johannes Weiner2a2e4882017-05-03 14:55:03 -07005309 seq_printf(m, "workingset_nodereclaim %lu\n",
Johannes Weiner71cd3112017-05-03 14:55:13 -07005310 stat[WORKINGSET_NODERECLAIM]);
Johannes Weiner2a2e4882017-05-03 14:55:03 -07005311
Johannes Weiner587d9f72016-01-20 15:03:19 -08005312 return 0;
5313}
5314
Johannes Weiner241994ed2015-02-11 15:26:06 -08005315static struct cftype memory_files[] = {
5316 {
5317 .name = "current",
Johannes Weinerf5fc3c5d2015-11-05 18:50:23 -08005318 .flags = CFTYPE_NOT_ON_ROOT,
Johannes Weiner241994ed2015-02-11 15:26:06 -08005319 .read_u64 = memory_current_read,
5320 },
5321 {
5322 .name = "low",
5323 .flags = CFTYPE_NOT_ON_ROOT,
5324 .seq_show = memory_low_show,
5325 .write = memory_low_write,
5326 },
5327 {
5328 .name = "high",
5329 .flags = CFTYPE_NOT_ON_ROOT,
5330 .seq_show = memory_high_show,
5331 .write = memory_high_write,
5332 },
5333 {
5334 .name = "max",
5335 .flags = CFTYPE_NOT_ON_ROOT,
5336 .seq_show = memory_max_show,
5337 .write = memory_max_write,
5338 },
5339 {
5340 .name = "events",
5341 .flags = CFTYPE_NOT_ON_ROOT,
Tejun Heo472912a2015-09-18 18:01:59 -04005342 .file_offset = offsetof(struct mem_cgroup, events_file),
Johannes Weiner241994ed2015-02-11 15:26:06 -08005343 .seq_show = memory_events_show,
5344 },
Johannes Weiner587d9f72016-01-20 15:03:19 -08005345 {
5346 .name = "stat",
5347 .flags = CFTYPE_NOT_ON_ROOT,
5348 .seq_show = memory_stat_show,
5349 },
Johannes Weiner241994ed2015-02-11 15:26:06 -08005350 { } /* terminate */
5351};
5352
Tejun Heo073219e2014-02-08 10:36:58 -05005353struct cgroup_subsys memory_cgrp_subsys = {
Tejun Heo92fb9742012-11-19 08:13:38 -08005354 .css_alloc = mem_cgroup_css_alloc,
Glauber Costad142e3e2013-02-22 16:34:52 -08005355 .css_online = mem_cgroup_css_online,
Tejun Heo92fb9742012-11-19 08:13:38 -08005356 .css_offline = mem_cgroup_css_offline,
Vladimir Davydov6df38682015-12-29 14:54:10 -08005357 .css_released = mem_cgroup_css_released,
Tejun Heo92fb9742012-11-19 08:13:38 -08005358 .css_free = mem_cgroup_css_free,
Tejun Heo1ced9532014-07-08 18:02:57 -04005359 .css_reset = mem_cgroup_css_reset,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005360 .can_attach = mem_cgroup_can_attach,
5361 .cancel_attach = mem_cgroup_cancel_attach,
Tejun Heo264a0ae2016-04-21 19:09:02 -04005362 .post_attach = mem_cgroup_move_task,
Tejun Heof00baae2013-04-15 13:41:15 -07005363 .bind = mem_cgroup_bind,
Johannes Weiner241994ed2015-02-11 15:26:06 -08005364 .dfl_cftypes = memory_files,
5365 .legacy_cftypes = mem_cgroup_legacy_files,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005366 .early_init = 0,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005367};
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005368
Johannes Weiner241994ed2015-02-11 15:26:06 -08005369/**
Johannes Weiner241994ed2015-02-11 15:26:06 -08005370 * mem_cgroup_low - check if memory consumption is below the normal range
Sean Christopherson34c81052017-07-10 15:48:05 -07005371 * @root: the top ancestor of the sub-tree being checked
Johannes Weiner241994ed2015-02-11 15:26:06 -08005372 * @memcg: the memory cgroup to check
5373 *
5374 * Returns %true if memory consumption of @memcg, and that of all
Sean Christopherson34c81052017-07-10 15:48:05 -07005375 * ancestors up to (but not including) @root, is below the normal range.
5376 *
5377 * @root is exclusive; it is never low when looked at directly and isn't
5378 * checked when traversing the hierarchy.
5379 *
5380 * Excluding @root enables using memory.low to prioritize memory usage
5381 * between cgroups within a subtree of the hierarchy that is limited by
5382 * memory.high or memory.max.
5383 *
5384 * For example, given cgroup A with children B and C:
5385 *
5386 * A
5387 * / \
5388 * B C
5389 *
5390 * and
5391 *
5392 * 1. A/memory.current > A/memory.high
5393 * 2. A/B/memory.current < A/B/memory.low
5394 * 3. A/C/memory.current >= A/C/memory.low
5395 *
5396 * As 'A' is high, i.e. triggers reclaim from 'A', and 'B' is low, we
5397 * should reclaim from 'C' until 'A' is no longer high or until we can
5398 * no longer reclaim from 'C'. If 'A', i.e. @root, isn't excluded by
 5399 * mem_cgroup_low when reclaiming from 'A', then 'B' won't be considered
5400 * low and we will reclaim indiscriminately from both 'B' and 'C'.
Johannes Weiner241994ed2015-02-11 15:26:06 -08005401 */
5402bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5403{
5404 if (mem_cgroup_disabled())
5405 return false;
5406
Sean Christopherson34c81052017-07-10 15:48:05 -07005407 if (!root)
5408 root = root_mem_cgroup;
5409 if (memcg == root)
Johannes Weiner241994ed2015-02-11 15:26:06 -08005410 return false;
5411
Sean Christopherson34c81052017-07-10 15:48:05 -07005412 for (; memcg != root; memcg = parent_mem_cgroup(memcg)) {
Michal Hocko4e54ded2015-02-27 15:51:46 -08005413 if (page_counter_read(&memcg->memory) >= memcg->low)
Johannes Weiner241994ed2015-02-11 15:26:06 -08005414 return false;
5415 }
Sean Christopherson34c81052017-07-10 15:48:05 -07005416
Johannes Weiner241994ed2015-02-11 15:26:06 -08005417 return true;
5418}
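
/*
 * Illustrative sketch only, not compiled kernel code: how a reclaim
 * loop can use this predicate to honor memory.low, loosely modeled on
 * mm/vmscan.c. The names memcg_low_reclaim and shrink_this_memcg() are
 * assumptions of this sketch, not things defined in this file: the
 * former stands in for a caller-side flag saying low protection may be
 * overridden, the latter for whatever actually reclaims the cgroup.
 * A protected cgroup is skipped unless the override is set, in which
 * case the MEMCG_LOW event records that its protection was breached.
 *
 *	for_each_mem_cgroup_tree(memcg, root) {
 *		if (mem_cgroup_low(root, memcg)) {
 *			if (!memcg_low_reclaim)
 *				continue;
 *			mem_cgroup_event(memcg, MEMCG_LOW);
 *		}
 *		shrink_this_memcg(memcg);
 *	}
 */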
5419
Johannes Weiner00501b52014-08-08 14:19:20 -07005420/**
5421 * mem_cgroup_try_charge - try charging a page
5422 * @page: page to charge
5423 * @mm: mm context of the victim
5424 * @gfp_mask: reclaim mode
5425 * @memcgp: charged memcg return
Li RongQing25843c22016-07-26 15:26:56 -07005426 * @compound: charge the page as compound or small page
Johannes Weiner00501b52014-08-08 14:19:20 -07005427 *
5428 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5429 * pages according to @gfp_mask if necessary.
5430 *
5431 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5432 * Otherwise, an error code is returned.
5433 *
5434 * After page->mapping has been set up, the caller must finalize the
5435 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5436 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5437 */
5438int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005439 gfp_t gfp_mask, struct mem_cgroup **memcgp,
5440 bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07005441{
5442 struct mem_cgroup *memcg = NULL;
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005443 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07005444 int ret = 0;
5445
5446 if (mem_cgroup_disabled())
5447 goto out;
5448
5449 if (PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005450 /*
5451 * Every swap fault against a single page tries to charge the
5452 * page, bail as early as possible. shmem_unuse() encounters
5453 * already charged pages, too. The USED bit is protected by
5454 * the page lock, which serializes swap cache removal, which
5455 * in turn serializes uncharging.
5456 */
Vladimir Davydove993d902015-09-09 15:35:35 -07005457 VM_BUG_ON_PAGE(!PageLocked(page), page);
Huang Yingabe28952017-09-06 16:22:41 -07005458 if (compound_head(page)->mem_cgroup)
Johannes Weiner00501b52014-08-08 14:19:20 -07005459 goto out;
Vladimir Davydove993d902015-09-09 15:35:35 -07005460
Vladimir Davydov37e84352016-01-20 15:02:56 -08005461 if (do_swap_account) {
Vladimir Davydove993d902015-09-09 15:35:35 -07005462 swp_entry_t ent = { .val = page_private(page), };
5463 unsigned short id = lookup_swap_cgroup_id(ent);
5464
5465 rcu_read_lock();
5466 memcg = mem_cgroup_from_id(id);
5467 if (memcg && !css_tryget_online(&memcg->css))
5468 memcg = NULL;
5469 rcu_read_unlock();
5470 }
Johannes Weiner00501b52014-08-08 14:19:20 -07005471 }
5472
Johannes Weiner00501b52014-08-08 14:19:20 -07005473 if (!memcg)
5474 memcg = get_mem_cgroup_from_mm(mm);
5475
5476 ret = try_charge(memcg, gfp_mask, nr_pages);
5477
5478 css_put(&memcg->css);
Johannes Weiner00501b52014-08-08 14:19:20 -07005479out:
5480 *memcgp = memcg;
5481 return ret;
5482}
5483
5484/**
5485 * mem_cgroup_commit_charge - commit a page charge
5486 * @page: page to charge
5487 * @memcg: memcg to charge the page to
5488 * @lrucare: page might be on LRU already
Li RongQing25843c22016-07-26 15:26:56 -07005489 * @compound: charge the page as compound or small page
Johannes Weiner00501b52014-08-08 14:19:20 -07005490 *
5491 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5492 * after page->mapping has been set up. This must happen atomically
5493 * as part of the page instantiation, i.e. under the page table lock
5494 * for anonymous pages, under the page lock for page and swap cache.
5495 *
5496 * In addition, the page must not be on the LRU during the commit, to
5497 * prevent racing with task migration. If it might be, use @lrucare.
5498 *
5499 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5500 */
5501void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005502 bool lrucare, bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07005503{
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005504 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07005505
5506 VM_BUG_ON_PAGE(!page->mapping, page);
5507 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5508
5509 if (mem_cgroup_disabled())
5510 return;
5511 /*
5512 * Swap faults will attempt to charge the same page multiple
5513 * times. But reuse_swap_page() might have removed the page
5514 * from swapcache already, so we can't check PageSwapCache().
5515 */
5516 if (!memcg)
5517 return;
5518
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005519 commit_charge(page, memcg, lrucare);
5520
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005521 local_irq_disable();
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005522 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005523 memcg_check_events(memcg, page);
5524 local_irq_enable();
Johannes Weiner00501b52014-08-08 14:19:20 -07005525
Johannes Weiner7941d212016-01-14 15:21:23 -08005526 if (do_memsw_account() && PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005527 swp_entry_t entry = { .val = page_private(page) };
5528 /*
5529 * The swap entry might not get freed for a long time,
5530 * let's not wait for it. The page already received a
5531 * memory+swap charge, drop the swap entry duplicate.
5532 */
Huang Ying38d8b4e2017-07-06 15:37:18 -07005533 mem_cgroup_uncharge_swap(entry, nr_pages);
Johannes Weiner00501b52014-08-08 14:19:20 -07005534 }
5535}
5536
5537/**
5538 * mem_cgroup_cancel_charge - cancel a page charge
5539 * @page: page to charge
5540 * @memcg: memcg to charge the page to
Li RongQing25843c22016-07-26 15:26:56 -07005541 * @compound: charge the page as compound or small page
Johannes Weiner00501b52014-08-08 14:19:20 -07005542 *
5543 * Cancel a charge transaction started by mem_cgroup_try_charge().
5544 */
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005545void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5546 bool compound)
Johannes Weiner00501b52014-08-08 14:19:20 -07005547{
Kirill A. Shutemovf627c2f2016-01-15 16:52:20 -08005548 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
Johannes Weiner00501b52014-08-08 14:19:20 -07005549
5550 if (mem_cgroup_disabled())
5551 return;
5552 /*
5553 * Swap faults will attempt to charge the same page multiple
5554 * times. But reuse_swap_page() might have removed the page
5555 * from swapcache already, so we can't check PageSwapCache().
5556 */
5557 if (!memcg)
5558 return;
5559
Johannes Weiner00501b52014-08-08 14:19:20 -07005560 cancel_charge(memcg, nr_pages);
5561}
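
/*
 * Illustrative sketch only, not compiled kernel code: the charge
 * transaction described in the comments above, as a hypothetical
 * page-instantiation path might issue it. add_to_mapping() stands in
 * for whatever sets up page->mapping and is not a real function; the
 * false arguments mean a small (non-compound) page that is not yet on
 * the LRU.
 *
 *	struct mem_cgroup *memcg;
 *	int err;
 *
 *	err = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false);
 *	if (err)
 *		return err;
 *
 *	err = add_to_mapping(page);
 *	if (err) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return err;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */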
5562
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005563struct uncharge_gather {
5564 struct mem_cgroup *memcg;
5565 unsigned long pgpgout;
5566 unsigned long nr_anon;
5567 unsigned long nr_file;
5568 unsigned long nr_kmem;
5569 unsigned long nr_huge;
5570 unsigned long nr_shmem;
5571 struct page *dummy_page;
5572};
5573
5574static inline void uncharge_gather_clear(struct uncharge_gather *ug)
Johannes Weiner747db952014-08-08 14:19:24 -07005575{
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005576 memset(ug, 0, sizeof(*ug));
5577}
5578
5579static void uncharge_batch(const struct uncharge_gather *ug)
5580{
5581 unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
Johannes Weiner747db952014-08-08 14:19:24 -07005582 unsigned long flags;
5583
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005584 if (!mem_cgroup_is_root(ug->memcg)) {
5585 page_counter_uncharge(&ug->memcg->memory, nr_pages);
Johannes Weiner7941d212016-01-14 15:21:23 -08005586 if (do_memsw_account())
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005587 page_counter_uncharge(&ug->memcg->memsw, nr_pages);
5588 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
5589 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
5590 memcg_oom_recover(ug->memcg);
Johannes Weinerce00a962014-09-05 08:43:57 -04005591 }
Johannes Weiner747db952014-08-08 14:19:24 -07005592
5593 local_irq_save(flags);
Johannes Weinerc9019e92018-01-31 16:16:37 -08005594 __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
5595 __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
5596 __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
5597 __mod_memcg_state(ug->memcg, NR_SHMEM, -ug->nr_shmem);
5598 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
Johannes Weinera983b5e2018-01-31 16:16:45 -08005599 __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005600 memcg_check_events(ug->memcg, ug->dummy_page);
Johannes Weiner747db952014-08-08 14:19:24 -07005601 local_irq_restore(flags);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08005602
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005603 if (!mem_cgroup_is_root(ug->memcg))
5604 css_put_many(&ug->memcg->css, nr_pages);
5605}
5606
5607static void uncharge_page(struct page *page, struct uncharge_gather *ug)
5608{
5609 VM_BUG_ON_PAGE(PageLRU(page), page);
Jérôme Glisse3f2eb022017-10-03 16:14:57 -07005610 VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) &&
5611 !PageHWPoison(page) , page);
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005612
5613 if (!page->mem_cgroup)
5614 return;
5615
5616 /*
5617 * Nobody should be changing or seriously looking at
 5618	 * page->mem_cgroup at this point; we have fully
5619 * exclusive access to the page.
5620 */
5621
5622 if (ug->memcg != page->mem_cgroup) {
5623 if (ug->memcg) {
5624 uncharge_batch(ug);
5625 uncharge_gather_clear(ug);
5626 }
5627 ug->memcg = page->mem_cgroup;
5628 }
5629
5630 if (!PageKmemcg(page)) {
5631 unsigned int nr_pages = 1;
5632
5633 if (PageTransHuge(page)) {
5634 nr_pages <<= compound_order(page);
5635 ug->nr_huge += nr_pages;
5636 }
5637 if (PageAnon(page))
5638 ug->nr_anon += nr_pages;
5639 else {
5640 ug->nr_file += nr_pages;
5641 if (PageSwapBacked(page))
5642 ug->nr_shmem += nr_pages;
5643 }
5644 ug->pgpgout++;
5645 } else {
5646 ug->nr_kmem += 1 << compound_order(page);
5647 __ClearPageKmemcg(page);
5648 }
5649
5650 ug->dummy_page = page;
5651 page->mem_cgroup = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005652}
5653
5654static void uncharge_list(struct list_head *page_list)
5655{
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005656 struct uncharge_gather ug;
Johannes Weiner747db952014-08-08 14:19:24 -07005657 struct list_head *next;
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005658
5659 uncharge_gather_clear(&ug);
Johannes Weiner747db952014-08-08 14:19:24 -07005660
Johannes Weiner8b592652016-03-17 14:20:31 -07005661 /*
5662 * Note that the list can be a single page->lru; hence the
5663 * do-while loop instead of a simple list_for_each_entry().
5664 */
Johannes Weiner747db952014-08-08 14:19:24 -07005665 next = page_list->next;
5666 do {
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005667 struct page *page;
5668
Johannes Weiner747db952014-08-08 14:19:24 -07005669 page = list_entry(next, struct page, lru);
5670 next = page->lru.next;
5671
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005672 uncharge_page(page, &ug);
Johannes Weiner747db952014-08-08 14:19:24 -07005673 } while (next != page_list);
5674
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005675 if (ug.memcg)
5676 uncharge_batch(&ug);
Johannes Weiner747db952014-08-08 14:19:24 -07005677}
5678
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005679/**
5680 * mem_cgroup_uncharge - uncharge a page
5681 * @page: page to uncharge
5682 *
5683 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5684 * mem_cgroup_commit_charge().
5685 */
5686void mem_cgroup_uncharge(struct page *page)
5687{
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005688 struct uncharge_gather ug;
5689
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005690 if (mem_cgroup_disabled())
5691 return;
5692
Johannes Weiner747db952014-08-08 14:19:24 -07005693 /* Don't touch page->lru of any random page, pre-check: */
Johannes Weiner1306a852014-12-10 15:44:52 -08005694 if (!page->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005695 return;
5696
Jérôme Glissea9d5ade2017-09-08 16:11:50 -07005697 uncharge_gather_clear(&ug);
5698 uncharge_page(page, &ug);
5699 uncharge_batch(&ug);
Johannes Weiner747db952014-08-08 14:19:24 -07005700}
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005701
Johannes Weiner747db952014-08-08 14:19:24 -07005702/**
5703 * mem_cgroup_uncharge_list - uncharge a list of pages
5704 * @page_list: list of pages to uncharge
5705 *
5706 * Uncharge a list of pages previously charged with
5707 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
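 *
 * A typical caller (for instance release_pages()) gathers pages that
 * are about to be freed on a private list and uncharges them in one
 * batch, which amortizes the page counter and statistics updates.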
5708 */
5709void mem_cgroup_uncharge_list(struct list_head *page_list)
5710{
5711 if (mem_cgroup_disabled())
5712 return;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005713
Johannes Weiner747db952014-08-08 14:19:24 -07005714 if (!list_empty(page_list))
5715 uncharge_list(page_list);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005716}
5717
5718/**
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005719 * mem_cgroup_migrate - charge a page's replacement
5720 * @oldpage: currently circulating page
5721 * @newpage: replacement page
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005722 *
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005723 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5724 * be uncharged upon free.
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005725 *
5726 * Both pages must be locked, @newpage->mapping must be set up.
5727 */
Johannes Weiner6a93ca82016-03-15 14:57:19 -07005728void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005729{
Johannes Weiner29833312014-12-10 15:44:02 -08005730 struct mem_cgroup *memcg;
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005731 unsigned int nr_pages;
5732 bool compound;
Tejun Heod93c4132016-06-24 14:49:54 -07005733 unsigned long flags;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005734
5735 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5736 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005737 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005738 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5739 newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005740
5741 if (mem_cgroup_disabled())
5742 return;
5743
5744 /* Page cache replacement: new page already charged? */
Johannes Weiner1306a852014-12-10 15:44:52 -08005745 if (newpage->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005746 return;
5747
Hugh Dickins45637ba2015-11-05 18:49:40 -08005748 /* Swapcache readahead pages can get replaced before being charged */
Johannes Weiner1306a852014-12-10 15:44:52 -08005749 memcg = oldpage->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08005750 if (!memcg)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005751 return;
5752
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005753 /* Force-charge the new page. The old one will be freed soon */
5754 compound = PageTransHuge(newpage);
5755 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5756
5757 page_counter_charge(&memcg->memory, nr_pages);
5758 if (do_memsw_account())
5759 page_counter_charge(&memcg->memsw, nr_pages);
5760 css_get_many(&memcg->css, nr_pages);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005761
Johannes Weiner9cf76662016-03-15 14:57:58 -07005762 commit_charge(newpage, memcg, false);
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005763
Tejun Heod93c4132016-06-24 14:49:54 -07005764 local_irq_save(flags);
Johannes Weiner44b7a8d2016-01-20 15:03:16 -08005765 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5766 memcg_check_events(memcg, newpage);
Tejun Heod93c4132016-06-24 14:49:54 -07005767 local_irq_restore(flags);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005768}
5769
Johannes Weineref129472016-01-14 15:21:34 -08005770DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
Johannes Weiner11092082016-01-14 15:21:26 -08005771EXPORT_SYMBOL(memcg_sockets_enabled_key);
5772
Johannes Weiner2d758072016-10-07 17:00:58 -07005773void mem_cgroup_sk_alloc(struct sock *sk)
Johannes Weiner11092082016-01-14 15:21:26 -08005774{
5775 struct mem_cgroup *memcg;
5776
Johannes Weiner2d758072016-10-07 17:00:58 -07005777 if (!mem_cgroup_sockets_enabled)
5778 return;
5779
Johannes Weiner11092082016-01-14 15:21:26 -08005780 rcu_read_lock();
5781 memcg = mem_cgroup_from_task(current);
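	/*
	 * Sockets of root-cgroup tasks are left untagged: there is
	 * nothing to account against the root cgroup, so skipping the
	 * css reference keeps the common case cheap.
	 */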
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005782 if (memcg == root_mem_cgroup)
5783 goto out;
Johannes Weiner0db15292016-01-20 15:02:50 -08005784 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005785 goto out;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005786 if (css_tryget_online(&memcg->css))
Johannes Weiner11092082016-01-14 15:21:26 -08005787 sk->sk_memcg = memcg;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005788out:
Johannes Weiner11092082016-01-14 15:21:26 -08005789 rcu_read_unlock();
5790}
Johannes Weiner11092082016-01-14 15:21:26 -08005791
Johannes Weiner2d758072016-10-07 17:00:58 -07005792void mem_cgroup_sk_free(struct sock *sk)
Johannes Weiner11092082016-01-14 15:21:26 -08005793{
Johannes Weiner2d758072016-10-07 17:00:58 -07005794 if (sk->sk_memcg)
5795 css_put(&sk->sk_memcg->css);
Johannes Weiner11092082016-01-14 15:21:26 -08005796}
5797
5798/**
5799 * mem_cgroup_charge_skmem - charge socket memory
5800 * @memcg: memcg to charge
5801 * @nr_pages: number of pages to charge
5802 *
5803 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5804 * @memcg's configured limit, %false if the charge had to be forced.
5805 */
5806bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5807{
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005808 gfp_t gfp_mask = GFP_KERNEL;
Johannes Weiner11092082016-01-14 15:21:26 -08005809
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005810 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Johannes Weiner0db15292016-01-20 15:02:50 -08005811 struct page_counter *fail;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005812
Johannes Weiner0db15292016-01-20 15:02:50 -08005813 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5814 memcg->tcpmem_pressure = 0;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005815 return true;
5816 }
Johannes Weiner0db15292016-01-20 15:02:50 -08005817 page_counter_charge(&memcg->tcpmem, nr_pages);
5818 memcg->tcpmem_pressure = 1;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005819 return false;
Johannes Weiner11092082016-01-14 15:21:26 -08005820 }
Johannes Weinerd886f4e2016-01-20 15:02:47 -08005821
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005822 /* Don't block in the packet receive path */
5823 if (in_softirq())
5824 gfp_mask = GFP_NOWAIT;
5825
Johannes Weinerc9019e92018-01-31 16:16:37 -08005826 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
Johannes Weinerb2807f02016-01-20 15:03:22 -08005827
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005828 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5829 return true;
5830
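	/*
	 * The charge does not fit within the limit: force it with
	 * __GFP_NOFAIL anyway and return %false so that the socket
	 * layer can enter its memory pressure handling.
	 */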
5831 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
Johannes Weiner11092082016-01-14 15:21:26 -08005832 return false;
5833}
5834
5835/**
5836 * mem_cgroup_uncharge_skmem - uncharge socket memory
5837 * @memcg: memcg to uncharge
5838 * @nr_pages: number of pages to uncharge
5839 */
5840void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5841{
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005842 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
Johannes Weiner0db15292016-01-20 15:02:50 -08005843 page_counter_uncharge(&memcg->tcpmem, nr_pages);
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005844 return;
5845 }
Johannes Weinerd886f4e2016-01-20 15:02:47 -08005846
Johannes Weinerc9019e92018-01-31 16:16:37 -08005847 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
Johannes Weinerb2807f02016-01-20 15:03:22 -08005848
Roman Gushchin475d0482017-09-08 16:13:09 -07005849 refill_stock(memcg, nr_pages);
Johannes Weiner11092082016-01-14 15:21:26 -08005850}
5851
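/*
 * Parse the "cgroup.memory=" kernel command line option. For example,
 * booting with cgroup.memory=nosocket,nokmem disables socket memory
 * accounting and kernel memory accounting respectively.
 */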
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005852static int __init cgroup_memory(char *s)
5853{
5854 char *token;
5855
5856 while ((token = strsep(&s, ",")) != NULL) {
5857 if (!*token)
5858 continue;
5859 if (!strcmp(token, "nosocket"))
5860 cgroup_memory_nosocket = true;
Vladimir Davydov04823c82016-01-20 15:02:38 -08005861 if (!strcmp(token, "nokmem"))
5862 cgroup_memory_nokmem = true;
Johannes Weinerf7e1cb62016-01-14 15:21:29 -08005863 }
5864 return 0;
5865}
5866__setup("cgroup.memory=", cgroup_memory);
Johannes Weiner11092082016-01-14 15:21:26 -08005867
Michal Hocko2d110852013-02-22 16:34:43 -08005868/*
Michal Hocko10813122013-02-22 16:35:41 -08005869 * subsys_initcall() for memory controller.
5870 *
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01005871 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
5872 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5873 * basically everything that doesn't depend on a specific mem_cgroup structure
5874 * should be initialized from here.
Michal Hocko2d110852013-02-22 16:34:43 -08005875 */
5876static int __init mem_cgroup_init(void)
5877{
Johannes Weiner95a045f2015-02-11 15:26:33 -08005878 int cpu, node;
5879
Vladimir Davydov13583c32016-12-12 16:41:29 -08005880#ifndef CONFIG_SLOB
5881 /*
5882 * Kmem cache creation is mostly done with the slab_mutex held,
Tejun Heo17cc4df2017-02-22 15:41:36 -08005883 * so use a workqueue with limited concurrency to avoid stalling
5884 * all worker threads in case lots of cgroups are created and
5885 * destroyed simultaneously.
Vladimir Davydov13583c32016-12-12 16:41:29 -08005886 */
Tejun Heo17cc4df2017-02-22 15:41:36 -08005887 memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
5888 BUG_ON(!memcg_kmem_cache_wq);
Vladimir Davydov13583c32016-12-12 16:41:29 -08005889#endif
5890
Sebastian Andrzej Siewior308167f2016-11-03 15:49:59 +01005891 cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
5892 memcg_hotplug_cpu_dead);
Johannes Weiner95a045f2015-02-11 15:26:33 -08005893
5894 for_each_possible_cpu(cpu)
5895 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5896 drain_local_stock);
5897
5898 for_each_node(node) {
5899 struct mem_cgroup_tree_per_node *rtpn;
Johannes Weiner95a045f2015-02-11 15:26:33 -08005900
5901 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5902 node_online(node) ? node : NUMA_NO_NODE);
5903
Mel Gormanef8f2322016-07-28 15:46:05 -07005904 rtpn->rb_root = RB_ROOT;
Davidlohr Buesofa90b2f2017-09-08 16:15:21 -07005905 rtpn->rb_rightmost = NULL;
Mel Gormanef8f2322016-07-28 15:46:05 -07005906 spin_lock_init(&rtpn->lock);
Johannes Weiner95a045f2015-02-11 15:26:33 -08005907 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5908 }
5909
Michal Hocko2d110852013-02-22 16:34:43 -08005910 return 0;
5911}
5912subsys_initcall(mem_cgroup_init);
Johannes Weiner21afa382015-02-11 15:26:36 -08005913
5914#ifdef CONFIG_MEMCG_SWAP
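/*
 * Take an ID reference on the closest ancestor of @memcg that is still
 * online. An offlined memcg may no longer have an ID of its own, so
 * its swap entries are attributed to that ancestor instead.
 */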
Arnd Bergmann358c07f2016-08-25 15:17:08 -07005915static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
5916{
5917 while (!atomic_inc_not_zero(&memcg->id.ref)) {
5918 /*
5919	 * The root cgroup cannot be destroyed, so its refcount must
5920 * always be >= 1.
5921 */
5922 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
5923 VM_BUG_ON(1);
5924 break;
5925 }
5926 memcg = parent_mem_cgroup(memcg);
5927 if (!memcg)
5928 memcg = root_mem_cgroup;
5929 }
5930 return memcg;
5931}
5932
Johannes Weiner21afa382015-02-11 15:26:36 -08005933/**
5934 * mem_cgroup_swapout - transfer a memsw charge to swap
5935 * @page: page whose memsw charge to transfer
5936 * @entry: swap entry to move the charge to
5937 *
5938 * Transfer the memsw charge of @page to @entry.
5939 */
5940void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5941{
Vladimir Davydov1f47b612016-08-11 15:33:00 -07005942 struct mem_cgroup *memcg, *swap_memcg;
Huang Yingd6810d72017-09-06 16:22:45 -07005943 unsigned int nr_entries;
Johannes Weiner21afa382015-02-11 15:26:36 -08005944 unsigned short oldid;
5945
5946 VM_BUG_ON_PAGE(PageLRU(page), page);
5947 VM_BUG_ON_PAGE(page_count(page), page);
5948
Johannes Weiner7941d212016-01-14 15:21:23 -08005949 if (!do_memsw_account())
Johannes Weiner21afa382015-02-11 15:26:36 -08005950 return;
5951
5952 memcg = page->mem_cgroup;
5953
5954 /* Readahead page, never charged */
5955 if (!memcg)
5956 return;
5957
Vladimir Davydov1f47b612016-08-11 15:33:00 -07005958 /*
5959 * In case the memcg owning these pages has been offlined and doesn't
5960 * have an ID allocated to it anymore, charge the closest online
5961 * ancestor for the swap instead and transfer the memory+swap charge.
5962 */
5963 swap_memcg = mem_cgroup_id_get_online(memcg);
Huang Yingd6810d72017-09-06 16:22:45 -07005964 nr_entries = hpage_nr_pages(page);
5965 /* Get references for the tail pages, too */
5966 if (nr_entries > 1)
5967 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
5968 oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
5969 nr_entries);
Johannes Weiner21afa382015-02-11 15:26:36 -08005970 VM_BUG_ON_PAGE(oldid, page);
Johannes Weinerc9019e92018-01-31 16:16:37 -08005971 mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
Johannes Weiner21afa382015-02-11 15:26:36 -08005972
5973 page->mem_cgroup = NULL;
5974
5975 if (!mem_cgroup_is_root(memcg))
Huang Yingd6810d72017-09-06 16:22:45 -07005976 page_counter_uncharge(&memcg->memory, nr_entries);
Johannes Weiner21afa382015-02-11 15:26:36 -08005977
Vladimir Davydov1f47b612016-08-11 15:33:00 -07005978 if (memcg != swap_memcg) {
5979 if (!mem_cgroup_is_root(swap_memcg))
Huang Yingd6810d72017-09-06 16:22:45 -07005980 page_counter_charge(&swap_memcg->memsw, nr_entries);
5981 page_counter_uncharge(&memcg->memsw, nr_entries);
Vladimir Davydov1f47b612016-08-11 15:33:00 -07005982 }
5983
Sebastian Andrzej Siewiorce9ce662015-09-04 15:47:50 -07005984 /*
5985	 * Interrupts should be disabled here because the caller holds
5986	 * mapping->tree_lock, which is taken with interrupts off. It is
5987	 * important to have interrupts disabled because it is the only
5988	 * synchronisation we have for updating the per-CPU variables.
5989 */
5990 VM_BUG_ON(!irqs_disabled());
Huang Yingd6810d72017-09-06 16:22:45 -07005991 mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
5992 -nr_entries);
Johannes Weiner21afa382015-02-11 15:26:36 -08005993 memcg_check_events(memcg, page);
Johannes Weiner73f576c2016-07-20 15:44:57 -07005994
5995 if (!mem_cgroup_is_root(memcg))
Shakeel Buttd08afa12017-11-29 16:11:15 -08005996 css_put_many(&memcg->css, nr_entries);
Johannes Weiner21afa382015-02-11 15:26:36 -08005997}
5998
Huang Ying38d8b4e2017-07-06 15:37:18 -07005999/**
6000 * mem_cgroup_try_charge_swap - try charging swap space for a page
Vladimir Davydov37e84352016-01-20 15:02:56 -08006001 * @page: page being added to swap
6002 * @entry: swap entry to charge
6003 *
Huang Ying38d8b4e2017-07-06 15:37:18 -07006004 * Try to charge @page's memcg for the swap space at @entry.
Vladimir Davydov37e84352016-01-20 15:02:56 -08006005 *
6006 * Returns 0 on success, -ENOMEM on failure.
6007 */
6008int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
6009{
Huang Ying38d8b4e2017-07-06 15:37:18 -07006010 unsigned int nr_pages = hpage_nr_pages(page);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006011 struct page_counter *counter;
Huang Ying38d8b4e2017-07-06 15:37:18 -07006012 struct mem_cgroup *memcg;
Vladimir Davydov37e84352016-01-20 15:02:56 -08006013 unsigned short oldid;
6014
6015 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
6016 return 0;
6017
6018 memcg = page->mem_cgroup;
6019
6020 /* Readahead page, never charged */
6021 if (!memcg)
6022 return 0;
6023
Vladimir Davydov1f47b612016-08-11 15:33:00 -07006024 memcg = mem_cgroup_id_get_online(memcg);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006025
Vladimir Davydov1f47b612016-08-11 15:33:00 -07006026 if (!mem_cgroup_is_root(memcg) &&
Huang Ying38d8b4e2017-07-06 15:37:18 -07006027 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
Vladimir Davydov1f47b612016-08-11 15:33:00 -07006028 mem_cgroup_id_put(memcg);
6029 return -ENOMEM;
6030 }
6031
Huang Ying38d8b4e2017-07-06 15:37:18 -07006032 /* Get references for the tail pages, too */
6033 if (nr_pages > 1)
6034 mem_cgroup_id_get_many(memcg, nr_pages - 1);
6035 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006036 VM_BUG_ON_PAGE(oldid, page);
Johannes Weinerc9019e92018-01-31 16:16:37 -08006037 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006038
Vladimir Davydov37e84352016-01-20 15:02:56 -08006039 return 0;
6040}
6041
Johannes Weiner21afa382015-02-11 15:26:36 -08006042/**
Huang Ying38d8b4e2017-07-06 15:37:18 -07006043 * mem_cgroup_uncharge_swap - uncharge swap space
Johannes Weiner21afa382015-02-11 15:26:36 -08006044 * @entry: swap entry to uncharge
Huang Ying38d8b4e2017-07-06 15:37:18 -07006045 * @nr_pages: the amount of swap space to uncharge
Johannes Weiner21afa382015-02-11 15:26:36 -08006046 */
Huang Ying38d8b4e2017-07-06 15:37:18 -07006047void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
Johannes Weiner21afa382015-02-11 15:26:36 -08006048{
6049 struct mem_cgroup *memcg;
6050 unsigned short id;
6051
Vladimir Davydov37e84352016-01-20 15:02:56 -08006052 if (!do_swap_account)
Johannes Weiner21afa382015-02-11 15:26:36 -08006053 return;
6054
Huang Ying38d8b4e2017-07-06 15:37:18 -07006055 id = swap_cgroup_record(entry, 0, nr_pages);
Johannes Weiner21afa382015-02-11 15:26:36 -08006056 rcu_read_lock();
Vladimir Davydovadbe4272015-04-15 16:13:00 -07006057 memcg = mem_cgroup_from_id(id);
Johannes Weiner21afa382015-02-11 15:26:36 -08006058 if (memcg) {
Vladimir Davydov37e84352016-01-20 15:02:56 -08006059 if (!mem_cgroup_is_root(memcg)) {
6060 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
Huang Ying38d8b4e2017-07-06 15:37:18 -07006061 page_counter_uncharge(&memcg->swap, nr_pages);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006062 else
Huang Ying38d8b4e2017-07-06 15:37:18 -07006063 page_counter_uncharge(&memcg->memsw, nr_pages);
Vladimir Davydov37e84352016-01-20 15:02:56 -08006064 }
Johannes Weinerc9019e92018-01-31 16:16:37 -08006065 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
Huang Ying38d8b4e2017-07-06 15:37:18 -07006066 mem_cgroup_id_put_many(memcg, nr_pages);
Johannes Weiner21afa382015-02-11 15:26:36 -08006067 }
6068 rcu_read_unlock();
6069}
6070
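/*
 * How many swap pages are still usable by @memcg: the global free swap
 * count, clamped by the remaining swap quota of every cgroup on the
 * path from @memcg up to the root.
 */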
Vladimir Davydovd8b38432016-01-20 15:03:07 -08006071long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
6072{
6073 long nr_swap_pages = get_nr_swap_pages();
6074
6075 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6076 return nr_swap_pages;
6077 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6078 nr_swap_pages = min_t(long, nr_swap_pages,
6079 READ_ONCE(memcg->swap.limit) -
6080 page_counter_read(&memcg->swap));
6081 return nr_swap_pages;
6082}
6083
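/*
 * Consider swap "full" for @page if the system is low on swap space
 * (vm_swap_full()) or if any cgroup in the page's hierarchy has used
 * at least half of its swap limit.
 */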
Vladimir Davydov5ccc5ab2016-01-20 15:03:10 -08006084bool mem_cgroup_swap_full(struct page *page)
6085{
6086 struct mem_cgroup *memcg;
6087
6088 VM_BUG_ON_PAGE(!PageLocked(page), page);
6089
6090 if (vm_swap_full())
6091 return true;
6092 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
6093 return false;
6094
6095 memcg = page->mem_cgroup;
6096 if (!memcg)
6097 return false;
6098
6099 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
6100 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
6101 return true;
6102
6103 return false;
6104}
6105
Johannes Weiner21afa382015-02-11 15:26:36 -08006106/* for remembering the boot option */
6107#ifdef CONFIG_MEMCG_SWAP_ENABLED
6108static int really_do_swap_account __initdata = 1;
6109#else
6110static int really_do_swap_account __initdata;
6111#endif
6112
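/*
 * "swapaccount=0" on the kernel command line disables swap accounting,
 * "swapaccount=1" enables it; the default is set by
 * CONFIG_MEMCG_SWAP_ENABLED above.
 */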
6113static int __init enable_swap_account(char *s)
6114{
6115 if (!strcmp(s, "1"))
6116 really_do_swap_account = 1;
6117 else if (!strcmp(s, "0"))
6118 really_do_swap_account = 0;
6119 return 1;
6120}
6121__setup("swapaccount=", enable_swap_account);
6122
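/*
 * Interface files for the default (cgroup v2) hierarchy:
 * memory.swap.current reports swap usage and memory.swap.max sets the
 * limit ("max" removes it again).
 *
 * Usage sketch, assuming a v2 mount at /sys/fs/cgroup and a cgroup
 * named <group>:
 *
 *   cat /sys/fs/cgroup/<group>/memory.swap.current
 *   echo 1G  > /sys/fs/cgroup/<group>/memory.swap.max
 *   echo max > /sys/fs/cgroup/<group>/memory.swap.max
 */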
Vladimir Davydov37e84352016-01-20 15:02:56 -08006123static u64 swap_current_read(struct cgroup_subsys_state *css,
6124 struct cftype *cft)
6125{
6126 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6127
6128 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
6129}
6130
6131static int swap_max_show(struct seq_file *m, void *v)
6132{
6133 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
6134 unsigned long max = READ_ONCE(memcg->swap.limit);
6135
6136 if (max == PAGE_COUNTER_MAX)
6137 seq_puts(m, "max\n");
6138 else
6139 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
6140
6141 return 0;
6142}
6143
6144static ssize_t swap_max_write(struct kernfs_open_file *of,
6145 char *buf, size_t nbytes, loff_t off)
6146{
6147 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6148 unsigned long max;
6149 int err;
6150
6151 buf = strstrip(buf);
6152 err = page_counter_memparse(buf, "max", &max);
6153 if (err)
6154 return err;
6155
6156 mutex_lock(&memcg_limit_mutex);
6157 err = page_counter_limit(&memcg->swap, max);
6158 mutex_unlock(&memcg_limit_mutex);
6159 if (err)
6160 return err;
6161
6162 return nbytes;
6163}
6164
6165static struct cftype swap_files[] = {
6166 {
6167 .name = "swap.current",
6168 .flags = CFTYPE_NOT_ON_ROOT,
6169 .read_u64 = swap_current_read,
6170 },
6171 {
6172 .name = "swap.max",
6173 .flags = CFTYPE_NOT_ON_ROOT,
6174 .seq_show = swap_max_show,
6175 .write = swap_max_write,
6176 },
6177 { } /* terminate */
6178};
6179
Johannes Weiner21afa382015-02-11 15:26:36 -08006180static struct cftype memsw_cgroup_files[] = {
6181 {
6182 .name = "memsw.usage_in_bytes",
6183 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6184 .read_u64 = mem_cgroup_read_u64,
6185 },
6186 {
6187 .name = "memsw.max_usage_in_bytes",
6188 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6189 .write = mem_cgroup_reset,
6190 .read_u64 = mem_cgroup_read_u64,
6191 },
6192 {
6193 .name = "memsw.limit_in_bytes",
6194 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6195 .write = mem_cgroup_write,
6196 .read_u64 = mem_cgroup_read_u64,
6197 },
6198 {
6199 .name = "memsw.failcnt",
6200 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6201 .write = mem_cgroup_reset,
6202 .read_u64 = mem_cgroup_read_u64,
6203 },
6204 { }, /* terminate */
6205};
6206
6207static int __init mem_cgroup_swap_init(void)
6208{
6209 if (!mem_cgroup_disabled() && really_do_swap_account) {
6210 do_swap_account = 1;
Vladimir Davydov37e84352016-01-20 15:02:56 -08006211 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
6212 swap_files));
Johannes Weiner21afa382015-02-11 15:26:36 -08006213 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6214 memsw_cgroup_files));
6215 }
6216 return 0;
6217}
6218subsys_initcall(mem_cgroup_swap_init);
6219
6220#endif /* CONFIG_MEMCG_SWAP */