/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;
struct cgroup_subsys_state *mem_cgroup_root_css __read_mostly;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

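/*
 * The name tables above are indexed by the corresponding stat/event/LRU
 * enums and become the field names reported to userspace; e.g. a (rough)
 * excerpt of a cgroup's memory.stat file looks like:
 *
 *	cache 1234880
 *	rss 4096
 *	rss_huge 0
 *	mapped_file 0
 *	...
 */
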
/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg events.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

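/*
 * How the targets above are consumed (see mem_cgroup_event_ratelimit()
 * further down): charges and uncharges accumulate in nr_page_events, and
 * each target fires once the counter passes its per-target threshold.
 * With the values above, usage thresholds are re-evaluated roughly every
 * 128 page events, while soft-limit tree and NUMA info updates happen
 * roughly every 1024 page events.
 */
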
struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEMCG_NR_EVENTS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct reclaim_iter	iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

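/*
 * Sketch of how the soft-limit tree is used (see the insert/remove helpers
 * further down): each mem_cgroup_per_zone whose memcg exceeds its soft
 * limit is linked into the per-node/zone rb_root, keyed by usage_in_excess.
 * Soft-limit reclaim then repeatedly picks the rightmost node, i.e. the
 * memcg with the largest excess, via mem_cgroup_largest_soft_limit_node().
 */
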
struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event. Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal. This callback must be set if
	 * you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

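/*
 * Usage sketch (cgroup v1 / legacy hierarchy, assuming the standard
 * cgroup.event_control interface): userspace creates an eventfd, opens a
 * control file such as memory.usage_in_bytes or memory.oom_control, and
 * writes "<event_fd> <control_fd> [args]" to cgroup.event_control.  That
 * write ends up allocating one of the structures above and wiring the
 * eventfd to the matching register_event() callback.
 */
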
static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter memsw;
	struct page_counter kmem;

	/* Normal memory consumption range */
	unsigned long low;
	unsigned long high;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/* css_online() has been completed */
	int initialized;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	bool		oom_lock;
	atomic_t	under_oom;
	atomic_t	oom_wakeups;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to another cgroup.
	 */
	atomic_t		moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	struct task_struct	*move_lock_task;
	unsigned long		move_lock_flags;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	spinlock_t pcp_counter_lock;

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct cg_proto tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	bool kmem_acct_activated;
	bool kmem_acct_active;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

#ifdef CONFIG_MEMCG_KMEM
bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return memcg->kmem_acct_active;
}
#endif

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

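/*
 * The MOVE_* bits above mirror what an administrator writes to the
 * (cgroup v1) memory.move_charge_at_immigrate file; for example,
 * "echo 3 > memory.move_charge_at_immigrate" requests that both anonymous
 * (MOVE_ANON) and file (MOVE_FILE) charges follow a task when it migrates
 * into the cgroup.
 */
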
/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

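/*
 * Example of the cft->private encoding above: with _MEMSWAP == 1 and an
 * attribute value of 1, MEMFILE_PRIVATE(_MEMSWAP, 1) yields 0x10001;
 * MEMFILE_TYPE() recovers the resource type (1) from the upper 16 bits and
 * MEMFILE_ATTR() the attribute (1) from the lower 16 bits.
 */
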
/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child cgroups
 * appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct mem_cgroup, css) : NULL;
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/*
 * We restrict the id in the range of [1, 65535], so it can fit into
 * an unsigned short.
 */
#define MEM_CGROUP_ID_MAX	USHRT_MAX

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return memcg->css.id;
}

/*
 * A helper function to get mem_cgroup from ID. It must be called under
 * rcu_read_lock(). The caller is responsible for calling
 * css_tryget_online() if the mem_cgroup is used for charging. (dropping
 * refcnt from swap can be called against removed memcg.)
 */
static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	struct cgroup_subsys_state *css;

	css = css_from_id(id, &memory_cgrp_subsys);
	return mem_cgroup_from_css(css);
}

/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't, however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) &&
		    memcg_proto_active(cg_proto) &&
		    css_tryget_online(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

#endif

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this:
 * this works better in sparse environments, where we have a lot of memcgs,
 * but only a few kmem-limited. Or also, if we have, for instance, 200
 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
 * 200 entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional on this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and such to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* CONFIG_MEMCG_KMEM */

static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned. The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 *
 * XXX: The above description of behavior on the default hierarchy isn't
 * strictly true yet as replace_page_cache_page() can modify the
 * association before @page is released even on the default hierarchy;
 * however, the current and planned usages don't mix the two functions
 * and replace_page_cache_page() will soon be updated to make the invariant
 * actually true.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_on_dfl(memcg->css.cgroup))
		memcg = root_mem_cgroup;

	rcu_read_unlock();
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
				       struct mem_cgroup_tree_per_zone *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}

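/*
 * soft_limit_excess() works in pages; e.g. with a usage of 768 pages and a
 * soft_limit of 512 pages the excess is 256 pages, and it is 0 whenever
 * usage is at or below the soft limit.
 */
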
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement "quick" reads. There is a trade-off between
 * reading cost and precision of the value. Then, we may have a chance to
 * implement a periodic synchronization of the counter in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact
 * value because he accounts memory. Even if we provide a quick-and-fuzzy
 * read, we always have to visit all online cpus and make the sum. So, for
 * now, unnecessary synchronization is not implemented. (just implemented
 * for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
						  int nid,
						  unsigned int lru_mask)
{
	unsigned long nr = 0;
	int zid;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct mem_cgroup_per_zone *mz;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))
				continue;
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			nr += mz->lru_size[lru];
		}
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

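/*
 * Worked example for mem_cgroup_event_ratelimit(): if nr_page_events has
 * reached 130 while targets[MEM_CGROUP_TARGET_THRESH] is still 128, the
 * signed comparison above fires, the target is advanced to 130 + 128 = 258,
 * and the function returns true so the thresholds are re-checked.
 */
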
/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_zone *mz;

		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		do {
			pos = READ_ONCE(iter->position);
			/*
			 * A racing update may change the position and
			 * put the last reference, hence css_tryget(),
			 * or retry to see the updated position.
			 */
		} while (pos && !css_tryget(&pos->css));
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference. The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css)) {
			/*
			 * Make sure the memcg is initialized:
			 * mem_cgroup_css_online() orders the
			 * initialization against setting the flag.
			 */
			if (smp_load_acquire(&memcg->initialized))
				break;

			css_put(css);
		}

		memcg = NULL;
	}

	if (reclaim) {
		if (cmpxchg(&iter->position, pos, memcg) == pos) {
			if (memcg)
				css_get(&memcg->css);
			if (pos)
				css_put(&pos->css);
		}

		/*
		 * pairs with css_tryget when dereferencing iter->position
		 * above.
		 */
		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001146
Johannes Weiner56600482012-01-12 17:17:59 -08001147/**
1148 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1149 * @root: hierarchy root
1150 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1151 */
1152void mem_cgroup_iter_break(struct mem_cgroup *root,
1153 struct mem_cgroup *prev)
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001154{
1155 if (!root)
1156 root = root_mem_cgroup;
1157 if (prev && prev != root)
1158 css_put(&prev->css);
1159}
1160
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001161/*
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001162 * Iteration constructs for visiting all cgroups (under a tree). If
1163 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1164 * be used for reference counting.
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001165 */
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001166#define for_each_mem_cgroup_tree(iter, root) \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001167 for (iter = mem_cgroup_iter(root, NULL, NULL); \
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001168 iter != NULL; \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001169 iter = mem_cgroup_iter(root, iter, NULL))
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001170
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001171#define for_each_mem_cgroup(iter) \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001172 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001173 iter != NULL; \
Johannes Weiner527a5ec2012-01-12 17:17:55 -08001174 iter = mem_cgroup_iter(NULL, iter, NULL))
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001175
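/*
 * Illustrative sketch (not part of the upstream file): how a walker
 * typically uses the iterator macros above.  The helper name below is
 * made up for illustration only; the point is that an early exit from
 * the loop must go through mem_cgroup_iter_break() so the reference
 * held on the last visited group is dropped:
 *
 *	static int count_oom_disabled(struct mem_cgroup *root)
 *	{
 *		struct mem_cgroup *iter;
 *		int nr = 0;
 *
 *		for_each_mem_cgroup_tree(iter, root) {
 *			if (fatal_signal_pending(current)) {
 *				mem_cgroup_iter_break(root, iter);
 *				break;
 *			}
 *			if (iter->oom_kill_disable)
 *				nr++;
 *		}
 *		return nr;
 *	}
 */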
David Rientjes68ae5642012-12-12 13:51:57 -08001176void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
Ying Han456f9982011-05-26 16:25:38 -07001177{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001178 struct mem_cgroup *memcg;
Ying Han456f9982011-05-26 16:25:38 -07001179
Ying Han456f9982011-05-26 16:25:38 -07001180 rcu_read_lock();
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001181 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1182 if (unlikely(!memcg))
Ying Han456f9982011-05-26 16:25:38 -07001183 goto out;
1184
1185 switch (idx) {
Ying Han456f9982011-05-26 16:25:38 -07001186 case PGFAULT:
Johannes Weiner0e574a92012-01-12 17:18:35 -08001187 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
1188 break;
1189 case PGMAJFAULT:
1190 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
Ying Han456f9982011-05-26 16:25:38 -07001191 break;
1192 default:
1193 BUG();
1194 }
1195out:
1196 rcu_read_unlock();
1197}
David Rientjes68ae5642012-12-12 13:51:57 -08001198EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
Ying Han456f9982011-05-26 16:25:38 -07001199
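/*
 * Callers normally go through the mem_cgroup_count_vm_event() wrapper
 * in <linux/memcontrol.h>, which checks mem_cgroup_disabled() before
 * calling the function above.  A hedged sketch of the major-fault
 * accounting done from the page cache fault path looks roughly like:
 *
 *	mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 */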
Johannes Weiner925b7672012-01-12 17:18:15 -08001200/**
1201 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1202 * @zone: zone of the wanted lruvec
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001203 * @memcg: memcg of the wanted lruvec
Johannes Weiner925b7672012-01-12 17:18:15 -08001204 *
1205 * Returns the lru list vector holding pages for the given @zone and
1206 * @memcg. This can be the global zone lruvec, if the memory controller
1207 * is disabled.
1208 */
1209struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1210 struct mem_cgroup *memcg)
1211{
1212 struct mem_cgroup_per_zone *mz;
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001213 struct lruvec *lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001214
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001215 if (mem_cgroup_disabled()) {
1216 lruvec = &zone->lruvec;
1217 goto out;
1218 }
Johannes Weiner925b7672012-01-12 17:18:15 -08001219
Jianyu Zhane2318752014-06-06 14:38:20 -07001220 mz = mem_cgroup_zone_zoneinfo(memcg, zone);
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001221 lruvec = &mz->lruvec;
1222out:
1223 /*
1224 * Since a node can be onlined after the mem_cgroup was created,
1225 * we have to be prepared to initialize lruvec->zone here;
1226 * and if offlined then reonlined, we need to reinitialize it.
1227 */
1228 if (unlikely(lruvec->zone != zone))
1229 lruvec->zone = zone;
1230 return lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001231}
1232
Johannes Weiner925b7672012-01-12 17:18:15 -08001233/**
Johannes Weinerdfe0e772014-12-10 15:43:43 -08001234 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
Johannes Weiner925b7672012-01-12 17:18:15 -08001235 * @page: the page
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001236 * @zone: zone of the page
Johannes Weinerdfe0e772014-12-10 15:43:43 -08001237 *
1238 * This function is only safe when following the LRU page isolation
1239 * and putback protocol: the LRU lock must be held, and the page must
1240 * either be PageLRU() or the caller must have isolated/allocated it.
Minchan Kim3f58a822011-03-22 16:32:53 -07001241 */
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001242struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
Minchan Kim3f58a822011-03-22 16:32:53 -07001243{
1244 struct mem_cgroup_per_zone *mz;
Johannes Weiner925b7672012-01-12 17:18:15 -08001245 struct mem_cgroup *memcg;
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001246 struct lruvec *lruvec;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08001247
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001248 if (mem_cgroup_disabled()) {
1249 lruvec = &zone->lruvec;
1250 goto out;
1251 }
Christoph Lameterb69408e2008-10-18 20:26:14 -07001252
Johannes Weiner1306a852014-12-10 15:44:52 -08001253 memcg = page->mem_cgroup;
Hugh Dickins75121022012-03-05 14:59:18 -08001254 /*
Johannes Weinerdfe0e772014-12-10 15:43:43 -08001255 * Swapcache readahead pages are added to the LRU - and
Johannes Weiner29833312014-12-10 15:44:02 -08001256 * possibly migrated - before they are charged.
Hugh Dickins75121022012-03-05 14:59:18 -08001257 */
Johannes Weiner29833312014-12-10 15:44:02 -08001258 if (!memcg)
1259 memcg = root_mem_cgroup;
Hugh Dickins75121022012-03-05 14:59:18 -08001260
Jianyu Zhane2318752014-06-06 14:38:20 -07001261 mz = mem_cgroup_page_zoneinfo(memcg, page);
Hugh Dickinsbea8c152012-11-16 14:14:54 -08001262 lruvec = &mz->lruvec;
1263out:
1264 /*
1265 * Since a node can be onlined after the mem_cgroup was created,
1266 * we have to be prepared to initialize lruvec->zone here;
1267 * and if offlined then reonlined, we need to reinitialize it.
1268 */
1269 if (unlikely(lruvec->zone != zone))
1270 lruvec->zone = zone;
1271 return lruvec;
Johannes Weiner925b7672012-01-12 17:18:15 -08001272}
1273
1274/**
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001275 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1276 * @lruvec: mem_cgroup per zone lru vector
1277 * @lru: index of lru list the page is sitting on
1278 * @nr_pages: positive when adding or negative when removing
Johannes Weiner925b7672012-01-12 17:18:15 -08001279 *
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001280 * This function must be called when a page is added to or removed from an
1281 * lru list.
Johannes Weiner925b7672012-01-12 17:18:15 -08001282 */
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001283void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1284 int nr_pages)
Johannes Weiner925b7672012-01-12 17:18:15 -08001285{
1286 struct mem_cgroup_per_zone *mz;
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001287 unsigned long *lru_size;
Johannes Weiner925b7672012-01-12 17:18:15 -08001288
1289 if (mem_cgroup_disabled())
1290 return;
1291
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001292 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1293 lru_size = mz->lru_size + lru;
1294 *lru_size += nr_pages;
1295 VM_BUG_ON((long)(*lru_size) < 0);
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08001296}
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08001297
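/*
 * A hedged sketch of the caller side: add_page_to_lru_list() in
 * <linux/mm_inline.h> pairs this accounting with the actual list
 * manipulation, roughly as follows (the zone counter update is
 * omitted here):
 *
 *	int nr_pages = hpage_nr_pages(page);
 *
 *	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 *	list_add(&page->lru, &lruvec->lists[lru]);
 */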
Johannes Weiner2314b422014-12-10 15:44:33 -08001298bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root)
Michal Hocko3e920412011-07-26 16:08:29 -07001299{
Johannes Weiner2314b422014-12-10 15:44:33 -08001300 if (root == memcg)
Johannes Weiner91c637342012-05-29 15:06:24 -07001301 return true;
Johannes Weiner2314b422014-12-10 15:44:33 -08001302 if (!root->use_hierarchy)
Johannes Weiner91c637342012-05-29 15:06:24 -07001303 return false;
Johannes Weiner2314b422014-12-10 15:44:33 -08001304 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07001305}
1306
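/*
 * Example (hypothetical hierarchy): with root -> A -> A/B and
 * use_hierarchy enabled, mem_cgroup_is_descendant(B, A) and
 * mem_cgroup_is_descendant(A, A) both return true, while
 * mem_cgroup_is_descendant(A, B) returns false.  With use_hierarchy
 * disabled on the root argument, only the self-match succeeds.
 */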
Johannes Weiner2314b422014-12-10 15:44:33 -08001307bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07001308{
Johannes Weiner2314b422014-12-10 15:44:33 -08001309 struct mem_cgroup *task_memcg;
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -07001310 struct task_struct *p;
David Rientjesffbdccf2013-07-03 15:01:23 -07001311 bool ret;
David Rientjes4c4a2212008-02-07 00:14:06 -08001312
KAMEZAWA Hiroyuki158e0a22010-08-10 18:03:00 -07001313 p = find_lock_task_mm(task);
David Rientjesde077d22012-01-12 17:18:52 -08001314 if (p) {
Johannes Weiner2314b422014-12-10 15:44:33 -08001315 task_memcg = get_mem_cgroup_from_mm(p->mm);
David Rientjesde077d22012-01-12 17:18:52 -08001316 task_unlock(p);
1317 } else {
1318 /*
1319 * All threads may have already detached their mm's, but the oom
1320 * killer still needs to detect if they have already been oom
1321 * killed to prevent needlessly killing additional tasks.
1322 */
David Rientjesffbdccf2013-07-03 15:01:23 -07001323 rcu_read_lock();
Johannes Weiner2314b422014-12-10 15:44:33 -08001324 task_memcg = mem_cgroup_from_task(task);
1325 css_get(&task_memcg->css);
David Rientjesffbdccf2013-07-03 15:01:23 -07001326 rcu_read_unlock();
David Rientjesde077d22012-01-12 17:18:52 -08001327 }
Johannes Weiner2314b422014-12-10 15:44:33 -08001328 ret = mem_cgroup_is_descendant(task_memcg, memcg);
1329 css_put(&task_memcg->css);
David Rientjes4c4a2212008-02-07 00:14:06 -08001330 return ret;
1331}
1332
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001333int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001334{
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001335 unsigned long inactive_ratio;
Johannes Weiner9b272972011-11-02 13:38:23 -07001336 unsigned long inactive;
1337 unsigned long active;
1338 unsigned long gb;
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001339
Hugh Dickins4d7dcca2012-05-29 15:07:08 -07001340 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1341 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001342
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001343 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1344 if (gb)
1345 inactive_ratio = int_sqrt(10 * gb);
1346 else
1347 inactive_ratio = 1;
1348
Johannes Weiner9b272972011-11-02 13:38:23 -07001349 return inactive * inactive_ratio < active;
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08001350}
1351
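/*
 * Worked example for the ratio above (illustrative numbers): with 4GB
 * of anon pages, gb = 4 and inactive_ratio = int_sqrt(40) = 6, so the
 * inactive list is considered low once inactive * 6 < active, i.e.
 * when less than roughly 1/7th of the anon pages are inactive.  Below
 * 1GB the ratio degenerates to 1:1.
 */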
Vladimir Davydov90cbc252015-02-11 15:25:55 -08001352bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
1353{
1354 struct mem_cgroup_per_zone *mz;
1355 struct mem_cgroup *memcg;
1356
1357 if (mem_cgroup_disabled())
1358 return true;
1359
1360 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1361 memcg = mz->memcg;
1362
1363 return !!(memcg->css.flags & CSS_ONLINE);
1364}
1365
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001366#define mem_cgroup_from_counter(counter, member) \
Balbir Singh6d61ef42009-01-07 18:08:06 -08001367 container_of(counter, struct mem_cgroup, member)
1368
Johannes Weiner19942822011-02-01 15:52:43 -08001369/**
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001370 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
Wanpeng Lidad75572012-06-20 12:53:01 -07001371 * @memcg: the memory cgroup
Johannes Weiner19942822011-02-01 15:52:43 -08001372 *
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001373 * Returns the maximum amount of memory @memcg can be charged with, in
Johannes Weiner7ec99d62011-03-23 16:42:36 -07001374 * pages.
Johannes Weiner19942822011-02-01 15:52:43 -08001375 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001376static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
Johannes Weiner19942822011-02-01 15:52:43 -08001377{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001378 unsigned long margin = 0;
1379 unsigned long count;
1380 unsigned long limit;
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001381
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001382 count = page_counter_read(&memcg->memory);
Jason Low4db0c3c2015-04-15 16:14:08 -07001383 limit = READ_ONCE(memcg->memory.limit);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001384 if (count < limit)
1385 margin = limit - count;
1386
1387 if (do_swap_account) {
1388 count = page_counter_read(&memcg->memsw);
Jason Low4db0c3c2015-04-15 16:14:08 -07001389 limit = READ_ONCE(memcg->memsw.limit);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001390 if (count <= limit)
1391 margin = min(margin, limit - count);
1392 }
1393
1394 return margin;
Johannes Weiner19942822011-02-01 15:52:43 -08001395}
1396
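/*
 * Worked example for mem_cgroup_margin() (made-up numbers): with
 * memory.limit = 1000 pages and memory usage = 700, the memory margin
 * is 300 pages; if swap accounting is on and memsw.limit = 1100 with
 * memsw usage = 950, the memsw margin is only 150, so the function
 * returns min(300, 150) = 150 chargeable pages.
 */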
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001397int mem_cgroup_swappiness(struct mem_cgroup *memcg)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001398{
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001399 /* root ? */
Linus Torvalds14208b02014-06-09 15:03:33 -07001400 if (mem_cgroup_disabled() || !memcg->css.parent)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001401 return vm_swappiness;
1402
Johannes Weinerbf1ff262011-03-23 16:42:32 -07001403 return memcg->swappiness;
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08001404}
1405
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07001406/*
Qiang Huangbdcbb652014-06-04 16:08:21 -07001407 * A routine for checking whether "mem" is under move_account() or not.
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001408 *
Qiang Huangbdcbb652014-06-04 16:08:21 -07001409 * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of
1410 * moving cgroups. This is used for waiting out the high memory pressure
1411 * caused by "move".
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001412 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001413static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001414{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001415 struct mem_cgroup *from;
1416 struct mem_cgroup *to;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001417 bool ret = false;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001418 /*
1419 * Unlike task_move routines, we access mc.to, mc.from not under
1420 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1421 */
1422 spin_lock(&mc.lock);
1423 from = mc.from;
1424 to = mc.to;
1425 if (!from)
1426 goto unlock;
Michal Hocko3e920412011-07-26 16:08:29 -07001427
Johannes Weiner2314b422014-12-10 15:44:33 -08001428 ret = mem_cgroup_is_descendant(from, memcg) ||
1429 mem_cgroup_is_descendant(to, memcg);
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001430unlock:
1431 spin_unlock(&mc.lock);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001432 return ret;
1433}
1434
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001435static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001436{
1437 if (mc.moving_task && current != mc.moving_task) {
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001438 if (mem_cgroup_under_move(memcg)) {
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001439 DEFINE_WAIT(wait);
1440 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1441 /* moving charge context might have finished. */
1442 if (mc.moving_task)
1443 schedule();
1444 finish_wait(&mc.waitq, &wait);
1445 return true;
1446 }
1447 }
1448 return false;
1449}
1450
Sha Zhengju58cf1882013-02-22 16:32:05 -08001451#define K(x) ((x) << (PAGE_SHIFT-10))
Balbir Singhe2224322009-04-02 16:57:39 -07001452/**
Sha Zhengju58cf1882013-02-22 16:32:05 -08001453 * mem_cgroup_print_oom_info: Print OOM information relevant to the memory controller.
Balbir Singhe2224322009-04-02 16:57:39 -07001454 * @memcg: The memory cgroup that went over limit
1455 * @p: Task that is going to be killed
1456 *
1457 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1458 * enabled
1459 */
1460void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1461{
Tejun Heoe61734c2014-02-12 09:29:50 -05001462 /* oom_info_lock ensures that parallel ooms do not interleave */
Michal Hocko08088cb2014-02-25 15:01:44 -08001463 static DEFINE_MUTEX(oom_info_lock);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001464 struct mem_cgroup *iter;
1465 unsigned int i;
Balbir Singhe2224322009-04-02 16:57:39 -07001466
Michal Hocko08088cb2014-02-25 15:01:44 -08001467 mutex_lock(&oom_info_lock);
Balbir Singhe2224322009-04-02 16:57:39 -07001468 rcu_read_lock();
1469
Balasubramani Vivekanandan2415b9f2015-04-14 15:48:18 -07001470 if (p) {
1471 pr_info("Task in ");
1472 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1473 pr_cont(" killed as a result of limit of ");
1474 } else {
1475 pr_info("Memory limit reached of cgroup ");
1476 }
1477
Tejun Heoe61734c2014-02-12 09:29:50 -05001478 pr_cont_cgroup_path(memcg->css.cgroup);
Greg Thelen0346dad2015-01-26 12:58:38 -08001479 pr_cont("\n");
Balbir Singhe2224322009-04-02 16:57:39 -07001480
Balbir Singhe2224322009-04-02 16:57:39 -07001481 rcu_read_unlock();
1482
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001483 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1484 K((u64)page_counter_read(&memcg->memory)),
1485 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1486 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1487 K((u64)page_counter_read(&memcg->memsw)),
1488 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1489 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1490 K((u64)page_counter_read(&memcg->kmem)),
1491 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001492
1493 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heoe61734c2014-02-12 09:29:50 -05001494 pr_info("Memory cgroup stats for ");
1495 pr_cont_cgroup_path(iter->css.cgroup);
Sha Zhengju58cf1882013-02-22 16:32:05 -08001496 pr_cont(":");
1497
1498 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1499 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1500 continue;
1501 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1502 K(mem_cgroup_read_stat(iter, i)));
1503 }
1504
1505 for (i = 0; i < NR_LRU_LISTS; i++)
1506 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1507 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1508
1509 pr_cont("\n");
1510 }
Michal Hocko08088cb2014-02-25 15:01:44 -08001511 mutex_unlock(&oom_info_lock);
Balbir Singhe2224322009-04-02 16:57:39 -07001512}
1513
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001514/*
1515 * This function returns the number of memcgs in the hierarchy tree. Returns
1516 * 1 (self count) if there are no children.
1517 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001518static int mem_cgroup_count_children(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001519{
1520 int num = 0;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001521 struct mem_cgroup *iter;
1522
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001523 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001524 num++;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001525 return num;
1526}
1527
Balbir Singh6d61ef42009-01-07 18:08:06 -08001528/*
David Rientjesa63d83f2010-08-09 17:19:46 -07001529 * Return the memory (and swap, if configured) limit for a memcg.
1530 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001531static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
David Rientjesa63d83f2010-08-09 17:19:46 -07001532{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001533 unsigned long limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001534
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001535 limit = memcg->memory.limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001536 if (mem_cgroup_swappiness(memcg)) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001537 unsigned long memsw_limit;
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001538
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001539 memsw_limit = memcg->memsw.limit;
1540 limit = min(limit + total_swap_pages, memsw_limit);
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001541 }
Michal Hocko9a5a8f12012-11-16 14:14:49 -08001542 return limit;
David Rientjesa63d83f2010-08-09 17:19:46 -07001543}
1544
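/*
 * Example with hypothetical values: with memory.limit worth 512MB,
 * memsw.limit worth 768MB, 1GB of total swap and a non-zero
 * swappiness, the base used for OOM badness becomes
 * min(512MB + 1GB, 768MB) = 768MB worth of pages; with swappiness 0
 * the memsw side is ignored and the base stays at 512MB.
 */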
David Rientjes19965462012-12-11 16:00:26 -08001545static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1546 int order)
David Rientjes9cbb78b2012-07-31 16:43:44 -07001547{
1548 struct mem_cgroup *iter;
1549 unsigned long chosen_points = 0;
1550 unsigned long totalpages;
1551 unsigned int points = 0;
1552 struct task_struct *chosen = NULL;
1553
David Rientjes876aafb2012-07-31 16:43:48 -07001554 /*
David Rientjes465adcf2013-04-29 15:08:45 -07001555 * If current has a pending SIGKILL or is exiting, then automatically
1556 * select it. The goal is to allow it to allocate so that it may
1557 * quickly exit and free its memory.
David Rientjes876aafb2012-07-31 16:43:48 -07001558 */
Oleg Nesterovd003f372014-12-12 16:56:24 -08001559 if (fatal_signal_pending(current) || task_will_free_mem(current)) {
Michal Hocko49550b62015-02-11 15:26:12 -08001560 mark_tsk_oom_victim(current);
David Rientjes876aafb2012-07-31 16:43:48 -07001561 return;
1562 }
1563
Balasubramani Vivekanandan2415b9f2015-04-14 15:48:18 -07001564 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001565 totalpages = mem_cgroup_get_limit(memcg) ? : 1;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001566 for_each_mem_cgroup_tree(iter, memcg) {
Tejun Heo72ec7022013-08-08 20:11:26 -04001567 struct css_task_iter it;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001568 struct task_struct *task;
1569
Tejun Heo72ec7022013-08-08 20:11:26 -04001570 css_task_iter_start(&iter->css, &it);
1571 while ((task = css_task_iter_next(&it))) {
David Rientjes9cbb78b2012-07-31 16:43:44 -07001572 switch (oom_scan_process_thread(task, totalpages, NULL,
1573 false)) {
1574 case OOM_SCAN_SELECT:
1575 if (chosen)
1576 put_task_struct(chosen);
1577 chosen = task;
1578 chosen_points = ULONG_MAX;
1579 get_task_struct(chosen);
1580 /* fall through */
1581 case OOM_SCAN_CONTINUE:
1582 continue;
1583 case OOM_SCAN_ABORT:
Tejun Heo72ec7022013-08-08 20:11:26 -04001584 css_task_iter_end(&it);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001585 mem_cgroup_iter_break(memcg, iter);
1586 if (chosen)
1587 put_task_struct(chosen);
1588 return;
1589 case OOM_SCAN_OK:
1590 break;
1591 };
1592 points = oom_badness(task, memcg, NULL, totalpages);
David Rientjesd49ad932014-01-23 15:53:34 -08001593 if (!points || points < chosen_points)
1594 continue;
1595 /* Prefer thread group leaders for display purposes */
1596 if (points == chosen_points &&
1597 thread_group_leader(chosen))
1598 continue;
1599
1600 if (chosen)
1601 put_task_struct(chosen);
1602 chosen = task;
1603 chosen_points = points;
1604 get_task_struct(chosen);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001605 }
Tejun Heo72ec7022013-08-08 20:11:26 -04001606 css_task_iter_end(&it);
David Rientjes9cbb78b2012-07-31 16:43:44 -07001607 }
1608
1609 if (!chosen)
1610 return;
1611 points = chosen_points * 1000 / totalpages;
David Rientjes9cbb78b2012-07-31 16:43:44 -07001612 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1613 NULL, "Memory cgroup out of memory");
David Rientjes9cbb78b2012-07-31 16:43:44 -07001614}
1615
Michele Curtiae6e71d2014-12-12 16:56:35 -08001616#if MAX_NUMNODES > 1
1617
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001618/**
1619 * test_mem_cgroup_node_reclaimable
Wanpeng Lidad75572012-06-20 12:53:01 -07001620 * @memcg: the target memcg
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001621 * @nid: the node ID to be checked.
1622 * @noswap : specify true here if the user wants file only information.
1623 *
1624 * This function returns whether the specified memcg contains any
1625 * reclaimable pages on a node. Returns true if there are any reclaimable
1626 * pages in the node.
1627 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001628static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001629 int nid, bool noswap)
1630{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001631 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001632 return true;
1633 if (noswap || !total_swap_pages)
1634 return false;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001635 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
KAMEZAWA Hiroyuki4d0c0662011-07-08 15:39:42 -07001636 return true;
1637 return false;
1638
1639}
Ying Han889976d2011-05-26 16:25:33 -07001640
1641/*
1642 * Always updating the nodemask is not very good - even if we have an empty
1643 * list or the wrong list here, we can start from some node and traverse all
1644 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1645 *
1646 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001647static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001648{
1649 int nid;
KAMEZAWA Hiroyuki453a9bf32011-07-08 15:39:43 -07001650 /*
1651 * numainfo_events > 0 means there were at least NUMAINFO_EVENTS_TARGET
1652 * pagein/pageout changes since the last update.
1653 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001654 if (!atomic_read(&memcg->numainfo_events))
KAMEZAWA Hiroyuki453a9bf32011-07-08 15:39:43 -07001655 return;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001656 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
Ying Han889976d2011-05-26 16:25:33 -07001657 return;
1658
Ying Han889976d2011-05-26 16:25:33 -07001659 /* make a nodemask where this memcg uses memory from */
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001660 memcg->scan_nodes = node_states[N_MEMORY];
Ying Han889976d2011-05-26 16:25:33 -07001661
Lai Jiangshan31aaea42012-12-12 13:51:27 -08001662 for_each_node_mask(nid, node_states[N_MEMORY]) {
Ying Han889976d2011-05-26 16:25:33 -07001663
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001664 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1665 node_clear(nid, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001666 }
KAMEZAWA Hiroyuki453a9bf32011-07-08 15:39:43 -07001667
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001668 atomic_set(&memcg->numainfo_events, 0);
1669 atomic_set(&memcg->numainfo_updating, 0);
Ying Han889976d2011-05-26 16:25:33 -07001670}
1671
1672/*
1673 * Select a node to start reclaim from. Because all we need is to reduce the
1674 * usage counter, starting from anywhere is OK. Reclaiming memory from the
1675 * current node has both pros and cons.
1676 *
1677 * Freeing memory from the current node means freeing memory from a node which
1678 * we'll use or have used, so it may disturb that node's LRU ordering. And if
1679 * several threads hit their limits, they will all contend on one node. But
1680 * freeing from a remote node costs more for reclaim because of memory latency.
1681 *
1682 * For now, we use round-robin. A better algorithm is welcome.
1683 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001684int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001685{
1686 int node;
1687
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001688 mem_cgroup_may_update_nodemask(memcg);
1689 node = memcg->last_scanned_node;
Ying Han889976d2011-05-26 16:25:33 -07001690
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001691 node = next_node(node, memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001692 if (node == MAX_NUMNODES)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001693 node = first_node(memcg->scan_nodes);
Ying Han889976d2011-05-26 16:25:33 -07001694 /*
1695 * We call this when we hit the limit, not when pages are added to the LRU.
1696 * No LRU may hold pages because all pages are UNEVICTABLE or the
1697 * memcg is too small and none of its pages are on the LRU. In that case,
1698 * we use the current node.
1699 */
1700 if (unlikely(node == MAX_NUMNODES))
1701 node = numa_node_id();
1702
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001703 memcg->last_scanned_node = node;
Ying Han889976d2011-05-26 16:25:33 -07001704 return node;
1705}
Ying Han889976d2011-05-26 16:25:33 -07001706#else
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001707int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
Ying Han889976d2011-05-26 16:25:33 -07001708{
1709 return 0;
1710}
1711#endif
1712
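/*
 * Round-robin example (hypothetical nodemask): with scan_nodes =
 * {0, 2, 3} and last_scanned_node = 2, next_node() above returns 3;
 * on the following call last_scanned_node = 3, next_node() returns
 * MAX_NUMNODES and the walk wraps around to first_node() = 0.
 */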
Andrew Morton0608f432013-09-24 15:27:41 -07001713static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1714 struct zone *zone,
1715 gfp_t gfp_mask,
1716 unsigned long *total_scanned)
Balbir Singh6d61ef42009-01-07 18:08:06 -08001717{
Andrew Morton0608f432013-09-24 15:27:41 -07001718 struct mem_cgroup *victim = NULL;
1719 int total = 0;
1720 int loop = 0;
1721 unsigned long excess;
1722 unsigned long nr_scanned;
1723 struct mem_cgroup_reclaim_cookie reclaim = {
1724 .zone = zone,
1725 .priority = 0,
1726 };
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001727
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001728 excess = soft_limit_excess(root_memcg);
Balbir Singh6d61ef42009-01-07 18:08:06 -08001729
Andrew Morton0608f432013-09-24 15:27:41 -07001730 while (1) {
1731 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1732 if (!victim) {
1733 loop++;
1734 if (loop >= 2) {
1735 /*
1736 * If we have not been able to reclaim
1737 * anything, it might be because there are
1738 * no reclaimable pages under this hierarchy
1739 */
1740 if (!total)
1741 break;
1742 /*
1743 * We want to do more targeted reclaim.
1744 * excess >> 2 is not too excessive, so we don't
1745 * reclaim too much, nor too little, which would keep us
1746 * coming back to reclaim from this cgroup
1747 */
1748 if (total >= (excess >> 2) ||
1749 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1750 break;
1751 }
1752 continue;
1753 }
Andrew Morton0608f432013-09-24 15:27:41 -07001754 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1755 zone, &nr_scanned);
1756 *total_scanned += nr_scanned;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08001757 if (!soft_limit_excess(root_memcg))
Andrew Morton0608f432013-09-24 15:27:41 -07001758 break;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001759 }
Andrew Morton0608f432013-09-24 15:27:41 -07001760 mem_cgroup_iter_break(root_memcg, victim);
1761 return total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001762}
1763
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001764#ifdef CONFIG_LOCKDEP
1765static struct lockdep_map memcg_oom_lock_dep_map = {
1766 .name = "memcg_oom_lock",
1767};
1768#endif
1769
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001770static DEFINE_SPINLOCK(memcg_oom_lock);
1771
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001772/*
1773 * Check OOM-Killer is already running under our hierarchy.
1774 * If someone is running, return false.
1775 */
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001776static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001777{
Michal Hocko79dfdac2011-07-26 16:08:23 -07001778 struct mem_cgroup *iter, *failed = NULL;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001779
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001780 spin_lock(&memcg_oom_lock);
1781
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001782 for_each_mem_cgroup_tree(iter, memcg) {
Johannes Weiner23751be2011-08-25 15:59:16 -07001783 if (iter->oom_lock) {
Michal Hocko79dfdac2011-07-26 16:08:23 -07001784 /*
1785 * this subtree of our hierarchy is already locked
1786 * so we cannot take the lock.
1787 */
Michal Hocko79dfdac2011-07-26 16:08:23 -07001788 failed = iter;
Johannes Weiner9f3a0d02012-01-12 17:17:48 -08001789 mem_cgroup_iter_break(memcg, iter);
1790 break;
Johannes Weiner23751be2011-08-25 15:59:16 -07001791 } else
1792 iter->oom_lock = true;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001793 }
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001794
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001795 if (failed) {
1796 /*
1797 * OK, we failed to lock the whole subtree so we have
1798 * to clean up what we set up to the failing subtree
1799 */
1800 for_each_mem_cgroup_tree(iter, memcg) {
1801 if (iter == failed) {
1802 mem_cgroup_iter_break(memcg, iter);
1803 break;
1804 }
1805 iter->oom_lock = false;
Michal Hocko79dfdac2011-07-26 16:08:23 -07001806 }
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001807 } else
1808 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001809
1810 spin_unlock(&memcg_oom_lock);
1811
1812 return !failed;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001813}
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001814
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001815static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001816{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001817 struct mem_cgroup *iter;
1818
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001819 spin_lock(&memcg_oom_lock);
Johannes Weiner0056f4e2013-10-31 16:34:14 -07001820 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001821 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001822 iter->oom_lock = false;
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001823 spin_unlock(&memcg_oom_lock);
Michal Hocko79dfdac2011-07-26 16:08:23 -07001824}
1825
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001826static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001827{
1828 struct mem_cgroup *iter;
1829
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001830 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001831 atomic_inc(&iter->under_oom);
1832}
1833
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001834static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001835{
1836 struct mem_cgroup *iter;
1837
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001838 /*
1839 * When a new child is created while the hierarchy is under oom,
1840 * mem_cgroup_oom_lock() may not be called. We have to use
1841 * atomic_add_unless() here.
1842 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001843 for_each_mem_cgroup_tree(iter, memcg)
Michal Hocko79dfdac2011-07-26 16:08:23 -07001844 atomic_add_unless(&iter->under_oom, -1, 0);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001845}
1846
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001847static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1848
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001849struct oom_wait_info {
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001850 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001851 wait_queue_t wait;
1852};
1853
1854static int memcg_oom_wake_function(wait_queue_t *wait,
1855 unsigned mode, int sync, void *arg)
1856{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001857 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1858 struct mem_cgroup *oom_wait_memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001859 struct oom_wait_info *oom_wait_info;
1860
1861 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07001862 oom_wait_memcg = oom_wait_info->memcg;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001863
Johannes Weiner2314b422014-12-10 15:44:33 -08001864 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1865 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001866 return 0;
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001867 return autoremove_wake_function(wait, mode, sync, arg);
1868}
1869
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001870static void memcg_wakeup_oom(struct mem_cgroup *memcg)
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001871{
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001872 atomic_inc(&memcg->oom_wakeups);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001873 /* for filtering, pass "memcg" as argument. */
1874 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001875}
1876
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001877static void memcg_oom_recover(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001878{
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001879 if (memcg && atomic_read(&memcg->under_oom))
1880 memcg_wakeup_oom(memcg);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001881}
1882
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001883static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001884{
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001885 if (!current->memcg_oom.may_oom)
1886 return;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001887 /*
Johannes Weiner49426422013-10-16 13:46:59 -07001888 * We are in the middle of the charge context here, so we
1889 * don't want to block when potentially sitting on a callstack
1890 * that holds all kinds of filesystem and mm locks.
1891 *
1892 * Also, the caller may handle a failed allocation gracefully
1893 * (like optional page cache readahead) and so an OOM killer
1894 * invocation might not even be necessary.
1895 *
1896 * That's why we don't do anything here except remember the
1897 * OOM context and then deal with it at the end of the page
1898 * fault when the stack is unwound, the locks are released,
1899 * and when we know whether the fault was overall successful.
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001900 */
Johannes Weiner49426422013-10-16 13:46:59 -07001901 css_get(&memcg->css);
1902 current->memcg_oom.memcg = memcg;
1903 current->memcg_oom.gfp_mask = mask;
1904 current->memcg_oom.order = order;
1905}
1906
1907/**
1908 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1909 * @handle: actually kill/wait or just clean up the OOM state
1910 *
1911 * This has to be called at the end of a page fault if the memcg OOM
1912 * handler was enabled.
1913 *
1914 * Memcg supports userspace OOM handling where failed allocations must
1915 * sleep on a waitqueue until the userspace task resolves the
1916 * situation. Sleeping directly in the charge context with all kinds
1917 * of locks held is not a good idea, instead we remember an OOM state
1918 * in the task and mem_cgroup_oom_synchronize() has to be called at
1919 * the end of the page fault to complete the OOM handling.
1920 *
1921 * Returns %true if an ongoing memcg OOM situation was detected and
1922 * completed, %false otherwise.
1923 */
1924bool mem_cgroup_oom_synchronize(bool handle)
1925{
1926 struct mem_cgroup *memcg = current->memcg_oom.memcg;
1927 struct oom_wait_info owait;
1928 bool locked;
1929
1930 /* OOM is global, do not handle */
1931 if (!memcg)
1932 return false;
1933
Michal Hockoc32b3cb2015-02-11 15:26:24 -08001934 if (!handle || oom_killer_disabled)
Johannes Weiner49426422013-10-16 13:46:59 -07001935 goto cleanup;
1936
1937 owait.memcg = memcg;
1938 owait.wait.flags = 0;
1939 owait.wait.func = memcg_oom_wake_function;
1940 owait.wait.private = current;
1941 INIT_LIST_HEAD(&owait.wait.task_list);
1942
1943 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001944 mem_cgroup_mark_under_oom(memcg);
1945
1946 locked = mem_cgroup_oom_trylock(memcg);
1947
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001948 if (locked)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07001949 mem_cgroup_oom_notify(memcg);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001950
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001951 if (locked && !memcg->oom_kill_disable) {
1952 mem_cgroup_unmark_under_oom(memcg);
Johannes Weiner49426422013-10-16 13:46:59 -07001953 finish_wait(&memcg_oom_waitq, &owait.wait);
1954 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
1955 current->memcg_oom.order);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001956 } else {
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001957 schedule();
Johannes Weiner49426422013-10-16 13:46:59 -07001958 mem_cgroup_unmark_under_oom(memcg);
1959 finish_wait(&memcg_oom_waitq, &owait.wait);
1960 }
1961
1962 if (locked) {
Johannes Weinerfb2a6fc2013-09-12 15:13:43 -07001963 mem_cgroup_oom_unlock(memcg);
1964 /*
1965 * There is no guarantee that an OOM-lock contender
1966 * sees the wakeups triggered by the OOM kill
1967 * uncharges. Wake any sleepers explicitly.
1968 */
1969 memcg_oom_recover(memcg);
1970 }
Johannes Weiner49426422013-10-16 13:46:59 -07001971cleanup:
1972 current->memcg_oom.memcg = NULL;
Johannes Weiner3812c8c2013-09-12 15:13:44 -07001973 css_put(&memcg->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001974 return true;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001975}
1976
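/*
 * A hedged sketch of the caller side: when a page fault ends in
 * VM_FAULT_OOM, pagefault_out_of_memory() in mm/oom_kill.c gives the
 * memcg OOM state recorded above the first chance to resolve things,
 * and only falls back to the global OOM killer otherwise, roughly:
 *
 *	if (mem_cgroup_oom_synchronize(true))
 *		return;
 */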
Johannes Weinerd7365e72014-10-29 14:50:48 -07001977/**
1978 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
1979 * @page: page that is going to change accounted state
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001980 *
Johannes Weinerd7365e72014-10-29 14:50:48 -07001981 * This function must mark the beginning of an accounted page state
1982 * change to prevent double accounting when the page is concurrently
1983 * being moved to another memcg:
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001984 *
Johannes Weiner6de22612015-02-11 15:25:01 -08001985 * memcg = mem_cgroup_begin_page_stat(page);
Johannes Weinerd7365e72014-10-29 14:50:48 -07001986 * if (TestClearPageState(page))
1987 * mem_cgroup_update_page_stat(memcg, state, -1);
Johannes Weiner6de22612015-02-11 15:25:01 -08001988 * mem_cgroup_end_page_stat(memcg);
Balbir Singhd69b0422009-06-17 16:26:34 -07001989 */
Johannes Weiner6de22612015-02-11 15:25:01 -08001990struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001991{
1992 struct mem_cgroup *memcg;
Johannes Weiner6de22612015-02-11 15:25:01 -08001993 unsigned long flags;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07001994
Johannes Weiner6de22612015-02-11 15:25:01 -08001995 /*
1996 * The RCU lock is held throughout the transaction. The fast
1997 * path can get away without acquiring the memcg->move_lock
1998 * because page moving starts with an RCU grace period.
1999 *
2000 * The RCU lock also protects the memcg from being freed when
2001 * the page state that is going to change is the only thing
2002 * preventing the page from being uncharged.
2003 * E.g. end-writeback clearing PageWriteback(), which allows
2004 * migration to go ahead and uncharge the page before the
2005 * account transaction might be complete.
2006 */
Johannes Weinerd7365e72014-10-29 14:50:48 -07002007 rcu_read_lock();
2008
2009 if (mem_cgroup_disabled())
2010 return NULL;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002011again:
Johannes Weiner1306a852014-12-10 15:44:52 -08002012 memcg = page->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08002013 if (unlikely(!memcg))
Johannes Weinerd7365e72014-10-29 14:50:48 -07002014 return NULL;
2015
Qiang Huangbdcbb652014-06-04 16:08:21 -07002016 if (atomic_read(&memcg->moving_account) <= 0)
Johannes Weinerd7365e72014-10-29 14:50:48 -07002017 return memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002018
Johannes Weiner6de22612015-02-11 15:25:01 -08002019 spin_lock_irqsave(&memcg->move_lock, flags);
Johannes Weiner1306a852014-12-10 15:44:52 -08002020 if (memcg != page->mem_cgroup) {
Johannes Weiner6de22612015-02-11 15:25:01 -08002021 spin_unlock_irqrestore(&memcg->move_lock, flags);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002022 goto again;
2023 }
Johannes Weiner6de22612015-02-11 15:25:01 -08002024
2025 /*
2026 * When charge migration first begins, we can have locked and
2027 * unlocked page stat updates happening concurrently. Track
2028 * the task who has the lock for mem_cgroup_end_page_stat().
2029 */
2030 memcg->move_lock_task = current;
2031 memcg->move_lock_flags = flags;
Johannes Weinerd7365e72014-10-29 14:50:48 -07002032
2033 return memcg;
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002034}
Greg Thelenc4843a72015-05-22 17:13:16 -04002035EXPORT_SYMBOL(mem_cgroup_begin_page_stat);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002036
Johannes Weinerd7365e72014-10-29 14:50:48 -07002037/**
2038 * mem_cgroup_end_page_stat - finish a page state statistics transaction
2039 * @memcg: the memcg that was accounted against
Johannes Weinerd7365e72014-10-29 14:50:48 -07002040 */
Johannes Weiner6de22612015-02-11 15:25:01 -08002041void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002042{
Johannes Weiner6de22612015-02-11 15:25:01 -08002043 if (memcg && memcg->move_lock_task == current) {
2044 unsigned long flags = memcg->move_lock_flags;
2045
2046 memcg->move_lock_task = NULL;
2047 memcg->move_lock_flags = 0;
2048
2049 spin_unlock_irqrestore(&memcg->move_lock, flags);
2050 }
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002051
Johannes Weinerd7365e72014-10-29 14:50:48 -07002052 rcu_read_unlock();
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002053}
Greg Thelenc4843a72015-05-22 17:13:16 -04002054EXPORT_SYMBOL(mem_cgroup_end_page_stat);
KAMEZAWA Hiroyuki89c06bd2012-03-21 16:34:25 -07002055
Johannes Weinerd7365e72014-10-29 14:50:48 -07002056/**
2057 * mem_cgroup_update_page_stat - update page state statistics
2058 * @memcg: memcg to account against
2059 * @idx: page state item to account
2060 * @val: number of pages (positive or negative)
2061 *
2062 * See mem_cgroup_begin_page_stat() for locking requirements.
2063 */
2064void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
Sha Zhengju68b48762013-09-12 15:13:50 -07002065 enum mem_cgroup_stat_index idx, int val)
Balbir Singhd69b0422009-06-17 16:26:34 -07002066{
Sha Zhengju658b72c2013-09-12 15:13:52 -07002067 VM_BUG_ON(!rcu_read_lock_held());
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002068
Johannes Weinerd7365e72014-10-29 14:50:48 -07002069 if (memcg)
2070 this_cpu_add(memcg->stat->count[idx], val);
Balbir Singhd69b0422009-06-17 16:26:34 -07002071}
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07002072
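/*
 * A hedged sketch of the full transaction, roughly what the rmap code
 * does when a file page loses its last mapping (the flag/counter pair
 * here is just one example of a user of these helpers):
 *
 *	memcg = mem_cgroup_begin_page_stat(page);
 *	if (atomic_add_negative(-1, &page->_mapcount))
 *		mem_cgroup_update_page_stat(memcg,
 *					    MEM_CGROUP_STAT_FILE_MAPPED, -1);
 *	mem_cgroup_end_page_stat(memcg);
 */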
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002073/*
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002074 * size of first charge trial. "32" comes from vmscan.c's magic value.
2075 * TODO: it may be necessary to use larger values on big-iron systems.
2076 */
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002077#define CHARGE_BATCH 32U
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002078struct memcg_stock_pcp {
2079 struct mem_cgroup *cached; /* this never be root cgroup */
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002080 unsigned int nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002081 struct work_struct work;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002082 unsigned long flags;
Kirill A. Shutemova0db00f2012-05-29 15:06:56 -07002083#define FLUSHING_CACHED_CHARGE 0
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002084};
2085static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
Michal Hocko9f50fad2011-08-09 11:56:26 +02002086static DEFINE_MUTEX(percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002087
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002088/**
2089 * consume_stock: Try to consume stocked charge on this cpu.
2090 * @memcg: memcg to consume from.
2091 * @nr_pages: how many pages to charge.
2092 *
2093 * The charges will only happen if @memcg matches the current cpu's memcg
2094 * stock, and at least @nr_pages are available in that stock. Failure to
2095 * service an allocation will refill the stock.
2096 *
2097 * returns true if successful, false otherwise.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002098 */
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002099static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002100{
2101 struct memcg_stock_pcp *stock;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002102 bool ret = false;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002103
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002104 if (nr_pages > CHARGE_BATCH)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002105 return ret;
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002106
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002107 stock = &get_cpu_var(memcg_stock);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002108 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
Suleiman Souhlala0956d52012-12-18 14:21:36 -08002109 stock->nr_pages -= nr_pages;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002110 ret = true;
2111 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002112 put_cpu_var(memcg_stock);
2113 return ret;
2114}
2115
2116/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002117 * Return charges cached in the percpu stock and reset the cached information.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002118 */
2119static void drain_stock(struct memcg_stock_pcp *stock)
2120{
2121 struct mem_cgroup *old = stock->cached;
2122
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002123 if (stock->nr_pages) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002124 page_counter_uncharge(&old->memory, stock->nr_pages);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002125 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002126 page_counter_uncharge(&old->memsw, stock->nr_pages);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002127 css_put_many(&old->css, stock->nr_pages);
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002128 stock->nr_pages = 0;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002129 }
2130 stock->cached = NULL;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002131}
2132
2133/*
2134 * This must be called under preempt disabled or must be called by
2135 * a thread which is pinned to local cpu.
2136 */
2137static void drain_local_stock(struct work_struct *dummy)
2138{
Christoph Lameter7c8e0182014-06-04 16:07:56 -07002139 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002140 drain_stock(stock);
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002141 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002142}
2143
2144/*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002145 * Cache charges (nr_pages) in the local per-cpu area.
Greg Thelen320cc512010-03-15 15:27:28 +01002146 * This will be consumed by consume_stock() function, later.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002147 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002148static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002149{
2150 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2151
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002152 if (stock->cached != memcg) { /* reset if necessary */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002153 drain_stock(stock);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002154 stock->cached = memcg;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002155 }
Johannes Weiner11c9ea42011-03-23 16:42:34 -07002156 stock->nr_pages += nr_pages;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002157 put_cpu_var(memcg_stock);
2158}
2159
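/*
 * Worked example of the stock fast path (illustrative numbers): a
 * single-page charge in try_charge() below uses batch = CHARGE_BATCH
 * = 32 pages.  The first charge misses the stock, charges 32 pages
 * against the page counters and parks the 31 spare pages in this
 * CPU's stock via refill_stock(); the next 31 single-page charges on
 * the same CPU are then served by consume_stock() without touching
 * the page counters at all.
 */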
2160/*
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002161 * Drains all per-CPU charge caches for the given root_memcg and the subtree
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002162 * of the hierarchy under it.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002163 */
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002164static void drain_all_stock(struct mem_cgroup *root_memcg)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002165{
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002166 int cpu, curcpu;
Michal Hockod38144b2011-07-26 16:08:28 -07002167
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002168 /* If someone's already draining, avoid running more workers. */
2169 if (!mutex_trylock(&percpu_charge_mutex))
2170 return;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002171 /* Notify other cpus that system-wide "drain" is running */
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002172 get_online_cpus();
Johannes Weiner5af12d02011-08-25 15:59:07 -07002173 curcpu = get_cpu();
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002174 for_each_online_cpu(cpu) {
2175 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002176 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002177
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002178 memcg = stock->cached;
2179 if (!memcg || !stock->nr_pages)
KAMEZAWA Hiroyuki26fe6162011-06-15 15:08:45 -07002180 continue;
Johannes Weiner2314b422014-12-10 15:44:33 -08002181 if (!mem_cgroup_is_descendant(memcg, root_memcg))
Michal Hocko3e920412011-07-26 16:08:29 -07002182 continue;
Michal Hockod1a05b62011-07-26 16:08:27 -07002183 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2184 if (cpu == curcpu)
2185 drain_local_stock(&stock->work);
2186 else
2187 schedule_work_on(cpu, &stock->work);
2188 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002189 }
Johannes Weiner5af12d02011-08-25 15:59:07 -07002190 put_cpu();
Andrew Mortonf894ffa2013-09-12 15:13:35 -07002191 put_online_cpus();
Michal Hocko9f50fad2011-08-09 11:56:26 +02002192 mutex_unlock(&percpu_charge_mutex);
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002193}
2194
Paul Gortmaker0db06282013-06-19 14:53:51 -04002195static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002196 unsigned long action,
2197 void *hcpu)
2198{
2199 int cpu = (unsigned long)hcpu;
2200 struct memcg_stock_pcp *stock;
2201
KAMEZAWA Hiroyuki619d0942012-03-21 16:34:23 -07002202 if (action == CPU_ONLINE)
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002203 return NOTIFY_OK;
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07002204
Kirill A. Shutemovd8330492012-04-12 12:49:11 -07002205 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002206 return NOTIFY_OK;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07002207
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002208 stock = &per_cpu(memcg_stock, cpu);
2209 drain_stock(stock);
2210 return NOTIFY_OK;
2211}
2212
Johannes Weiner00501b52014-08-08 14:19:20 -07002213static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2214 unsigned int nr_pages)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002215{
Johannes Weiner7ec99d62011-03-23 16:42:36 -07002216 unsigned int batch = max(CHARGE_BATCH, nr_pages);
Johannes Weiner9b130612014-08-06 16:05:51 -07002217 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002218 struct mem_cgroup *mem_over_limit;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002219 struct page_counter *counter;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002220 unsigned long nr_reclaimed;
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002221 bool may_swap = true;
2222 bool drained = false;
Johannes Weiner05b84302014-08-06 16:05:59 -07002223 int ret = 0;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002224
Johannes Weinerce00a962014-09-05 08:43:57 -04002225 if (mem_cgroup_is_root(memcg))
2226 goto done;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002227retry:
Michal Hockob6b6cc72014-04-07 15:37:44 -07002228 if (consume_stock(memcg, nr_pages))
2229 goto done;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002230
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002231 if (!do_swap_account ||
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002232 !page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2233 if (!page_counter_try_charge(&memcg->memory, batch, &counter))
Johannes Weiner6539cc02014-08-06 16:05:42 -07002234 goto done_restock;
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002235 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002236 page_counter_uncharge(&memcg->memsw, batch);
2237 mem_over_limit = mem_cgroup_from_counter(counter, memory);
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002238 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002239 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002240 may_swap = false;
Johannes Weiner3fbe7242014-10-09 15:28:54 -07002241 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002242
Johannes Weiner6539cc02014-08-06 16:05:42 -07002243 if (batch > nr_pages) {
2244 batch = nr_pages;
2245 goto retry;
2246 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002247
Johannes Weiner06b078f2014-08-06 16:05:44 -07002248 /*
2249 * Unlike in global OOM situations, memcg is not in a physical
2250 * memory shortage. Allow dying and OOM-killed tasks to
2251 * bypass the last charges so that they can exit quickly and
2252 * free their memory.
2253 */
2254 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2255 fatal_signal_pending(current) ||
2256 current->flags & PF_EXITING))
2257 goto bypass;
2258
2259 if (unlikely(task_in_memcg_oom(current)))
2260 goto nomem;
2261
Johannes Weiner6539cc02014-08-06 16:05:42 -07002262 if (!(gfp_mask & __GFP_WAIT))
2263 goto nomem;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002264
Johannes Weiner241994ed2015-02-11 15:26:06 -08002265 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
2266
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002267 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2268 gfp_mask, may_swap);
Johannes Weiner6539cc02014-08-06 16:05:42 -07002269
Johannes Weiner61e02c72014-08-06 16:08:16 -07002270 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
Johannes Weiner6539cc02014-08-06 16:05:42 -07002271 goto retry;
Johannes Weiner28c34c22014-08-06 16:05:47 -07002272
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002273 if (!drained) {
Johannes Weiner6d3d6aa2014-12-10 15:42:50 -08002274 drain_all_stock(mem_over_limit);
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002275 drained = true;
2276 goto retry;
2277 }
2278
Johannes Weiner28c34c22014-08-06 16:05:47 -07002279 if (gfp_mask & __GFP_NORETRY)
2280 goto nomem;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002281 /*
2282 * Even though the limit is exceeded at this point, reclaim
2283 * may have been able to free some pages. Retry the charge
2284 * before killing the task.
2285 *
2286 * Only for regular pages, though: huge pages are rather
2287 * unlikely to succeed so close to the limit, and we fall back
2288 * to regular pages anyway in case of failure.
2289 */
Johannes Weiner61e02c72014-08-06 16:08:16 -07002290 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
Johannes Weiner6539cc02014-08-06 16:05:42 -07002291 goto retry;
2292 /*
2293	 * During a task move, charges can temporarily be counted twice, so it's
2294	 * better to wait until the move has finished if one is in progress.
2295 */
2296 if (mem_cgroup_wait_acct_move(mem_over_limit))
2297 goto retry;
2298
Johannes Weiner9b130612014-08-06 16:05:51 -07002299 if (nr_retries--)
2300 goto retry;
2301
Johannes Weiner06b078f2014-08-06 16:05:44 -07002302 if (gfp_mask & __GFP_NOFAIL)
2303 goto bypass;
2304
Johannes Weiner6539cc02014-08-06 16:05:42 -07002305 if (fatal_signal_pending(current))
2306 goto bypass;
2307
Johannes Weiner241994ed2015-02-11 15:26:06 -08002308 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2309
Johannes Weiner61e02c72014-08-06 16:08:16 -07002310 mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(nr_pages));
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002311nomem:
Johannes Weiner6d1fdc42014-04-07 15:37:45 -07002312 if (!(gfp_mask & __GFP_NOFAIL))
Johannes Weiner3168ecb2013-10-31 16:34:13 -07002313 return -ENOMEM;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002314bypass:
Johannes Weinerce00a962014-09-05 08:43:57 -04002315 return -EINTR;
Johannes Weiner6539cc02014-08-06 16:05:42 -07002316
2317done_restock:
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002318 css_get_many(&memcg->css, batch);
Johannes Weiner6539cc02014-08-06 16:05:42 -07002319 if (batch > nr_pages)
2320 refill_stock(memcg, batch - nr_pages);
Johannes Weiner241994ed2015-02-11 15:26:06 -08002321 /*
2322 * If the hierarchy is above the normal consumption range,
2323	 * make the charging task trim its excess contribution.
2324 */
2325 do {
2326 if (page_counter_read(&memcg->memory) <= memcg->high)
2327 continue;
2328 mem_cgroup_events(memcg, MEMCG_HIGH, 1);
2329 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
2330 } while ((memcg = parent_mem_cgroup(memcg)));
Johannes Weiner6539cc02014-08-06 16:05:42 -07002331done:
Johannes Weiner05b84302014-08-06 16:05:59 -07002332 return ret;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002333}
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002334
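/*
 * Undo a charge taken by try_charge() that was never committed to a
 * page: return the pages to the memory (and, with swap accounting, the
 * memsw) counter and drop the per-page css references.
 */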
Johannes Weiner00501b52014-08-08 14:19:20 -07002335static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002336{
Johannes Weinerce00a962014-09-05 08:43:57 -04002337 if (mem_cgroup_is_root(memcg))
2338 return;
2339
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002340 page_counter_uncharge(&memcg->memory, nr_pages);
Johannes Weiner05b84302014-08-06 16:05:59 -07002341 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002342 page_counter_uncharge(&memcg->memsw, nr_pages);
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002343
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002344 css_put_many(&memcg->css, nr_pages);
KAMEZAWA Hiroyukid01dd172012-05-29 15:07:03 -07002345}
2346
2347/*
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002348 * try_get_mem_cgroup_from_page - look up page's memcg association
2349 * @page: the page
2350 *
2351 * Look up, get a css reference, and return the memcg that owns @page.
2352 *
2353 * The page must be locked to prevent racing with swap-in and page
2354 * cache charges. If coming from an unlocked page table, the caller
2355 * must ensure the page is on the LRU or this can race with charging.
2356 */
Wu Fengguange42d9d52009-12-16 12:19:59 +01002357struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002358{
Johannes Weiner29833312014-12-10 15:44:02 -08002359 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002360 unsigned short id;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002361 swp_entry_t ent;
2362
Sasha Levin309381fea2014-01-23 15:52:54 -08002363 VM_BUG_ON_PAGE(!PageLocked(page), page);
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002364
Johannes Weiner1306a852014-12-10 15:44:52 -08002365 memcg = page->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08002366 if (memcg) {
2367 if (!css_tryget_online(&memcg->css))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002368 memcg = NULL;
Wu Fengguange42d9d52009-12-16 12:19:59 +01002369 } else if (PageSwapCache(page)) {
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002370 ent.val = page_private(page);
Bob Liu9fb4b7c2012-01-12 17:18:48 -08002371 id = lookup_swap_cgroup_id(ent);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002372 rcu_read_lock();
Vladimir Davydovadbe4272015-04-15 16:13:00 -07002373 memcg = mem_cgroup_from_id(id);
Tejun Heoec903c02014-05-13 12:11:01 -04002374 if (memcg && !css_tryget_online(&memcg->css))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002375 memcg = NULL;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002376 rcu_read_unlock();
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002377 }
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07002378 return memcg;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002379}
2380
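/*
 * Take zone->lru_lock and, if @page is on an LRU list, isolate it from
 * that list. *isolated tells unlock_page_lru() whether the page needs
 * to be put back. The lock is held until unlock_page_lru() is called.
 */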
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002381static void lock_page_lru(struct page *page, int *isolated)
2382{
2383 struct zone *zone = page_zone(page);
2384
2385 spin_lock_irq(&zone->lru_lock);
2386 if (PageLRU(page)) {
2387 struct lruvec *lruvec;
2388
2389 lruvec = mem_cgroup_page_lruvec(page, zone);
2390 ClearPageLRU(page);
2391 del_page_from_lru_list(page, lruvec, page_lru(page));
2392 *isolated = 1;
2393 } else
2394 *isolated = 0;
2395}
2396
2397static void unlock_page_lru(struct page *page, int isolated)
2398{
2399 struct zone *zone = page_zone(page);
2400
2401 if (isolated) {
2402 struct lruvec *lruvec;
2403
2404 lruvec = mem_cgroup_page_lruvec(page, zone);
2405 VM_BUG_ON_PAGE(PageLRU(page), page);
2406 SetPageLRU(page);
2407 add_page_to_lru_list(page, lruvec, page_lru(page));
2408 }
2409 spin_unlock_irq(&zone->lru_lock);
2410}
2411
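/*
 * Commit a successful charge by pointing page->mem_cgroup at @memcg.
 * With @lrucare, the page is temporarily taken off the LRU so that the
 * link is never observed in a half-updated state.
 */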
Johannes Weiner00501b52014-08-08 14:19:20 -07002412static void commit_charge(struct page *page, struct mem_cgroup *memcg,
Johannes Weiner6abb5a82014-08-08 14:19:33 -07002413 bool lrucare)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002414{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002415 int isolated;
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002416
Johannes Weiner1306a852014-12-10 15:44:52 -08002417 VM_BUG_ON_PAGE(page->mem_cgroup, page);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002418
2419 /*
2420	 * In some cases (swap cache, FUSE's splice_buf->radixtree), the page
2421	 * may already be on another mem_cgroup's LRU. Take care of it.
2422 */
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002423 if (lrucare)
2424 lock_page_lru(page, &isolated);
Hugh Dickins9ce70c02012-03-05 14:59:16 -08002425
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002426 /*
2427 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08002428 * page->mem_cgroup at this point:
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002429 *
2430 * - the page is uncharged
2431 *
2432 * - the page is off-LRU
2433 *
2434 * - an anonymous fault has exclusive page access, except for
2435 * a locked page table
2436 *
2437 * - a page cache insertion, a swapin fault, or a migration
2438 * have the page locked
2439 */
Johannes Weiner1306a852014-12-10 15:44:52 -08002440 page->mem_cgroup = memcg;
Hugh Dickins3be912772008-02-07 00:14:19 -08002441
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002442 if (lrucare)
2443 unlock_page_lru(page, isolated);
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002444}
2445
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002446#ifdef CONFIG_MEMCG_KMEM
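/*
 * Charge a kernel memory allocation of @nr_pages to @memcg: the kmem
 * counter is charged first, then the regular memory counters through
 * try_charge(). A bypassed charge (-EINTR) is force-charged to the
 * memory counters so that accounting stays consistent, as explained in
 * the comment below.
 */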
Vladimir Davydovdbf22eb2015-02-10 14:11:41 -08002447int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
2448 unsigned long nr_pages)
Glauber Costa749c5412012-12-18 14:23:01 -08002449{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002450 struct page_counter *counter;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002451 int ret = 0;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002452
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002453 ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter);
2454 if (ret < 0)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002455 return ret;
2456
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002457 ret = try_charge(memcg, gfp, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002458 if (ret == -EINTR) {
2459 /*
Johannes Weiner00501b52014-08-08 14:19:20 -07002460 * try_charge() chose to bypass to root due to OOM kill or
2461 * fatal signal. Since our only options are to either fail
2462 * the allocation or charge it to this cgroup, do it as a
2463 * temporary condition. But we can't fail. From a kmem/slab
2464 * perspective, the cache has already been selected, by
2465 * mem_cgroup_kmem_get_cache(), so it is too late to change
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002466 * our minds.
2467 *
2468 * This condition will only trigger if the task entered
Johannes Weiner00501b52014-08-08 14:19:20 -07002469 * memcg_charge_kmem in a sane state, but was OOM-killed
2470 * during try_charge() above. Tasks that were already dying
2471 * when the allocation triggers should have been already
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002472 * directed to the root cgroup in memcontrol.h
2473 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002474 page_counter_charge(&memcg->memory, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002475 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002476 page_counter_charge(&memcg->memsw, nr_pages);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002477 css_get_many(&memcg->css, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002478 ret = 0;
2479 } else if (ret)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002480 page_counter_uncharge(&memcg->kmem, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002481
2482 return ret;
2483}
2484
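/*
 * Undo memcg_charge_kmem(): uncharge the memory, memsw and kmem
 * counters and drop the css references held for the allocation.
 */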
Vladimir Davydovdbf22eb2015-02-10 14:11:41 -08002485void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages)
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002486{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002487 page_counter_uncharge(&memcg->memory, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002488 if (do_swap_account)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002489 page_counter_uncharge(&memcg->memsw, nr_pages);
Glauber Costa7de37682012-12-18 14:22:07 -08002490
Johannes Weiner64f21992014-12-10 15:42:45 -08002491 page_counter_uncharge(&memcg->kmem, nr_pages);
Glauber Costa7de37682012-12-18 14:22:07 -08002492
Johannes Weinere8ea14c2014-12-10 15:42:42 -08002493 css_put_many(&memcg->css, nr_pages);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002494}
2495
Glauber Costa2633d7a2012-12-18 14:22:34 -08002496/*
2497 * Helper for accessing a memcg's index. It will be used as an index in the
2498 * child cache array in kmem_cache, and also to derive its name. This function
2499 * will return -1 when this is not a kmem-limited memcg.
2500 */
2501int memcg_cache_id(struct mem_cgroup *memcg)
2502{
2503 return memcg ? memcg->kmemcg_id : -1;
2504}
2505
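/*
 * Allocate a new kmemcg id and, if it does not fit into the current
 * memcg_caches arrays, grow the arrays and the per-memcg list_lrus
 * under memcg_cache_ids_sem before returning it.
 */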
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002506static int memcg_alloc_cache_id(void)
Glauber Costa55007d82012-12-18 14:22:38 -08002507{
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002508 int id, size;
2509 int err;
Glauber Costa55007d82012-12-18 14:22:38 -08002510
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002511 id = ida_simple_get(&memcg_cache_ida,
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002512 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2513 if (id < 0)
2514 return id;
2515
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002516 if (id < memcg_nr_cache_ids)
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002517 return id;
2518
2519 /*
2520 * There's no space for the new id in memcg_caches arrays,
2521 * so we have to grow them.
2522 */
Vladimir Davydov05257a12015-02-12 14:59:01 -08002523 down_write(&memcg_cache_ids_sem);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002524
2525 size = 2 * (id + 1);
Glauber Costa55007d82012-12-18 14:22:38 -08002526 if (size < MEMCG_CACHES_MIN_SIZE)
2527 size = MEMCG_CACHES_MIN_SIZE;
2528 else if (size > MEMCG_CACHES_MAX_SIZE)
2529 size = MEMCG_CACHES_MAX_SIZE;
2530
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002531 err = memcg_update_all_caches(size);
Vladimir Davydov05257a12015-02-12 14:59:01 -08002532 if (!err)
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002533 err = memcg_update_all_list_lrus(size);
2534 if (!err)
Vladimir Davydov05257a12015-02-12 14:59:01 -08002535 memcg_nr_cache_ids = size;
2536
2537 up_write(&memcg_cache_ids_sem);
2538
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002539 if (err) {
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002540 ida_simple_remove(&memcg_cache_ida, id);
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07002541 return err;
2542 }
2543 return id;
2544}
2545
2546static void memcg_free_cache_id(int id)
2547{
Vladimir Davydovdbcf73e2015-02-12 14:58:57 -08002548 ida_simple_remove(&memcg_cache_ida, id);
Glauber Costa55007d82012-12-18 14:22:38 -08002549}
2550
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002551struct memcg_kmem_cache_create_work {
Vladimir Davydov5722d092014-04-07 15:39:24 -07002552 struct mem_cgroup *memcg;
2553 struct kmem_cache *cachep;
2554 struct work_struct work;
2555};
2556
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002557static void memcg_kmem_cache_create_func(struct work_struct *w)
Glauber Costad7f25f82012-12-18 14:22:40 -08002558{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002559 struct memcg_kmem_cache_create_work *cw =
2560 container_of(w, struct memcg_kmem_cache_create_work, work);
Vladimir Davydov5722d092014-04-07 15:39:24 -07002561 struct mem_cgroup *memcg = cw->memcg;
2562 struct kmem_cache *cachep = cw->cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002563
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002564 memcg_create_kmem_cache(memcg, cachep);
Vladimir Davydovbd673142014-06-04 16:07:40 -07002565
Vladimir Davydov5722d092014-04-07 15:39:24 -07002566 css_put(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002567 kfree(cw);
2568}
2569
2570/*
2571 * Enqueue the creation of a per-memcg kmem_cache.
Glauber Costad7f25f82012-12-18 14:22:40 -08002572 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002573static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2574 struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002575{
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002576 struct memcg_kmem_cache_create_work *cw;
Glauber Costad7f25f82012-12-18 14:22:40 -08002577
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002578 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002579 if (!cw)
Glauber Costad7f25f82012-12-18 14:22:40 -08002580 return;
Vladimir Davydov8135be52014-12-12 16:56:38 -08002581
2582 css_get(&memcg->css);
Glauber Costad7f25f82012-12-18 14:22:40 -08002583
2584 cw->memcg = memcg;
2585 cw->cachep = cachep;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002586 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
Glauber Costad7f25f82012-12-18 14:22:40 -08002587
Glauber Costad7f25f82012-12-18 14:22:40 -08002588 schedule_work(&cw->work);
2589}
2590
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002591static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2592 struct kmem_cache *cachep)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002593{
2594 /*
2595 * We need to stop accounting when we kmalloc, because if the
2596 * corresponding kmalloc cache is not yet created, the first allocation
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002597 * in __memcg_schedule_kmem_cache_create will recurse.
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002598 *
2599 * However, it is better to enclose the whole function. Depending on
2600 * the debugging options enabled, INIT_WORK(), for instance, can
2601	 * trigger an allocation. This, too, will make us recurse. Because at
2602 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2603 * the safest choice is to do it like this, wrapping the whole function.
2604 */
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002605 current->memcg_kmem_skip_account = 1;
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002606 __memcg_schedule_kmem_cache_create(memcg, cachep);
Vladimir Davydov6f185c22014-12-12 16:55:15 -08002607 current->memcg_kmem_skip_account = 0;
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002608}
Vladimir Davydovc67a8a62014-06-04 16:07:39 -07002609
Glauber Costad7f25f82012-12-18 14:22:40 -08002610/*
2611 * Return the kmem_cache we're supposed to use for a slab allocation.
2612 * We try to use the current memcg's version of the cache.
2613 *
2614 * If the cache does not exist yet, i.e. we are the first user of it,
2615 * we either create it immediately, if possible, or create it asynchronously
2616 * in a workqueue.
2617 * In the latter case, we will let the current allocation go through with
2618 * the original cache.
2619 *
2620 * Can't be called in interrupt context or from kernel threads.
2621 * This function needs to be called with rcu_read_lock() held.
2622 */
Zhang Zhen056b7cc2014-12-12 16:55:38 -08002623struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
Glauber Costad7f25f82012-12-18 14:22:40 -08002624{
2625 struct mem_cgroup *memcg;
Vladimir Davydov959c8962014-01-23 15:52:59 -08002626 struct kmem_cache *memcg_cachep;
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002627 int kmemcg_id;
Glauber Costad7f25f82012-12-18 14:22:40 -08002628
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002629 VM_BUG_ON(!is_root_cache(cachep));
Glauber Costad7f25f82012-12-18 14:22:40 -08002630
Vladimir Davydov9d100c52014-12-12 16:54:53 -08002631 if (current->memcg_kmem_skip_account)
Glauber Costa0e9d92f2012-12-18 14:22:42 -08002632 return cachep;
2633
Vladimir Davydov8135be52014-12-12 16:56:38 -08002634 memcg = get_mem_cgroup_from_mm(current->mm);
Jason Low4db0c3c2015-04-15 16:14:08 -07002635 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002636 if (kmemcg_id < 0)
Li Zefanca0dde92013-04-29 15:08:57 -07002637 goto out;
Glauber Costad7f25f82012-12-18 14:22:40 -08002638
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08002639 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002640 if (likely(memcg_cachep))
2641 return memcg_cachep;
Li Zefanca0dde92013-04-29 15:08:57 -07002642
2643 /*
2644 * If we are in a safe context (can wait, and not in interrupt
2645	 * context), we could be predictable and return right away.
2646 * This would guarantee that the allocation being performed
2647 * already belongs in the new cache.
2648 *
2649	 * However, there are some clashes that can arise from locking.
2650 * For instance, because we acquire the slab_mutex while doing
Vladimir Davydov776ed0f2014-06-04 16:10:02 -07002651 * memcg_create_kmem_cache, this means no further allocation
2652 * could happen with the slab_mutex held. So it's better to
2653 * defer everything.
Li Zefanca0dde92013-04-29 15:08:57 -07002654 */
Vladimir Davydovd5b3cf72015-02-10 14:11:47 -08002655 memcg_schedule_kmem_cache_create(memcg, cachep);
Li Zefanca0dde92013-04-29 15:08:57 -07002656out:
Vladimir Davydov8135be52014-12-12 16:56:38 -08002657 css_put(&memcg->css);
Li Zefanca0dde92013-04-29 15:08:57 -07002658 return cachep;
Glauber Costad7f25f82012-12-18 14:22:40 -08002659}
Glauber Costad7f25f82012-12-18 14:22:40 -08002660
Vladimir Davydov8135be52014-12-12 16:56:38 -08002661void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2662{
2663 if (!is_root_cache(cachep))
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002664 css_put(&cachep->memcg_params.memcg->css);
Vladimir Davydov8135be52014-12-12 16:56:38 -08002665}
2666
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002667/*
2668 * We need to verify if the allocation against current->mm->owner's memcg is
2669 * possible for the given order. But the page is not allocated yet, so we'll
2670 * need a further commit step to do the final arrangements.
2671 *
2672 * It is possible for the task to switch cgroups in the meantime, so at
2673 * commit time, we can't rely on task conversion any longer. We'll then use
2674 * the handle argument to return to the caller which cgroup we should commit
2675 * against. We could also return the memcg directly and avoid the pointer
2676 * passing, but a boolean return value gives better semantics considering
2677 * the compiled-out case as well.
2678 *
2679 * Returning true means the allocation is possible.
2680 */
2681bool
2682__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
2683{
2684 struct mem_cgroup *memcg;
2685 int ret;
2686
2687 *_memcg = NULL;
Glauber Costa6d42c232013-07-08 16:00:00 -07002688
Johannes Weinerdf381972014-04-07 15:37:43 -07002689 memcg = get_mem_cgroup_from_mm(current->mm);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002690
Vladimir Davydovcf2b8fb2014-10-09 15:28:59 -07002691 if (!memcg_kmem_is_active(memcg)) {
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002692 css_put(&memcg->css);
2693 return true;
2694 }
2695
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002696 ret = memcg_charge_kmem(memcg, gfp, 1 << order);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002697 if (!ret)
2698 *_memcg = memcg;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002699
2700 css_put(&memcg->css);
2701 return (ret == 0);
2702}
2703
2704void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
2705 int order)
2706{
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002707 VM_BUG_ON(mem_cgroup_is_root(memcg));
2708
2709 /* The page allocation failed. Revert */
2710 if (!page) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002711 memcg_uncharge_kmem(memcg, 1 << order);
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002712 return;
2713 }
Johannes Weiner1306a852014-12-10 15:44:52 -08002714 page->mem_cgroup = memcg;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002715}
2716
2717void __memcg_kmem_uncharge_pages(struct page *page, int order)
2718{
Johannes Weiner1306a852014-12-10 15:44:52 -08002719 struct mem_cgroup *memcg = page->mem_cgroup;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002720
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002721 if (!memcg)
2722 return;
2723
Sasha Levin309381fea2014-01-23 15:52:54 -08002724 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
Johannes Weiner29833312014-12-10 15:44:02 -08002725
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002726 memcg_uncharge_kmem(memcg, 1 << order);
Johannes Weiner1306a852014-12-10 15:44:52 -08002727 page->mem_cgroup = NULL;
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002728}
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002729
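/*
 * Map a kmem-allocated object back to the memcg it is charged to:
 * through its slab cache for slab objects, or through page->mem_cgroup
 * for pages from alloc_kmem_pages.
 */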
2730struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
2731{
2732 struct mem_cgroup *memcg = NULL;
2733 struct kmem_cache *cachep;
2734 struct page *page;
2735
2736 page = virt_to_head_page(ptr);
2737 if (PageSlab(page)) {
2738 cachep = page->slab_cache;
2739 if (!is_root_cache(cachep))
Vladimir Davydovf7ce3192015-02-12 14:59:20 -08002740 memcg = cachep->memcg_params.memcg;
Vladimir Davydov60d3fd32015-02-12 14:59:10 -08002741 } else
2742 /* page allocated by alloc_kmem_pages */
2743 memcg = page->mem_cgroup;
2744
2745 return memcg;
2746}
Glauber Costa7ae1e1d2012-12-18 14:21:56 -08002747#endif /* CONFIG_MEMCG_KMEM */
2748
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002749#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2750
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002751/*
2752 * Because tail pages are not marked as "used", set them. We're under
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002753 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2754 * charge/uncharge will never happen and move_account() is done under
2755 * compound_lock(), so we don't have to take care of races.
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002756 */
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002757void mem_cgroup_split_huge_fixup(struct page *head)
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002758{
KAMEZAWA Hiroyukie94c8a92012-01-12 17:18:20 -08002759 int i;
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002760
KAMEZAWA Hiroyuki3d37c4a2011-01-25 15:07:28 -08002761 if (mem_cgroup_disabled())
2762 return;
David Rientjesb070e652013-05-07 16:18:09 -07002763
Johannes Weiner29833312014-12-10 15:44:02 -08002764 for (i = 1; i < HPAGE_PMD_NR; i++)
Johannes Weiner1306a852014-12-10 15:44:52 -08002765 head[i].mem_cgroup = head->mem_cgroup;
Michal Hockob9982f82014-12-10 15:43:51 -08002766
Johannes Weiner1306a852014-12-10 15:44:52 -08002767 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
David Rientjesb070e652013-05-07 16:18:09 -07002768 HPAGE_PMD_NR);
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002769}
Hugh Dickins12d27102012-01-12 17:19:52 -08002770#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002771
Andrew Mortonc255a452012-07-31 16:43:02 -07002772#ifdef CONFIG_MEMCG_SWAP
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002773static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2774 bool charge)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002775{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002776 int val = (charge) ? 1 : -1;
2777 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002778}
Daisuke Nishimura02491442010-03-10 15:22:17 -08002779
2780/**
2781 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2782 * @entry: swap entry to be moved
2783 * @from: mem_cgroup which the entry is moved from
2784 * @to: mem_cgroup which the entry is moved to
2785 *
2786 * It succeeds only when the swap_cgroup's record for this entry is the same
2787 * as the mem_cgroup's id of @from.
2788 *
2789 * Returns 0 on success, -EINVAL on failure.
2790 *
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002791 * The caller must have charged to @to, IOW, called page_counter_charge() for
Daisuke Nishimura02491442010-03-10 15:22:17 -08002792 * both memory and memsw, and called css_get().
2793 */
2794static int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07002795 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002796{
2797 unsigned short old_id, new_id;
2798
Li Zefan34c00c32013-09-23 16:56:01 +08002799 old_id = mem_cgroup_id(from);
2800 new_id = mem_cgroup_id(to);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002801
2802 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08002803 mem_cgroup_swap_statistics(from, false);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002804 mem_cgroup_swap_statistics(to, true);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002805 return 0;
2806 }
2807 return -EINVAL;
2808}
2809#else
2810static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
Hugh Dickinse91cbb42012-05-29 15:06:51 -07002811 struct mem_cgroup *from, struct mem_cgroup *to)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002812{
2813 return -EINVAL;
2814}
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002815#endif
2816
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002817static DEFINE_MUTEX(memcg_limit_mutex);
Daisuke Nishimuraf212ad72011-03-23 16:42:25 -07002818
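/*
 * Set a new limit on the memory counter. The new limit must not exceed
 * the memsw limit. While usage is above the new limit, reclaim is
 * invoked and the resize retried; attempts that fail to reduce usage
 * count against a bounded retry budget.
 */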
KOSAKI Motohirod38d2a72009-01-06 14:39:44 -08002819static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002820 unsigned long limit)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002821{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002822 unsigned long curusage;
2823 unsigned long oldusage;
2824 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002825 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002826 int ret;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002827
2828 /*
2829 * For keeping hierarchical_reclaim simple, how long we should retry
2830	 * depends on the caller. We set our retry-count to be a function
2831	 * of the number of children which we should visit in this loop.
2832 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002833 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2834 mem_cgroup_count_children(memcg);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002835
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002836 oldusage = page_counter_read(&memcg->memory);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002837
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002838 do {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002839 if (signal_pending(current)) {
2840 ret = -EINTR;
2841 break;
2842 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002843
2844 mutex_lock(&memcg_limit_mutex);
2845 if (limit > memcg->memsw.limit) {
2846 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002847 ret = -EINVAL;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002848 break;
2849 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002850 if (limit > memcg->memory.limit)
2851 enlarge = true;
2852 ret = page_counter_limit(&memcg->memory, limit);
2853 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002854
2855 if (!ret)
2856 break;
2857
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002858 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2859
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002860 curusage = page_counter_read(&memcg->memory);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002861 /* Usage is reduced ? */
Andrew Mortonf894ffa2013-09-12 15:13:35 -07002862 if (curusage >= oldusage)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002863 retry_count--;
2864 else
2865 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002866 } while (retry_count);
2867
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002868 if (!ret && enlarge)
2869 memcg_oom_recover(memcg);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08002870
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002871 return ret;
2872}
2873
Li Zefan338c8432009-06-17 16:27:15 -07002874static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002875 unsigned long limit)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002876{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002877 unsigned long curusage;
2878 unsigned long oldusage;
2879 bool enlarge = false;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002880 int retry_count;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002881 int ret;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002882
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002883	/* see mem_cgroup_resize_limit */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002884 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2885 mem_cgroup_count_children(memcg);
2886
2887 oldusage = page_counter_read(&memcg->memsw);
2888
2889 do {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002890 if (signal_pending(current)) {
2891 ret = -EINTR;
2892 break;
2893 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002894
2895 mutex_lock(&memcg_limit_mutex);
2896 if (limit < memcg->memory.limit) {
2897 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002898 ret = -EINVAL;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002899 break;
2900 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002901 if (limit > memcg->memsw.limit)
2902 enlarge = true;
2903 ret = page_counter_limit(&memcg->memsw, limit);
2904 mutex_unlock(&memcg_limit_mutex);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002905
2906 if (!ret)
2907 break;
2908
Johannes Weinerb70a2a22014-10-09 15:28:56 -07002909 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2910
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002911 curusage = page_counter_read(&memcg->memsw);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002912 /* Usage is reduced ? */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002913 if (curusage >= oldusage)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002914 retry_count--;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07002915 else
2916 oldusage = curusage;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002917 } while (retry_count);
2918
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002919 if (!ret && enlarge)
2920 memcg_oom_recover(memcg);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002921
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07002922 return ret;
2923}
2924
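/*
 * Reclaim, for order-0 allocations only, from the cgroups that exceed
 * their soft limit the most on this zone, as tracked by the soft limit
 * tree. Each visited cgroup is removed from the tree and reinserted
 * with its remaining excess, until something has been reclaimed or the
 * loop stops making progress.
 */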
Andrew Morton0608f432013-09-24 15:27:41 -07002925unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2926 gfp_t gfp_mask,
2927 unsigned long *total_scanned)
2928{
2929 unsigned long nr_reclaimed = 0;
2930 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2931 unsigned long reclaimed;
2932 int loop = 0;
2933 struct mem_cgroup_tree_per_zone *mctz;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002934 unsigned long excess;
Andrew Morton0608f432013-09-24 15:27:41 -07002935 unsigned long nr_scanned;
2936
2937 if (order > 0)
2938 return 0;
2939
2940 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2941 /*
2942	 * This loop can run a while, especially if mem_cgroups continuously
2943	 * keep exceeding their soft limit and putting the system under
2944	 * pressure.
2945 */
2946 do {
2947 if (next_mz)
2948 mz = next_mz;
2949 else
2950 mz = mem_cgroup_largest_soft_limit_node(mctz);
2951 if (!mz)
2952 break;
2953
2954 nr_scanned = 0;
2955 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2956 gfp_mask, &nr_scanned);
2957 nr_reclaimed += reclaimed;
2958 *total_scanned += nr_scanned;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002959 spin_lock_irq(&mctz->lock);
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08002960 __mem_cgroup_remove_exceeded(mz, mctz);
Andrew Morton0608f432013-09-24 15:27:41 -07002961
2962 /*
2963 * If we failed to reclaim anything from this memory cgroup
2964 * it is time to move on to the next cgroup
2965 */
2966 next_mz = NULL;
Vladimir Davydovbc2f2e72014-12-10 15:43:40 -08002967 if (!reclaimed)
2968 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2969
Johannes Weiner3e32cb22014-12-10 15:42:31 -08002970 excess = soft_limit_excess(mz->memcg);
Andrew Morton0608f432013-09-24 15:27:41 -07002971 /*
2972 * One school of thought says that we should not add
2973 * back the node to the tree if reclaim returns 0.
2974		 * But our reclaim could return 0 simply because, due
2975		 * to priority, we are exposing a smaller subset of
2976 * memory to reclaim from. Consider this as a longer
2977 * term TODO.
2978 */
2979 /* If excess == 0, no tree ops */
Johannes Weinercf2c8122014-06-06 14:38:21 -07002980 __mem_cgroup_insert_exceeded(mz, mctz, excess);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07002981 spin_unlock_irq(&mctz->lock);
Andrew Morton0608f432013-09-24 15:27:41 -07002982 css_put(&mz->memcg->css);
2983 loop++;
2984 /*
2985 * Could not reclaim anything and there are no more
2986 * mem cgroups to try or we seem to be looping without
2987 * reclaiming anything.
2988 */
2989 if (!nr_reclaimed &&
2990 (next_mz == NULL ||
2991 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2992 break;
2993 } while (!nr_reclaimed);
2994 if (next_mz)
2995 css_put(&next_mz->memcg->css);
2996 return nr_reclaimed;
2997}
2998
Tejun Heoea280e72014-05-16 13:22:48 -04002999/*
3000 * Test whether @memcg has children, dead or alive. Note that this
3001 * function doesn't care whether @memcg has use_hierarchy enabled and
3002 * returns %true if there are child csses according to the cgroup
3003 * hierarchy. Testing use_hierarchy is the caller's responsiblity.
3004 * hierarchy. Testing use_hierarchy is the caller's responsibility.
Glauber Costab5f99b52013-02-22 16:34:53 -08003005static inline bool memcg_has_children(struct mem_cgroup *memcg)
3006{
Tejun Heoea280e72014-05-16 13:22:48 -04003007 bool ret;
3008
Johannes Weiner696ac172013-10-31 16:34:15 -07003009 /*
Tejun Heoea280e72014-05-16 13:22:48 -04003010 * The lock does not prevent addition or deletion of children, but
3011 * it prevents a new child from being initialized based on this
3012 * parent in css_online(), so it's enough to decide whether
3013 * hierarchically inherited attributes can still be changed or not.
Johannes Weiner696ac172013-10-31 16:34:15 -07003014 */
Tejun Heoea280e72014-05-16 13:22:48 -04003015 lockdep_assert_held(&memcg_create_mutex);
3016
3017 rcu_read_lock();
3018 ret = css_next_child(NULL, &memcg->css);
3019 rcu_read_unlock();
3020 return ret;
Glauber Costab5f99b52013-02-22 16:34:53 -08003021}
3022
3023/*
Michal Hockoc26251f2012-10-26 13:37:28 +02003024 * Reclaims as many pages from the given memcg as possible and moves
3025 * the rest to the parent.
3026 *
3027 * Caller is responsible for holding css reference for memcg.
3028 */
3029static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3030{
3031 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
Michal Hockoc26251f2012-10-26 13:37:28 +02003032
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003033	/* we call try-to-free pages to make this cgroup empty */
3034 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003035 /* try to free all pages in this cgroup */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003036 while (nr_retries && page_counter_read(&memcg->memory)) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003037 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003038
Michal Hockoc26251f2012-10-26 13:37:28 +02003039 if (signal_pending(current))
3040 return -EINTR;
3041
Johannes Weinerb70a2a22014-10-09 15:28:56 -07003042 progress = try_to_free_mem_cgroup_pages(memcg, 1,
3043 GFP_KERNEL, true);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003044 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003045 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003046 /* maybe some writeback is necessary */
Jens Axboe8aa7e842009-07-09 14:52:32 +02003047 congestion_wait(BLK_RW_ASYNC, HZ/10);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003048 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003049
3050 }
Michal Hockoab5196c2012-10-26 13:37:32 +02003051
3052 return 0;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003053}
3054
Tejun Heo6770c642014-05-13 12:16:21 -04003055static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3056 char *buf, size_t nbytes,
3057 loff_t off)
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003058{
Tejun Heo6770c642014-05-13 12:16:21 -04003059 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Michal Hockoc26251f2012-10-26 13:37:28 +02003060
Michal Hockod8423012012-10-26 13:37:29 +02003061 if (mem_cgroup_is_root(memcg))
3062 return -EINVAL;
Tejun Heo6770c642014-05-13 12:16:21 -04003063 return mem_cgroup_force_empty(memcg) ?: nbytes;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003064}
3065
Tejun Heo182446d2013-08-08 20:11:24 -04003066static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3067 struct cftype *cft)
Balbir Singh18f59ea2009-01-07 18:08:07 -08003068{
Tejun Heo182446d2013-08-08 20:11:24 -04003069 return mem_cgroup_from_css(css)->use_hierarchy;
Balbir Singh18f59ea2009-01-07 18:08:07 -08003070}
3071
Tejun Heo182446d2013-08-08 20:11:24 -04003072static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3073 struct cftype *cft, u64 val)
Balbir Singh18f59ea2009-01-07 18:08:07 -08003074{
3075 int retval = 0;
Tejun Heo182446d2013-08-08 20:11:24 -04003076 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04003077 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08003078
Glauber Costa09998212013-02-22 16:34:55 -08003079 mutex_lock(&memcg_create_mutex);
Glauber Costa567fb432012-07-31 16:43:07 -07003080
3081 if (memcg->use_hierarchy == val)
3082 goto out;
3083
Balbir Singh18f59ea2009-01-07 18:08:07 -08003084 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003085 * If parent's use_hierarchy is set, we can't make any modifications
Balbir Singh18f59ea2009-01-07 18:08:07 -08003086 * in the child subtrees. If it is unset, then the change can
3087 * occur, provided the current cgroup has no children.
3088 *
3089	 * For the root cgroup, parent_memcg is NULL, so we allow the value to
3090	 * be set if there are no children.
3091 */
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003092 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
Balbir Singh18f59ea2009-01-07 18:08:07 -08003093 (val == 1 || val == 0)) {
Tejun Heoea280e72014-05-16 13:22:48 -04003094 if (!memcg_has_children(memcg))
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003095 memcg->use_hierarchy = val;
Balbir Singh18f59ea2009-01-07 18:08:07 -08003096 else
3097 retval = -EBUSY;
3098 } else
3099 retval = -EINVAL;
Glauber Costa567fb432012-07-31 16:43:07 -07003100
3101out:
Glauber Costa09998212013-02-22 16:34:55 -08003102 mutex_unlock(&memcg_create_mutex);
Balbir Singh18f59ea2009-01-07 18:08:07 -08003103
3104 return retval;
3105}
3106
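/*
 * Sum one statistics counter over @memcg and all of its descendants.
 */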
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003107static unsigned long tree_stat(struct mem_cgroup *memcg,
3108 enum mem_cgroup_stat_index idx)
Johannes Weinerce00a962014-09-05 08:43:57 -04003109{
3110 struct mem_cgroup *iter;
3111 long val = 0;
3112
3113 /* Per-cpu values can be negative, use a signed accumulator */
3114 for_each_mem_cgroup_tree(iter, memcg)
3115 val += mem_cgroup_read_stat(iter, idx);
3116
3117 if (val < 0) /* race ? */
3118 val = 0;
3119 return val;
3120}
3121
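/*
 * Usage in bytes. The root cgroup is not tracked by the page counters,
 * so its usage is derived from the hierarchical cache, RSS and swap
 * statistics; all other cgroups read their counters directly.
 */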
3122static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3123{
3124 u64 val;
3125
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003126 if (mem_cgroup_is_root(memcg)) {
3127 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
3128 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
3129 if (swap)
3130 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
3131 } else {
Johannes Weinerce00a962014-09-05 08:43:57 -04003132 if (!swap)
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003133 val = page_counter_read(&memcg->memory);
Johannes Weinerce00a962014-09-05 08:43:57 -04003134 else
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003135 val = page_counter_read(&memcg->memsw);
Johannes Weinerce00a962014-09-05 08:43:57 -04003136 }
Johannes Weinerce00a962014-09-05 08:43:57 -04003137 return val << PAGE_SHIFT;
3138}
3139
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003140enum {
3141 RES_USAGE,
3142 RES_LIMIT,
3143 RES_MAX_USAGE,
3144 RES_FAILCNT,
3145 RES_SOFT_LIMIT,
3146};
Johannes Weinerce00a962014-09-05 08:43:57 -04003147
Tejun Heo791badb2013-12-05 12:28:02 -05003148static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
Johannes Weiner05b84302014-08-06 16:05:59 -07003149 struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003150{
Tejun Heo182446d2013-08-08 20:11:24 -04003151 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003152 struct page_counter *counter;
Tejun Heoaf36f902012-04-01 12:09:55 -07003153
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003154 switch (MEMFILE_TYPE(cft->private)) {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003155 case _MEM:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003156 counter = &memcg->memory;
Glauber Costa510fc4e2012-12-18 14:21:47 -08003157 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003158 case _MEMSWAP:
3159 counter = &memcg->memsw;
3160 break;
3161 case _KMEM:
3162 counter = &memcg->kmem;
3163 break;
3164 default:
3165 BUG();
3166 }
3167
3168 switch (MEMFILE_ATTR(cft->private)) {
3169 case RES_USAGE:
3170 if (counter == &memcg->memory)
3171 return mem_cgroup_usage(memcg, false);
3172 if (counter == &memcg->memsw)
3173 return mem_cgroup_usage(memcg, true);
3174 return (u64)page_counter_read(counter) * PAGE_SIZE;
3175 case RES_LIMIT:
3176 return (u64)counter->limit * PAGE_SIZE;
3177 case RES_MAX_USAGE:
3178 return (u64)counter->watermark * PAGE_SIZE;
3179 case RES_FAILCNT:
3180 return counter->failcnt;
3181 case RES_SOFT_LIMIT:
3182 return (u64)memcg->soft_limit * PAGE_SIZE;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003183 default:
3184 BUG();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003185 }
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003186}
Glauber Costa510fc4e2012-12-18 14:21:47 -08003187
Glauber Costa510fc4e2012-12-18 14:21:47 -08003188#ifdef CONFIG_MEMCG_KMEM
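/*
 * Turn on kernel memory accounting for @memcg: allocate a kmemcg id,
 * set the initial kmem limit and enable the static key. Activation is
 * refused if tasks have already joined or, with use_hierarchy, if
 * children already exist, so that kmem usage is meaningful from the
 * start.
 */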
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08003189static int memcg_activate_kmem(struct mem_cgroup *memcg,
3190 unsigned long nr_pages)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003191{
3192 int err = 0;
3193 int memcg_id;
3194
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003195 BUG_ON(memcg->kmemcg_id >= 0);
Vladimir Davydov2788cf02015-02-12 14:59:38 -08003196 BUG_ON(memcg->kmem_acct_activated);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003197 BUG_ON(memcg->kmem_acct_active);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003198
3199 /*
Glauber Costa510fc4e2012-12-18 14:21:47 -08003200 * For simplicity, we won't allow this to be disabled. It also can't
3201	 * be changed if the cgroup already has children, or if tasks have
3202	 * already joined.
3203 *
3204 * If tasks join before we set the limit, a person looking at
3205 * kmem.usage_in_bytes will have no way to determine when it took
3206 * place, which makes the value quite meaningless.
3207 *
3208 * After it first became limited, changes in the value of the limit are
3209 * of course permitted.
Glauber Costa510fc4e2012-12-18 14:21:47 -08003210 */
Glauber Costa09998212013-02-22 16:34:55 -08003211 mutex_lock(&memcg_create_mutex);
Tejun Heoea280e72014-05-16 13:22:48 -04003212 if (cgroup_has_tasks(memcg->css.cgroup) ||
3213 (memcg->use_hierarchy && memcg_has_children(memcg)))
Vladimir Davydovd6441632014-01-23 15:53:09 -08003214 err = -EBUSY;
Glauber Costa09998212013-02-22 16:34:55 -08003215 mutex_unlock(&memcg_create_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003216 if (err)
3217 goto out;
3218
Vladimir Davydovf3bb3042014-10-09 15:28:45 -07003219 memcg_id = memcg_alloc_cache_id();
Vladimir Davydovd6441632014-01-23 15:53:09 -08003220 if (memcg_id < 0) {
3221 err = memcg_id;
3222 goto out;
3223 }
3224
Vladimir Davydovd6441632014-01-23 15:53:09 -08003225 /*
Vladimir Davydov900a38f2014-12-12 16:55:10 -08003226	 * We couldn't have accounted to this cgroup, because it hasn't been
3227	 * activated yet, so this should succeed.
Vladimir Davydovd6441632014-01-23 15:53:09 -08003228 */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003229 err = page_counter_limit(&memcg->kmem, nr_pages);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003230 VM_BUG_ON(err);
3231
3232 static_key_slow_inc(&memcg_kmem_enabled_key);
3233 /*
Vladimir Davydov900a38f2014-12-12 16:55:10 -08003234 * A memory cgroup is considered kmem-active as soon as it gets
3235 * kmemcg_id. Setting the id after enabling static branching will
Vladimir Davydovd6441632014-01-23 15:53:09 -08003236 * guarantee no one starts accounting before all call sites are
3237 * patched.
3238 */
Vladimir Davydov900a38f2014-12-12 16:55:10 -08003239 memcg->kmemcg_id = memcg_id;
Vladimir Davydov2788cf02015-02-12 14:59:38 -08003240 memcg->kmem_acct_activated = true;
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003241 memcg->kmem_acct_active = true;
Vladimir Davydovd6441632014-01-23 15:53:09 -08003242out:
Vladimir Davydovd6441632014-01-23 15:53:09 -08003243 return err;
Vladimir Davydovd6441632014-01-23 15:53:09 -08003244}
3245
Vladimir Davydovd6441632014-01-23 15:53:09 -08003246static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003247 unsigned long limit)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003248{
3249 int ret;
3250
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003251 mutex_lock(&memcg_limit_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003252 if (!memcg_kmem_is_active(memcg))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003253 ret = memcg_activate_kmem(memcg, limit);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003254 else
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003255 ret = page_counter_limit(&memcg->kmem, limit);
3256 mutex_unlock(&memcg_limit_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003257 return ret;
3258}
3259
Glauber Costa55007d82012-12-18 14:22:38 -08003260static int memcg_propagate_kmem(struct mem_cgroup *memcg)
Glauber Costa510fc4e2012-12-18 14:21:47 -08003261{
Glauber Costa55007d82012-12-18 14:22:38 -08003262 int ret = 0;
Glauber Costa510fc4e2012-12-18 14:21:47 -08003263 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
Vladimir Davydovd6441632014-01-23 15:53:09 -08003264
Glauber Costa510fc4e2012-12-18 14:21:47 -08003265 if (!parent)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003266 return 0;
Glauber Costa55007d82012-12-18 14:22:38 -08003267
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08003268 mutex_lock(&memcg_limit_mutex);
Glauber Costaa8964b92012-12-18 14:22:09 -08003269 /*
Vladimir Davydovd6441632014-01-23 15:53:09 -08003270 * If the parent cgroup is not kmem-active now, it cannot be activated
3271 * after this point, because it has at least one child already.
Glauber Costaa8964b92012-12-18 14:22:09 -08003272 */
Vladimir Davydovd6441632014-01-23 15:53:09 -08003273 if (memcg_kmem_is_active(parent))
Vladimir Davydov8c0145b2014-12-10 15:43:48 -08003274 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
3275 mutex_unlock(&memcg_limit_mutex);
Glauber Costa55007d82012-12-18 14:22:38 -08003276 return ret;
Glauber Costa510fc4e2012-12-18 14:21:47 -08003277}
Vladimir Davydovd6441632014-01-23 15:53:09 -08003278#else
3279static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003280 unsigned long limit)
Vladimir Davydovd6441632014-01-23 15:53:09 -08003281{
3282 return -EINVAL;
3283}
Hugh Dickins6d0439902013-02-22 16:35:50 -08003284#endif /* CONFIG_MEMCG_KMEM */
Glauber Costa510fc4e2012-12-18 14:21:47 -08003285
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003286/*
3287 * The user of this function is...
3288 * RES_LIMIT.
3289 */
Tejun Heo451af502014-05-13 12:16:21 -04003290static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3291 char *buf, size_t nbytes, loff_t off)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003292{
Tejun Heo451af502014-05-13 12:16:21 -04003293 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003294 unsigned long nr_pages;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003295 int ret;
3296
Tejun Heo451af502014-05-13 12:16:21 -04003297 buf = strstrip(buf);
Johannes Weiner650c5e52015-02-11 15:26:03 -08003298 ret = page_counter_memparse(buf, "-1", &nr_pages);
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003299 if (ret)
3300 return ret;
Tejun Heoaf36f902012-04-01 12:09:55 -07003301
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003302 switch (MEMFILE_ATTR(of_cft(of)->private)) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003303 case RES_LIMIT:
Balbir Singh4b3bde42009-09-23 15:56:32 -07003304 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3305 ret = -EINVAL;
3306 break;
3307 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003308 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3309 case _MEM:
3310 ret = mem_cgroup_resize_limit(memcg, nr_pages);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003311 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003312 case _MEMSWAP:
3313 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
3314 break;
3315 case _KMEM:
3316 ret = memcg_update_kmem_limit(memcg, nr_pages);
3317 break;
3318 }
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003319 break;
Balbir Singh296c81d2009-09-23 15:56:36 -07003320 case RES_SOFT_LIMIT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003321 memcg->soft_limit = nr_pages;
3322 ret = 0;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003323 break;
3324 }
Tejun Heo451af502014-05-13 12:16:21 -04003325 return ret ?: nbytes;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003326}
3327
Tejun Heo6770c642014-05-13 12:16:21 -04003328static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3329 size_t nbytes, loff_t off)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003330{
Tejun Heo6770c642014-05-13 12:16:21 -04003331 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003332 struct page_counter *counter;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003333
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003334 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3335 case _MEM:
3336 counter = &memcg->memory;
3337 break;
3338 case _MEMSWAP:
3339 counter = &memcg->memsw;
3340 break;
3341 case _KMEM:
3342 counter = &memcg->kmem;
3343 break;
3344 default:
3345 BUG();
3346 }
Tejun Heoaf36f902012-04-01 12:09:55 -07003347
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003348 switch (MEMFILE_ATTR(of_cft(of)->private)) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003349 case RES_MAX_USAGE:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003350 page_counter_reset_watermark(counter);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003351 break;
3352 case RES_FAILCNT:
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003353 counter->failcnt = 0;
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003354 break;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003355 default:
3356 BUG();
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003357 }
Balbir Singhf64c3f52009-09-23 15:56:37 -07003358
Tejun Heo6770c642014-05-13 12:16:21 -04003359 return nbytes;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003360}
3361
Tejun Heo182446d2013-08-08 20:11:24 -04003362static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003363 struct cftype *cft)
3364{
Tejun Heo182446d2013-08-08 20:11:24 -04003365 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003366}
3367
Daisuke Nishimura02491442010-03-10 15:22:17 -08003368#ifdef CONFIG_MMU
Tejun Heo182446d2013-08-08 20:11:24 -04003369static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003370 struct cftype *cft, u64 val)
3371{
Tejun Heo182446d2013-08-08 20:11:24 -04003372 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003373
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08003374 if (val & ~MOVE_MASK)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003375 return -EINVAL;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003376
Glauber Costaee5e8472013-02-22 16:34:50 -08003377 /*
3378 * No kind of locking is needed in here, because ->can_attach() will
3379 * check this value once in the beginning of the process, and then carry
3380 * on with stale data. This means that changes to this value will only
3381 * affect task migrations starting after the change.
3382 */
3383 memcg->move_charge_at_immigrate = val;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003384 return 0;
3385}
Daisuke Nishimura02491442010-03-10 15:22:17 -08003386#else
Tejun Heo182446d2013-08-08 20:11:24 -04003387static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
Daisuke Nishimura02491442010-03-10 15:22:17 -08003388 struct cftype *cft, u64 val)
3389{
3390 return -ENOSYS;
3391}
3392#endif
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003393
Ying Han406eb0c2011-05-26 16:25:37 -07003394#ifdef CONFIG_NUMA
Tejun Heo2da8ca82013-12-05 12:28:04 -05003395static int memcg_numa_stat_show(struct seq_file *m, void *v)
Ying Han406eb0c2011-05-26 16:25:37 -07003396{
Greg Thelen25485de2013-11-12 15:07:40 -08003397 struct numa_stat {
3398 const char *name;
3399 unsigned int lru_mask;
3400 };
3401
3402 static const struct numa_stat stats[] = {
3403 { "total", LRU_ALL },
3404 { "file", LRU_ALL_FILE },
3405 { "anon", LRU_ALL_ANON },
3406 { "unevictable", BIT(LRU_UNEVICTABLE) },
3407 };
3408 const struct numa_stat *stat;
Ying Han406eb0c2011-05-26 16:25:37 -07003409 int nid;
Greg Thelen25485de2013-11-12 15:07:40 -08003410 unsigned long nr;
Tejun Heo2da8ca82013-12-05 12:28:04 -05003411 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Ying Han406eb0c2011-05-26 16:25:37 -07003412
Greg Thelen25485de2013-11-12 15:07:40 -08003413 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3414 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3415 seq_printf(m, "%s=%lu", stat->name, nr);
3416 for_each_node_state(nid, N_MEMORY) {
3417 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3418 stat->lru_mask);
3419 seq_printf(m, " N%d=%lu", nid, nr);
3420 }
3421 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003422 }
Ying Han406eb0c2011-05-26 16:25:37 -07003423
Ying Han071aee12013-11-12 15:07:41 -08003424 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3425 struct mem_cgroup *iter;
Ying Han406eb0c2011-05-26 16:25:37 -07003426
Ying Han071aee12013-11-12 15:07:41 -08003427 nr = 0;
3428 for_each_mem_cgroup_tree(iter, memcg)
3429 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3430 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3431 for_each_node_state(nid, N_MEMORY) {
3432 nr = 0;
3433 for_each_mem_cgroup_tree(iter, memcg)
3434 nr += mem_cgroup_node_nr_lru_pages(
3435 iter, nid, stat->lru_mask);
3436 seq_printf(m, " N%d=%lu", nid, nr);
3437 }
3438 seq_putc(m, '\n');
Ying Han406eb0c2011-05-26 16:25:37 -07003439 }
Ying Han406eb0c2011-05-26 16:25:37 -07003440
Ying Han406eb0c2011-05-26 16:25:37 -07003441 return 0;
3442}
3443#endif /* CONFIG_NUMA */
3444
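/*
 * memory.stat: local statistics, event counters and LRU sizes, the
 * effective (minimum over all ancestors) memory and memsw limits, the
 * hierarchical "total_*" counterparts and, with CONFIG_DEBUG_VM, the
 * recent_rotated/recent_scanned reclaim statistics.
 */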
Tejun Heo2da8ca82013-12-05 12:28:04 -05003445static int memcg_stat_show(struct seq_file *m, void *v)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003446{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003447 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003448 unsigned long memory, memsw;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003449 struct mem_cgroup *mi;
3450 unsigned int i;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003451
Greg Thelen0ca44b12015-02-11 15:25:58 -08003452 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3453 MEM_CGROUP_STAT_NSTATS);
3454 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3455 MEM_CGROUP_EVENTS_NSTATS);
Rickard Strandqvist70bc0682014-12-12 16:56:41 -08003456 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3457
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003458 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
Kamezawa Hiroyukibff6bb82012-07-31 16:41:38 -07003459 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003460 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003461 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
3462 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003463 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08003464
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003465 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3466 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3467 mem_cgroup_read_events(memcg, i));
3468
3469 for (i = 0; i < NR_LRU_LISTS; i++)
3470 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3471 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3472
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003473 /* Hierarchical information */
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003474 memory = memsw = PAGE_COUNTER_MAX;
3475 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3476 memory = min(memory, mi->memory.limit);
3477 memsw = min(memsw, mi->memsw.limit);
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08003478 }
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003479 seq_printf(m, "hierarchical_memory_limit %llu\n",
3480 (u64)memory * PAGE_SIZE);
3481 if (do_swap_account)
3482 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3483 (u64)memsw * PAGE_SIZE);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003484
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003485 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3486 long long val = 0;
3487
Kamezawa Hiroyukibff6bb82012-07-31 16:41:38 -07003488 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003489 continue;
Johannes Weineraf7c4b02012-05-29 15:07:08 -07003490 for_each_mem_cgroup_tree(mi, memcg)
3491 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3492 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
3493 }
3494
3495 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3496 unsigned long long val = 0;
3497
3498 for_each_mem_cgroup_tree(mi, memcg)
3499 val += mem_cgroup_read_events(mi, i);
3500 seq_printf(m, "total_%s %llu\n",
3501 mem_cgroup_events_names[i], val);
3502 }
3503
3504 for (i = 0; i < NR_LRU_LISTS; i++) {
3505 unsigned long long val = 0;
3506
3507 for_each_mem_cgroup_tree(mi, memcg)
3508 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3509 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003510 }
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003511
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003512#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003513 {
3514 int nid, zid;
3515 struct mem_cgroup_per_zone *mz;
Hugh Dickins89abfab2012-05-29 15:06:53 -07003516 struct zone_reclaim_stat *rstat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003517 unsigned long recent_rotated[2] = {0, 0};
3518 unsigned long recent_scanned[2] = {0, 0};
3519
3520 for_each_online_node(nid)
3521 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
Jianyu Zhane2318752014-06-06 14:38:20 -07003522 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
Hugh Dickins89abfab2012-05-29 15:06:53 -07003523 rstat = &mz->lruvec.reclaim_stat;
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003524
Hugh Dickins89abfab2012-05-29 15:06:53 -07003525 recent_rotated[0] += rstat->recent_rotated[0];
3526 recent_rotated[1] += rstat->recent_rotated[1];
3527 recent_scanned[0] += rstat->recent_scanned[0];
3528 recent_scanned[1] += rstat->recent_scanned[1];
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003529 }
Johannes Weiner78ccf5b2012-05-29 15:07:06 -07003530 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3531 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3532 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3533 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003534 }
3535#endif
3536
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003537 return 0;
3538}
3539
Tejun Heo182446d2013-08-08 20:11:24 -04003540static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3541 struct cftype *cft)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003542{
Tejun Heo182446d2013-08-08 20:11:24 -04003543 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003544
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07003545 return mem_cgroup_swappiness(memcg);
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003546}
3547
Tejun Heo182446d2013-08-08 20:11:24 -04003548static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3549 struct cftype *cft, u64 val)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003550{
Tejun Heo182446d2013-08-08 20:11:24 -04003551 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Li Zefan068b38c2009-01-15 13:51:26 -08003552
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003553 if (val > 100)
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003554 return -EINVAL;
3555
Linus Torvalds14208b02014-06-09 15:03:33 -07003556 if (css->parent)
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003557 memcg->swappiness = val;
3558 else
3559 vm_swappiness = val;
Li Zefan068b38c2009-01-15 13:51:26 -08003560
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003561 return 0;
3562}
3563
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003564static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3565{
3566 struct mem_cgroup_threshold_ary *t;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003567 unsigned long usage;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003568 int i;
3569
3570 rcu_read_lock();
3571 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003572 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003573 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003574 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003575
3576 if (!t)
3577 goto unlock;
3578
Johannes Weinerce00a962014-09-05 08:43:57 -04003579 usage = mem_cgroup_usage(memcg, swap);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003580
3581 /*
Sha Zhengju748dad32012-05-29 15:06:57 -07003582 * current_threshold points to the threshold just below or equal to usage.
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003583 * If that is no longer the case, a threshold was crossed after the last
3584 * call of __mem_cgroup_threshold().
3585 */
Phil Carmody5407a562010-05-26 14:42:42 -07003586 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003587
3588 /*
3589 * Iterate backward over array of thresholds starting from
3590 * current_threshold and check if a threshold is crossed.
3591 * If none of the thresholds below usage has been crossed, we read
3592 * only one element of the array here.
3593 */
3594 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3595 eventfd_signal(t->entries[i].eventfd, 1);
3596
3597 /* i = current_threshold + 1 */
3598 i++;
3599
3600 /*
3601 * Iterate forward over array of thresholds starting from
3602 * current_threshold+1 and check if a threshold is crossed.
3603 * If none of the thresholds above usage has been crossed, we read
3604 * only one element of the array here.
3605 */
3606 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3607 eventfd_signal(t->entries[i].eventfd, 1);
3608
3609 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07003610 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003611unlock:
3612 rcu_read_unlock();
3613}
3614
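/*
 * Re-check the thresholds of this memcg and of every ancestor, for
 * plain memory and, when swap accounting is enabled, for memsw.
 */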
3615static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3616{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003617 while (memcg) {
3618 __mem_cgroup_threshold(memcg, false);
3619 if (do_swap_account)
3620 __mem_cgroup_threshold(memcg, true);
3621
3622 memcg = parent_mem_cgroup(memcg);
3623 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003624}
3625
3626static int compare_thresholds(const void *a, const void *b)
3627{
3628 const struct mem_cgroup_threshold *_a = a;
3629 const struct mem_cgroup_threshold *_b = b;
3630
Greg Thelen2bff24a2013-09-11 14:23:08 -07003631 if (_a->threshold > _b->threshold)
3632 return 1;
3633
3634 if (_a->threshold < _b->threshold)
3635 return -1;
3636
3637 return 0;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003638}
3639
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003640static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003641{
3642 struct mem_cgroup_eventfd_list *ev;
3643
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003644 spin_lock(&memcg_oom_lock);
3645
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003646 list_for_each_entry(ev, &memcg->oom_notify, list)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003647 eventfd_signal(ev->eventfd, 1);
Michal Hocko2bcf2e92014-07-30 16:08:33 -07003648
3649 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003650 return 0;
3651}
3652
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003653static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003654{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003655 struct mem_cgroup *iter;
3656
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003657 for_each_mem_cgroup_tree(iter, memcg)
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003658 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003659}
3660
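/*
 * Register an eventfd to be signalled whenever memory (or memsw) usage
 * crosses the threshold given in @args (in bytes).  The thresholds live
 * in a sorted array that is rebuilt under memcg->thresholds_lock and
 * published with rcu_assign_pointer().
 */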
Tejun Heo59b6f872013-11-22 18:20:43 -05003661static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003662 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003663{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003664 struct mem_cgroup_thresholds *thresholds;
3665 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003666 unsigned long threshold;
3667 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003668 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003669
Johannes Weiner650c5e52015-02-11 15:26:03 -08003670 ret = page_counter_memparse(args, "-1", &threshold);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003671 if (ret)
3672 return ret;
3673
3674 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003675
Johannes Weiner05b84302014-08-06 16:05:59 -07003676 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003677 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003678 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003679 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003680 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003681 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003682 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003683 BUG();
3684
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003685 /* Check if a threshold crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003686 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003687 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3688
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003689 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003690
3691 /* Allocate memory for new array of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003692 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003693 GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003694 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003695 ret = -ENOMEM;
3696 goto unlock;
3697 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003698 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003699
3700 /* Copy thresholds (if any) to new array */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003701 if (thresholds->primary) {
3702 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003703 sizeof(struct mem_cgroup_threshold));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003704 }
3705
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003706 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003707 new->entries[size - 1].eventfd = eventfd;
3708 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003709
3710 /* Sort thresholds. Registering a new threshold isn't time-critical */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003711 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003712 compare_thresholds, NULL);
3713
3714 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003715 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003716 for (i = 0; i < size; i++) {
Sha Zhengju748dad32012-05-29 15:06:57 -07003717 if (new->entries[i].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003718 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003719 * new->current_threshold will not be used until
3720 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003721 * it here.
3722 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003723 ++new->current_threshold;
Sha Zhengju748dad32012-05-29 15:06:57 -07003724 } else
3725 break;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003726 }
3727
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003728 /* Free old spare buffer and save old primary buffer as spare */
3729 kfree(thresholds->spare);
3730 thresholds->spare = thresholds->primary;
3731
3732 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003733
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003734 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003735 synchronize_rcu();
3736
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003737unlock:
3738 mutex_unlock(&memcg->thresholds_lock);
3739
3740 return ret;
3741}
3742
Tejun Heo59b6f872013-11-22 18:20:43 -05003743static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003744 struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003745{
Tejun Heo59b6f872013-11-22 18:20:43 -05003746 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003747}
3748
Tejun Heo59b6f872013-11-22 18:20:43 -05003749static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003750 struct eventfd_ctx *eventfd, const char *args)
3751{
Tejun Heo59b6f872013-11-22 18:20:43 -05003752 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003753}
3754
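/*
 * Remove every threshold registered for @eventfd.  Surviving entries are
 * copied into the spare array, which is then swapped in as the new
 * primary; the old primary becomes the spare.
 */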
Tejun Heo59b6f872013-11-22 18:20:43 -05003755static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003756 struct eventfd_ctx *eventfd, enum res_type type)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003757{
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003758 struct mem_cgroup_thresholds *thresholds;
3759 struct mem_cgroup_threshold_ary *new;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08003760 unsigned long usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003761 int i, j, size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003762
3763 mutex_lock(&memcg->thresholds_lock);
Johannes Weiner05b84302014-08-06 16:05:59 -07003764
3765 if (type == _MEM) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003766 thresholds = &memcg->thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003767 usage = mem_cgroup_usage(memcg, false);
Johannes Weiner05b84302014-08-06 16:05:59 -07003768 } else if (type == _MEMSWAP) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003769 thresholds = &memcg->memsw_thresholds;
Johannes Weinerce00a962014-09-05 08:43:57 -04003770 usage = mem_cgroup_usage(memcg, true);
Johannes Weiner05b84302014-08-06 16:05:59 -07003771 } else
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003772 BUG();
3773
Anton Vorontsov371528c2012-02-24 05:14:46 +04003774 if (!thresholds->primary)
3775 goto unlock;
3776
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003777 /* Check if a threshold crossed before removing */
3778 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3779
3780 /* Calculate the new number of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003781 size = 0;
3782 for (i = 0; i < thresholds->primary->size; i++) {
3783 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003784 size++;
3785 }
3786
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003787 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003788
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003789 /* Set thresholds array to NULL if we don't have thresholds */
3790 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003791 kfree(new);
3792 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003793 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003794 }
3795
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003796 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003797
3798 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003799 new->current_threshold = -1;
3800 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3801 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003802 continue;
3803
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003804 new->entries[j] = thresholds->primary->entries[i];
Sha Zhengju748dad32012-05-29 15:06:57 -07003805 if (new->entries[j].threshold <= usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003806 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003807 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003808 * until rcu_assign_pointer(), so it's safe to increment
3809 * it here.
3810 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003811 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003812 }
3813 j++;
3814 }
3815
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003816swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003817 /* Swap primary and spare array */
3818 thresholds->spare = thresholds->primary;
Sha Zhengju8c757762012-05-10 13:01:45 -07003819 /* If all events are unregistered, free the spare array */
3820 if (!new) {
3821 kfree(thresholds->spare);
3822 thresholds->spare = NULL;
3823 }
3824
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003825 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003826
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07003827 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003828 synchronize_rcu();
Anton Vorontsov371528c2012-02-24 05:14:46 +04003829unlock:
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003830 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003831}
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003832
Tejun Heo59b6f872013-11-22 18:20:43 -05003833static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003834 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003835{
Tejun Heo59b6f872013-11-22 18:20:43 -05003836 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
Tejun Heo347c4a82013-11-22 18:20:43 -05003837}
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003838
Tejun Heo59b6f872013-11-22 18:20:43 -05003839static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003840 struct eventfd_ctx *eventfd)
3841{
Tejun Heo59b6f872013-11-22 18:20:43 -05003842 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
Tejun Heo347c4a82013-11-22 18:20:43 -05003843}
3844
Tejun Heo59b6f872013-11-22 18:20:43 -05003845static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003846 struct eventfd_ctx *eventfd, const char *args)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003847{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003848 struct mem_cgroup_eventfd_list *event;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003849
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003850 event = kmalloc(sizeof(*event), GFP_KERNEL);
3851 if (!event)
3852 return -ENOMEM;
3853
Michal Hocko1af8efe2011-07-26 16:08:24 -07003854 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003855
3856 event->eventfd = eventfd;
3857 list_add(&event->list, &memcg->oom_notify);
3858
3859 /* already in OOM ? */
Michal Hocko79dfdac2011-07-26 16:08:23 -07003860 if (atomic_read(&memcg->under_oom))
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003861 eventfd_signal(eventfd, 1);
Michal Hocko1af8efe2011-07-26 16:08:24 -07003862 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003863
3864 return 0;
3865}
3866
Tejun Heo59b6f872013-11-22 18:20:43 -05003867static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
Tejun Heo347c4a82013-11-22 18:20:43 -05003868 struct eventfd_ctx *eventfd)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003869{
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003870 struct mem_cgroup_eventfd_list *ev, *tmp;
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003871
Michal Hocko1af8efe2011-07-26 16:08:24 -07003872 spin_lock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003873
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003874 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003875 if (ev->eventfd == eventfd) {
3876 list_del(&ev->list);
3877 kfree(ev);
3878 }
3879 }
3880
Michal Hocko1af8efe2011-07-26 16:08:24 -07003881 spin_unlock(&memcg_oom_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003882}
3883
Tejun Heo2da8ca82013-12-05 12:28:04 -05003884static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003885{
Tejun Heo2da8ca82013-12-05 12:28:04 -05003886 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003887
Tejun Heo791badb2013-12-05 12:28:02 -05003888 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3889 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom));
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003890 return 0;
3891}
3892
Tejun Heo182446d2013-08-08 20:11:24 -04003893static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003894 struct cftype *cft, u64 val)
3895{
Tejun Heo182446d2013-08-08 20:11:24 -04003896 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003897
3898 /* cannot set to root cgroup and only 0 and 1 are allowed */
Linus Torvalds14208b02014-06-09 15:03:33 -07003899 if (!css->parent || !((val == 0) || (val == 1)))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003900 return -EINVAL;
3901
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003902 memcg->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07003903 if (!val)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07003904 memcg_oom_recover(memcg);
Johannes Weiner3dae7fe2014-06-04 16:07:01 -07003905
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003906 return 0;
3907}
3908
Andrew Mortonc255a452012-07-31 16:43:02 -07003909#ifdef CONFIG_MEMCG_KMEM
Glauber Costacbe128e32012-04-09 19:36:34 -03003910static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
Glauber Costae5671df2011-12-11 21:47:01 +00003911{
Glauber Costa55007d82012-12-18 14:22:38 -08003912 int ret;
3913
Glauber Costa55007d82012-12-18 14:22:38 -08003914 ret = memcg_propagate_kmem(memcg);
3915 if (ret)
3916 return ret;
Glauber Costa2633d7a2012-12-18 14:22:34 -08003917
Glauber Costa1d62e432012-04-09 19:36:33 -03003918 return mem_cgroup_sockets_init(memcg, ss);
Michel Lespinasse573b4002013-04-29 15:08:13 -07003919}
Glauber Costae5671df2011-12-11 21:47:01 +00003920
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003921static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
3922{
Vladimir Davydov2788cf02015-02-12 14:59:38 -08003923 struct cgroup_subsys_state *css;
3924 struct mem_cgroup *parent, *child;
3925 int kmemcg_id;
3926
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003927 if (!memcg->kmem_acct_active)
3928 return;
3929
3930 /*
3931 * Clear the 'active' flag before clearing memcg_caches array entries.
3932 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
3933 * guarantees no cache will be created for this cgroup after we are
3934 * done (see memcg_create_kmem_cache()).
3935 */
3936 memcg->kmem_acct_active = false;
3937
3938 memcg_deactivate_kmem_caches(memcg);
Vladimir Davydov2788cf02015-02-12 14:59:38 -08003939
3940 kmemcg_id = memcg->kmemcg_id;
3941 BUG_ON(kmemcg_id < 0);
3942
3943 parent = parent_mem_cgroup(memcg);
3944 if (!parent)
3945 parent = root_mem_cgroup;
3946
3947 /*
3948 * Change kmemcg_id of this cgroup and all its descendants to the
3949 * parent's id, and then move all entries from this cgroup's list_lrus
3950 * to ones of the parent. After we have finished, all list_lrus
3951 * corresponding to this cgroup are guaranteed to remain empty. The
3952 * ordering is imposed by list_lru_node->lock taken by
3953 * memcg_drain_all_list_lrus().
3954 */
3955 css_for_each_descendant_pre(css, &memcg->css) {
3956 child = mem_cgroup_from_css(css);
3957 BUG_ON(child->kmemcg_id != kmemcg_id);
3958 child->kmemcg_id = parent->kmemcg_id;
3959 if (!memcg->use_hierarchy)
3960 break;
3961 }
3962 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
3963
3964 memcg_free_cache_id(kmemcg_id);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003965}
3966
Li Zefan10d5ebf2013-07-08 16:00:33 -07003967static void memcg_destroy_kmem(struct mem_cgroup *memcg)
Glauber Costad1a4c0b2011-12-11 21:47:04 +00003968{
Vladimir Davydovf48b80a2015-02-12 14:59:56 -08003969 if (memcg->kmem_acct_activated) {
3970 memcg_destroy_kmem_caches(memcg);
3971 static_key_slow_dec(&memcg_kmem_enabled_key);
3972 WARN_ON(page_counter_read(&memcg->kmem));
3973 }
Glauber Costa1d62e432012-04-09 19:36:33 -03003974 mem_cgroup_sockets_destroy(memcg);
Li Zefan10d5ebf2013-07-08 16:00:33 -07003975}
Glauber Costae5671df2011-12-11 21:47:01 +00003976#else
Glauber Costacbe128e32012-04-09 19:36:34 -03003977static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
Glauber Costae5671df2011-12-11 21:47:01 +00003978{
3979 return 0;
3980}
Glauber Costad1a4c0b2011-12-11 21:47:04 +00003981
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08003982static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
3983{
3984}
3985
Li Zefan10d5ebf2013-07-08 16:00:33 -07003986static void memcg_destroy_kmem(struct mem_cgroup *memcg)
3987{
3988}
Glauber Costae5671df2011-12-11 21:47:01 +00003989#endif
3990
Tejun Heo52ebea72015-05-22 17:13:37 -04003991#ifdef CONFIG_CGROUP_WRITEBACK
3992
3993struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3994{
3995 return &memcg->cgwb_list;
3996}
3997
Tejun Heo841710a2015-05-22 18:23:33 -04003998static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3999{
4000 return wb_domain_init(&memcg->cgwb_domain, gfp);
4001}
4002
4003static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4004{
4005 wb_domain_exit(&memcg->cgwb_domain);
4006}
4007
Tejun Heo2529bb32015-05-22 18:23:34 -04004008static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4009{
4010 wb_domain_size_changed(&memcg->cgwb_domain);
4011}
4012
Tejun Heo841710a2015-05-22 18:23:33 -04004013struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4014{
4015 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4016
4017 if (!memcg->css.parent)
4018 return NULL;
4019
4020 return &memcg->cgwb_domain;
4021}
4022
Tejun Heoc2aa7232015-05-22 18:23:35 -04004023/**
4024 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4025 * @wb: bdi_writeback in question
4026 * @pavail: out parameter for number of available pages
4027 * @pdirty: out parameter for number of dirty pages
4028 * @pwriteback: out parameter for number of pages under writeback
4029 *
4030 * Determine the numbers of available, dirty, and writeback pages in @wb's
4031 * memcg. Dirty and writeback are self-explanatory. Available is a bit
4032 * more involved.
4033 *
4034 * A memcg's headroom is "min(max, high) - used". The available memory is
4035 * calculated as the lowest headroom of itself and the ancestors plus the
4036 * number of pages already being used for file pages. Note that this
4037 * doesn't consider the actual amount of available memory in the system.
4038 * The caller should further cap *@pavail accordingly.
4039 */
4040void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail,
4041 unsigned long *pdirty, unsigned long *pwriteback)
4042{
4043 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4044 struct mem_cgroup *parent;
4045 unsigned long head_room = PAGE_COUNTER_MAX;
4046 unsigned long file_pages;
4047
4048 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
4049
4050 /* this should eventually include NR_UNSTABLE_NFS */
4051 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
4052
4053 file_pages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
4054 (1 << LRU_ACTIVE_FILE));
4055 while ((parent = parent_mem_cgroup(memcg))) {
4056 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
4057 unsigned long used = page_counter_read(&memcg->memory);
4058
4059 head_room = min(head_room, ceiling - min(ceiling, used));
4060 memcg = parent;
4061 }
4062
4063 *pavail = file_pages + head_room;
4064}
4065
Tejun Heo841710a2015-05-22 18:23:33 -04004066#else /* CONFIG_CGROUP_WRITEBACK */
4067
4068static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4069{
4070 return 0;
4071}
4072
4073static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4074{
4075}
4076
Tejun Heo2529bb32015-05-22 18:23:34 -04004077static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4078{
4079}
4080
Tejun Heo52ebea72015-05-22 17:13:37 -04004081#endif /* CONFIG_CGROUP_WRITEBACK */
4082
Tejun Heo79bd9812013-11-22 18:20:42 -05004083/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05004084 * DO NOT USE IN NEW FILES.
4085 *
4086 * "cgroup.event_control" implementation.
4087 *
4088 * This is way over-engineered. It tries to support fully configurable
4089 * events for each user. Such level of flexibility is completely
4090 * unnecessary especially in the light of the planned unified hierarchy.
4091 *
4092 * Please deprecate this and replace with something simpler if at all
4093 * possible.
4094 */
4095
4096/*
Tejun Heo79bd9812013-11-22 18:20:42 -05004097 * Unregister event and free resources.
4098 *
4099 * Gets called from workqueue.
4100 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05004101static void memcg_event_remove(struct work_struct *work)
Tejun Heo79bd9812013-11-22 18:20:42 -05004102{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004103 struct mem_cgroup_event *event =
4104 container_of(work, struct mem_cgroup_event, remove);
Tejun Heo59b6f872013-11-22 18:20:43 -05004105 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004106
4107 remove_wait_queue(event->wqh, &event->wait);
4108
Tejun Heo59b6f872013-11-22 18:20:43 -05004109 event->unregister_event(memcg, event->eventfd);
Tejun Heo79bd9812013-11-22 18:20:42 -05004110
4111 /* Notify userspace the event is going away. */
4112 eventfd_signal(event->eventfd, 1);
4113
4114 eventfd_ctx_put(event->eventfd);
4115 kfree(event);
Tejun Heo59b6f872013-11-22 18:20:43 -05004116 css_put(&memcg->css);
Tejun Heo79bd9812013-11-22 18:20:42 -05004117}
4118
4119/*
4120 * Gets called on POLLHUP on eventfd when user closes it.
4121 *
4122 * Called with wqh->lock held and interrupts disabled.
4123 */
Tejun Heo3bc942f2013-11-22 18:20:44 -05004124static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
4125 int sync, void *key)
Tejun Heo79bd9812013-11-22 18:20:42 -05004126{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004127 struct mem_cgroup_event *event =
4128 container_of(wait, struct mem_cgroup_event, wait);
Tejun Heo59b6f872013-11-22 18:20:43 -05004129 struct mem_cgroup *memcg = event->memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004130 unsigned long flags = (unsigned long)key;
4131
4132 if (flags & POLLHUP) {
4133 /*
4134 * If the event has been detached at cgroup removal, we
4135 * can simply return knowing the other side will cleanup
4136 * for us.
4137 *
4138 * We can't race against event freeing since the other
4139 * side will require wqh->lock via remove_wait_queue(),
4140 * which we hold.
4141 */
Tejun Heofba94802013-11-22 18:20:43 -05004142 spin_lock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004143 if (!list_empty(&event->list)) {
4144 list_del_init(&event->list);
4145 /*
4146 * We are in atomic context, but memcg_event_remove()
4147 * may sleep, so we have to call it from a workqueue.
4148 */
4149 schedule_work(&event->remove);
4150 }
Tejun Heofba94802013-11-22 18:20:43 -05004151 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004152 }
4153
4154 return 0;
4155}
4156
Tejun Heo3bc942f2013-11-22 18:20:44 -05004157static void memcg_event_ptable_queue_proc(struct file *file,
Tejun Heo79bd9812013-11-22 18:20:42 -05004158 wait_queue_head_t *wqh, poll_table *pt)
4159{
Tejun Heo3bc942f2013-11-22 18:20:44 -05004160 struct mem_cgroup_event *event =
4161 container_of(pt, struct mem_cgroup_event, pt);
Tejun Heo79bd9812013-11-22 18:20:42 -05004162
4163 event->wqh = wqh;
4164 add_wait_queue(wqh, &event->wait);
4165}
4166
4167/*
Tejun Heo3bc942f2013-11-22 18:20:44 -05004168 * DO NOT USE IN NEW FILES.
4169 *
Tejun Heo79bd9812013-11-22 18:20:42 -05004170 * Parse input and register new cgroup event handler.
4171 *
4172 * Input must be in format '<event_fd> <control_fd> <args>'.
4173 * Interpretation of args is defined by control file implementation.
4174 */
Tejun Heo451af502014-05-13 12:16:21 -04004175static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4176 char *buf, size_t nbytes, loff_t off)
Tejun Heo79bd9812013-11-22 18:20:42 -05004177{
Tejun Heo451af502014-05-13 12:16:21 -04004178 struct cgroup_subsys_state *css = of_css(of);
Tejun Heofba94802013-11-22 18:20:43 -05004179 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004180 struct mem_cgroup_event *event;
Tejun Heo79bd9812013-11-22 18:20:42 -05004181 struct cgroup_subsys_state *cfile_css;
4182 unsigned int efd, cfd;
4183 struct fd efile;
4184 struct fd cfile;
Tejun Heofba94802013-11-22 18:20:43 -05004185 const char *name;
Tejun Heo79bd9812013-11-22 18:20:42 -05004186 char *endp;
4187 int ret;
4188
Tejun Heo451af502014-05-13 12:16:21 -04004189 buf = strstrip(buf);
4190
4191 efd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05004192 if (*endp != ' ')
4193 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04004194 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05004195
Tejun Heo451af502014-05-13 12:16:21 -04004196 cfd = simple_strtoul(buf, &endp, 10);
Tejun Heo79bd9812013-11-22 18:20:42 -05004197 if ((*endp != ' ') && (*endp != '\0'))
4198 return -EINVAL;
Tejun Heo451af502014-05-13 12:16:21 -04004199 buf = endp + 1;
Tejun Heo79bd9812013-11-22 18:20:42 -05004200
4201 event = kzalloc(sizeof(*event), GFP_KERNEL);
4202 if (!event)
4203 return -ENOMEM;
4204
Tejun Heo59b6f872013-11-22 18:20:43 -05004205 event->memcg = memcg;
Tejun Heo79bd9812013-11-22 18:20:42 -05004206 INIT_LIST_HEAD(&event->list);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004207 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4208 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4209 INIT_WORK(&event->remove, memcg_event_remove);
Tejun Heo79bd9812013-11-22 18:20:42 -05004210
4211 efile = fdget(efd);
4212 if (!efile.file) {
4213 ret = -EBADF;
4214 goto out_kfree;
4215 }
4216
4217 event->eventfd = eventfd_ctx_fileget(efile.file);
4218 if (IS_ERR(event->eventfd)) {
4219 ret = PTR_ERR(event->eventfd);
4220 goto out_put_efile;
4221 }
4222
4223 cfile = fdget(cfd);
4224 if (!cfile.file) {
4225 ret = -EBADF;
4226 goto out_put_eventfd;
4227 }
4228
4229 /* the process needs read permission on the control file */
4230 /* AV: shouldn't we check that it's been opened for read instead? */
4231 ret = inode_permission(file_inode(cfile.file), MAY_READ);
4232 if (ret < 0)
4233 goto out_put_cfile;
4234
Tejun Heo79bd9812013-11-22 18:20:42 -05004235 /*
Tejun Heofba94802013-11-22 18:20:43 -05004236 * Determine the event callbacks and set them in @event. This used
4237 * to be done via struct cftype but cgroup core no longer knows
4238 * about these events. The following is crude but the whole thing
4239 * is for compatibility anyway.
Tejun Heo3bc942f2013-11-22 18:20:44 -05004240 *
4241 * DO NOT ADD NEW FILES.
Tejun Heofba94802013-11-22 18:20:43 -05004242 */
Al Virob5830432014-10-31 01:22:04 -04004243 name = cfile.file->f_path.dentry->d_name.name;
Tejun Heofba94802013-11-22 18:20:43 -05004244
4245 if (!strcmp(name, "memory.usage_in_bytes")) {
4246 event->register_event = mem_cgroup_usage_register_event;
4247 event->unregister_event = mem_cgroup_usage_unregister_event;
4248 } else if (!strcmp(name, "memory.oom_control")) {
4249 event->register_event = mem_cgroup_oom_register_event;
4250 event->unregister_event = mem_cgroup_oom_unregister_event;
4251 } else if (!strcmp(name, "memory.pressure_level")) {
4252 event->register_event = vmpressure_register_event;
4253 event->unregister_event = vmpressure_unregister_event;
4254 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
Tejun Heo347c4a82013-11-22 18:20:43 -05004255 event->register_event = memsw_cgroup_usage_register_event;
4256 event->unregister_event = memsw_cgroup_usage_unregister_event;
Tejun Heofba94802013-11-22 18:20:43 -05004257 } else {
4258 ret = -EINVAL;
4259 goto out_put_cfile;
4260 }
4261
4262 /*
Tejun Heob5557c42013-11-22 18:20:42 -05004263 * Verify @cfile should belong to @css. Also, remaining events are
4264 * automatically removed on cgroup destruction but the removal is
4265 * asynchronous, so take an extra ref on @css.
Tejun Heo79bd9812013-11-22 18:20:42 -05004266 */
Al Virob5830432014-10-31 01:22:04 -04004267 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
Tejun Heoec903c02014-05-13 12:11:01 -04004268 &memory_cgrp_subsys);
Tejun Heo79bd9812013-11-22 18:20:42 -05004269 ret = -EINVAL;
Tejun Heo5a17f542014-02-11 11:52:47 -05004270 if (IS_ERR(cfile_css))
Tejun Heo79bd9812013-11-22 18:20:42 -05004271 goto out_put_cfile;
Tejun Heo5a17f542014-02-11 11:52:47 -05004272 if (cfile_css != css) {
4273 css_put(cfile_css);
4274 goto out_put_cfile;
4275 }
Tejun Heo79bd9812013-11-22 18:20:42 -05004276
Tejun Heo451af502014-05-13 12:16:21 -04004277 ret = event->register_event(memcg, event->eventfd, buf);
Tejun Heo79bd9812013-11-22 18:20:42 -05004278 if (ret)
4279 goto out_put_css;
4280
4281 efile.file->f_op->poll(efile.file, &event->pt);
4282
Tejun Heofba94802013-11-22 18:20:43 -05004283 spin_lock(&memcg->event_list_lock);
4284 list_add(&event->list, &memcg->event_list);
4285 spin_unlock(&memcg->event_list_lock);
Tejun Heo79bd9812013-11-22 18:20:42 -05004286
4287 fdput(cfile);
4288 fdput(efile);
4289
Tejun Heo451af502014-05-13 12:16:21 -04004290 return nbytes;
Tejun Heo79bd9812013-11-22 18:20:42 -05004291
4292out_put_css:
Tejun Heob5557c42013-11-22 18:20:42 -05004293 css_put(css);
Tejun Heo79bd9812013-11-22 18:20:42 -05004294out_put_cfile:
4295 fdput(cfile);
4296out_put_eventfd:
4297 eventfd_ctx_put(event->eventfd);
4298out_put_efile:
4299 fdput(efile);
4300out_kfree:
4301 kfree(event);
4302
4303 return ret;
4304}
4305
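/*
 * Example usage of memcg_write_event_control() above from userspace
 * (hypothetical cgroup path), arming a 100M usage threshold:
 *
 *	efd = eventfd(0, 0);
 *	cfd = open("/cgroup/memory/foo/memory.usage_in_bytes", O_RDONLY);
 *	write the string "<efd> <cfd> 104857600" to
 *	"/cgroup/memory/foo/cgroup.event_control";
 *
 * then poll()/read() efd to be notified when usage crosses the threshold.
 */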
Johannes Weiner241994ed2015-02-11 15:26:06 -08004306static struct cftype mem_cgroup_legacy_files[] = {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004307 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004308 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004309 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004310 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004311 },
4312 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004313 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004314 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004315 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004316 .read_u64 = mem_cgroup_read_u64,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004317 },
4318 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004319 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004320 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004321 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004322 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004323 },
4324 {
Balbir Singh296c81d2009-09-23 15:56:36 -07004325 .name = "soft_limit_in_bytes",
4326 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004327 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004328 .read_u64 = mem_cgroup_read_u64,
Balbir Singh296c81d2009-09-23 15:56:36 -07004329 },
4330 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004331 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004332 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04004333 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004334 .read_u64 = mem_cgroup_read_u64,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004335 },
Balbir Singh8697d332008-02-07 00:13:59 -08004336 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004337 .name = "stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004338 .seq_show = memcg_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004339 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004340 {
4341 .name = "force_empty",
Tejun Heo6770c642014-05-13 12:16:21 -04004342 .write = mem_cgroup_force_empty_write,
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004343 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08004344 {
4345 .name = "use_hierarchy",
4346 .write_u64 = mem_cgroup_hierarchy_write,
4347 .read_u64 = mem_cgroup_hierarchy_read,
4348 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004349 {
Tejun Heo3bc942f2013-11-22 18:20:44 -05004350 .name = "cgroup.event_control", /* XXX: for compat */
Tejun Heo451af502014-05-13 12:16:21 -04004351 .write = memcg_write_event_control,
Tejun Heo79bd9812013-11-22 18:20:42 -05004352 .flags = CFTYPE_NO_PREFIX,
4353 .mode = S_IWUGO,
4354 },
4355 {
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004356 .name = "swappiness",
4357 .read_u64 = mem_cgroup_swappiness_read,
4358 .write_u64 = mem_cgroup_swappiness_write,
4359 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004360 {
4361 .name = "move_charge_at_immigrate",
4362 .read_u64 = mem_cgroup_move_charge_read,
4363 .write_u64 = mem_cgroup_move_charge_write,
4364 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004365 {
4366 .name = "oom_control",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004367 .seq_show = mem_cgroup_oom_control_read,
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004368 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004369 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4370 },
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004371 {
4372 .name = "pressure_level",
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004373 },
Ying Han406eb0c2011-05-26 16:25:37 -07004374#ifdef CONFIG_NUMA
4375 {
4376 .name = "numa_stat",
Tejun Heo2da8ca82013-12-05 12:28:04 -05004377 .seq_show = memcg_numa_stat_show,
Ying Han406eb0c2011-05-26 16:25:37 -07004378 },
4379#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08004380#ifdef CONFIG_MEMCG_KMEM
4381 {
4382 .name = "kmem.limit_in_bytes",
4383 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
Tejun Heo451af502014-05-13 12:16:21 -04004384 .write = mem_cgroup_write,
Tejun Heo791badb2013-12-05 12:28:02 -05004385 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004386 },
4387 {
4388 .name = "kmem.usage_in_bytes",
4389 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
Tejun Heo791badb2013-12-05 12:28:02 -05004390 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004391 },
4392 {
4393 .name = "kmem.failcnt",
4394 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
Tejun Heo6770c642014-05-13 12:16:21 -04004395 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004396 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004397 },
4398 {
4399 .name = "kmem.max_usage_in_bytes",
4400 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
Tejun Heo6770c642014-05-13 12:16:21 -04004401 .write = mem_cgroup_reset,
Tejun Heo791badb2013-12-05 12:28:02 -05004402 .read_u64 = mem_cgroup_read_u64,
Glauber Costa510fc4e2012-12-18 14:21:47 -08004403 },
Glauber Costa749c5412012-12-18 14:23:01 -08004404#ifdef CONFIG_SLABINFO
4405 {
4406 .name = "kmem.slabinfo",
Vladimir Davydovb0475012014-12-10 15:44:19 -08004407 .seq_start = slab_start,
4408 .seq_next = slab_next,
4409 .seq_stop = slab_stop,
4410 .seq_show = memcg_slab_show,
Glauber Costa749c5412012-12-18 14:23:01 -08004411 },
4412#endif
Glauber Costa510fc4e2012-12-18 14:21:47 -08004413#endif
Tejun Heo6bc10342012-04-01 12:09:55 -07004414 { }, /* terminate */
Tejun Heoaf36f902012-04-01 12:09:55 -07004415};
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004416
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004417static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004418{
4419 struct mem_cgroup_per_node *pn;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004420 struct mem_cgroup_per_zone *mz;
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004421 int zone, tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004422 /*
4423 * This routine is called against all possible nodes, but it is a BUG
4424 * to call kmalloc() against an offline node.
4425 *
4426 * TODO: this routine can waste much memory for nodes which will
4427 * never be onlined. It would be better to use a memory hotplug
4428 * callback function.
4429 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004430 if (!node_state(node, N_NORMAL_MEMORY))
4431 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08004432 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004433 if (!pn)
4434 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004435
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004436 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4437 mz = &pn->zoneinfo[zone];
Hugh Dickinsbea8c152012-11-16 14:14:54 -08004438 lruvec_init(&mz->lruvec);
Andrew Mortonbb4cc1a82013-09-24 15:27:40 -07004439 mz->usage_in_excess = 0;
4440 mz->on_tree = false;
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004441 mz->memcg = memcg;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004442 }
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004443 memcg->nodeinfo[node] = pn;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004444 return 0;
4445}
4446
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004447static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004448{
Johannes Weiner54f72fe2013-07-08 15:59:49 -07004449 kfree(memcg->nodeinfo[node]);
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004450}
4451
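/*
 * Allocate the mem_cgroup structure (including the trailing per-node
 * pointer array), its per-cpu statistics and its writeback domain.
 */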
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004452static struct mem_cgroup *mem_cgroup_alloc(void)
4453{
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004454 struct mem_cgroup *memcg;
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004455 size_t size;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004456
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004457 size = sizeof(struct mem_cgroup);
4458 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004459
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004460 memcg = kzalloc(size, GFP_KERNEL);
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004461 if (!memcg)
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07004462 return NULL;
4463
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004464 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4465 if (!memcg->stat)
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004466 goto out_free;
Tejun Heo841710a2015-05-22 18:23:33 -04004467
4468 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4469 goto out_free_stat;
4470
Hugh Dickinsd79154b2012-03-21 16:34:18 -07004471 spin_lock_init(&memcg->pcp_counter_lock);
4472 return memcg;
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004473
Tejun Heo841710a2015-05-22 18:23:33 -04004474out_free_stat:
4475 free_percpu(memcg->stat);
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004476out_free:
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004477 kfree(memcg);
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004478 return NULL;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004479}
4480
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004481/*
Glauber Costac8b2a362012-12-18 14:22:13 -08004482 * When destroying a mem_cgroup, references from swap_cgroup can remain
4483 * (scanning them all at force_empty is too costly).
4484 *
4485 * Instead of clearing all references at force_empty, we remember
4486 * the number of references from swap_cgroup and free the mem_cgroup
4487 * when it goes down to 0.
4488 *
4489 * Removal of cgroup itself succeeds regardless of refs from swap.
Hugh Dickins59927fb2012-03-15 15:17:07 -07004490 */
Glauber Costac8b2a362012-12-18 14:22:13 -08004491
4492static void __mem_cgroup_free(struct mem_cgroup *memcg)
Hugh Dickins59927fb2012-03-15 15:17:07 -07004493{
Glauber Costac8b2a362012-12-18 14:22:13 -08004494 int node;
Hugh Dickins59927fb2012-03-15 15:17:07 -07004495
Andrew Mortonbb4cc1a82013-09-24 15:27:40 -07004496 mem_cgroup_remove_from_trees(memcg);
Glauber Costac8b2a362012-12-18 14:22:13 -08004497
4498 for_each_node(node)
4499 free_mem_cgroup_per_zone_info(memcg, node);
4500
4501 free_percpu(memcg->stat);
Tejun Heo841710a2015-05-22 18:23:33 -04004502 memcg_wb_domain_exit(memcg);
Vladimir Davydov8ff69e22014-01-23 15:52:52 -08004503 kfree(memcg);
Hugh Dickins59927fb2012-03-15 15:17:07 -07004504}
Glauber Costa3afe36b2012-05-29 15:07:10 -07004505
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004506/*
4507 * Returns the parent mem_cgroup in the memcg hierarchy with hierarchy enabled.
4508 */
Glauber Costae1aab162011-12-11 21:47:03 +00004509struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004510{
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004511 if (!memcg->memory.parent)
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004512 return NULL;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004513 return mem_cgroup_from_counter(memcg->memory.parent, memory);
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004514}
Glauber Costae1aab162011-12-11 21:47:03 +00004515EXPORT_SYMBOL(parent_mem_cgroup);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004516
Li Zefan0eb253e2009-01-15 13:51:25 -08004517static struct cgroup_subsys_state * __ref
Tejun Heoeb954192013-08-08 20:11:23 -04004518mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004519{
Glauber Costad142e3e2013-02-22 16:34:52 -08004520 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004521 long error = -ENOMEM;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004522 int node;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004523
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004524 memcg = mem_cgroup_alloc();
4525 if (!memcg)
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004526 return ERR_PTR(error);
Pavel Emelianov78fb7462008-02-07 00:13:51 -08004527
Bob Liu3ed28fa2012-01-12 17:19:04 -08004528 for_each_node(node)
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07004529 if (alloc_mem_cgroup_per_zone_info(memcg, node))
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004530 goto free_out;
Balbir Singhf64c3f52009-09-23 15:56:37 -07004531
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08004532 /* root ? */
Tejun Heoeb954192013-08-08 20:11:23 -04004533 if (parent_css == NULL) {
Hillf Dantona41c58a2011-12-19 17:11:57 -08004534 root_mem_cgroup = memcg;
Tejun Heo56161632015-05-22 17:13:20 -04004535 mem_cgroup_root_css = &memcg->css;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004536 page_counter_init(&memcg->memory, NULL);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004537 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004538 memcg->soft_limit = PAGE_COUNTER_MAX;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004539 page_counter_init(&memcg->memsw, NULL);
4540 page_counter_init(&memcg->kmem, NULL);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004541 }
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004542
Glauber Costad142e3e2013-02-22 16:34:52 -08004543 memcg->last_scanned_node = MAX_NUMNODES;
4544 INIT_LIST_HEAD(&memcg->oom_notify);
Glauber Costad142e3e2013-02-22 16:34:52 -08004545 memcg->move_charge_at_immigrate = 0;
4546 mutex_init(&memcg->thresholds_lock);
4547 spin_lock_init(&memcg->move_lock);
Anton Vorontsov70ddf632013-04-29 15:08:31 -07004548 vmpressure_init(&memcg->vmpressure);
Tejun Heofba94802013-11-22 18:20:43 -05004549 INIT_LIST_HEAD(&memcg->event_list);
4550 spin_lock_init(&memcg->event_list_lock);
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004551#ifdef CONFIG_MEMCG_KMEM
4552 memcg->kmemcg_id = -1;
Vladimir Davydov900a38f2014-12-12 16:55:10 -08004553#endif
Tejun Heo52ebea72015-05-22 17:13:37 -04004554#ifdef CONFIG_CGROUP_WRITEBACK
4555 INIT_LIST_HEAD(&memcg->cgwb_list);
4556#endif
Glauber Costad142e3e2013-02-22 16:34:52 -08004557 return &memcg->css;
4558
4559free_out:
4560 __mem_cgroup_free(memcg);
4561 return ERR_PTR(error);
4562}
4563
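/*
 * Second stage of cgroup creation: inherit swappiness, OOM and hierarchy
 * settings from the parent, chain the page counters to the parent's when
 * use_hierarchy is set, and initialize kmem accounting.
 */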
4564static int
Tejun Heoeb954192013-08-08 20:11:23 -04004565mem_cgroup_css_online(struct cgroup_subsys_state *css)
Glauber Costad142e3e2013-02-22 16:34:52 -08004566{
Tejun Heoeb954192013-08-08 20:11:23 -04004567 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo5c9d5352014-05-16 13:22:48 -04004568 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07004569 int ret;
Glauber Costad142e3e2013-02-22 16:34:52 -08004570
Tejun Heo15a4c832014-05-04 15:09:14 -04004571 if (css->id > MEM_CGROUP_ID_MAX)
Li Zefan4219b2d2013-09-23 16:56:29 +08004572 return -ENOSPC;
4573
Tejun Heo63876982013-08-08 20:11:23 -04004574 if (!parent)
Glauber Costad142e3e2013-02-22 16:34:52 -08004575 return 0;
4576
Glauber Costa09998212013-02-22 16:34:55 -08004577 mutex_lock(&memcg_create_mutex);
Glauber Costad142e3e2013-02-22 16:34:52 -08004578
4579 memcg->use_hierarchy = parent->use_hierarchy;
4580 memcg->oom_kill_disable = parent->oom_kill_disable;
4581 memcg->swappiness = mem_cgroup_swappiness(parent);
4582
4583 if (parent->use_hierarchy) {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004584 page_counter_init(&memcg->memory, &parent->memory);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004585 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004586 memcg->soft_limit = PAGE_COUNTER_MAX;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004587 page_counter_init(&memcg->memsw, &parent->memsw);
4588 page_counter_init(&memcg->kmem, &parent->kmem);
Glauber Costa55007d82012-12-18 14:22:38 -08004589
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004590 /*
Li Zefan8d76a972013-07-08 16:00:36 -07004591 * No need to take a reference to the parent because cgroup
4592 * core guarantees its existence.
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004593 */
Balbir Singh18f59ea2009-01-07 18:08:07 -08004594 } else {
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004595 page_counter_init(&memcg->memory, NULL);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004596 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004597 memcg->soft_limit = PAGE_COUNTER_MAX;
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004598 page_counter_init(&memcg->memsw, NULL);
4599 page_counter_init(&memcg->kmem, NULL);
Tejun Heo8c7f6ed2012-09-13 12:20:58 -07004600 /*
4601		 * Deeper hierarchy with use_hierarchy == false doesn't make
4602		 * much sense, so let the cgroup subsystem know about this
4603 * unfortunate state in our controller.
4604 */
Glauber Costad142e3e2013-02-22 16:34:52 -08004605 if (parent != root_mem_cgroup)
Tejun Heo073219e2014-02-08 10:36:58 -05004606 memory_cgrp_subsys.broken_hierarchy = true;
Balbir Singh18f59ea2009-01-07 18:08:07 -08004607 }
Glauber Costa09998212013-02-22 16:34:55 -08004608 mutex_unlock(&memcg_create_mutex);
Vladimir Davydovd6441632014-01-23 15:53:09 -08004609
Johannes Weiner2f7dd7a2014-10-02 16:16:57 -07004610 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
4611 if (ret)
4612 return ret;
4613
4614 /*
4615 * Make sure the memcg is initialized: mem_cgroup_iter()
4616 * orders reading memcg->initialized against its callers
4617 * reading the memcg members.
4618 */
4619 smp_store_release(&memcg->initialized, 1);
4620
4621 return 0;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004622}
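
/*
 * Hierarchy note (illustrative sketch, not code from this file): with
 * use_hierarchy enabled, the child's page_counters are chained to the
 * parent's via page_counter_init(), as done above.  A hypothetical
 * two-level setup is wired roughly like this:
 *
 *	page_counter_init(&parent->memory, NULL);		// top of the chain
 *	page_counter_init(&child->memory, &parent->memory);	// charges propagate upward
 *
 * A charge against child->memory is then accounted against parent->memory
 * as well, so a limit anywhere along the chain can fail the charge.
 */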
4623
Tejun Heoeb954192013-08-08 20:11:23 -04004624static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004625{
Tejun Heoeb954192013-08-08 20:11:23 -04004626 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Tejun Heo3bc942f2013-11-22 18:20:44 -05004627 struct mem_cgroup_event *event, *tmp;
Tejun Heo79bd9812013-11-22 18:20:42 -05004628
4629 /*
4630 * Unregister events and notify userspace.
4631	 * Notify userspace about cgroup removal only after rmdir of the cgroup
4632	 * directory to avoid a race between userspace and kernelspace.
4633 */
Tejun Heofba94802013-11-22 18:20:43 -05004634 spin_lock(&memcg->event_list_lock);
4635 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
Tejun Heo79bd9812013-11-22 18:20:42 -05004636 list_del_init(&event->list);
4637 schedule_work(&event->remove);
4638 }
Tejun Heofba94802013-11-22 18:20:43 -05004639 spin_unlock(&memcg->event_list_lock);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07004640
Michal Hocko33cb8762013-07-31 13:53:51 -07004641 vmpressure_cleanup(&memcg->vmpressure);
Vladimir Davydov2a4db7e2015-02-12 14:59:32 -08004642
4643 memcg_deactivate_kmem(memcg);
Tejun Heo52ebea72015-05-22 17:13:37 -04004644
4645 wb_memcg_offline(memcg);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004646}
4647
Tejun Heoeb954192013-08-08 20:11:23 -04004648static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004649{
Tejun Heoeb954192013-08-08 20:11:23 -04004650 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Daisuke Nishimurac268e992009-01-15 13:51:13 -08004651
Li Zefan10d5ebf2013-07-08 16:00:33 -07004652 memcg_destroy_kmem(memcg);
Li Zefan465939a2013-07-08 16:00:38 -07004653 __mem_cgroup_free(memcg);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004654}
4655
Tejun Heo1ced9532014-07-08 18:02:57 -04004656/**
4657 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4658 * @css: the target css
4659 *
4660 * Reset the states of the mem_cgroup associated with @css. This is
4661 * invoked when the userland requests disabling on the default hierarchy
4662 * but the memcg is pinned through dependency. The memcg should stop
4663 * applying policies and should revert to the vanilla state as it may be
4664 * made visible again.
4665 *
4666 * The current implementation only resets the essential configurations.
4667 * This needs to be expanded to cover all the visible parts.
4668 */
4669static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4670{
4671 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4672
Johannes Weiner3e32cb22014-12-10 15:42:31 -08004673 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
4674 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
4675 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
Johannes Weiner241994ed2015-02-11 15:26:06 -08004676 memcg->low = 0;
4677 memcg->high = PAGE_COUNTER_MAX;
Johannes Weiner24d404d2015-01-08 14:32:35 -08004678 memcg->soft_limit = PAGE_COUNTER_MAX;
Tejun Heo2529bb32015-05-22 18:23:34 -04004679 memcg_wb_domain_size_changed(memcg);
Tejun Heo1ced9532014-07-08 18:02:57 -04004680}
4681
Daisuke Nishimura02491442010-03-10 15:22:17 -08004682#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004683/* Handlers for move charge at task migration. */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004684static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004685{
Johannes Weiner05b84302014-08-06 16:05:59 -07004686 int ret;
Johannes Weiner9476db92014-08-06 16:05:55 -07004687
4688 /* Try a single bulk charge without reclaim first */
Johannes Weiner00501b52014-08-08 14:19:20 -07004689 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
Johannes Weiner9476db92014-08-06 16:05:55 -07004690 if (!ret) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004691 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004692 return ret;
4693 }
Johannes Weiner692e7c42014-08-06 16:05:57 -07004694 if (ret == -EINTR) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004695 cancel_charge(root_mem_cgroup, count);
Johannes Weiner692e7c42014-08-06 16:05:57 -07004696 return ret;
4697 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004698
4699 /* Try charges one by one with reclaim */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004700 while (count--) {
Johannes Weiner00501b52014-08-08 14:19:20 -07004701 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
Johannes Weiner9476db92014-08-06 16:05:55 -07004702 /*
4703 * In case of failure, any residual charges against
4704 * mc.to will be dropped by mem_cgroup_clear_mc()
Johannes Weiner692e7c42014-08-06 16:05:57 -07004705 * later on. However, cancel any charges that are
4706 * bypassed to root right away or they'll be lost.
Johannes Weiner9476db92014-08-06 16:05:55 -07004707 */
Johannes Weiner692e7c42014-08-06 16:05:57 -07004708 if (ret == -EINTR)
Johannes Weiner00501b52014-08-08 14:19:20 -07004709 cancel_charge(root_mem_cgroup, 1);
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004710 if (ret)
KAMEZAWA Hiroyuki38c5d722012-01-12 17:19:01 -08004711 return ret;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004712 mc.precharge++;
Johannes Weiner9476db92014-08-06 16:05:55 -07004713 cond_resched();
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004714 }
Johannes Weiner9476db92014-08-06 16:05:55 -07004715 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004716}
4717
4718/**
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004719 * get_mctgt_type - get target type of moving charge
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004720 * @vma: the vma the pte to be checked belongs
4721 * @addr: the address corresponding to the pte to be checked
4722 * @ptent: the pte to be checked
Daisuke Nishimura02491442010-03-10 15:22:17 -08004723 * @target: pointer where the target page or swap entry will be stored (can be NULL)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004724 *
4725 * Returns
4726 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4727 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4728 *     move charge. If @target is not NULL, the page is stored in target->page
4729 *     with an extra refcount taken (callers should handle it).
Daisuke Nishimura02491442010-03-10 15:22:17 -08004730 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4731 *     target for charge migration. If @target is not NULL, the entry is stored
4732 * in target->ent.
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004733 *
4734 * Called with pte lock held.
4735 */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004736union mc_target {
4737 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004738 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004739};
4740
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004741enum mc_target_type {
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004742 MC_TARGET_NONE = 0,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004743 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08004744 MC_TARGET_SWAP,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004745};
4746
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004747static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4748 unsigned long addr, pte_t ptent)
4749{
4750 struct page *page = vm_normal_page(vma, addr, ptent);
4751
4752 if (!page || !page_mapped(page))
4753 return NULL;
4754 if (PageAnon(page)) {
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004755 if (!(mc.flags & MOVE_ANON))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004756 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004757 } else {
4758 if (!(mc.flags & MOVE_FILE))
4759 return NULL;
4760 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004761 if (!get_page_unless_zero(page))
4762 return NULL;
4763
4764 return page;
4765}
4766
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004767#ifdef CONFIG_SWAP
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004768static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4769 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4770{
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004771 struct page *page = NULL;
4772 swp_entry_t ent = pte_to_swp_entry(ptent);
4773
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004774 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004775 return NULL;
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004776 /*
4777	 * Because lookup_swap_cache() updates some statistics counters,
4778 * we call find_get_page() with swapper_space directly.
4779 */
Shaohua Li33806f02013-02-22 16:34:37 -08004780 page = find_get_page(swap_address_space(ent), ent.val);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004781 if (do_swap_account)
4782 entry->val = ent.val;
4783
4784 return page;
4785}
KAMEZAWA Hiroyuki4b913552012-05-29 15:06:51 -07004786#else
4787static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4788 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4789{
4790 return NULL;
4791}
4792#endif
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004793
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004794static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4795 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4796{
4797 struct page *page = NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004798 struct address_space *mapping;
4799 pgoff_t pgoff;
4800
4801 if (!vma->vm_file) /* anonymous vma */
4802 return NULL;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004803 if (!(mc.flags & MOVE_FILE))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004804 return NULL;
4805
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004806 mapping = vma->vm_file->f_mapping;
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004807 pgoff = linear_page_index(vma, addr);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004808
4809 /* page is moved even if it's not RSS of this task(page-faulted). */
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004810#ifdef CONFIG_SWAP
4811 /* shmem/tmpfs may report page out on swap: account for that too. */
Johannes Weiner139b6a62014-05-06 12:50:05 -07004812 if (shmem_mapping(mapping)) {
4813 page = find_get_entry(mapping, pgoff);
4814 if (radix_tree_exceptional_entry(page)) {
4815 swp_entry_t swp = radix_to_swp_entry(page);
4816 if (do_swap_account)
4817 *entry = swp;
4818 page = find_get_page(swap_address_space(swp), swp.val);
4819 }
4820 } else
4821 page = find_get_page(mapping, pgoff);
4822#else
4823 page = find_get_page(mapping, pgoff);
Hugh Dickinsaa3b1892011-08-03 16:21:24 -07004824#endif
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004825 return page;
4826}
4827
Chen Gangb1b0dea2015-04-14 15:47:35 -07004828/**
4829 * mem_cgroup_move_account - move account of the page
4830 * @page: the page
4831 * @nr_pages: number of regular pages (>1 for huge pages)
4832 * @from: mem_cgroup which the page is moved from.
4833 * @to: mem_cgroup which the page is moved to. @from != @to.
4834 *
4835 * The caller must confirm the following:
4836 * - page is not on LRU (isolate_page() is useful.)
4837 * - compound_lock is held when nr_pages > 1
4838 *
4839 * This function doesn't do "charge" to the new cgroup and doesn't do
4840 * "uncharge" from the old cgroup.
4841 */
4842static int mem_cgroup_move_account(struct page *page,
4843 unsigned int nr_pages,
4844 struct mem_cgroup *from,
4845 struct mem_cgroup *to)
4846{
4847 unsigned long flags;
4848 int ret;
Greg Thelenc4843a72015-05-22 17:13:16 -04004849 bool anon;
Chen Gangb1b0dea2015-04-14 15:47:35 -07004850
4851 VM_BUG_ON(from == to);
4852 VM_BUG_ON_PAGE(PageLRU(page), page);
4853 /*
4854	 * The page is isolated from the LRU, so the collapse function
4855	 * will not handle it. But page splitting can still happen.
4856	 * Do this check under compound_page_lock(), which the caller
4857	 * should hold.
4858 */
4859 ret = -EBUSY;
4860 if (nr_pages > 1 && !PageTransHuge(page))
4861 goto out;
4862
4863 /*
4864 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
4865 * of its source page while we change it: page migration takes
4866 * both pages off the LRU, but page cache replacement doesn't.
4867 */
4868 if (!trylock_page(page))
4869 goto out;
4870
4871 ret = -EINVAL;
4872 if (page->mem_cgroup != from)
4873 goto out_unlock;
4874
Greg Thelenc4843a72015-05-22 17:13:16 -04004875 anon = PageAnon(page);
4876
Chen Gangb1b0dea2015-04-14 15:47:35 -07004877 spin_lock_irqsave(&from->move_lock, flags);
4878
Greg Thelenc4843a72015-05-22 17:13:16 -04004879 if (!anon && page_mapped(page)) {
Chen Gangb1b0dea2015-04-14 15:47:35 -07004880 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4881 nr_pages);
4882 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4883 nr_pages);
4884 }
4885
Greg Thelenc4843a72015-05-22 17:13:16 -04004886 /*
4887	 * move_lock is grabbed above and the caller set from->moving_account,
4888	 * so mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4889	 * The mapping should therefore be stable for dirty pages.
4890 */
4891 if (!anon && PageDirty(page)) {
4892 struct address_space *mapping = page_mapping(page);
4893
4894 if (mapping_cap_account_dirty(mapping)) {
4895 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4896 nr_pages);
4897 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4898 nr_pages);
4899 }
4900 }
4901
Chen Gangb1b0dea2015-04-14 15:47:35 -07004902 if (PageWriteback(page)) {
4903 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4904 nr_pages);
4905 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4906 nr_pages);
4907 }
4908
4909 /*
4910 * It is safe to change page->mem_cgroup here because the page
4911 * is referenced, charged, and isolated - we can't race with
4912 * uncharging, charging, migration, or LRU putback.
4913 */
4914
4915 /* caller should have done css_get */
4916 page->mem_cgroup = to;
4917 spin_unlock_irqrestore(&from->move_lock, flags);
4918
4919 ret = 0;
4920
4921 local_irq_disable();
4922 mem_cgroup_charge_statistics(to, page, nr_pages);
4923 memcg_check_events(to, page);
4924 mem_cgroup_charge_statistics(from, page, -nr_pages);
4925 memcg_check_events(from, page);
4926 local_irq_enable();
4927out_unlock:
4928 unlock_page(page);
4929out:
4930 return ret;
4931}
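
/*
 * Illustrative calling sequence for mem_cgroup_move_account() (this mirrors
 * the move_charge path further below and is shown only to make the locking
 * contract explicit; "moved" is a hypothetical counter):
 *
 *	if (!isolate_lru_page(page)) {		// satisfy the "not on LRU" rule
 *		if (!mem_cgroup_move_account(page, 1, from, to))
 *			moved++;
 *		putback_lru_page(page);
 *	}
 *	put_page(page);		// drop the reference taken by get_mctgt_type()
 *
 * For nr_pages > 1, the caller must additionally hold compound_lock as
 * described in the comment above the function.
 */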
4932
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004933static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004934 unsigned long addr, pte_t ptent, union mc_target *target)
4935{
Daisuke Nishimura02491442010-03-10 15:22:17 -08004936 struct page *page = NULL;
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004937 enum mc_target_type ret = MC_TARGET_NONE;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004938 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004939
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004940 if (pte_present(ptent))
4941 page = mc_handle_present_pte(vma, addr, ptent);
4942 else if (is_swap_pte(ptent))
4943 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
Kirill A. Shutemov0661a332015-02-10 14:10:04 -08004944 else if (pte_none(ptent))
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004945 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004946
4947 if (!page && !ent.val)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07004948 return ret;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004949 if (page) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004950 /*
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004951		 * Do only a loose check w/o serialization.
Johannes Weiner1306a852014-12-10 15:44:52 -08004952		 * mem_cgroup_move_account() checks whether the page is
Johannes Weiner0a31bc92014-08-08 14:19:22 -07004953		 * valid under LRU exclusion.
Daisuke Nishimura02491442010-03-10 15:22:17 -08004954 */
Johannes Weiner1306a852014-12-10 15:44:52 -08004955 if (page->mem_cgroup == mc.from) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004956 ret = MC_TARGET_PAGE;
4957 if (target)
4958 target->page = page;
4959 }
4960 if (!ret || !target)
4961 put_page(page);
4962 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004963	/* There is a swap entry and the page doesn't exist or isn't charged */
4964 if (ent.val && !ret &&
Li Zefan34c00c32013-09-23 16:56:01 +08004965 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07004966 ret = MC_TARGET_SWAP;
4967 if (target)
4968 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004969 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004970 return ret;
4971}
4972
Naoya Horiguchi12724852012-03-21 16:34:28 -07004973#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4974/*
4975 * We don't consider swapping or file-mapped pages because THP does not
4976 * support them for now.
4977 * The caller should make sure that pmd_trans_huge(pmd) is true.
4978 */
4979static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4980 unsigned long addr, pmd_t pmd, union mc_target *target)
4981{
4982 struct page *page = NULL;
Naoya Horiguchi12724852012-03-21 16:34:28 -07004983 enum mc_target_type ret = MC_TARGET_NONE;
4984
4985 page = pmd_page(pmd);
Sasha Levin309381fea2014-01-23 15:52:54 -08004986 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08004987 if (!(mc.flags & MOVE_ANON))
Naoya Horiguchi12724852012-03-21 16:34:28 -07004988 return ret;
Johannes Weiner1306a852014-12-10 15:44:52 -08004989 if (page->mem_cgroup == mc.from) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07004990 ret = MC_TARGET_PAGE;
4991 if (target) {
4992 get_page(page);
4993 target->page = page;
4994 }
4995 }
4996 return ret;
4997}
4998#else
4999static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5000 unsigned long addr, pmd_t pmd, union mc_target *target)
5001{
5002 return MC_TARGET_NONE;
5003}
5004#endif
5005
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005006static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5007 unsigned long addr, unsigned long end,
5008 struct mm_walk *walk)
5009{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005010 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005011 pte_t *pte;
5012 spinlock_t *ptl;
5013
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005014 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005015 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5016 mc.precharge += HPAGE_PMD_NR;
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005017 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07005018 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005019 }
Dave Hansen03319322011-03-22 16:32:56 -07005020
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07005021 if (pmd_trans_unstable(pmd))
5022 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005023 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5024 for (; addr != end; pte++, addr += PAGE_SIZE)
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005025 if (get_mctgt_type(vma, addr, *pte, NULL))
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005026 mc.precharge++; /* increment precharge temporarily */
5027 pte_unmap_unlock(pte - 1, ptl);
5028 cond_resched();
5029
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005030 return 0;
5031}
5032
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005033static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5034{
5035 unsigned long precharge;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005036
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005037 struct mm_walk mem_cgroup_count_precharge_walk = {
5038 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5039 .mm = mm,
5040 };
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005041 down_read(&mm->mmap_sem);
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005042 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005043 up_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005044
5045 precharge = mc.precharge;
5046 mc.precharge = 0;
5047
5048 return precharge;
5049}
5050
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005051static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5052{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005053 unsigned long precharge = mem_cgroup_count_precharge(mm);
5054
5055 VM_BUG_ON(mc.moving_task);
5056 mc.moving_task = current;
5057 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005058}
5059
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005060/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5061static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005062{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005063 struct mem_cgroup *from = mc.from;
5064 struct mem_cgroup *to = mc.to;
5065
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005066 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005067 if (mc.precharge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005068 cancel_charge(mc.to, mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005069 mc.precharge = 0;
5070 }
5071 /*
5072	 * we didn't uncharge from mc.from in mem_cgroup_move_account(), so
5073 * we must uncharge here.
5074 */
5075 if (mc.moved_charge) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005076 cancel_charge(mc.from, mc.moved_charge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005077 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005078 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005079 /* we must fixup refcnts and charges */
5080 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005081 /* uncharge swap account from the old cgroup */
Johannes Weinerce00a962014-09-05 08:43:57 -04005082 if (!mem_cgroup_is_root(mc.from))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005083 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005084
Johannes Weiner05b84302014-08-06 16:05:59 -07005085 /*
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005086 * we charged both to->memory and to->memsw, so we
5087 * should uncharge to->memory.
Johannes Weiner05b84302014-08-06 16:05:59 -07005088 */
Johannes Weinerce00a962014-09-05 08:43:57 -04005089 if (!mem_cgroup_is_root(mc.to))
Johannes Weiner3e32cb22014-12-10 15:42:31 -08005090 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005091
Johannes Weinere8ea14c2014-12-10 15:42:42 -08005092 css_put_many(&mc.from->css, mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005093
Li Zefan40503772013-07-08 16:00:34 -07005094 /* we've already done css_get(mc.to) */
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005095 mc.moved_swap = 0;
5096 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005097 memcg_oom_recover(from);
5098 memcg_oom_recover(to);
5099 wake_up_all(&mc.waitq);
5100}
5101
5102static void mem_cgroup_clear_mc(void)
5103{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005104 /*
5105 * we must clear moving_task before waking up waiters at the end of
5106 * task migration.
5107 */
5108 mc.moving_task = NULL;
5109 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005110 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005111 mc.from = NULL;
5112 mc.to = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005113 spin_unlock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005114}
5115
Tejun Heoeb954192013-08-08 20:11:23 -04005116static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08005117 struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005118{
Tejun Heo2f7ee562011-12-12 18:12:21 -08005119 struct task_struct *p = cgroup_taskset_first(tset);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005120 int ret = 0;
Tejun Heoeb954192013-08-08 20:11:23 -04005121 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005122 unsigned long move_flags;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005123
Glauber Costaee5e8472013-02-22 16:34:50 -08005124 /*
5125	 * We are now committed to this value whatever it is. Changes in this
5126 * tunable will only affect upcoming migrations, not the current one.
5127 * So we need to save it, and keep it going.
5128 */
Jason Low4db0c3c2015-04-15 16:14:08 -07005129 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005130 if (move_flags) {
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005131 struct mm_struct *mm;
5132 struct mem_cgroup *from = mem_cgroup_from_task(p);
5133
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005134 VM_BUG_ON(from == memcg);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005135
5136 mm = get_task_mm(p);
5137 if (!mm)
5138 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005139		/* We move charges only when we move an owner of the mm */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005140 if (mm->owner == p) {
5141 VM_BUG_ON(mc.from);
5142 VM_BUG_ON(mc.to);
5143 VM_BUG_ON(mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005144 VM_BUG_ON(mc.moved_charge);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005145 VM_BUG_ON(mc.moved_swap);
Johannes Weiner247b1442014-12-10 15:44:11 -08005146
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005147 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005148 mc.from = from;
Raghavendra K Tc0ff4b82011-11-02 13:38:15 -07005149 mc.to = memcg;
Johannes Weiner1dfab5a2015-02-11 15:26:09 -08005150 mc.flags = move_flags;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07005151 spin_unlock(&mc.lock);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005152 /* We set mc.moving_task later */
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005153
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005154 ret = mem_cgroup_precharge_mc(mm);
5155 if (ret)
5156 mem_cgroup_clear_mc();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005157 }
5158 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005159 }
5160 return ret;
5161}
5162
Tejun Heoeb954192013-08-08 20:11:23 -04005163static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08005164 struct cgroup_taskset *tset)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005165{
Johannes Weiner4e2f2452014-12-10 15:44:08 -08005166 if (mc.to)
5167 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005168}
5169
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005170static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5171 unsigned long addr, unsigned long end,
5172 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005173{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005174 int ret = 0;
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005175 struct vm_area_struct *vma = walk->vma;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005176 pte_t *pte;
5177 spinlock_t *ptl;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005178 enum mc_target_type target_type;
5179 union mc_target target;
5180 struct page *page;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005181
Naoya Horiguchi12724852012-03-21 16:34:28 -07005182 /*
5183 * We don't take compound_lock() here but no race with splitting thp
5184 * happens because:
5185 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
5186 * under splitting, which means there's no concurrent thp split,
5187 * - if another thread runs into split_huge_page() just after we
5188 * entered this if-block, the thread must wait for page table lock
5189 * to be unlocked in __split_huge_page_splitting(), where the main
5190 * part of thp split is not executed yet.
5191 */
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005192 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
Hugh Dickins62ade862012-05-18 11:28:34 -07005193 if (mc.precharge < HPAGE_PMD_NR) {
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005194 spin_unlock(ptl);
Naoya Horiguchi12724852012-03-21 16:34:28 -07005195 return 0;
5196 }
5197 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5198 if (target_type == MC_TARGET_PAGE) {
5199 page = target.page;
5200 if (!isolate_lru_page(page)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005201 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
Johannes Weiner1306a852014-12-10 15:44:52 -08005202 mc.from, mc.to)) {
Naoya Horiguchi12724852012-03-21 16:34:28 -07005203 mc.precharge -= HPAGE_PMD_NR;
5204 mc.moved_charge += HPAGE_PMD_NR;
5205 }
5206 putback_lru_page(page);
5207 }
5208 put_page(page);
5209 }
Kirill A. Shutemovbf929152013-11-14 14:30:54 -08005210 spin_unlock(ptl);
Andrea Arcangeli1a5a9902012-03-21 16:33:42 -07005211 return 0;
Naoya Horiguchi12724852012-03-21 16:34:28 -07005212 }
5213
Andrea Arcangeli45f83ce2012-03-28 14:42:40 -07005214 if (pmd_trans_unstable(pmd))
5215 return 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005216retry:
5217 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5218 for (; addr != end; addr += PAGE_SIZE) {
5219 pte_t ptent = *(pte++);
Daisuke Nishimura02491442010-03-10 15:22:17 -08005220 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005221
5222 if (!mc.precharge)
5223 break;
5224
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005225 switch (get_mctgt_type(vma, addr, ptent, &target)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005226 case MC_TARGET_PAGE:
5227 page = target.page;
5228 if (isolate_lru_page(page))
5229 goto put;
Johannes Weiner1306a852014-12-10 15:44:52 -08005230 if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005231 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005232 /* we uncharge from mc.from later. */
5233 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005234 }
5235 putback_lru_page(page);
Naoya Horiguchi8d32ff82012-03-21 16:34:27 -07005236put: /* get_mctgt_type() gets the page */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005237 put_page(page);
5238 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08005239 case MC_TARGET_SWAP:
5240 ent = target.ent;
Hugh Dickinse91cbb42012-05-29 15:06:51 -07005241 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08005242 mc.precharge--;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08005243 /* we fixup refcnts and charges later. */
5244 mc.moved_swap++;
5245 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08005246 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005247 default:
5248 break;
5249 }
5250 }
5251 pte_unmap_unlock(pte - 1, ptl);
5252 cond_resched();
5253
5254 if (addr != end) {
5255 /*
5256 * We have consumed all precharges we got in can_attach().
5257		 * We try to charge one by one, but don't do any additional
5258		 * charges to mc.to if we have already failed to charge once
5259		 * in the attach() phase.
5260 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08005261 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005262 if (!ret)
5263 goto retry;
5264 }
5265
5266 return ret;
5267}
5268
5269static void mem_cgroup_move_charge(struct mm_struct *mm)
5270{
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005271 struct mm_walk mem_cgroup_move_charge_walk = {
5272 .pmd_entry = mem_cgroup_move_charge_pte_range,
5273 .mm = mm,
5274 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005275
5276 lru_add_drain_all();
Johannes Weiner312722c2014-12-10 15:44:25 -08005277 /*
5278 * Signal mem_cgroup_begin_page_stat() to take the memcg's
5279 * move_lock while we're moving its pages to another memcg.
5280 * Then wait for already started RCU-only updates to finish.
5281 */
5282 atomic_inc(&mc.from->moving_account);
5283 synchronize_rcu();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005284retry:
5285 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5286 /*
5287		 * Someone who is holding the mmap_sem might be waiting in
5288 * waitq. So we cancel all extra charges, wake up all waiters,
5289 * and retry. Because we cancel precharges, we might not be able
5290 * to move enough charges, but moving charge is a best-effort
5291 * feature anyway, so it wouldn't be a big problem.
5292 */
5293 __mem_cgroup_clear_mc();
5294 cond_resched();
5295 goto retry;
5296 }
Naoya Horiguchi26bcd642015-02-11 15:27:57 -08005297 /*
5298 * When we have consumed all precharges and failed in doing
5299 * additional charge, the page walk just aborts.
5300 */
5301 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005302 up_read(&mm->mmap_sem);
Johannes Weiner312722c2014-12-10 15:44:25 -08005303 atomic_dec(&mc.from->moving_account);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005304}
5305
Tejun Heoeb954192013-08-08 20:11:23 -04005306static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08005307 struct cgroup_taskset *tset)
Balbir Singh67e465a2008-02-07 00:13:54 -08005308{
Tejun Heo2f7ee562011-12-12 18:12:21 -08005309 struct task_struct *p = cgroup_taskset_first(tset);
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005310 struct mm_struct *mm = get_task_mm(p);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005311
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005312 if (mm) {
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005313 if (mc.to)
5314 mem_cgroup_move_charge(mm);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005315 mmput(mm);
5316 }
KOSAKI Motohiroa4336582011-06-15 15:08:13 -07005317 if (mc.to)
5318 mem_cgroup_clear_mc();
Balbir Singh67e465a2008-02-07 00:13:54 -08005319}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005320#else /* !CONFIG_MMU */
Tejun Heoeb954192013-08-08 20:11:23 -04005321static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08005322 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005323{
5324 return 0;
5325}
Tejun Heoeb954192013-08-08 20:11:23 -04005326static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08005327 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005328{
5329}
Tejun Heoeb954192013-08-08 20:11:23 -04005330static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
Li Zefan761b3ef52012-01-31 13:47:36 +08005331 struct cgroup_taskset *tset)
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005332{
5333}
5334#endif
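
/*
 * Summary of the charge-moving callbacks above (descriptive sketch):
 *
 *	mem_cgroup_can_attach()    - records mc.from/mc.to and precharges as
 *	                             many pages as the first page-table walk
 *	                             counted
 *	mem_cgroup_move_task()     - walks the page tables again and moves the
 *	                             precharged pages with
 *	                             mem_cgroup_move_account()
 *	mem_cgroup_cancel_attach() - drops the precharge if the migration is
 *	                             aborted
 *
 * On !CONFIG_MMU all three are stubs, since there are no page tables to walk.
 */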
Balbir Singh67e465a2008-02-07 00:13:54 -08005335
Tejun Heof00baae2013-04-15 13:41:15 -07005336/*
5337 * Cgroup retains root cgroups across [un]mount cycles, making it necessary
Tejun Heoaa6ec292014-07-09 10:08:08 -04005338 * to verify whether we're attached to the default hierarchy on each mount
5339 * attempt.
Tejun Heof00baae2013-04-15 13:41:15 -07005340 */
Tejun Heoeb954192013-08-08 20:11:23 -04005341static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
Tejun Heof00baae2013-04-15 13:41:15 -07005342{
5343 /*
Tejun Heoaa6ec292014-07-09 10:08:08 -04005344 * use_hierarchy is forced on the default hierarchy. cgroup core
Tejun Heof00baae2013-04-15 13:41:15 -07005345 * guarantees that @root doesn't have any children, so turning it
5346 * on for the root memcg is enough.
5347 */
Tejun Heoaa6ec292014-07-09 10:08:08 -04005348 if (cgroup_on_dfl(root_css->cgroup))
Vladimir Davydov7feee5902015-03-12 16:26:19 -07005349 root_mem_cgroup->use_hierarchy = true;
5350 else
5351 root_mem_cgroup->use_hierarchy = false;
Tejun Heof00baae2013-04-15 13:41:15 -07005352}
5353
Johannes Weiner241994ed2015-02-11 15:26:06 -08005354static u64 memory_current_read(struct cgroup_subsys_state *css,
5355 struct cftype *cft)
5356{
5357 return mem_cgroup_usage(mem_cgroup_from_css(css), false);
5358}
5359
5360static int memory_low_show(struct seq_file *m, void *v)
5361{
5362 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005363 unsigned long low = READ_ONCE(memcg->low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005364
5365 if (low == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005366 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005367 else
5368 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5369
5370 return 0;
5371}
5372
5373static ssize_t memory_low_write(struct kernfs_open_file *of,
5374 char *buf, size_t nbytes, loff_t off)
5375{
5376 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5377 unsigned long low;
5378 int err;
5379
5380 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005381 err = page_counter_memparse(buf, "max", &low);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005382 if (err)
5383 return err;
5384
5385 memcg->low = low;
5386
5387 return nbytes;
5388}
5389
5390static int memory_high_show(struct seq_file *m, void *v)
5391{
5392 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005393 unsigned long high = READ_ONCE(memcg->high);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005394
5395 if (high == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005396 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005397 else
5398 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5399
5400 return 0;
5401}
5402
5403static ssize_t memory_high_write(struct kernfs_open_file *of,
5404 char *buf, size_t nbytes, loff_t off)
5405{
5406 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5407 unsigned long high;
5408 int err;
5409
5410 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005411 err = page_counter_memparse(buf, "max", &high);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005412 if (err)
5413 return err;
5414
5415 memcg->high = high;
5416
Tejun Heo2529bb32015-05-22 18:23:34 -04005417 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005418 return nbytes;
5419}
5420
5421static int memory_max_show(struct seq_file *m, void *v)
5422{
5423 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
Jason Low4db0c3c2015-04-15 16:14:08 -07005424 unsigned long max = READ_ONCE(memcg->memory.limit);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005425
5426 if (max == PAGE_COUNTER_MAX)
Johannes Weinerd2973692015-02-27 15:52:04 -08005427 seq_puts(m, "max\n");
Johannes Weiner241994ed2015-02-11 15:26:06 -08005428 else
5429 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5430
5431 return 0;
5432}
5433
5434static ssize_t memory_max_write(struct kernfs_open_file *of,
5435 char *buf, size_t nbytes, loff_t off)
5436{
5437 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5438 unsigned long max;
5439 int err;
5440
5441 buf = strstrip(buf);
Johannes Weinerd2973692015-02-27 15:52:04 -08005442 err = page_counter_memparse(buf, "max", &max);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005443 if (err)
5444 return err;
5445
5446 err = mem_cgroup_resize_limit(memcg, max);
5447 if (err)
5448 return err;
5449
Tejun Heo2529bb32015-05-22 18:23:34 -04005450 memcg_wb_domain_size_changed(memcg);
Johannes Weiner241994ed2015-02-11 15:26:06 -08005451 return nbytes;
5452}
5453
5454static int memory_events_show(struct seq_file *m, void *v)
5455{
5456 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5457
5458 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5459 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5460 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5461 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5462
5463 return 0;
5464}
5465
5466static struct cftype memory_files[] = {
5467 {
5468 .name = "current",
5469 .read_u64 = memory_current_read,
5470 },
5471 {
5472 .name = "low",
5473 .flags = CFTYPE_NOT_ON_ROOT,
5474 .seq_show = memory_low_show,
5475 .write = memory_low_write,
5476 },
5477 {
5478 .name = "high",
5479 .flags = CFTYPE_NOT_ON_ROOT,
5480 .seq_show = memory_high_show,
5481 .write = memory_high_write,
5482 },
5483 {
5484 .name = "max",
5485 .flags = CFTYPE_NOT_ON_ROOT,
5486 .seq_show = memory_max_show,
5487 .write = memory_max_write,
5488 },
5489 {
5490 .name = "events",
5491 .flags = CFTYPE_NOT_ON_ROOT,
5492 .seq_show = memory_events_show,
5493 },
5494 { } /* terminate */
5495};
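
/*
 * The files above form the default-hierarchy (cgroup v2) interface of the
 * controller.  Rough mapping to the internals (descriptive only; all files
 * except memory.current are hidden on the root group via CFTYPE_NOT_ON_ROOT):
 *
 *	memory.current - current usage in bytes
 *	memory.low     - memcg->low, best-effort protection boundary
 *	memory.high    - memcg->high, soft ceiling
 *	memory.max     - memcg->memory.limit, the hard limit
 *	memory.events  - MEMCG_LOW/HIGH/MAX/OOM event counters
 *
 * Writing the string "max" to the limit files resets them to
 * PAGE_COUNTER_MAX, as parsed by page_counter_memparse().
 */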
5496
Tejun Heo073219e2014-02-08 10:36:58 -05005497struct cgroup_subsys memory_cgrp_subsys = {
Tejun Heo92fb9742012-11-19 08:13:38 -08005498 .css_alloc = mem_cgroup_css_alloc,
Glauber Costad142e3e2013-02-22 16:34:52 -08005499 .css_online = mem_cgroup_css_online,
Tejun Heo92fb9742012-11-19 08:13:38 -08005500 .css_offline = mem_cgroup_css_offline,
5501 .css_free = mem_cgroup_css_free,
Tejun Heo1ced9532014-07-08 18:02:57 -04005502 .css_reset = mem_cgroup_css_reset,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005503 .can_attach = mem_cgroup_can_attach,
5504 .cancel_attach = mem_cgroup_cancel_attach,
Balbir Singh67e465a2008-02-07 00:13:54 -08005505 .attach = mem_cgroup_move_task,
Tejun Heof00baae2013-04-15 13:41:15 -07005506 .bind = mem_cgroup_bind,
Johannes Weiner241994ed2015-02-11 15:26:06 -08005507 .dfl_cftypes = memory_files,
5508 .legacy_cftypes = mem_cgroup_legacy_files,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005509 .early_init = 0,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005510};
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005511
Johannes Weiner241994ed2015-02-11 15:26:06 -08005512/**
5513 * mem_cgroup_events - count memory events against a cgroup
5514 * @memcg: the memory cgroup
5515 * @idx: the event index
5516 * @nr: the number of events to account for
5517 */
5518void mem_cgroup_events(struct mem_cgroup *memcg,
5519 enum mem_cgroup_events_index idx,
5520 unsigned int nr)
5521{
5522 this_cpu_add(memcg->stat->events[idx], nr);
5523}
5524
5525/**
5526 * mem_cgroup_low - check if memory consumption is below the normal range
5527 * @root: the highest ancestor to consider
5528 * @memcg: the memory cgroup to check
5529 *
5530 * Returns %true if memory consumption of @memcg, and that of all
5531 * configurable ancestors up to @root, is below the normal range.
5532 */
5533bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5534{
5535 if (mem_cgroup_disabled())
5536 return false;
5537
5538 /*
5539 * The toplevel group doesn't have a configurable range, so
5540 * it's never low when looked at directly, and it is not
5541 * considered an ancestor when assessing the hierarchy.
5542 */
5543
5544 if (memcg == root_mem_cgroup)
5545 return false;
5546
Michal Hocko4e54ded2015-02-27 15:51:46 -08005547 if (page_counter_read(&memcg->memory) >= memcg->low)
Johannes Weiner241994ed2015-02-11 15:26:06 -08005548 return false;
5549
5550 while (memcg != root) {
5551 memcg = parent_mem_cgroup(memcg);
5552
5553 if (memcg == root_mem_cgroup)
5554 break;
5555
Michal Hocko4e54ded2015-02-27 15:51:46 -08005556 if (page_counter_read(&memcg->memory) >= memcg->low)
Johannes Weiner241994ed2015-02-11 15:26:06 -08005557 return false;
5558 }
5559 return true;
5560}
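
/*
 * Sketch of how a caller in the reclaim path might use mem_cgroup_low()
 * (hypothetical loop body, not a call site from this file):
 *
 *	if (mem_cgroup_low(root, memcg)) {
 *		// every configurable ancestor up to @root is below its
 *		// low boundary, so skip this group unless reclaim is
 *		// otherwise unable to make progress
 *		continue;
 *	}
 */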
5561
Johannes Weiner00501b52014-08-08 14:19:20 -07005562/**
5563 * mem_cgroup_try_charge - try charging a page
5564 * @page: page to charge
5565 * @mm: mm context of the victim
5566 * @gfp_mask: reclaim mode
5567 * @memcgp: charged memcg return
5568 *
5569 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5570 * pages according to @gfp_mask if necessary.
5571 *
5572 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5573 * Otherwise, an error code is returned.
5574 *
5575 * After page->mapping has been set up, the caller must finalize the
5576 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5577 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5578 */
5579int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5580 gfp_t gfp_mask, struct mem_cgroup **memcgp)
5581{
5582 struct mem_cgroup *memcg = NULL;
5583 unsigned int nr_pages = 1;
5584 int ret = 0;
5585
5586 if (mem_cgroup_disabled())
5587 goto out;
5588
5589 if (PageSwapCache(page)) {
Johannes Weiner00501b52014-08-08 14:19:20 -07005590 /*
5591 * Every swap fault against a single page tries to charge the
5592		 * page, so bail out as early as possible. shmem_unuse() encounters
5593 * already charged pages, too. The USED bit is protected by
5594 * the page lock, which serializes swap cache removal, which
5595 * in turn serializes uncharging.
5596 */
Johannes Weiner1306a852014-12-10 15:44:52 -08005597 if (page->mem_cgroup)
Johannes Weiner00501b52014-08-08 14:19:20 -07005598 goto out;
5599 }
5600
5601 if (PageTransHuge(page)) {
5602 nr_pages <<= compound_order(page);
5603 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5604 }
5605
5606 if (do_swap_account && PageSwapCache(page))
5607 memcg = try_get_mem_cgroup_from_page(page);
5608 if (!memcg)
5609 memcg = get_mem_cgroup_from_mm(mm);
5610
5611 ret = try_charge(memcg, gfp_mask, nr_pages);
5612
5613 css_put(&memcg->css);
5614
5615 if (ret == -EINTR) {
5616 memcg = root_mem_cgroup;
5617 ret = 0;
5618 }
5619out:
5620 *memcgp = memcg;
5621 return ret;
5622}
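
/*
 * Minimal usage sketch of the try/commit/cancel protocol documented above.
 * do_map_page() is a hypothetical stand-in for setting up page->mapping
 * (page table insertion, rmap, etc.); this is not a call site from this file:
 *
 *	struct mem_cgroup *memcg;
 *	int err;
 *
 *	err = mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg);
 *	if (err)
 *		return err;
 *	err = do_map_page(page);			// hypothetical helper
 *	if (err) {
 *		mem_cgroup_cancel_charge(page, memcg);	// abort the transaction
 *		return err;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false);	// page not on LRU yet
 *	lru_cache_add(page);				// LRU only after the commit
 */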
5623
5624/**
5625 * mem_cgroup_commit_charge - commit a page charge
5626 * @page: page to charge
5627 * @memcg: memcg to charge the page to
5628 * @lrucare: page might be on LRU already
5629 *
5630 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5631 * after page->mapping has been set up. This must happen atomically
5632 * as part of the page instantiation, i.e. under the page table lock
5633 * for anonymous pages, under the page lock for page and swap cache.
5634 *
5635 * In addition, the page must not be on the LRU during the commit, to
5636 * prevent racing with task migration. If it might be, use @lrucare.
5637 *
5638 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5639 */
5640void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5641 bool lrucare)
5642{
5643 unsigned int nr_pages = 1;
5644
5645 VM_BUG_ON_PAGE(!page->mapping, page);
5646 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5647
5648 if (mem_cgroup_disabled())
5649 return;
5650 /*
5651 * Swap faults will attempt to charge the same page multiple
5652 * times. But reuse_swap_page() might have removed the page
5653 * from swapcache already, so we can't check PageSwapCache().
5654 */
5655 if (!memcg)
5656 return;
5657
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005658 commit_charge(page, memcg, lrucare);
5659
Johannes Weiner00501b52014-08-08 14:19:20 -07005660 if (PageTransHuge(page)) {
5661 nr_pages <<= compound_order(page);
5662 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5663 }
5664
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005665 local_irq_disable();
5666 mem_cgroup_charge_statistics(memcg, page, nr_pages);
5667 memcg_check_events(memcg, page);
5668 local_irq_enable();
Johannes Weiner00501b52014-08-08 14:19:20 -07005669
5670 if (do_swap_account && PageSwapCache(page)) {
5671 swp_entry_t entry = { .val = page_private(page) };
5672 /*
5673 * The swap entry might not get freed for a long time,
5674 * let's not wait for it. The page already received a
5675 * memory+swap charge, drop the swap entry duplicate.
5676 */
5677 mem_cgroup_uncharge_swap(entry);
5678 }
5679}
5680
5681/**
5682 * mem_cgroup_cancel_charge - cancel a page charge
5683 * @page: page to charge
5684 * @memcg: memcg to charge the page to
5685 *
5686 * Cancel a charge transaction started by mem_cgroup_try_charge().
5687 */
5688void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
5689{
5690 unsigned int nr_pages = 1;
5691
5692 if (mem_cgroup_disabled())
5693 return;
5694 /*
5695 * Swap faults will attempt to charge the same page multiple
5696 * times. But reuse_swap_page() might have removed the page
5697 * from swapcache already, so we can't check PageSwapCache().
5698 */
5699 if (!memcg)
5700 return;
5701
5702 if (PageTransHuge(page)) {
5703 nr_pages <<= compound_order(page);
5704 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5705 }
5706
5707 cancel_charge(memcg, nr_pages);
5708}
5709
Johannes Weiner747db952014-08-08 14:19:24 -07005710static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
Johannes Weiner747db952014-08-08 14:19:24 -07005711 unsigned long nr_anon, unsigned long nr_file,
5712 unsigned long nr_huge, struct page *dummy_page)
5713{
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005714 unsigned long nr_pages = nr_anon + nr_file;
Johannes Weiner747db952014-08-08 14:19:24 -07005715 unsigned long flags;
5716
Johannes Weinerce00a962014-09-05 08:43:57 -04005717 if (!mem_cgroup_is_root(memcg)) {
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005718 page_counter_uncharge(&memcg->memory, nr_pages);
5719 if (do_swap_account)
5720 page_counter_uncharge(&memcg->memsw, nr_pages);
Johannes Weinerce00a962014-09-05 08:43:57 -04005721 memcg_oom_recover(memcg);
5722 }
Johannes Weiner747db952014-08-08 14:19:24 -07005723
5724 local_irq_save(flags);
5725 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5726 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5727 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5728 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005729 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
Johannes Weiner747db952014-08-08 14:19:24 -07005730 memcg_check_events(memcg, dummy_page);
5731 local_irq_restore(flags);
Johannes Weinere8ea14c2014-12-10 15:42:42 -08005732
5733 if (!mem_cgroup_is_root(memcg))
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005734 css_put_many(&memcg->css, nr_pages);
Johannes Weiner747db952014-08-08 14:19:24 -07005735}
5736
5737static void uncharge_list(struct list_head *page_list)
5738{
5739 struct mem_cgroup *memcg = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005740 unsigned long nr_anon = 0;
5741 unsigned long nr_file = 0;
5742 unsigned long nr_huge = 0;
5743 unsigned long pgpgout = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005744 struct list_head *next;
5745 struct page *page;
5746
5747 next = page_list->next;
5748 do {
5749 unsigned int nr_pages = 1;
Johannes Weiner747db952014-08-08 14:19:24 -07005750
5751 page = list_entry(next, struct page, lru);
5752 next = page->lru.next;
5753
5754 VM_BUG_ON_PAGE(PageLRU(page), page);
5755 VM_BUG_ON_PAGE(page_count(page), page);
5756
Johannes Weiner1306a852014-12-10 15:44:52 -08005757 if (!page->mem_cgroup)
Johannes Weiner747db952014-08-08 14:19:24 -07005758 continue;
5759
5760 /*
5761 * Nobody should be changing or seriously looking at
Johannes Weiner1306a852014-12-10 15:44:52 -08005762		 * page->mem_cgroup at this point; we have fully
Johannes Weiner29833312014-12-10 15:44:02 -08005763 * exclusive access to the page.
Johannes Weiner747db952014-08-08 14:19:24 -07005764 */
5765
Johannes Weiner1306a852014-12-10 15:44:52 -08005766 if (memcg != page->mem_cgroup) {
Johannes Weiner747db952014-08-08 14:19:24 -07005767 if (memcg) {
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005768 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5769 nr_huge, page);
5770 pgpgout = nr_anon = nr_file = nr_huge = 0;
Johannes Weiner747db952014-08-08 14:19:24 -07005771 }
Johannes Weiner1306a852014-12-10 15:44:52 -08005772 memcg = page->mem_cgroup;
Johannes Weiner747db952014-08-08 14:19:24 -07005773 }
5774
5775 if (PageTransHuge(page)) {
5776 nr_pages <<= compound_order(page);
5777 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5778 nr_huge += nr_pages;
5779 }
5780
5781 if (PageAnon(page))
5782 nr_anon += nr_pages;
5783 else
5784 nr_file += nr_pages;
5785
Johannes Weiner1306a852014-12-10 15:44:52 -08005786 page->mem_cgroup = NULL;
Johannes Weiner747db952014-08-08 14:19:24 -07005787
5788 pgpgout++;
5789 } while (next != page_list);
5790
5791 if (memcg)
Johannes Weiner18eca2e2014-12-10 15:43:57 -08005792 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5793 nr_huge, page);
Johannes Weiner747db952014-08-08 14:19:24 -07005794}
5795
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005796/**
5797 * mem_cgroup_uncharge - uncharge a page
5798 * @page: page to uncharge
5799 *
5800 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5801 * mem_cgroup_commit_charge().
5802 */
5803void mem_cgroup_uncharge(struct page *page)
5804{
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005805 if (mem_cgroup_disabled())
5806 return;
5807
Johannes Weiner747db952014-08-08 14:19:24 -07005808 /* Don't touch page->lru of any random page, pre-check: */
Johannes Weiner1306a852014-12-10 15:44:52 -08005809 if (!page->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005810 return;
5811
Johannes Weiner747db952014-08-08 14:19:24 -07005812 INIT_LIST_HEAD(&page->lru);
5813 uncharge_list(&page->lru);
5814}
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005815
Johannes Weiner747db952014-08-08 14:19:24 -07005816/**
5817 * mem_cgroup_uncharge_list - uncharge a list of pages
5818 * @page_list: list of pages to uncharge
5819 *
5820 * Uncharge a list of pages previously charged with
5821 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5822 */
5823void mem_cgroup_uncharge_list(struct list_head *page_list)
5824{
5825 if (mem_cgroup_disabled())
5826 return;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005827
Johannes Weiner747db952014-08-08 14:19:24 -07005828 if (!list_empty(page_list))
5829 uncharge_list(page_list);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005830}
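
/*
 * Illustrative batching pattern for the list variant (sketch; the usual
 * callers are release paths that already hold a private list of pages with
 * no remaining references):
 *
 *	LIST_HEAD(pages_to_free);
 *
 *	// ... collect fully unmapped, zero-refcount pages on pages_to_free ...
 *	mem_cgroup_uncharge_list(&pages_to_free);
 *	free_hot_cold_page_list(&pages_to_free, true);	// assumed follow-up release step
 */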
5831
5832/**
5833 * mem_cgroup_migrate - migrate a charge to another page
5834 * @oldpage: currently charged page
5835 * @newpage: page to transfer the charge to
Michal Hockof5e03a42015-02-05 12:25:14 -08005836 * @lrucare: either or both pages might be on the LRU already
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005837 *
5838 * Migrate the charge from @oldpage to @newpage.
5839 *
5840 * Both pages must be locked, @newpage->mapping must be set up.
5841 */
5842void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
5843 bool lrucare)
5844{
Johannes Weiner29833312014-12-10 15:44:02 -08005845 struct mem_cgroup *memcg;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005846 int isolated;
5847
5848 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5849 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5850 VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage);
5851 VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage);
5852 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
Johannes Weiner6abb5a82014-08-08 14:19:33 -07005853 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5854 newpage);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005855
5856 if (mem_cgroup_disabled())
5857 return;
5858
5859 /* Page cache replacement: new page already charged? */
Johannes Weiner1306a852014-12-10 15:44:52 -08005860 if (newpage->mem_cgroup)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005861 return;
5862
Johannes Weiner7d5e3242014-12-10 15:43:46 -08005863 /*
5864 * Swapcache readahead pages can get migrated before being
5865 * charged, and migration from compaction can happen to an
5866 * uncharged page when the PFN walker finds a page that
5867 * reclaim just put back on the LRU but has not released yet.
5868 */
Johannes Weiner1306a852014-12-10 15:44:52 -08005869 memcg = oldpage->mem_cgroup;
Johannes Weiner29833312014-12-10 15:44:02 -08005870 if (!memcg)
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005871 return;
5872
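	/*
	 * Isolate @oldpage from the LRU (if it may be on one) while its
	 * memcg pointer is cleared: lruvec placement is derived from
	 * page->mem_cgroup.
	 */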
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005873 if (lrucare)
5874 lock_page_lru(oldpage, &isolated);
5875
Johannes Weiner1306a852014-12-10 15:44:52 -08005876 oldpage->mem_cgroup = NULL;
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005877
5878 if (lrucare)
5879 unlock_page_lru(oldpage, isolated);
5880
Johannes Weiner29833312014-12-10 15:44:02 -08005881 commit_charge(newpage, memcg, lrucare);
Johannes Weiner0a31bc92014-08-08 14:19:22 -07005882}
5883
Michal Hocko2d110852013-02-22 16:34:43 -08005884/*
Michal Hocko10813122013-02-22 16:35:41 -08005885 * subsys_initcall() for the memory controller.
5886 *
5887 * Some parts like hotcpu_notifier() have to be initialized from this context
5888 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
5889 * everything that doesn't depend on a specific mem_cgroup structure should
5890 * be initialized from here.
Michal Hocko2d110852013-02-22 16:34:43 -08005891 */
5892static int __init mem_cgroup_init(void)
5893{
Johannes Weiner95a045f2015-02-11 15:26:33 -08005894 int cpu, node;
5895
Michal Hocko2d110852013-02-22 16:34:43 -08005896 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
Johannes Weiner95a045f2015-02-11 15:26:33 -08005897
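	/* Set up the drain work for each CPU's cached charge stock. */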
5898 for_each_possible_cpu(cpu)
5899 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5900 drain_local_stock);
5901
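	/* Allocate and initialize the per-node, per-zone soft limit reclaim trees. */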
5902 for_each_node(node) {
5903 struct mem_cgroup_tree_per_node *rtpn;
5904 int zone;
5905
5906 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5907 node_online(node) ? node : NUMA_NO_NODE);
5908
5909 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5910 struct mem_cgroup_tree_per_zone *rtpz;
5911
5912 rtpz = &rtpn->rb_tree_per_zone[zone];
5913 rtpz->rb_root = RB_ROOT;
5914 spin_lock_init(&rtpz->lock);
5915 }
5916 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5917 }
5918
Michal Hocko2d110852013-02-22 16:34:43 -08005919 return 0;
5920}
5921subsys_initcall(mem_cgroup_init);
Johannes Weiner21afa382015-02-11 15:26:36 -08005922
5923#ifdef CONFIG_MEMCG_SWAP
5924/**
5925 * mem_cgroup_swapout - transfer a memsw charge to swap
5926 * @page: page whose memsw charge to transfer
5927 * @entry: swap entry to move the charge to
5928 *
5929 * Transfer the memsw charge of @page to @entry.
5930 */
5931void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5932{
5933 struct mem_cgroup *memcg;
5934 unsigned short oldid;
5935
5936 VM_BUG_ON_PAGE(PageLRU(page), page);
5937 VM_BUG_ON_PAGE(page_count(page), page);
5938
5939 if (!do_swap_account)
5940 return;
5941
5942 memcg = page->mem_cgroup;
5943
5944 /* Readahead page, never charged */
5945 if (!memcg)
5946 return;
5947
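	/*
	 * Record this memcg's id against the swap entry so that
	 * mem_cgroup_uncharge_swap() can find the owner later.
	 */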
5948 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5949 VM_BUG_ON_PAGE(oldid, page);
5950 mem_cgroup_swap_statistics(memcg, true);
5951
5952 page->mem_cgroup = NULL;
5953
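	/*
	 * Only the memory counter is uncharged here; the combined
	 * memory+swap (memsw) charge stays in place until the swap
	 * entry itself is freed.
	 */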
5954 if (!mem_cgroup_is_root(memcg))
5955 page_counter_uncharge(&memcg->memory, 1);
5956
5957 /* XXX: caller holds IRQ-safe mapping->tree_lock */
5958 VM_BUG_ON(!irqs_disabled());
5959
5960 mem_cgroup_charge_statistics(memcg, page, -1);
5961 memcg_check_events(memcg, page);
5962}
5963
5964/**
5965 * mem_cgroup_uncharge_swap - uncharge a swap entry
5966 * @entry: swap entry to uncharge
5967 *
5968 * Drop the memsw charge associated with @entry.
5969 */
5970void mem_cgroup_uncharge_swap(swp_entry_t entry)
5971{
5972 struct mem_cgroup *memcg;
5973 unsigned short id;
5974
5975 if (!do_swap_account)
5976 return;
5977
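	/* Clear the owner recorded at swapout time and fetch its id. */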
5978 id = swap_cgroup_record(entry, 0);
5979 rcu_read_lock();
Vladimir Davydovadbe4272015-04-15 16:13:00 -07005980 memcg = mem_cgroup_from_id(id);
Johannes Weiner21afa382015-02-11 15:26:36 -08005981 if (memcg) {
5982 if (!mem_cgroup_is_root(memcg))
5983 page_counter_uncharge(&memcg->memsw, 1);
5984 mem_cgroup_swap_statistics(memcg, false);
5985 css_put(&memcg->css);
5986 }
5987 rcu_read_unlock();
5988}
5989
5990/* for remember boot option*/
5991#ifdef CONFIG_MEMCG_SWAP_ENABLED
5992static int really_do_swap_account __initdata = 1;
5993#else
5994static int really_do_swap_account __initdata;
5995#endif
5996
5997static int __init enable_swap_account(char *s)
5998{
5999 if (!strcmp(s, "1"))
6000 really_do_swap_account = 1;
6001 else if (!strcmp(s, "0"))
6002 really_do_swap_account = 0;
6003 return 1;
6004}
6005__setup("swapaccount=", enable_swap_account);
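/*
 * Example (kernel command line): booting with "swapaccount=0" disables
 * swap accounting even when CONFIG_MEMCG_SWAP_ENABLED is set, while
 * "swapaccount=1" enables it when the build default is off.
 */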
6006
6007static struct cftype memsw_cgroup_files[] = {
6008 {
6009 .name = "memsw.usage_in_bytes",
6010 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6011 .read_u64 = mem_cgroup_read_u64,
6012 },
6013 {
6014 .name = "memsw.max_usage_in_bytes",
6015 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6016 .write = mem_cgroup_reset,
6017 .read_u64 = mem_cgroup_read_u64,
6018 },
6019 {
6020 .name = "memsw.limit_in_bytes",
6021 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6022 .write = mem_cgroup_write,
6023 .read_u64 = mem_cgroup_read_u64,
6024 },
6025 {
6026 .name = "memsw.failcnt",
6027 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6028 .write = mem_cgroup_reset,
6029 .read_u64 = mem_cgroup_read_u64,
6030 },
6031 { }, /* terminate */
6032};
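/*
 * Sketch of how these knobs appear to userspace on a legacy (cgroup v1)
 * hierarchy, assuming the memory controller is mounted at the typical
 * /sys/fs/cgroup/memory location:
 *
 *	echo 512M > /sys/fs/cgroup/memory/<group>/memory.memsw.limit_in_bytes
 *	cat /sys/fs/cgroup/memory/<group>/memory.memsw.usage_in_bytes
 */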
6033
6034static int __init mem_cgroup_swap_init(void)
6035{
6036 if (!mem_cgroup_disabled() && really_do_swap_account) {
6037 do_swap_account = 1;
6038 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
6039 memsw_cgroup_files));
6040 }
6041 return 0;
6042}
6043subsys_initcall(mem_cgroup_swap_init);
6044
6045#endif /* CONFIG_MEMCG_SWAP */