/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		(0)
#endif

/*
 * The per-memcg event counter is incremented at every pagein/pageout. This
 * counter is used to trigger some periodic events. This is straightforward
 * and better than using jiffies etc. to handle periodic memcg events.
 *
 * These values will be used as !((event) & ((1 <<(thresh)) - 1))
 */
#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
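/*
 * Worked example (editor's illustration, not from the original source): the
 * per-cpu MEM_CGROUP_EVENTS counter is advanced by
 * mem_cgroup_charge_statistics() and __memcg_event_check() evaluates
 * !((event) & ((1 << thresh) - 1)). With THRESHOLDS_EVENTS_THRESH == 7 the
 * low 7 bits are masked, so the check succeeds roughly once every 128
 * charge/uncharge events; with SOFTLIMIT_EVENTS_THRESH == 10, roughly once
 * every 1024 events.
 */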

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
	MEM_CGROUP_STAT_SWAPOUT,	/* # of pages, swapped out */
	MEM_CGROUP_STAT_DATA,		/* end of data requires synchronization */
	/* incremented at every pagein/pageout */
	MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA,
	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	struct list_head	lists[NR_LRU_LISTS];
	unsigned long		count[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
						/* use container_of	   */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};
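/*
 * Illustrative example (editor's note, not part of the original comment): if
 * thresholds are registered at 4M, 8M and 16M and current usage is 10M,
 * entries[] holds the thresholds sorted ascending and current_threshold is 1,
 * i.e. it indexes the 8M entry, the largest threshold not exceeding the usage.
 */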

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *mem);
static void mem_cgroup_oom_notify(struct mem_cgroup *mem);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * the counter to account for mem+swap usage.
	 */
	struct res_counter memsw;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	/*
	 * protect against reclaim related member.
	 */
	spinlock_t reclaim_param_lock;

	/*
	 * While reclaiming in a hierarchy, we cache the last child we
	 * reclaimed from.
	 */
	int last_scanned_child;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	atomic_t	oom_lock;
	atomic_t	refcnt;

	unsigned int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;
};

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON,
					&mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE,
					&mc.to->move_charge_at_immigrate);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* only for here (for easy reading.) */
#define PCGF_CACHE	(1UL << PCG_CACHE)
#define PCGF_USED	(1UL << PCG_USED)
#define PCGF_LOCK	(1UL << PCG_LOCK)
/* Not used, but added here for completeness */
#define PCGF_ACCT	(1UL << PCG_ACCT)

/* for encoding cft->private value on file */
#define _MEM			(0)
#define _MEMSWAP		(1)
#define _OOM_TYPE		(2)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
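/*
 * Example of the encoding above (editor's note): a control file's
 * cft->private can be set to MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), which
 * stores the counter type in the upper 16 bits and the res_counter attribute
 * in the lower 16 bits; the handler later recovers the two halves with
 * MEMFILE_TYPE() and MEMFILE_ATTR().
 */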

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
#define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
#define MEM_CGROUP_RECLAIM_SOFT	(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void drain_all_stock_async(void);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
	return &mem->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	if (!mem)
		return NULL;

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
						tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(mem, mz, mctz);
	spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used.
	 * because their event counter is not touched.
	 */
	for (; mem; mem = parent_mem_cgroup(mem)) {
		mz = mem_cgroup_zoneinfo(mem, nid, zid);
		excess = res_counter_soft_limit_excess(&mem->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mem, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node_state(node, N_POSSIBLE) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(mem, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(mem, mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now, but someone else can add it back;
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
		!css_tryget(&mz->mem->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter use a threshold and periodic
 * synchronization to implement "quick" reads; this trades precision for
 * lower read cost. We could implement the same kind of periodic
 * synchronization for the memcg counters.
 *
 * But this _read() function is currently used for the user interface.
 * Users account memory per memory cgroup and _always_ require an exact
 * value. Even with a quick-and-fuzzy read we would still have to visit
 * all online cpus and sum their counters, so for now the extra
 * synchronization is not implemented (it is only done for cpu hotplug).
 *
 * If kernel-internal users appear that can make use of a not-exact value,
 * and summing over all cpus becomes a performance bottleneck in some
 * common workload, a threshold-and-synchronization scheme like vmstat[]
 * should be implemented.
 */
static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 val = 0;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(mem->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&mem->pcp_counter_lock);
	val += mem->nocpu_base.count[idx];
	spin_unlock(&mem->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
{
	s64 ret;

	ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
	ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
	return ret;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
					 bool file, int nr_pages)
{
	preempt_disable();

	if (file)
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
	else
		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
	else {
		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);

	preempt_enable();
}

static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
					enum lru_list idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
{
	s64 val;

	val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);

	return !(val & ((1 << event_mask_shift) - 1));
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
		mem_cgroup_threshold(mem);
		if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
			mem_cgroup_update_tree(mem, page);
	}
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *mem = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner's may be being moved to other
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem))
			break;
	} while (!css_tryget(&mem->css));
	rcu_read_unlock();
	return mem;
}

/* The caller has to guarantee "mem" exists before calling this */
static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
{
	struct cgroup_subsys_state *css;
	int found;

	if (!mem) /* ROOT cgroup has the smallest ID */
		return root_mem_cgroup; /*css_put/get against root is ignored*/
	if (!mem->use_hierarchy) {
		if (css_tryget(&mem->css))
			return mem;
		return NULL;
	}
	rcu_read_lock();
	/*
	 * searching a memory cgroup which has the smallest ID under given
	 * ROOT cgroup. (ID >= 1)
	 */
	css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
	if (css && css_tryget(css))
		mem = container_of(css, struct mem_cgroup, css);
	else
		mem = NULL;
	rcu_read_unlock();
	return mem;
}

static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
					struct mem_cgroup *root,
					bool cond)
{
	int nextid = css_id(&iter->css) + 1;
	int found;
	int hierarchy_used;
	struct cgroup_subsys_state *css;

	hierarchy_used = iter->use_hierarchy;

	css_put(&iter->css);
	/* If no ROOT, walk all, ignore hierarchy */
	if (!cond || (root && !hierarchy_used))
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	do {
		iter = NULL;
		rcu_read_lock();

		css = css_get_next(&mem_cgroup_subsys, nextid,
				&root->css, &found);
		if (css && css_tryget(css))
			iter = container_of(css, struct mem_cgroup, css);
		rcu_read_unlock();
		/* If css is NULL, no more cgroups will be found */
		nextid = found + 1;
	} while (css && !iter);

	return iter;
}
/*
 * for_each_mem_cgroup_tree() visits all cgroups under the tree. Be careful:
 * breaking out of the loop is not allowed, because we hold a reference count
 * on the current iterator. Instead, set "cond" to false and "continue" to
 * exit the loop.
 */
#define for_each_mem_cgroup_tree_cond(iter, root, cond)	\
	for (iter = mem_cgroup_start_loop(root);\
	     iter != NULL;\
	     iter = mem_cgroup_get_next(iter, root, cond))

#define for_each_mem_cgroup_tree(iter, root) \
	for_each_mem_cgroup_tree_cond(iter, root, true)

#define for_each_mem_cgroup_all(iter) \
	for_each_mem_cgroup_tree_cond(iter, NULL, true)
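/*
 * Usage sketch (editor's illustration; should_stop() is a hypothetical
 * predicate): walk every memcg under "root" and stop early by flipping the
 * condition instead of using "break", so the css reference held by the
 * iterator is still dropped by mem_cgroup_get_next():
 *
 *	struct mem_cgroup *iter;
 *	bool cond = true;
 *
 *	for_each_mem_cgroup_tree_cond(iter, root, cond) {
 *		if (should_stop(iter))
 *			cond = false;	(do not "break" here)
 *	}
 */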


static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
{
	return (mem == root_mem_cgroup);
}

/*
 * Following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by routine of global LRU independently from memcg.
 * What we have to take care of here is validness of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happens when
 * 1. charge
 * 2. moving account
 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
 * It is added to LRU before charge.
 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
 * When moving account, the page is not on LRU. It's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache. */
	if (!TestClearPageCgroupAcctLRU(pc))
		return;
	VM_BUG_ON(!pc->mem_cgroup);
	/*
	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
	 * removed from global LRU.
	 */
	mz = page_cgroup_zoneinfo(pc);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	VM_BUG_ON(list_empty(&pc->lru));
	list_del_init(&pc->lru);
}

void mem_cgroup_del_lru(struct page *page)
{
	mem_cgroup_del_lru_list(page, page_lru(page));
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim. If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;
	enum lru_list lru = page_lru(page);

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc);
	list_move_tail(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return;

	pc = lookup_page_cgroup(page);
	/* unused or root page is not rotated. */
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	mz = page_cgroup_zoneinfo(pc);
	list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return;
	pc = lookup_page_cgroup(page);
	VM_BUG_ON(PageCgroupAcctLRU(pc));
	if (!PageCgroupUsed(pc))
		return;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc);
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	SetPageCgroupAcctLRU(pc);
	if (mem_cgroup_is_root(pc->mem_cgroup))
		return;
	list_add(&pc->lru, &mz->lists[lru]);
}

/*
 * While handling SwapCache, pc->mem_cgroup may be changed while it's linked to
 * the lru, because the page may be reused after it's fully uncharged (because
 * of SwapCache behavior). To handle that, unlink page_cgroup from the LRU when
 * we charge it again. This function is only used to charge SwapCache. It's
 * done under lock_page and expects that zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	spin_lock_irqsave(&zone->lru_lock, flags);
	/*
	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
	 * is guarded by lock_page() because the page is SwapCache.
	 */
	if (!PageCgroupUsed(pc))
		mem_cgroup_del_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
{
	unsigned long flags;
	struct zone *zone = page_zone(page);
	struct page_cgroup *pc = lookup_page_cgroup(page);

	spin_lock_irqsave(&zone->lru_lock, flags);
	/* link when the page is linked to LRU but page_cgroup isn't */
	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
		mem_cgroup_add_lru_list(page, page_lru(page));
	spin_unlock_irqrestore(&zone->lru_lock, flags);
}


void mem_cgroup_move_lists(struct page *page,
			   enum lru_list from, enum lru_list to)
{
	if (mem_cgroup_disabled())
		return;
	mem_cgroup_del_lru_list(page, from);
	mem_cgroup_add_lru_list(page, to);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;

	p = find_lock_task_mm(task);
	if (!p)
		return 0;
	curr = try_get_mem_cgroup_from_mm(p->mm);
	task_unlock(p);
	if (!curr)
		return 0;
	/*
	 * We should check use_hierarchy of "mem", not "curr". Checking
	 * use_hierarchy of "curr" here would make this function return true
	 * whenever hierarchy is enabled in "curr" and "curr" is a child of
	 * "mem" in the *cgroup* hierarchy (even if use_hierarchy is disabled
	 * in "mem").
	 */
	if (mem->use_hierarchy)
		ret = css_is_ancestor(&curr->css, &mem->css);
	else
		ret = (curr == mem);
	css_put(&curr->css);
	return ret;
}

static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long gb;
	unsigned long inactive_ratio;

	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	if (present_pages) {
		present_pages[0] = inactive;
		present_pages[1] = active;
	}

	return inactive_ratio;
}
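/*
 * Worked example (editor's illustration): for a memcg with about 10GB of
 * anonymous pages, gb == 10 and inactive_ratio == int_sqrt(100) == 10, so
 * mem_cgroup_inactive_anon_is_low() below reports the inactive anon list as
 * low while it holds less than roughly a tenth of the active list.
 */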

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long present_pages[2];
	unsigned long inactive_ratio;

	inactive_ratio = calc_inactive_ratio(memcg, present_pages);

	inactive = present_pages[0];
	active = present_pages[1];

	if (inactive * inactive_ratio < active)
		return 1;

	return 0;
}

int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
	unsigned long active;
	unsigned long inactive;

	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);

	return (active > inactive);
}

unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
				       struct zone *zone,
				       enum lru_list lru)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return MEM_CGROUP_ZSTAT(mz, lru);
}

struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_disabled())
		return NULL;

	pc = lookup_page_cgroup(page);
	if (!PageCgroupUsed(pc))
		return NULL;
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc);
	if (!mz)
		return NULL;

	return &mz->reclaim_stat;
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = zone_to_nid(z);
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;
	int lru = LRU_FILE * file + active;
	int ret;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];

	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;

		page = pc->page;
		if (unlikely(!PageCgroupUsed(pc)))
			continue;
		if (unlikely(!PageLRU(page)))
			continue;

		scan++;
		ret = __isolate_lru_page(page, mode, file);
		switch (ret) {
		case 0:
			list_move(&page->lru, dst);
			mem_cgroup_del_lru(page);
			nr_taken += hpage_nr_pages(page);
			break;
		case -EBUSY:
			/* we don't affect global LRU but rotate in our LRU */
			mem_cgroup_rotate_lru_list(page, page_lru(page));
			break;
		default:
			break;
		}
	}

	*scanned = scan;

	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
				      0, 0, 0, mode);

	return nr_taken;
}

#define mem_cgroup_from_res_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @mem: the memory cgroup
 *
 * Returns the maximum amount of memory @mem can be charged with, in
 * bytes.
 */
static unsigned long long mem_cgroup_margin(struct mem_cgroup *mem)
{
	unsigned long long margin;

	margin = res_counter_margin(&mem->res);
	if (do_swap_account)
		margin = min(margin, res_counter_margin(&mem->memsw));
	return margin;
}

static unsigned int get_swappiness(struct mem_cgroup *memcg)
{
	struct cgroup *cgrp = memcg->css.cgroup;
	unsigned int swappiness;

	/* root ? */
	if (cgrp->parent == NULL)
		return vm_swappiness;

	spin_lock(&memcg->reclaim_param_lock);
	swappiness = memcg->swappiness;
	spin_unlock(&memcg->reclaim_param_lock);

	return swappiness;
}

static void mem_cgroup_start_move(struct mem_cgroup *mem)
{
	int cpu;

	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();

	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *mem)
{
	int cpu;

	if (!mem)
		return;
	get_online_cpus();
	spin_lock(&mem->pcp_counter_lock);
	for_each_online_cpu(cpu)
		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
	spin_unlock(&mem->pcp_counter_lock);
	put_online_cpus();
}
/*
 * Two routines for checking whether "mem" is under move_account() or not.
 *
 * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This is
 *			  used for avoiding races in accounting. If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checks whether a cgroup is mc.from, mc.to, or
 *			  under the hierarchy of moving cgroups. This is for
 *			  waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stealed(struct mem_cgroup *mem)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *mem)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;
	if (from == mem || to == mem
	    || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
	    || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
		ret = true;
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(mem)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

/**
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct cgroup *task_cgrp;
	struct cgroup *mem_cgrp;
	/*
	 * Need a buffer in BSS, can't rely on allocations. The code relies
	 * on the assumption that OOM is serialized for memory controller.
	 * If this assumption is broken, revisit this code.
	 */
	static char memcg_name[PATH_MAX];
	int ret;

	if (!memcg || !p)
		return;


	rcu_read_lock();

	mem_cgrp = memcg->css.cgroup;
	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		/*
		 * Unfortunately, we are unable to convert to a useful name
		 * But we'll still print out the usage information
		 */
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	printk(KERN_INFO "Task in %s killed", memcg_name);

	rcu_read_lock();
	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
	if (ret < 0) {
		rcu_read_unlock();
		goto done;
	}
	rcu_read_unlock();

	/*
	 * Continues from above, so we don't need a KERN_ level
1296 */
1297 printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1298done:
1299
1300 printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1301 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1302 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1303 res_counter_read_u64(&memcg->res, RES_FAILCNT));
1304 printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1305 "failcnt %llu\n",
1306 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1307 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1308 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1309}
1310
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001311/*
1312 * This function returns the number of memcgs under the hierarchy tree. Returns
1313 * 1 (self count) if there are no children.
1314 */
1315static int mem_cgroup_count_children(struct mem_cgroup *mem)
1316{
1317 int num = 0;
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001318 struct mem_cgroup *iter;
1319
1320 for_each_mem_cgroup_tree(iter, mem)
1321 num++;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001322 return num;
1323}
1324
Balbir Singh6d61ef42009-01-07 18:08:06 -08001325/*
David Rientjesa63d83f2010-08-09 17:19:46 -07001326 * Return the memory (and swap, if configured) limit for a memcg.
1327 */
1328u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1329{
1330 u64 limit;
1331 u64 memsw;
1332
Johannes Weinerf3e8eb72011-01-13 15:47:39 -08001333 limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1334 limit += total_swap_pages << PAGE_SHIFT;
1335
David Rientjesa63d83f2010-08-09 17:19:46 -07001336 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1337 /*
1338 * If memsw is finite and limits the amount of swap space available
1339 * to this memcg, return that limit.
1340 */
1341 return min(limit, memsw);
1342}
1343
1344/*
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001345 * Visit the first child (need not be the first child as per the ordering
1346 * of the cgroup list, since we track last_scanned_child) of @mem and use
1347 * that to reclaim free pages from.
1348 */
1349static struct mem_cgroup *
1350mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1351{
1352 struct mem_cgroup *ret = NULL;
1353 struct cgroup_subsys_state *css;
1354 int nextid, found;
1355
1356 if (!root_mem->use_hierarchy) {
1357 css_get(&root_mem->css);
1358 ret = root_mem;
1359 }
1360
1361 while (!ret) {
1362 rcu_read_lock();
1363 nextid = root_mem->last_scanned_child + 1;
1364 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1365 &found);
1366 if (css && css_tryget(css))
1367 ret = container_of(css, struct mem_cgroup, css);
1368
1369 rcu_read_unlock();
1370 /* Updates scanning parameter */
1371 spin_lock(&root_mem->reclaim_param_lock);
1372 if (!css) {
1373 /* this means start scan from ID:1 */
1374 root_mem->last_scanned_child = 0;
1375 } else
1376 root_mem->last_scanned_child = found;
1377 spin_unlock(&root_mem->reclaim_param_lock);
1378 }
1379
1380 return ret;
1381}
1382
1383/*
1384 * Scan the hierarchy if needed to reclaim memory. We remember the last child
1385 * we reclaimed from, so that we don't end up penalizing one child extensively
1386 * based on its position in the children list.
Balbir Singh6d61ef42009-01-07 18:08:06 -08001387 *
1388 * root_mem is the original ancestor that we've been reclaiming from.
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001389 *
1390 * We give up and return to the caller when we visit root_mem twice.
1391 * (other groups can be removed while we're walking....)
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001392 *
1393 * If shrink==true, this returns immediately to avoid freeing too much.
Balbir Singh6d61ef42009-01-07 18:08:06 -08001394 */
1395static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
Balbir Singh4e416952009-09-23 15:56:39 -07001396 struct zone *zone,
Balbir Singh75822b42009-09-23 15:56:38 -07001397 gfp_t gfp_mask,
1398 unsigned long reclaim_options)
Balbir Singh6d61ef42009-01-07 18:08:06 -08001399{
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001400 struct mem_cgroup *victim;
1401 int ret, total = 0;
1402 int loop = 0;
Balbir Singh75822b42009-09-23 15:56:38 -07001403 bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1404 bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
Balbir Singh4e416952009-09-23 15:56:39 -07001405 bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001406 unsigned long excess;
1407
1408 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001409
KAMEZAWA Hiroyuki22a668d2009-06-17 16:27:19 -07001410	/* If memsw_is_minimum==1, swap-out is of no use. */
1411 if (root_mem->memsw_is_minimum)
1412 noswap = true;
1413
Balbir Singh4e416952009-09-23 15:56:39 -07001414 while (1) {
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001415 victim = mem_cgroup_select_victim(root_mem);
Balbir Singh4e416952009-09-23 15:56:39 -07001416 if (victim == root_mem) {
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001417 loop++;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001418 if (loop >= 1)
1419 drain_all_stock_async();
Balbir Singh4e416952009-09-23 15:56:39 -07001420 if (loop >= 2) {
1421 /*
1422 * If we have not been able to reclaim
1423				 * anything, it might be because there are
1424 * no reclaimable pages under this hierarchy
1425 */
1426 if (!check_soft || !total) {
1427 css_put(&victim->css);
1428 break;
1429 }
1430 /*
1431				 * We want to do more targeted reclaim.
1432				 * excess >> 2 is not so excessive that we reclaim
1433				 * too much, nor so small that we keep
1434 * coming back to reclaim from this cgroup
1435 */
1436 if (total >= (excess >> 2) ||
1437 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1438 css_put(&victim->css);
1439 break;
1440 }
1441 }
1442 }
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08001443 if (!mem_cgroup_local_usage(victim)) {
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001444 /* this cgroup's local usage == 0 */
1445 css_put(&victim->css);
Balbir Singh6d61ef42009-01-07 18:08:06 -08001446 continue;
1447 }
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001448 /* we use swappiness of local cgroup */
Balbir Singh4e416952009-09-23 15:56:39 -07001449 if (check_soft)
1450 ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
KOSAKI Motohiro14fec792010-08-10 18:03:05 -07001451 noswap, get_swappiness(victim), zone);
Balbir Singh4e416952009-09-23 15:56:39 -07001452 else
1453 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1454 noswap, get_swappiness(victim));
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001455 css_put(&victim->css);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07001456 /*
1457		 * When shrinking usage, we can't check whether we should stop here or
1458		 * reclaim more. It depends on the caller. last_scanned_child
1459		 * works well enough for keeping fairness under the tree.
1460 */
1461 if (shrink)
1462 return ret;
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001463 total += ret;
Balbir Singh4e416952009-09-23 15:56:39 -07001464 if (check_soft) {
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001465 if (!res_counter_soft_limit_excess(&root_mem->res))
Balbir Singh4e416952009-09-23 15:56:39 -07001466 return total;
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001467 } else if (mem_cgroup_margin(root_mem))
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001468 return 1 + total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001469 }
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07001470 return total;
Balbir Singh6d61ef42009-01-07 18:08:06 -08001471}
1472
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001473/*
1474 * Check whether the OOM killer is already running under our hierarchy.
1475 * If someone is running, return false.
1476 */
1477static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1478{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001479 int x, lock_count = 0;
1480 struct mem_cgroup *iter;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001481
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001482 for_each_mem_cgroup_tree(iter, mem) {
1483 x = atomic_inc_return(&iter->oom_lock);
1484 lock_count = max(x, lock_count);
1485 }
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001486
1487 if (lock_count == 1)
1488 return true;
1489 return false;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001490}
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001491
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001492static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001493{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001494 struct mem_cgroup *iter;
1495
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001496 /*
1497 * When a new child is created while the hierarchy is under oom,
1498 * mem_cgroup_oom_lock() may not be called. We have to use
1499 * atomic_add_unless() here.
1500 */
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07001501 for_each_mem_cgroup_tree(iter, mem)
1502 atomic_add_unless(&iter->oom_lock, -1, 0);
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001503 return 0;
1504}
1505
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001506
1507static DEFINE_MUTEX(memcg_oom_mutex);
1508static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1509
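/*
 * Tasks blocked on a memcg OOM queue an oom_wait_info on memcg_oom_waitq.
 * The wake function below filters wake-ups so that only waiters whose memcg
 * matches, or shares a hierarchy with, the waking memcg are woken.
 */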
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001510struct oom_wait_info {
1511 struct mem_cgroup *mem;
1512 wait_queue_t wait;
1513};
1514
1515static int memcg_oom_wake_function(wait_queue_t *wait,
1516 unsigned mode, int sync, void *arg)
1517{
1518 struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1519 struct oom_wait_info *oom_wait_info;
1520
1521 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1522
1523 if (oom_wait_info->mem == wake_mem)
1524 goto wakeup;
1525 /* if no hierarchy, no match */
1526 if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1527 return 0;
1528 /*
1529 * Both of oom_wait_info->mem and wake_mem are stable under us.
1530 * Then we can use css_is_ancestor without taking care of RCU.
1531 */
1532 if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1533 !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1534 return 0;
1535
1536wakeup:
1537 return autoremove_wake_function(wait, mode, sync, arg);
1538}
1539
1540static void memcg_wakeup_oom(struct mem_cgroup *mem)
1541{
1542 /* for filtering, pass "mem" as argument. */
1543 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1544}
1545
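/*
 * Wake up OOM waiters for @mem, but only if an OOM is actually in progress
 * there (i.e. somebody still holds its oom_lock).
 */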
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001546static void memcg_oom_recover(struct mem_cgroup *mem)
1547{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07001548 if (mem && atomic_read(&mem->oom_lock))
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001549 memcg_wakeup_oom(mem);
1550}
1551
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001552/*
1553 * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1554 */
1555bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1556{
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001557 struct oom_wait_info owait;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001558 bool locked, need_to_kill;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001559
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001560 owait.mem = mem;
1561 owait.wait.flags = 0;
1562 owait.wait.func = memcg_oom_wake_function;
1563 owait.wait.private = current;
1564 INIT_LIST_HEAD(&owait.wait.task_list);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001565 need_to_kill = true;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001566 /* At first, try to OOM lock hierarchy under mem.*/
1567 mutex_lock(&memcg_oom_mutex);
1568 locked = mem_cgroup_oom_lock(mem);
1569 /*
1570 * Even if signal_pending(), we can't quit charge() loop without
1571 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1572	 * under OOM is always welcome, so use TASK_KILLABLE here.
1573 */
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001574 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1575 if (!locked || mem->oom_kill_disable)
1576 need_to_kill = false;
1577 if (locked)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07001578 mem_cgroup_oom_notify(mem);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001579 mutex_unlock(&memcg_oom_mutex);
1580
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001581 if (need_to_kill) {
1582 finish_wait(&memcg_oom_waitq, &owait.wait);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001583 mem_cgroup_out_of_memory(mem, mask);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07001584 } else {
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001585 schedule();
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001586 finish_wait(&memcg_oom_waitq, &owait.wait);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001587 }
1588 mutex_lock(&memcg_oom_mutex);
1589 mem_cgroup_oom_unlock(mem);
KAMEZAWA Hiroyukidc98df52010-05-26 14:42:36 -07001590 memcg_wakeup_oom(mem);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001591 mutex_unlock(&memcg_oom_mutex);
1592
1593 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1594 return false;
1595 /* Give chance to dying process */
1596 schedule_timeout(1);
1597 return true;
KAMEZAWA Hiroyuki0b7f5692009-04-02 16:57:38 -07001598}
1599
Balbir Singhd69b0422009-06-17 16:26:34 -07001600/*
1601 * Currently used to update mapped file statistics, but the routine can be
1602 * generalized to update other statistics as well.
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001603 *
1604 * Notes: Race condition
1605 *
1606 * We usually use page_cgroup_lock() for accessing page_cgroup member but
1607 * it tends to be costly. But considering some conditions, we doesn't need
1608 * to do so _always_.
1609 *
1610 * Considering "charge", lock_page_cgroup() is not required because all
1611 * file-stat operations happen after a page is attached to radix-tree. There
1612 * are no race with "charge".
1613 *
1614 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
1615 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
1616 * if there are race with "uncharge". Statistics itself is properly handled
1617 * by flags.
1618 *
1619 * Considering "move", this is an only case we see a race. To make the race
1620 * small, we check MEM_CGROUP_ON_MOVE percpu value and detect there are
1621 * possibility of race condition. If there is, we take a lock.
Balbir Singhd69b0422009-06-17 16:26:34 -07001622 */
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07001623
Greg Thelen2a7106f2011-01-13 15:47:37 -08001624void mem_cgroup_update_page_stat(struct page *page,
1625 enum mem_cgroup_page_stat_item idx, int val)
Balbir Singhd69b0422009-06-17 16:26:34 -07001626{
1627 struct mem_cgroup *mem;
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001628 struct page_cgroup *pc = lookup_page_cgroup(page);
1629 bool need_unlock = false;
KAMEZAWA Hiroyukidbd4ea72011-01-13 15:47:38 -08001630 unsigned long uninitialized_var(flags);
Balbir Singhd69b0422009-06-17 16:26:34 -07001631
Balbir Singhd69b0422009-06-17 16:26:34 -07001632 if (unlikely(!pc))
1633 return;
1634
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001635 rcu_read_lock();
Balbir Singhd69b0422009-06-17 16:26:34 -07001636 mem = pc->mem_cgroup;
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001637 if (unlikely(!mem || !PageCgroupUsed(pc)))
1638 goto out;
1639 /* pc->mem_cgroup is unstable ? */
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08001640 if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001641 /* take a lock against to access pc->mem_cgroup */
KAMEZAWA Hiroyukidbd4ea72011-01-13 15:47:38 -08001642 move_lock_page_cgroup(pc, &flags);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001643 need_unlock = true;
1644 mem = pc->mem_cgroup;
1645 if (!mem || !PageCgroupUsed(pc))
1646 goto out;
1647 }
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07001648
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07001649 switch (idx) {
Greg Thelen2a7106f2011-01-13 15:47:37 -08001650 case MEMCG_NR_FILE_MAPPED:
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07001651 if (val > 0)
1652 SetPageCgroupFileMapped(pc);
1653 else if (!page_mapped(page))
KAMEZAWA Hiroyuki0c270f82010-10-27 15:33:39 -07001654 ClearPageCgroupFileMapped(pc);
Greg Thelen2a7106f2011-01-13 15:47:37 -08001655 idx = MEM_CGROUP_STAT_FILE_MAPPED;
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07001656 break;
1657 default:
1658 BUG();
KAMEZAWA Hiroyuki8725d542010-04-06 14:35:05 -07001659 }
Balbir Singhd69b0422009-06-17 16:26:34 -07001660
Greg Thelen2a7106f2011-01-13 15:47:37 -08001661 this_cpu_add(mem->stat->count[idx], val);
1662
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001663out:
1664 if (unlikely(need_unlock))
KAMEZAWA Hiroyukidbd4ea72011-01-13 15:47:38 -08001665 move_unlock_page_cgroup(pc, &flags);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07001666 rcu_read_unlock();
1667 return;
Balbir Singhd69b0422009-06-17 16:26:34 -07001668}
Greg Thelen2a7106f2011-01-13 15:47:37 -08001669EXPORT_SYMBOL(mem_cgroup_update_page_stat);
KAMEZAWA Hiroyuki26174ef2010-10-27 15:33:43 -07001670
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001671/*
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001672 * size of first charge trial. "32" comes from vmscan.c's magic value.
1673 * TODO: it may be necessary to use bigger numbers on big iron.
1674 */
1675#define CHARGE_SIZE (32 * PAGE_SIZE)
1676struct memcg_stock_pcp {
1677 struct mem_cgroup *cached; /* this never be root cgroup */
1678 int charge;
1679 struct work_struct work;
1680};
1681static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1682static atomic_t memcg_drain_count;
1683
1684/*
1685 * Try to consume stocked charge on this cpu. If successful, PAGE_SIZE is consumed
1686 * from the local stock and true is returned. If the stock is 0 or holds charges from a
1687 * cgroup which is not the current target, false is returned. This stock will be
1688 * refilled.
1689 */
1690static bool consume_stock(struct mem_cgroup *mem)
1691{
1692 struct memcg_stock_pcp *stock;
1693 bool ret = true;
1694
1695 stock = &get_cpu_var(memcg_stock);
1696 if (mem == stock->cached && stock->charge)
1697 stock->charge -= PAGE_SIZE;
1698 else /* need to call res_counter_charge */
1699 ret = false;
1700 put_cpu_var(memcg_stock);
1701 return ret;
1702}
1703
1704/*
1705 * Return the charges cached in this percpu stock to the res_counter and reset the cached information.
1706 */
1707static void drain_stock(struct memcg_stock_pcp *stock)
1708{
1709 struct mem_cgroup *old = stock->cached;
1710
1711 if (stock->charge) {
1712 res_counter_uncharge(&old->res, stock->charge);
1713 if (do_swap_account)
1714 res_counter_uncharge(&old->memsw, stock->charge);
1715 }
1716 stock->cached = NULL;
1717 stock->charge = 0;
1718}
1719
1720/*
1721 * This must be called with preemption disabled, or by
1722 * a thread which is pinned to the local cpu.
1723 */
1724static void drain_local_stock(struct work_struct *dummy)
1725{
1726 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1727 drain_stock(stock);
1728}
1729
1730/*
1731 * Cache charges (val), taken from the res_counter, in the local per_cpu area.
Greg Thelen320cc512010-03-15 15:27:28 +01001732 * This will be consumed by consume_stock() function, later.
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001733 */
1734static void refill_stock(struct mem_cgroup *mem, int val)
1735{
1736 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1737
1738 if (stock->cached != mem) { /* reset if necessary */
1739 drain_stock(stock);
1740 stock->cached = mem;
1741 }
1742 stock->charge += val;
1743 put_cpu_var(memcg_stock);
1744}
1745
1746/*
1747 * Tries to drain stocked charges on other cpus. This function is asynchronous
1748 * and just puts a work item per cpu for draining locally on each cpu. The caller can
1749 * expect some charges will be back in the res_counter later but cannot wait for
1750 * it.
1751 */
1752static void drain_all_stock_async(void)
1753{
1754 int cpu;
1755	/* This function is for scheduling "drain" in an asynchronous way.
1756	 * The result of "drain" is not directly handled by callers. So,
1757	 * if someone is already calling drain, we don't have to call drain again.
1758	 * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will catch it if
1759	 * there is a race. We just do a loose check here.
1760 */
1761 if (atomic_read(&memcg_drain_count))
1762 return;
1763 /* Notify other cpus that system-wide "drain" is running */
1764 atomic_inc(&memcg_drain_count);
1765 get_online_cpus();
1766 for_each_online_cpu(cpu) {
1767 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1768 schedule_work_on(cpu, &stock->work);
1769 }
1770 put_online_cpus();
1771 atomic_dec(&memcg_drain_count);
1772 /* We don't wait for flush_work */
1773}
1774
1775/* This is a synchronous drain interface. */
1776static void drain_all_stock_sync(void)
1777{
1778 /* called when force_empty is called */
1779 atomic_inc(&memcg_drain_count);
1780 schedule_on_each_cpu(drain_local_stock);
1781 atomic_dec(&memcg_drain_count);
1782}
1783
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07001784/*
1785 * This function drains the percpu counter values from a DEAD cpu and
1786 * moves them to the local cpu. Note that this function can be preempted.
1787 */
1788static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
1789{
1790 int i;
1791
1792 spin_lock(&mem->pcp_counter_lock);
1793 for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
1794 s64 x = per_cpu(mem->stat->count[i], cpu);
1795
1796 per_cpu(mem->stat->count[i], cpu) = 0;
1797 mem->nocpu_base.count[i] += x;
1798 }
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07001799 /* need to clear ON_MOVE value, works as a kind of lock. */
1800 per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
1801 spin_unlock(&mem->pcp_counter_lock);
1802}
1803
1804static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
1805{
1806 int idx = MEM_CGROUP_ON_MOVE;
1807
1808 spin_lock(&mem->pcp_counter_lock);
1809 per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07001810 spin_unlock(&mem->pcp_counter_lock);
1811}
1812
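/*
 * CPU hotplug callback. On CPU_ONLINE, restore the percpu ON_MOVE value
 * from nocpu_base. On CPU_DEAD, fold the dead cpu's percpu statistics into
 * nocpu_base and return its stocked charges to the res_counter.
 */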
1813static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001814 unsigned long action,
1815 void *hcpu)
1816{
1817 int cpu = (unsigned long)hcpu;
1818 struct memcg_stock_pcp *stock;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07001819 struct mem_cgroup *iter;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001820
KAMEZAWA Hiroyuki1489eba2010-10-27 15:33:42 -07001821 if ((action == CPU_ONLINE)) {
1822 for_each_mem_cgroup_all(iter)
1823 synchronize_mem_cgroup_on_move(iter, cpu);
1824 return NOTIFY_OK;
1825 }
1826
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07001827	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001828 return NOTIFY_OK;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07001829
1830 for_each_mem_cgroup_all(iter)
1831 mem_cgroup_drain_pcp_counter(iter, cpu);
1832
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001833 stock = &per_cpu(memcg_stock, cpu);
1834 drain_stock(stock);
1835 return NOTIFY_OK;
1836}
1837
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001838
1839/* See __mem_cgroup_try_charge() for details */
1840enum {
1841 CHARGE_OK, /* success */
1842 CHARGE_RETRY, /* need to retry but retry is not bad */
1843 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
1844	CHARGE_WOULDBLOCK, /* GFP_WAIT wasn't set and not enough res. */
1845 CHARGE_OOM_DIE, /* the current is killed because of OOM */
1846};
1847
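/*
 * Try to charge @csize bytes to @mem's res counter (and memsw, when swap
 * accounting is enabled). On failure, reclaim from the hierarchy of the
 * counter that hit its limit and report back to the caller with one of
 * the CHARGE_* codes above.
 */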
1848static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
1849 int csize, bool oom_check)
1850{
1851 struct mem_cgroup *mem_over_limit;
1852 struct res_counter *fail_res;
1853 unsigned long flags = 0;
1854 int ret;
1855
1856 ret = res_counter_charge(&mem->res, csize, &fail_res);
1857
1858 if (likely(!ret)) {
1859 if (!do_swap_account)
1860 return CHARGE_OK;
1861 ret = res_counter_charge(&mem->memsw, csize, &fail_res);
1862 if (likely(!ret))
1863 return CHARGE_OK;
1864
KAMEZAWA Hiroyuki01c88e22011-01-25 15:07:27 -08001865 res_counter_uncharge(&mem->res, csize);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001866 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
1867 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1868 } else
1869 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
Johannes Weiner9221edb2011-02-01 15:52:42 -08001870 /*
1871 * csize can be either a huge page (HPAGE_SIZE), a batch of
1872 * regular pages (CHARGE_SIZE), or a single regular page
1873 * (PAGE_SIZE).
1874 *
1875 * Never reclaim on behalf of optional batching, retry with a
1876 * single page instead.
1877 */
1878 if (csize == CHARGE_SIZE)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001879 return CHARGE_RETRY;
1880
1881 if (!(gfp_mask & __GFP_WAIT))
1882 return CHARGE_WOULDBLOCK;
1883
1884 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
Johannes Weiner19942822011-02-01 15:52:43 -08001885 gfp_mask, flags);
Johannes Weiner9d11ea92011-03-23 16:42:21 -07001886 if (mem_cgroup_margin(mem_over_limit) >= csize)
Johannes Weiner19942822011-02-01 15:52:43 -08001887 return CHARGE_RETRY;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001888 /*
Johannes Weiner19942822011-02-01 15:52:43 -08001889 * Even though the limit is exceeded at this point, reclaim
1890 * may have been able to free some pages. Retry the charge
1891 * before killing the task.
1892 *
1893 * Only for regular pages, though: huge pages are rather
1894 * unlikely to succeed so close to the limit, and we fall back
1895 * to regular pages anyway in case of failure.
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001896 */
Johannes Weiner19942822011-02-01 15:52:43 -08001897 if (csize == PAGE_SIZE && ret)
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001898 return CHARGE_RETRY;
1899
1900 /*
1901 * At task move, charge accounts can be doubly counted. So, it's
1902 * better to wait until the end of task_move if something is going on.
1903 */
1904 if (mem_cgroup_wait_acct_move(mem_over_limit))
1905 return CHARGE_RETRY;
1906
1907	/* If we don't need to call the oom-killer at all, return immediately */
1908 if (!oom_check)
1909 return CHARGE_NOMEM;
1910 /* check OOM */
1911 if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
1912 return CHARGE_OOM_DIE;
1913
1914 return CHARGE_RETRY;
1915}
1916
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08001917/*
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001918 * Unlike exported interface, "oom" parameter is added. if oom==true,
1919 * oom-killer can be invoked.
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001920 */
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08001921static int __mem_cgroup_try_charge(struct mm_struct *mm,
Andrea Arcangeliec168512011-01-13 15:46:56 -08001922 gfp_t gfp_mask,
1923 struct mem_cgroup **memcg, bool oom,
1924 int page_size)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001925{
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001926 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1927 struct mem_cgroup *mem = NULL;
1928 int ret;
Andrea Arcangeliec168512011-01-13 15:46:56 -08001929 int csize = max(CHARGE_SIZE, (unsigned long) page_size);
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001930
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08001931 /*
1932 * Unlike gloval-vm's OOM-kill, we're not in memory shortage
1933 * in system level. So, allow to go ahead dying process in addition to
1934 * MEMDIE process.
1935 */
1936 if (unlikely(test_thread_flag(TIF_MEMDIE)
1937 || fatal_signal_pending(current)))
1938 goto bypass;
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08001939
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001940 /*
Hugh Dickins3be912772008-02-07 00:14:19 -08001941 * We always charge the cgroup the mm_struct belongs to.
1942 * The mm_struct's mem_cgroup changes on task migration if the
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001943 * thread group leader migrates. It's possible that mm is not
1944 * set, if so charge the init_mm (happens for pagecache usage).
1945 */
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07001946 if (!*memcg && !mm)
1947 goto bypass;
1948again:
1949 if (*memcg) { /* css should be a valid one */
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001950 mem = *memcg;
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07001951 VM_BUG_ON(css_is_removed(&mem->css));
1952 if (mem_cgroup_is_root(mem))
1953 goto done;
Andrea Arcangeliec168512011-01-13 15:46:56 -08001954 if (page_size == PAGE_SIZE && consume_stock(mem))
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07001955 goto done;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08001956 css_get(&mem->css);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001957 } else {
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07001958 struct task_struct *p;
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08001959
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07001960 rcu_read_lock();
1961 p = rcu_dereference(mm->owner);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07001962 /*
KAMEZAWA Hiroyukiebb76ce2010-12-29 14:07:11 -08001963 * Because we don't have task_lock(), "p" can exit.
1964		 * In that case, "mem" can point to root or p can be NULL due to a
1965		 * race with swapoff. Then, we have a small risk of mis-accounting.
1966		 * But that kind of mis-accounting by race always happens because
1967		 * we don't have cgroup_mutex(). Preventing it is overkill, so we
1968		 * allow that small race here.
1969		 * (*) swapoff et al. will charge against the mm_struct, not against
1970		 * the task_struct. So, mm->owner can be NULL.
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07001971 */
1972 mem = mem_cgroup_from_task(p);
KAMEZAWA Hiroyukiebb76ce2010-12-29 14:07:11 -08001973 if (!mem || mem_cgroup_is_root(mem)) {
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07001974 rcu_read_unlock();
1975 goto done;
1976 }
Andrea Arcangeliec168512011-01-13 15:46:56 -08001977 if (page_size == PAGE_SIZE && consume_stock(mem)) {
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07001978 /*
1979 * It seems dagerous to access memcg without css_get().
1980 * But considering how consume_stok works, it's not
1981 * necessary. If consume_stock success, some charges
1982 * from this memcg are cached on this cpu. So, we
1983 * don't need to call css_get()/css_tryget() before
1984 * calling consume_stock().
1985 */
1986 rcu_read_unlock();
1987 goto done;
1988 }
1989 /* after here, we may be blocked. we need to get refcnt */
1990 if (!css_tryget(&mem->css)) {
1991 rcu_read_unlock();
1992 goto again;
1993 }
1994 rcu_read_unlock();
1995 }
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08001996
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07001997 do {
1998 bool oom_check;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08001999
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002000 /* If killed, bypass charge */
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002001 if (fatal_signal_pending(current)) {
2002 css_put(&mem->css);
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002003 goto bypass;
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002004 }
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08002005
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002006 oom_check = false;
2007 if (oom && !nr_oom_retries) {
2008 oom_check = true;
2009 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
2010 }
Balbir Singh6d61ef42009-01-07 18:08:06 -08002011
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002012 ret = __mem_cgroup_do_charge(mem, gfp_mask, csize, oom_check);
2013
2014 switch (ret) {
2015 case CHARGE_OK:
2016 break;
2017 case CHARGE_RETRY: /* not in OOM situation but retry */
Andrea Arcangeliec168512011-01-13 15:46:56 -08002018 csize = page_size;
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002019 css_put(&mem->css);
2020 mem = NULL;
2021 goto again;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002022 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002023 css_put(&mem->css);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002024 goto nomem;
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002025 case CHARGE_NOMEM: /* OOM routine works */
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002026 if (!oom) {
2027 css_put(&mem->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002028 goto nomem;
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002029 }
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002030 /* If oom, we never return -ENOMEM */
2031 nr_oom_retries--;
2032 break;
2033 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002034 css_put(&mem->css);
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002035 goto bypass;
Balbir Singh66e17072008-02-07 00:13:56 -08002036 }
KAMEZAWA Hiroyuki4b534332010-08-10 18:02:57 -07002037 } while (ret != CHARGE_OK);
2038
Andrea Arcangeliec168512011-01-13 15:46:56 -08002039 if (csize > page_size)
2040 refill_stock(mem, csize - page_size);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002041 css_put(&mem->css);
Balbir Singh0c3e73e2009-09-23 15:56:42 -07002042done:
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002043 *memcg = mem;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002044 return 0;
2045nomem:
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002046 *memcg = NULL;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002047 return -ENOMEM;
KAMEZAWA Hiroyuki867578c2010-03-10 15:22:39 -08002048bypass:
2049 *memcg = NULL;
2050 return 0;
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002051}
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002052
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002053/*
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002054 * Sometimes we have to undo a charge we got by try_charge().
2055 * This function is for that: it does the uncharge and puts the css refcnt
2056 * gotten by try_charge().
2057 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002058static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
2059 unsigned long count)
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002060{
2061 if (!mem_cgroup_is_root(mem)) {
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002062 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002063 if (do_swap_account)
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002064 res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002065 }
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002066}
2067
Andrea Arcangeliec168512011-01-13 15:46:56 -08002068static void mem_cgroup_cancel_charge(struct mem_cgroup *mem,
2069 int page_size)
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002070{
Andrea Arcangeliec168512011-01-13 15:46:56 -08002071 __mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT);
Daisuke Nishimuraa3032a22009-12-15 16:47:10 -08002072}
2073
2074/*
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002075 * A helper function to get a mem_cgroup from an ID. Must be called under
2076 * rcu_read_lock(). The caller must check css_is_removed() or the like if
2077 * that is a concern. (Dropping a refcnt from swap can be called against a
2078 * removed memcg.)
2079 */
2080static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2081{
2082 struct cgroup_subsys_state *css;
2083
2084 /* ID 0 is unused ID */
2085 if (!id)
2086 return NULL;
2087 css = css_lookup(&mem_cgroup_subsys, id);
2088 if (!css)
2089 return NULL;
2090 return container_of(css, struct mem_cgroup, css);
2091}
2092
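/*
 * Look up the mem_cgroup a page is charged to, either via its page_cgroup
 * or, for swapcache, via the swap cgroup record. A css reference is taken
 * on the returned memcg; returns NULL if none could be pinned.
 */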
Wu Fengguange42d9d52009-12-16 12:19:59 +01002093struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002094{
Wu Fengguange42d9d52009-12-16 12:19:59 +01002095 struct mem_cgroup *mem = NULL;
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002096 struct page_cgroup *pc;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002097 unsigned short id;
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002098 swp_entry_t ent;
2099
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002100 VM_BUG_ON(!PageLocked(page));
2101
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002102 pc = lookup_page_cgroup(page);
Daisuke Nishimurac0bd3f632009-04-30 15:08:11 -07002103 lock_page_cgroup(pc);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002104 if (PageCgroupUsed(pc)) {
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002105 mem = pc->mem_cgroup;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002106 if (mem && !css_tryget(&mem->css))
2107 mem = NULL;
Wu Fengguange42d9d52009-12-16 12:19:59 +01002108 } else if (PageSwapCache(page)) {
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002109 ent.val = page_private(page);
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002110 id = lookup_swap_cgroup(ent);
2111 rcu_read_lock();
2112 mem = mem_cgroup_lookup(id);
2113 if (mem && !css_tryget(&mem->css))
2114 mem = NULL;
2115 rcu_read_unlock();
Daisuke Nishimura3c776e62009-04-02 16:57:43 -07002116 }
Daisuke Nishimurac0bd3f632009-04-30 15:08:11 -07002117 unlock_page_cgroup(pc);
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002118 return mem;
2119}
2120
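/*
 * Commit a charge obtained by try_charge() to the page: set pc->mem_cgroup,
 * mark the page_cgroup used (and cached for page cache/shmem), update the
 * charge statistics and check the per-memcg event counters.
 */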
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002121static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
2122 struct page_cgroup *pc,
2123 enum charge_type ctype,
2124 int page_size)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002125{
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002126 int nr_pages = page_size >> PAGE_SHIFT;
2127
2128 /* try_charge() can return NULL to *memcg, taking care of it. */
2129 if (!mem)
2130 return;
2131
2132 lock_page_cgroup(pc);
2133 if (unlikely(PageCgroupUsed(pc))) {
2134 unlock_page_cgroup(pc);
2135 mem_cgroup_cancel_charge(mem, page_size);
2136 return;
2137 }
2138 /*
2139	 * we don't need page_cgroup_lock for tail pages, because they are not
2140 * accessed by any other context at this point.
2141 */
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002142 pc->mem_cgroup = mem;
KAMEZAWA Hiroyuki261fb612009-09-23 15:56:33 -07002143 /*
2144 * We access a page_cgroup asynchronously without lock_page_cgroup().
2145 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2146 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2147 * before USED bit, we need memory barrier here.
2148 * See mem_cgroup_add_lru_list(), etc.
2149 */
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002150 smp_wmb();
Balbir Singh4b3bde42009-09-23 15:56:32 -07002151 switch (ctype) {
2152 case MEM_CGROUP_CHARGE_TYPE_CACHE:
2153 case MEM_CGROUP_CHARGE_TYPE_SHMEM:
2154 SetPageCgroupCache(pc);
2155 SetPageCgroupUsed(pc);
2156 break;
2157 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2158 ClearPageCgroupCache(pc);
2159 SetPageCgroupUsed(pc);
2160 break;
2161 default:
2162 break;
2163 }
Hugh Dickins3be912772008-02-07 00:14:19 -08002164
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002165 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002166 unlock_page_cgroup(pc);
KAMEZAWA Hiroyuki430e48632010-03-10 15:22:30 -08002167 /*
2168 * "charge_statistics" updated event counter. Then, check it.
2169 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
2170 * if they exceeds softlimit.
2171 */
KAMEZAWA Hiroyukid2265e62010-03-10 15:22:31 -08002172 memcg_check_events(mem, pc->page);
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002173}
2174
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002175#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2176
2177#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
2178 (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
2179/*
2180 * Because tail pages are not marked as "used", set it. We're under
2181 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2182 */
2183void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
2184{
2185 struct page_cgroup *head_pc = lookup_page_cgroup(head);
2186 struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
2187 unsigned long flags;
2188
KAMEZAWA Hiroyuki3d37c4a2011-01-25 15:07:28 -08002189 if (mem_cgroup_disabled())
2190 return;
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002191 /*
KAMEZAWA Hiroyukiece35ca2011-01-20 14:44:24 -08002192 * We have no races with charge/uncharge but will have races with
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002193 * page state accounting.
2194 */
2195 move_lock_page_cgroup(head_pc, &flags);
2196
2197 tail_pc->mem_cgroup = head_pc->mem_cgroup;
2198 smp_wmb(); /* see __commit_charge() */
KAMEZAWA Hiroyukiece35ca2011-01-20 14:44:24 -08002199 if (PageCgroupAcctLRU(head_pc)) {
2200 enum lru_list lru;
2201 struct mem_cgroup_per_zone *mz;
2202
2203 /*
2204 * LRU flags cannot be copied because we need to add tail
2205		 * page to the LRU by a generic call and our hook will be called.
2206		 * We hold lru_lock, so reduce the counter directly.
2207 */
2208 lru = page_lru(head);
2209 mz = page_cgroup_zoneinfo(head_pc);
2210 MEM_CGROUP_ZSTAT(mz, lru) -= 1;
2211 }
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002212 tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
2213 move_unlock_page_cgroup(head_pc, &flags);
2214}
2215#endif
2216
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002217/**
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002218 * __mem_cgroup_move_account - move account of the page
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002219 * @pc: page_cgroup of the page.
2220 * @from: mem_cgroup which the page is moved from.
2221 * @to: mem_cgroup which the page is moved to. @from != @to.
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002222 * @uncharge: whether we should call uncharge and css_put against @from.
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002223 *
2224 * The caller must confirm following.
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002225 * - page is not on LRU (isolate_page() is useful.)
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002226 * - the pc is locked, used, and ->mem_cgroup points to @from.
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002227 *
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002228 * This function doesn't do "charge" nor css_get to the new cgroup. That should be
2229 * done by the caller (__mem_cgroup_try_charge would be useful). If @uncharge is
2230 * true, this function does "uncharge" from the old cgroup, but it doesn't if
2231 * @uncharge is false, so the caller should do the "uncharge".
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002232 */
2233
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002234static void __mem_cgroup_move_account(struct page_cgroup *pc,
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002235 struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge,
2236 int charge_size)
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002237{
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002238 int nr_pages = charge_size >> PAGE_SHIFT;
2239
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002240 VM_BUG_ON(from == to);
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002241 VM_BUG_ON(PageLRU(pc->page));
Kirill A. Shutemov112bc2e2010-11-24 12:56:58 -08002242 VM_BUG_ON(!page_is_cgroup_locked(pc));
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002243 VM_BUG_ON(!PageCgroupUsed(pc));
2244 VM_BUG_ON(pc->mem_cgroup != from);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002245
KAMEZAWA Hiroyuki8725d542010-04-06 14:35:05 -07002246 if (PageCgroupFileMapped(pc)) {
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08002247 /* Update mapped_file data for mem_cgroup */
2248 preempt_disable();
2249 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2250 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2251 preempt_enable();
Balbir Singhd69b0422009-06-17 16:26:34 -07002252 }
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002253 mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002254 if (uncharge)
2255 /* This is not "cancel", but cancel_charge does all we need. */
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002256 mem_cgroup_cancel_charge(from, charge_size);
Balbir Singhd69b0422009-06-17 16:26:34 -07002257
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002258 /* caller should have done css_get */
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002259 pc->mem_cgroup = to;
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002260 mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07002261 /*
2262	 * We charge against "to", which may not have any tasks. Then, "to"
2263	 * can be under rmdir(). But in the current implementation, the callers of
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08002264	 * this function are just force_empty() and move charge, so it's
2265	 * guaranteed that "to" is never removed. So, we don't check the rmdir
2266	 * status here.
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07002267 */
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002268}
2269
2270/*
2271 * check whether the @pc is valid for moving account and call
2272 * __mem_cgroup_move_account()
2273 */
2274static int mem_cgroup_move_account(struct page_cgroup *pc,
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002275 struct mem_cgroup *from, struct mem_cgroup *to,
2276 bool uncharge, int charge_size)
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002277{
2278 int ret = -EINVAL;
KAMEZAWA Hiroyukidbd4ea72011-01-13 15:47:38 -08002279 unsigned long flags;
KAMEZAWA Hiroyuki52dbb902011-01-25 15:07:29 -08002280 /*
2281	 * The page is isolated from the LRU. So, the collapse function
2282 * will not handle this page. But page splitting can happen.
2283 * Do this check under compound_page_lock(). The caller should
2284 * hold it.
2285 */
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002286 if ((charge_size > PAGE_SIZE) && !PageTransHuge(pc->page))
2287 return -EBUSY;
2288
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002289 lock_page_cgroup(pc);
2290 if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
KAMEZAWA Hiroyukidbd4ea72011-01-13 15:47:38 -08002291 move_lock_page_cgroup(pc, &flags);
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002292 __mem_cgroup_move_account(pc, from, to, uncharge, charge_size);
KAMEZAWA Hiroyukidbd4ea72011-01-13 15:47:38 -08002293 move_unlock_page_cgroup(pc, &flags);
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002294 ret = 0;
2295 }
2296 unlock_page_cgroup(pc);
KAMEZAWA Hiroyukid2265e62010-03-10 15:22:31 -08002297 /*
2298 * check events
2299 */
2300 memcg_check_events(to, pc->page);
2301 memcg_check_events(from, pc->page);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002302 return ret;
2303}
2304
2305/*
2306 * move charges to its parent.
2307 */
2308
2309static int mem_cgroup_move_parent(struct page_cgroup *pc,
2310 struct mem_cgroup *child,
2311 gfp_t gfp_mask)
2312{
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002313 struct page *page = pc->page;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002314 struct cgroup *cg = child->css.cgroup;
2315 struct cgroup *pcg = cg->parent;
2316 struct mem_cgroup *parent;
KAMEZAWA Hiroyuki52dbb902011-01-25 15:07:29 -08002317 int page_size = PAGE_SIZE;
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002318 unsigned long flags;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002319 int ret;
2320
2321 /* Is ROOT ? */
2322 if (!pcg)
2323 return -EINVAL;
2324
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002325 ret = -EBUSY;
2326 if (!get_page_unless_zero(page))
2327 goto out;
2328 if (isolate_lru_page(page))
2329 goto put;
KAMEZAWA Hiroyuki52dbb902011-01-25 15:07:29 -08002330
2331 if (PageTransHuge(page))
2332 page_size = HPAGE_SIZE;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002333
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002334 parent = mem_cgroup_from_cont(pcg);
KAMEZAWA Hiroyuki52dbb902011-01-25 15:07:29 -08002335 ret = __mem_cgroup_try_charge(NULL, gfp_mask,
2336 &parent, false, page_size);
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002337 if (ret || !parent)
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002338 goto put_back;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002339
KAMEZAWA Hiroyuki52dbb902011-01-25 15:07:29 -08002340 if (page_size > PAGE_SIZE)
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002341 flags = compound_lock_irqsave(page);
2342
KAMEZAWA Hiroyuki52dbb902011-01-25 15:07:29 -08002343 ret = mem_cgroup_move_account(pc, child, parent, true, page_size);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08002344 if (ret)
KAMEZAWA Hiroyuki52dbb902011-01-25 15:07:29 -08002345 mem_cgroup_cancel_charge(parent, page_size);
Jesper Juhl8dba4742011-01-25 15:07:24 -08002346
KAMEZAWA Hiroyuki52dbb902011-01-25 15:07:29 -08002347 if (page_size > PAGE_SIZE)
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08002348 compound_unlock_irqrestore(page, flags);
Jesper Juhl8dba4742011-01-25 15:07:24 -08002349put_back:
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08002350 putback_lru_page(page);
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002351put:
Daisuke Nishimura40d58132009-01-15 13:51:12 -08002352 put_page(page);
Daisuke Nishimura57f9fd7d2009-12-15 16:47:11 -08002353out:
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08002354 return ret;
2355}
2356
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002357/*
2358 * Charge the memory controller for page usage.
2359 * Return
2360 * 0 if the charge was successful
2361 * < 0 if the cgroup is over its limit
2362 */
2363static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002364 gfp_t gfp_mask, enum charge_type ctype)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002365{
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002366 struct mem_cgroup *mem = NULL;
Andrea Arcangeliec168512011-01-13 15:46:56 -08002367 int page_size = PAGE_SIZE;
Johannes Weiner8493ae42011-02-01 15:52:44 -08002368 struct page_cgroup *pc;
2369 bool oom = true;
2370 int ret;
Andrea Arcangeliec168512011-01-13 15:46:56 -08002371
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08002372 if (PageTransHuge(page)) {
Andrea Arcangeliec168512011-01-13 15:46:56 -08002373 page_size <<= compound_order(page);
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08002374 VM_BUG_ON(!PageTransHuge(page));
Johannes Weiner8493ae42011-02-01 15:52:44 -08002375 /*
2376 * Never OOM-kill a process for a huge page. The
2377 * fault handler will fall back to regular pages.
2378 */
2379 oom = false;
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08002380 }
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002381
2382 pc = lookup_page_cgroup(page);
2383 /* can happen at boot */
2384 if (unlikely(!pc))
2385 return 0;
2386 prefetchw(pc);
2387
Johannes Weiner8493ae42011-02-01 15:52:44 -08002388 ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size);
KAMEZAWA Hiroyukia636b322009-01-07 18:08:08 -08002389 if (ret || !mem)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002390 return ret;
2391
Andrea Arcangeliec168512011-01-13 15:46:56 -08002392 __mem_cgroup_commit_charge(mem, pc, ctype, page_size);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002393 return 0;
2394}
2395
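/*
 * Charge a newly allocated anonymous page. Pages that are already mapped,
 * or that belong to the page cache (non-anon ->mapping), are not charged
 * here.
 */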
2396int mem_cgroup_newpage_charge(struct page *page,
2397 struct mm_struct *mm, gfp_t gfp_mask)
KAMEZAWA Hiroyuki217bc312008-02-07 00:14:17 -08002398{
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08002399 if (mem_cgroup_disabled())
Li Zefancede86a2008-07-25 01:47:18 -07002400 return 0;
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07002401 /*
2402 * If already mapped, we don't have to account.
2403 * If page cache, page->mapping has address_space.
2404 * But page->mapping may have out-of-use anon_vma pointer,
2405	 * detect it by a PageAnon() check. A newly-mapped-anon's page->mapping
2406 * is NULL.
2407 */
2408 if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2409 return 0;
2410 if (unlikely(!mm))
2411 mm = &init_mm;
KAMEZAWA Hiroyuki217bc312008-02-07 00:14:17 -08002412 return mem_cgroup_charge_common(page, mm, gfp_mask,
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002413 MEM_CGROUP_CHARGE_TYPE_MAPPED);
KAMEZAWA Hiroyuki217bc312008-02-07 00:14:17 -08002414}
2415
Daisuke Nishimura83aae4c72009-04-02 16:57:48 -07002416static void
2417__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2418 enum charge_type ctype);
2419
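/*
 * Charge a page cache page. Shmem pages that are still in swapcache are
 * charged through the swap-in try/commit path instead.
 */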
Balbir Singhe1a1cd52008-02-07 00:14:02 -08002420int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2421 gfp_t gfp_mask)
Balbir Singh8697d332008-02-07 00:13:59 -08002422{
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002423 int ret;
2424
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08002425 if (mem_cgroup_disabled())
Li Zefancede86a2008-07-25 01:47:18 -07002426 return 0;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002427 if (PageCompound(page))
2428 return 0;
KAMEZAWA Hiroyukiaccf1632008-07-25 01:47:17 -07002429 /*
2430	 * Corner case handling. This is usually called from add_to_page_cache().
2431	 * But some FS (shmem) precharges this page before calling it
2432	 * and calls add_to_page_cache() with GFP_NOWAIT.
2433	 *
2434	 * For the GFP_NOWAIT case, the page may be pre-charged before calling
2435	 * add_to_page_cache(). (See shmem.c) Check it here and avoid charging
2436	 * twice. (It works but has to pay a bit larger cost.)
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002437 * And when the page is SwapCache, it should take swap information
2438 * into account. This is under lock_page() now.
KAMEZAWA Hiroyukiaccf1632008-07-25 01:47:17 -07002439 */
2440 if (!(gfp_mask & __GFP_WAIT)) {
2441 struct page_cgroup *pc;
2442
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002443 pc = lookup_page_cgroup(page);
2444 if (!pc)
2445 return 0;
2446 lock_page_cgroup(pc);
2447 if (PageCgroupUsed(pc)) {
2448 unlock_page_cgroup(pc);
KAMEZAWA Hiroyukiaccf1632008-07-25 01:47:17 -07002449 return 0;
2450 }
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002451 unlock_page_cgroup(pc);
KAMEZAWA Hiroyukiaccf1632008-07-25 01:47:17 -07002452 }
2453
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002454 if (unlikely(!mm))
Balbir Singh8697d332008-02-07 00:13:59 -08002455 mm = &init_mm;
KAMEZAWA Hiroyukiaccf1632008-07-25 01:47:17 -07002456
KAMEZAWA Hiroyukic05555b2008-10-18 20:28:11 -07002457 if (page_is_file_cache(page))
2458 return mem_cgroup_charge_common(page, mm, gfp_mask,
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002459 MEM_CGROUP_CHARGE_TYPE_CACHE);
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002460
Daisuke Nishimura83aae4c72009-04-02 16:57:48 -07002461 /* shmem */
2462 if (PageSwapCache(page)) {
KAMEZAWA Hiroyuki56039ef2011-03-23 16:42:19 -07002463 struct mem_cgroup *mem;
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002464
Daisuke Nishimura83aae4c72009-04-02 16:57:48 -07002465 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2466 if (!ret)
2467 __mem_cgroup_commit_charge_swapin(page, mem,
2468 MEM_CGROUP_CHARGE_TYPE_SHMEM);
2469 } else
2470 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
Daisuke Nishimura73045c42010-08-10 18:02:59 -07002471 MEM_CGROUP_CHARGE_TYPE_SHMEM);
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002472
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08002473 return ret;
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07002474}
2475
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002476/*
2477 * While swap-in, try_charge -> commit or cancel, the page is locked.
2478 * And when try_charge() successfully returns, one refcnt to memcg without
Uwe Kleine-König21ae2952009-10-07 15:21:09 +02002479 * struct page_cgroup is acquired. This refcnt will be consumed by
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002480 * "commit()" or removed by "cancel()"
2481 */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002482int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2483 struct page *page,
2484 gfp_t mask, struct mem_cgroup **ptr)
2485{
2486 struct mem_cgroup *mem;
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002487 int ret;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002488
KAMEZAWA Hiroyuki56039ef2011-03-23 16:42:19 -07002489 *ptr = NULL;
2490
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08002491 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002492 return 0;
2493
2494 if (!do_swap_account)
2495 goto charge_cur_mm;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002496 /*
2497 * A racing thread's fault, or swapoff, may have already updated
Hugh Dickins407f9c82009-12-14 17:59:30 -08002498 * the pte, and even removed page from swap cache: in those cases
2499 * do_swap_page()'s pte_same() test will fail; but there's also a
2500 * KSM case which does need to charge the page.
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002501 */
2502 if (!PageSwapCache(page))
Hugh Dickins407f9c82009-12-14 17:59:30 -08002503 goto charge_cur_mm;
Wu Fengguange42d9d52009-12-16 12:19:59 +01002504 mem = try_get_mem_cgroup_from_page(page);
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002505 if (!mem)
2506 goto charge_cur_mm;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002507 *ptr = mem;
Andrea Arcangeliec168512011-01-13 15:46:56 -08002508 ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, PAGE_SIZE);
KAMEZAWA Hiroyuki54595fe2009-01-07 18:08:33 -08002509 css_put(&mem->css);
2510 return ret;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002511charge_cur_mm:
2512 if (unlikely(!mm))
2513 mm = &init_mm;
Andrea Arcangeliec168512011-01-13 15:46:56 -08002514 return __mem_cgroup_try_charge(mm, mask, ptr, true, PAGE_SIZE);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002515}
2516
Daisuke Nishimura83aae4c72009-04-02 16:57:48 -07002517static void
2518__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2519 enum charge_type ctype)
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002520{
2521 struct page_cgroup *pc;
2522
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08002523 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002524 return;
2525 if (!ptr)
2526 return;
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07002527 cgroup_exclude_rmdir(&ptr->css);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002528 pc = lookup_page_cgroup(page);
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08002529 mem_cgroup_lru_del_before_commit_swapcache(page);
Andrea Arcangeliec168512011-01-13 15:46:56 -08002530 __mem_cgroup_commit_charge(ptr, pc, ctype, PAGE_SIZE);
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08002531 mem_cgroup_lru_add_after_commit_swapcache(page);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002532 /*
2533	 * Now the swap entry is backed by memory. This means the page may be
2534	 * counted both as mem and swap, i.e. double-counted.
KAMEZAWA Hiroyuki03f3c432009-01-07 18:08:31 -08002535	 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2536	 * under lock_page(). But reuse_swap_page() in do_swap_page() (memory.c)
2537	 * may call delete_from_swap_cache() before we reach here.
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002538 */
KAMEZAWA Hiroyuki03f3c432009-01-07 18:08:31 -08002539 if (do_swap_account && PageSwapCache(page)) {
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002540 swp_entry_t ent = {.val = page_private(page)};
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002541 unsigned short id;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002542 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002543
2544 id = swap_cgroup_record(ent, 0);
2545 rcu_read_lock();
2546 memcg = mem_cgroup_lookup(id);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002547 if (memcg) {
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002548 /*
2549	 * The recorded memcg may already be obsolete, so avoid
2550	 * calling css_tryget().
2551 */
Balbir Singh0c3e73e2009-09-23 15:56:42 -07002552 if (!mem_cgroup_is_root(memcg))
KAMEZAWA Hiroyuki4e649152009-10-01 15:44:11 -07002553 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
Balbir Singh0c3e73e2009-09-23 15:56:42 -07002554 mem_cgroup_swap_statistics(memcg, false);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002555 mem_cgroup_put(memcg);
2556 }
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002557 rcu_read_unlock();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002558 }
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07002559 /*
2560	 * At swapin, we may charge a cgroup which has no tasks.
2561	 * So, rmdir()->pre_destroy() can be called while we perform this charge.
2562	 * In that case, pre_destroy() needs to be called again; check it here.
2563 */
2564 cgroup_release_and_wakeup_rmdir(&ptr->css);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002565}
2566
Daisuke Nishimura83aae4c72009-04-02 16:57:48 -07002567void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2568{
2569 __mem_cgroup_commit_charge_swapin(page, ptr,
2570 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2571}
2572
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002573void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2574{
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08002575 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002576 return;
2577 if (!mem)
2578 return;
Andrea Arcangeliec168512011-01-13 15:46:56 -08002579 mem_cgroup_cancel_charge(mem, PAGE_SIZE);
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002580}
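
/*
 * A hypothetical caller-side sketch (not part of the original file) of the
 * try/commit/cancel protocol implemented above, roughly what the swap-in
 * fault path does: reserve the charge first, then either commit it once the
 * page is mapped or cancel it on failure. The helper name and the
 * map_succeeded parameter are illustrative assumptions.
 */
static int __maybe_unused memcg_swapin_charge_sketch(struct mm_struct *mm,
				struct page *page, gfp_t gfp_mask,
				bool map_succeeded)
{
	struct mem_cgroup *mem = NULL;
	int ret;

	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
	if (ret)
		return ret;	/* nothing was charged, nothing to undo */

	if (map_succeeded)	/* consumes the refcnt taken by try_charge */
		mem_cgroup_commit_charge_swapin(page, mem);
	else			/* gives the reserved charge back */
		mem_cgroup_cancel_charge_swapin(mem);
	return 0;
}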
2581
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002582static void
Andrea Arcangeliec168512011-01-13 15:46:56 -08002583__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype,
2584 int page_size)
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002585{
2586 struct memcg_batch_info *batch = NULL;
2587 bool uncharge_memsw = true;
2588 /* If swapout, usage of swap doesn't decrease */
2589 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2590 uncharge_memsw = false;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002591
2592 batch = &current->memcg_batch;
2593 /*
2594	 * Usually, we do css_get() when we remember a memcg pointer.
2595	 * But in this case, we keep res->usage until the end of a series of
2596	 * uncharges, so it's ok to ignore the memcg's refcnt.
2597 */
2598 if (!batch->memcg)
2599 batch->memcg = mem;
2600 /*
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002601	 * do_batch > 0 when unmapping pages or during inode invalidate/truncate.
2602	 * In those cases, all pages freed continuously can be expected to be in
2603	 * the same cgroup, and we have a chance to coalesce uncharges.
2604	 * But we uncharge one by one if the task was killed by OOM (TIF_MEMDIE)
2605	 * because we want the uncharge to happen as soon as possible.
2606 */
2607
2608 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2609 goto direct_uncharge;
2610
Andrea Arcangeliec168512011-01-13 15:46:56 -08002611 if (page_size != PAGE_SIZE)
2612 goto direct_uncharge;
2613
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002614 /*
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002615	 * In the typical case, batch->memcg == mem. This means we can
2616	 * merge a series of uncharges into a single res_counter uncharge.
2617	 * If not, we uncharge the res_counter one by one.
2618 */
2619 if (batch->memcg != mem)
2620 goto direct_uncharge;
2621 /* remember freed charge and uncharge it later */
2622 batch->bytes += PAGE_SIZE;
2623 if (uncharge_memsw)
2624 batch->memsw_bytes += PAGE_SIZE;
2625 return;
2626direct_uncharge:
Andrea Arcangeliec168512011-01-13 15:46:56 -08002627 res_counter_uncharge(&mem->res, page_size);
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002628 if (uncharge_memsw)
Andrea Arcangeliec168512011-01-13 15:46:56 -08002629 res_counter_uncharge(&mem->memsw, page_size);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002630 if (unlikely(batch->memcg != mem))
2631 memcg_oom_recover(mem);
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002632 return;
2633}
KAMEZAWA Hiroyuki7a81b882009-01-07 18:07:48 -08002634
Balbir Singh8697d332008-02-07 00:13:59 -08002635/*
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07002636 * uncharge if !page_mapped(page)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002637 */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002638static struct mem_cgroup *
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07002639__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002640{
Daisuke Nishimura152c9cc2011-01-13 15:46:56 -08002641 int count;
Hugh Dickins82895462008-03-04 14:29:08 -08002642 struct page_cgroup *pc;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002643 struct mem_cgroup *mem = NULL;
Andrea Arcangeliec168512011-01-13 15:46:56 -08002644 int page_size = PAGE_SIZE;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002645
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08002646 if (mem_cgroup_disabled())
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002647 return NULL;
Balbir Singh40779602008-04-04 14:29:59 -07002648
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08002649 if (PageSwapCache(page))
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002650 return NULL;
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08002651
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08002652 if (PageTransHuge(page)) {
Andrea Arcangeliec168512011-01-13 15:46:56 -08002653 page_size <<= compound_order(page);
Andrea Arcangeli37c2ac72011-01-13 15:47:16 -08002654 VM_BUG_ON(!PageTransHuge(page));
2655 }
Andrea Arcangeliec168512011-01-13 15:46:56 -08002656
Daisuke Nishimura152c9cc2011-01-13 15:46:56 -08002657 count = page_size >> PAGE_SHIFT;
Balbir Singh8697d332008-02-07 00:13:59 -08002658 /*
Balbir Singh3c541e12008-02-07 00:14:41 -08002659 * Check if our page_cgroup is valid
Balbir Singh8697d332008-02-07 00:13:59 -08002660 */
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002661 pc = lookup_page_cgroup(page);
2662 if (unlikely(!pc || !PageCgroupUsed(pc)))
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002663 return NULL;
Balbir Singh8a9f3cc2008-02-07 00:13:53 -08002664
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002665 lock_page_cgroup(pc);
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08002666
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002667 mem = pc->mem_cgroup;
2668
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08002669 if (!PageCgroupUsed(pc))
2670 goto unlock_out;
2671
2672 switch (ctype) {
2673 case MEM_CGROUP_CHARGE_TYPE_MAPPED:
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07002674 case MEM_CGROUP_CHARGE_TYPE_DROP:
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07002675 /* See mem_cgroup_prepare_migration() */
2676 if (page_mapped(page) || PageCgroupMigration(pc))
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08002677 goto unlock_out;
2678 break;
2679 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2680 if (!PageAnon(page)) { /* Shared memory */
2681 if (page->mapping && !page_is_file_cache(page))
2682 goto unlock_out;
2683 } else if (page_mapped(page)) /* Anon */
2684 goto unlock_out;
2685 break;
2686 default:
2687 break;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002688 }
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08002689
KAMEZAWA Hiroyukica3e0212011-01-20 14:44:24 -08002690 mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -count);
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07002691
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002692 ClearPageCgroupUsed(pc);
KAMEZAWA Hiroyuki544122e2009-01-07 18:08:34 -08002693 /*
2694	 * pc->mem_cgroup is not cleared here. It will be accessed when the page
2695	 * is freed from the LRU. This is safe because an uncharged page is
2696	 * expected not to be reused (it is freed soon). The exception is
2697	 * SwapCache, which is handled by special functions.
2698 */
Hugh Dickinsb9c565d2008-03-04 14:29:11 -08002699
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002700 unlock_page_cgroup(pc);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002701 /*
2702	 * Even after the unlock, we still hold mem->res.usage here, so this
2703	 * memcg will not be freed.
2704 */
KAMEZAWA Hiroyukid2265e62010-03-10 15:22:31 -08002705 memcg_check_events(mem, page);
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002706 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
2707 mem_cgroup_swap_statistics(mem, true);
2708 mem_cgroup_get(mem);
2709 }
2710 if (!mem_cgroup_is_root(mem))
Andrea Arcangeliec168512011-01-13 15:46:56 -08002711 __do_uncharge(mem, ctype, page_size);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08002712
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002713 return mem;
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08002714
2715unlock_out:
2716 unlock_page_cgroup(pc);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002717 return NULL;
Balbir Singh3c541e12008-02-07 00:14:41 -08002718}
2719
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07002720void mem_cgroup_uncharge_page(struct page *page)
2721{
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002722 /* early check. */
2723 if (page_mapped(page))
2724 return;
2725 if (page->mapping && !PageAnon(page))
2726 return;
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07002727 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2728}
2729
2730void mem_cgroup_uncharge_cache_page(struct page *page)
2731{
2732 VM_BUG_ON(page_mapped(page));
KAMEZAWA Hiroyukib7abea92008-10-18 20:28:09 -07002733 VM_BUG_ON(page->mapping);
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07002734 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2735}
2736
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002737/*
2738 * Batch_start/batch_end is called from unmap_page_range/invalidate/truncate.
2739 * In those cases, pages are freed continuously and we can expect them to
2740 * be in the same memcg. Each of those callers itself limits the number of
2741 * pages freed at once, so uncharge_start/end() is called appropriately.
2742 * The pair may be called plural (2 or more) times in one context (nesting).
2743 */
2744
2745void mem_cgroup_uncharge_start(void)
2746{
2747 current->memcg_batch.do_batch++;
2748 /* We can do nest. */
2749 if (current->memcg_batch.do_batch == 1) {
2750 current->memcg_batch.memcg = NULL;
2751 current->memcg_batch.bytes = 0;
2752 current->memcg_batch.memsw_bytes = 0;
2753 }
2754}
2755
2756void mem_cgroup_uncharge_end(void)
2757{
2758 struct memcg_batch_info *batch = &current->memcg_batch;
2759
2760 if (!batch->do_batch)
2761 return;
2762
2763 batch->do_batch--;
2764 if (batch->do_batch) /* If stacked, do nothing. */
2765 return;
2766
2767 if (!batch->memcg)
2768 return;
2769 /*
2770 * This "batch->memcg" is valid without any css_get/put etc...
2771 * bacause we hide charges behind us.
2772 */
2773 if (batch->bytes)
2774 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2775 if (batch->memsw_bytes)
2776 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07002777 memcg_oom_recover(batch->memcg);
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08002778 /* forget this pointer (for sanity check) */
2779 batch->memcg = NULL;
2780}
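
/*
 * A hypothetical caller-side sketch (not in the original file) of the
 * batching protocol above: truncate/unmap style loops bracket their page
 * frees with uncharge_start()/uncharge_end() so that, when the freed pages
 * belong to one memcg, the res_counter is touched once per batch instead of
 * once per page. The helper name is an illustrative assumption.
 */
static void __maybe_unused memcg_batched_uncharge_sketch(struct page **pages,
				int nr)
{
	int i;

	mem_cgroup_uncharge_start();		/* begin coalescing */
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);	/* piles up in current->memcg_batch */
	mem_cgroup_uncharge_end();		/* flush the coalesced uncharge */
}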
2781
Daisuke Nishimurae767e052009-05-28 14:34:28 -07002782#ifdef CONFIG_SWAP
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002783/*
Daisuke Nishimurae767e052009-05-28 14:34:28 -07002784 * Called after __delete_from_swap_cache(); drops the "page" account.
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002785 * The memcg information is recorded in the swap_cgroup of "ent".
2786 */
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07002787void
2788mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08002789{
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002790 struct mem_cgroup *memcg;
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07002791 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002792
KAMEZAWA Hiroyuki8a9478c2009-06-17 16:27:17 -07002793 if (!swapout) /* this was a swap cache but the swap is unused ! */
2794 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2795
2796 memcg = __mem_cgroup_uncharge_common(page, ctype);
2797
KAMEZAWA Hiroyukif75ca962010-08-10 18:03:02 -07002798 /*
2799	 * Record the memcg information; if swapout && memcg != NULL,
2800	 * mem_cgroup_get() was already called in uncharge().
2801 */
2802 if (do_swap_account && swapout && memcg)
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002803 swap_cgroup_record(ent, css_id(&memcg->css));
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08002804}
Daisuke Nishimurae767e052009-05-28 14:34:28 -07002805#endif
KAMEZAWA Hiroyukid13d1442009-01-07 18:07:56 -08002806
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002807#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2808/*
2809 * Called from swap_entry_free(). Removes the record in swap_cgroup and
2810 * uncharges the "memsw" account.
2811 */
2812void mem_cgroup_uncharge_swap(swp_entry_t ent)
2813{
2814 struct mem_cgroup *memcg;
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002815 unsigned short id;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002816
2817 if (!do_swap_account)
2818 return;
2819
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002820 id = swap_cgroup_record(ent, 0);
2821 rcu_read_lock();
2822 memcg = mem_cgroup_lookup(id);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002823 if (memcg) {
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002824 /*
2825	 * We uncharge this because the swap entry has been freed.
2826	 * This memcg may already be obsolete; we avoid calling css_tryget().
2827 */
Balbir Singh0c3e73e2009-09-23 15:56:42 -07002828 if (!mem_cgroup_is_root(memcg))
KAMEZAWA Hiroyuki4e649152009-10-01 15:44:11 -07002829 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
Balbir Singh0c3e73e2009-09-23 15:56:42 -07002830 mem_cgroup_swap_statistics(memcg, false);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002831 mem_cgroup_put(memcg);
2832 }
KAMEZAWA Hiroyukia3b2d692009-04-02 16:57:45 -07002833 rcu_read_unlock();
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002834}
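
/*
 * Condensed, illustrative view (not in the original file) of the swap
 * accounting hand-off described by the two functions above:
 *
 *   swap-out:   mem_cgroup_uncharge_swapcache(page, ent, true)
 *               records css_id(memcg) in the swap_cgroup of "ent"; a memcg
 *               reference taken during the uncharge is kept across it.
 *   slot freed: mem_cgroup_uncharge_swap(ent)
 *               looks that id up again, uncharges memsw by PAGE_SIZE and
 *               drops the reference via mem_cgroup_put().
 */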
Daisuke Nishimura02491442010-03-10 15:22:17 -08002835
2836/**
2837 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2838 * @entry: swap entry to be moved
2839 * @from: mem_cgroup which the entry is moved from
2840 * @to: mem_cgroup which the entry is moved to
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08002841 * @need_fixup: whether we should fixup res_counters and refcounts.
Daisuke Nishimura02491442010-03-10 15:22:17 -08002842 *
2843 * It succeeds only when the swap_cgroup's record for this entry is the same
2844 * as the mem_cgroup's id of @from.
2845 *
2846 * Returns 0 on success, -EINVAL on failure.
2847 *
2848 * The caller must have charged to @to, IOW, called res_counter_charge() about
2849 * both res and memsw, and called css_get().
2850 */
2851static int mem_cgroup_move_swap_account(swp_entry_t entry,
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08002852 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002853{
2854 unsigned short old_id, new_id;
2855
2856 old_id = css_id(&from->css);
2857 new_id = css_id(&to->css);
2858
2859 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08002860 mem_cgroup_swap_statistics(from, false);
Daisuke Nishimura02491442010-03-10 15:22:17 -08002861 mem_cgroup_swap_statistics(to, true);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08002862 /*
2863 * This function is only called from task migration context now.
2864 * It postpones res_counter and refcount handling till the end
2865 * of task migration(mem_cgroup_clear_mc()) for performance
2866 * improvement. But we cannot postpone mem_cgroup_get(to)
2867 * because if the process that has been moved to @to does
2868 * swap-in, the refcount of @to might be decreased to 0.
2869 */
Daisuke Nishimura02491442010-03-10 15:22:17 -08002870 mem_cgroup_get(to);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08002871 if (need_fixup) {
2872 if (!mem_cgroup_is_root(from))
2873 res_counter_uncharge(&from->memsw, PAGE_SIZE);
2874 mem_cgroup_put(from);
2875 /*
2876 * we charged both to->res and to->memsw, so we should
2877 * uncharge to->res.
2878 */
2879 if (!mem_cgroup_is_root(to))
2880 res_counter_uncharge(&to->res, PAGE_SIZE);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08002881 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08002882 return 0;
2883 }
2884 return -EINVAL;
2885}
2886#else
2887static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08002888 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
Daisuke Nishimura02491442010-03-10 15:22:17 -08002889{
2890 return -EINVAL;
2891}
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08002892#endif
2893
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08002894/*
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08002895 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2896 * page belongs to.
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08002897 */
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07002898int mem_cgroup_prepare_migration(struct page *page,
Miklos Szeredief6a3c62011-03-22 16:30:52 -07002899 struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08002900{
2901 struct page_cgroup *pc;
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07002902 struct mem_cgroup *mem = NULL;
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07002903 enum charge_type ctype;
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07002904 int ret = 0;
Hugh Dickins8869b8f2008-03-04 14:29:09 -08002905
KAMEZAWA Hiroyuki56039ef2011-03-23 16:42:19 -07002906 *ptr = NULL;
2907
Andrea Arcangeliec168512011-01-13 15:46:56 -08002908 VM_BUG_ON(PageTransHuge(page));
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08002909 if (mem_cgroup_disabled())
Balbir Singh40779602008-04-04 14:29:59 -07002910 return 0;
2911
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002912 pc = lookup_page_cgroup(page);
2913 lock_page_cgroup(pc);
2914 if (PageCgroupUsed(pc)) {
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07002915 mem = pc->mem_cgroup;
2916 css_get(&mem->css);
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07002917 /*
2918	 * While migrating an anonymous page, its mapcount goes down
2919	 * to 0 and uncharge() will be called. But even if it's fully
2920	 * unmapped, migration may fail and the page would have to be
2921	 * charged again. We set the MIGRATION flag here and delay the
2922	 * uncharge until end_migration() is called.
2923 *
2924 * Corner Case Thinking
2925 * A)
2926 * When the old page was mapped as Anon and it's unmap-and-freed
2927 * while migration was ongoing.
2928	 * If unmap finds the old page, its uncharge() will be delayed
2929	 * until end_migration(). If unmap finds the new page, it is
2930	 * uncharged when its mapcount drops from 1 to 0. If the unmap code
2931	 * finds a swap_migration_entry, the new page will not be mapped
2932	 * and end_migration() will find it (mapcount == 0).
2933 *
2934 * B)
2935	 * When the old page was mapped but migration fails, the kernel
2936	 * remaps it. A charge for it is kept by the MIGRATION flag even
2937	 * if the mapcount goes down to 0, so the remap succeeds
2938	 * without charging it again.
2939 *
2940 * C)
2941 * The "old" page is under lock_page() until the end of
2942 * migration, so, the old page itself will not be swapped-out.
2943 * If the new page is swapped out before end_migraton, our
2944 * hook to usual swap-out path will catch the event.
2945 */
2946 if (PageAnon(page))
2947 SetPageCgroupMigration(pc);
Hugh Dickinsb9c565d2008-03-04 14:29:11 -08002948 }
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07002949 unlock_page_cgroup(pc);
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07002950 /*
2951 * If the page is not charged at this point,
2952 * we return here.
2953 */
2954 if (!mem)
2955 return 0;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08002956
Andrea Arcangeli93d5c9b2010-04-23 13:17:39 -04002957 *ptr = mem;
Miklos Szeredief6a3c62011-03-22 16:30:52 -07002958 ret = __mem_cgroup_try_charge(NULL, gfp_mask, ptr, false, PAGE_SIZE);
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07002959 css_put(&mem->css);/* drop extra refcnt */
2960 if (ret || *ptr == NULL) {
2961 if (PageAnon(page)) {
2962 lock_page_cgroup(pc);
2963 ClearPageCgroupMigration(pc);
2964 unlock_page_cgroup(pc);
2965 /*
2966 * The old page may be fully unmapped while we kept it.
2967 */
2968 mem_cgroup_uncharge_page(page);
2969 }
2970 return -ENOMEM;
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07002971 }
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07002972 /*
2973	 * We charge the new page before it is used/mapped. So, even if unlock_page()
2974	 * is called before end_migration(), we can catch all events on this new
2975	 * page. If the new page is migrated but not remapped, its mapcount
2976	 * will end up as 0 and we call uncharge in end_migration().
2977 */
2978 pc = lookup_page_cgroup(newpage);
2979 if (PageAnon(page))
2980 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2981 else if (page_is_file_cache(page))
2982 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2983 else
2984 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
Andrea Arcangeliec168512011-01-13 15:46:56 -08002985 __mem_cgroup_commit_charge(mem, pc, ctype, PAGE_SIZE);
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07002986 return ret;
2987}
Hugh Dickinsfb59e9f2008-03-04 14:29:16 -08002988
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07002989/* remove redundant charge if migration failed*/
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08002990void mem_cgroup_end_migration(struct mem_cgroup *mem,
Daisuke Nishimura50de1dd2011-01-13 15:47:43 -08002991 struct page *oldpage, struct page *newpage, bool migration_ok)
KAMEZAWA Hiroyukie8589cc2008-07-25 01:47:10 -07002992{
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07002993 struct page *used, *unused;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08002994 struct page_cgroup *pc;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08002995
2996 if (!mem)
2997 return;
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07002998 /* blocks rmdir() */
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07002999 cgroup_exclude_rmdir(&mem->css);
Daisuke Nishimura50de1dd2011-01-13 15:47:43 -08003000 if (!migration_ok) {
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003001 used = oldpage;
3002 unused = newpage;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003003 } else {
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003004 used = newpage;
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003005 unused = oldpage;
3006 }
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003007 /*
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003008	 * We disallowed uncharge of pages under migration because the mapcount
3009	 * of the page goes down to zero only temporarily.
3010	 * Clear the flag and check whether the page should still be charged.
KAMEZAWA Hiroyuki69029cd2008-07-25 01:47:14 -07003011 */
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003012 pc = lookup_page_cgroup(oldpage);
3013 lock_page_cgroup(pc);
3014 ClearPageCgroupMigration(pc);
3015 unlock_page_cgroup(pc);
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003016
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003017 __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
3018
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003019 /*
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003020	 * If the page is file cache, the radix-tree replacement is atomic
3021	 * and we can skip this check. When it was an Anon page, its mapcount
3022	 * went down to 0, but because we added the MIGRATION flag it is not
3023	 * uncharged yet. There are several cases, but the page->mapcount check
3024	 * and the USED bit check in mem_cgroup_uncharge_page() do enough
3025	 * checking. (see prepare_charge() also)
KAMEZAWA Hiroyuki01b1ae62009-01-07 18:07:50 -08003026 */
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003027 if (PageAnon(used))
3028 mem_cgroup_uncharge_page(used);
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07003029 /*
akpm@linux-foundation.orgac39cf82010-05-26 14:42:46 -07003030	 * At migration, we may charge a cgroup which has no
3031	 * tasks.
KAMEZAWA Hiroyuki88703262009-07-29 15:04:06 -07003032	 * So, rmdir()->pre_destroy() can be called while we perform this charge.
3033	 * In that case, pre_destroy() needs to be called again; check it here.
3034 */
3035 cgroup_release_and_wakeup_rmdir(&mem->css);
KAMEZAWA Hiroyukiae41be32008-02-07 00:14:10 -08003036}
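
/*
 * A hypothetical sketch (not in the original file) of how the migration
 * core is expected to drive the two hooks above: the charge is prepared
 * against the new page up front, and end_migration() settles it depending
 * on whether the copy actually replaced the old page. The helper name and
 * the copy_succeeded parameter are illustrative assumptions.
 */
static int __maybe_unused memcg_migrate_charge_sketch(struct page *oldpage,
				struct page *newpage, bool copy_succeeded,
				gfp_t gfp_mask)
{
	struct mem_cgroup *mem = NULL;
	int ret;

	ret = mem_cgroup_prepare_migration(oldpage, newpage, &mem, gfp_mask);
	if (ret)
		return ret;	/* old page keeps its charge as before */

	/* ... unmap oldpage, copy its contents to newpage, remap ... */

	mem_cgroup_end_migration(mem, oldpage, newpage, copy_succeeded);
	return 0;
}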
Pavel Emelianov78fb7462008-02-07 00:13:51 -08003037
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003038/*
Daisuke Nishimuraae3abae2009-04-30 15:08:19 -07003039 * A call to try to shrink memory usage on charge failure at shmem's swapin.
3040 * Calling hierarchical_reclaim is not enough because we should update
3041 * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
3042 * Moreover, considering the hierarchy, we should reclaim from the mem_over_limit,
3043 * not from the memcg which this page would be charged to.
3044 * try_charge_swapin does all of this work properly.
KAMEZAWA Hiroyukic9b0ed52008-07-25 01:47:15 -07003045 */
Daisuke Nishimuraae3abae2009-04-30 15:08:19 -07003046int mem_cgroup_shmem_charge_fallback(struct page *page,
KAMEZAWA Hiroyukib5a84312009-01-07 18:08:35 -08003047 struct mm_struct *mm,
3048 gfp_t gfp_mask)
KAMEZAWA Hiroyukic9b0ed52008-07-25 01:47:15 -07003049{
KAMEZAWA Hiroyuki56039ef2011-03-23 16:42:19 -07003050 struct mem_cgroup *mem;
Daisuke Nishimuraae3abae2009-04-30 15:08:19 -07003051 int ret;
KAMEZAWA Hiroyukic9b0ed52008-07-25 01:47:15 -07003052
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08003053 if (mem_cgroup_disabled())
Li Zefancede86a2008-07-25 01:47:18 -07003054 return 0;
KAMEZAWA Hiroyukic9b0ed52008-07-25 01:47:15 -07003055
Daisuke Nishimuraae3abae2009-04-30 15:08:19 -07003056 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
3057 if (!ret)
3058 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
KAMEZAWA Hiroyukic9b0ed52008-07-25 01:47:15 -07003059
Daisuke Nishimuraae3abae2009-04-30 15:08:19 -07003060 return ret;
KAMEZAWA Hiroyukic9b0ed52008-07-25 01:47:15 -07003061}
3062
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003063static DEFINE_MUTEX(set_limit_mutex);
3064
KOSAKI Motohirod38d2a72009-01-06 14:39:44 -08003065static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003066 unsigned long long val)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003067{
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003068 int retry_count;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003069 u64 memswlimit, memlimit;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003070 int ret = 0;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003071 int children = mem_cgroup_count_children(memcg);
3072 u64 curusage, oldusage;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003073 int enlarge;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003074
3075 /*
3076	 * To keep hierarchical_reclaim simple, how long we should retry
3077	 * depends on the caller. We set our retry count to be a function
3078	 * of the number of children we should visit in this loop.
3079 */
3080 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
3081
3082 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003083
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003084 enlarge = 0;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003085 while (retry_count) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003086 if (signal_pending(current)) {
3087 ret = -EINTR;
3088 break;
3089 }
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003090 /*
3091	 * Rather than hiding all this in some function, I do it in an
3092	 * open-coded manner so you can see what it really does.
3093	 * We have to guarantee mem->res.limit <= mem->memsw.limit.
3094 */
3095 mutex_lock(&set_limit_mutex);
3096 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3097 if (memswlimit < val) {
3098 ret = -EINVAL;
3099 mutex_unlock(&set_limit_mutex);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003100 break;
3101 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003102
3103 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3104 if (memlimit < val)
3105 enlarge = 1;
3106
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003107 ret = res_counter_set_limit(&memcg->res, val);
KAMEZAWA Hiroyuki22a668d2009-06-17 16:27:19 -07003108 if (!ret) {
3109 if (memswlimit == val)
3110 memcg->memsw_is_minimum = true;
3111 else
3112 memcg->memsw_is_minimum = false;
3113 }
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003114 mutex_unlock(&set_limit_mutex);
3115
3116 if (!ret)
3117 break;
3118
Bob Liuaa20d482009-12-15 16:47:14 -08003119 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
Balbir Singh4e416952009-09-23 15:56:39 -07003120 MEM_CGROUP_RECLAIM_SHRINK);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003121 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
3122 /* Usage is reduced ? */
3123 if (curusage >= oldusage)
3124 retry_count--;
3125 else
3126 oldusage = curusage;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003127 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003128 if (!ret && enlarge)
3129 memcg_oom_recover(memcg);
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08003130
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003131 return ret;
3132}
3133
Li Zefan338c8432009-06-17 16:27:15 -07003134static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
3135 unsigned long long val)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003136{
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003137 int retry_count;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003138 u64 memlimit, memswlimit, oldusage, curusage;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003139 int children = mem_cgroup_count_children(memcg);
3140 int ret = -EBUSY;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003141 int enlarge = 0;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003142
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003143 /* see mem_cgroup_resize_res_limit */
3144 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
3145 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003146 while (retry_count) {
3147 if (signal_pending(current)) {
3148 ret = -EINTR;
3149 break;
3150 }
3151 /*
3152	 * Rather than hiding all this in some function, I do it in an
3153	 * open-coded manner so you can see what it really does.
3154	 * We have to guarantee mem->res.limit <= mem->memsw.limit.
3155 */
3156 mutex_lock(&set_limit_mutex);
3157 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3158 if (memlimit > val) {
3159 ret = -EINVAL;
3160 mutex_unlock(&set_limit_mutex);
3161 break;
3162 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003163 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3164 if (memswlimit < val)
3165 enlarge = 1;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003166 ret = res_counter_set_limit(&memcg->memsw, val);
KAMEZAWA Hiroyuki22a668d2009-06-17 16:27:19 -07003167 if (!ret) {
3168 if (memlimit == val)
3169 memcg->memsw_is_minimum = true;
3170 else
3171 memcg->memsw_is_minimum = false;
3172 }
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003173 mutex_unlock(&set_limit_mutex);
3174
3175 if (!ret)
3176 break;
3177
Balbir Singh4e416952009-09-23 15:56:39 -07003178 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
Balbir Singh75822b42009-09-23 15:56:38 -07003179 MEM_CGROUP_RECLAIM_NOSWAP |
3180 MEM_CGROUP_RECLAIM_SHRINK);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003181 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003182 /* Usage is reduced ? */
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003183 if (curusage >= oldusage)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003184 retry_count--;
KAMEZAWA Hiroyuki81d39c22009-04-02 16:57:36 -07003185 else
3186 oldusage = curusage;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003187 }
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003188 if (!ret && enlarge)
3189 memcg_oom_recover(memcg);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003190 return ret;
3191}
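
/*
 * An illustrative, hypothetical helper (not in the original file) showing
 * the ordering constraint the two resize functions above enforce:
 * mem->res.limit must never exceed mem->memsw.limit. So memsw has to be
 * raised first when it grows past the current memory limit, and the memory
 * limit has to be lowered first otherwise.
 */
static int __maybe_unused memcg_resize_both_limits_sketch(struct mem_cgroup *memcg,
		unsigned long long memlimit, unsigned long long memswlimit)
{
	int ret;

	if (memlimit > memswlimit)	/* would break the invariant */
		return -EINVAL;

	if (res_counter_read_u64(&memcg->res, RES_LIMIT) <= memswlimit) {
		/* memsw stays above the current memory limit: grow it first */
		ret = mem_cgroup_resize_memsw_limit(memcg, memswlimit);
		if (!ret)
			ret = mem_cgroup_resize_limit(memcg, memlimit);
	} else {
		/* shrinking memory below the new memsw limit: lower it first */
		ret = mem_cgroup_resize_limit(memcg, memlimit);
		if (!ret)
			ret = mem_cgroup_resize_memsw_limit(memcg, memswlimit);
	}
	return ret;
}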
3192
Balbir Singh4e416952009-09-23 15:56:39 -07003193unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
KOSAKI Motohiro00918b62010-08-10 18:03:05 -07003194 gfp_t gfp_mask)
Balbir Singh4e416952009-09-23 15:56:39 -07003195{
3196 unsigned long nr_reclaimed = 0;
3197 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
3198 unsigned long reclaimed;
3199 int loop = 0;
3200 struct mem_cgroup_tree_per_zone *mctz;
KAMEZAWA Hiroyukief8745c2009-10-01 15:44:12 -07003201 unsigned long long excess;
Balbir Singh4e416952009-09-23 15:56:39 -07003202
3203 if (order > 0)
3204 return 0;
3205
KOSAKI Motohiro00918b62010-08-10 18:03:05 -07003206 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
Balbir Singh4e416952009-09-23 15:56:39 -07003207 /*
3208	 * This loop can run a while, especially if mem_cgroups continuously
3209	 * keep exceeding their soft limit and putting the system under
3210	 * pressure.
3211 */
3212 do {
3213 if (next_mz)
3214 mz = next_mz;
3215 else
3216 mz = mem_cgroup_largest_soft_limit_node(mctz);
3217 if (!mz)
3218 break;
3219
3220 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
3221 gfp_mask,
3222 MEM_CGROUP_RECLAIM_SOFT);
3223 nr_reclaimed += reclaimed;
3224 spin_lock(&mctz->lock);
3225
3226 /*
3227 * If we failed to reclaim anything from this memory cgroup
3228 * it is time to move on to the next cgroup
3229 */
3230 next_mz = NULL;
3231 if (!reclaimed) {
3232 do {
3233 /*
3234 * Loop until we find yet another one.
3235 *
3236 * By the time we get the soft_limit lock
3237	 * again, someone might have added the
3238 * group back on the RB tree. Iterate to
3239 * make sure we get a different mem.
3240 * mem_cgroup_largest_soft_limit_node returns
3241 * NULL if no other cgroup is present on
3242 * the tree
3243 */
3244 next_mz =
3245 __mem_cgroup_largest_soft_limit_node(mctz);
3246 if (next_mz == mz) {
3247 css_put(&next_mz->mem->css);
3248 next_mz = NULL;
3249 } else /* next_mz == NULL or other memcg */
3250 break;
3251 } while (1);
3252 }
Balbir Singh4e416952009-09-23 15:56:39 -07003253 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
KAMEZAWA Hiroyukief8745c2009-10-01 15:44:12 -07003254 excess = res_counter_soft_limit_excess(&mz->mem->res);
Balbir Singh4e416952009-09-23 15:56:39 -07003255 /*
3256 * One school of thought says that we should not add
3257 * back the node to the tree if reclaim returns 0.
3258	 * But our reclaim could return 0 simply because, due
3259	 * to priority, we are exposing a smaller subset of
3260 * memory to reclaim from. Consider this as a longer
3261 * term TODO.
3262 */
KAMEZAWA Hiroyukief8745c2009-10-01 15:44:12 -07003263 /* If excess == 0, no tree ops */
3264 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
Balbir Singh4e416952009-09-23 15:56:39 -07003265 spin_unlock(&mctz->lock);
3266 css_put(&mz->mem->css);
3267 loop++;
3268 /*
3269 * Could not reclaim anything and there are no more
3270 * mem cgroups to try or we seem to be looping without
3271 * reclaiming anything.
3272 */
3273 if (!nr_reclaimed &&
3274 (next_mz == NULL ||
3275 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3276 break;
3277 } while (!nr_reclaimed);
3278 if (next_mz)
3279 css_put(&next_mz->mem->css);
3280 return nr_reclaimed;
3281}
3282
KAMEZAWA Hiroyukic9b0ed52008-07-25 01:47:15 -07003283/*
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003284 * This routine traverses the page_cgroups on the given list and drops them all.
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003285 * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
3286 */
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003287static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003288 int node, int zid, enum lru_list lru)
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003289{
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003290 struct zone *zone;
3291 struct mem_cgroup_per_zone *mz;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003292 struct page_cgroup *pc, *busy;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003293 unsigned long flags, loop;
KAMEZAWA Hiroyuki072c56c12008-02-07 00:14:39 -08003294 struct list_head *list;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003295 int ret = 0;
KAMEZAWA Hiroyuki072c56c12008-02-07 00:14:39 -08003296
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003297 zone = &NODE_DATA(node)->node_zones[zid];
3298 mz = mem_cgroup_zoneinfo(mem, node, zid);
Christoph Lameterb69408e2008-10-18 20:26:14 -07003299 list = &mz->lists[lru];
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003300
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003301 loop = MEM_CGROUP_ZSTAT(mz, lru);
3302 /* give some margin against EBUSY etc...*/
3303 loop += 256;
3304 busy = NULL;
3305 while (loop--) {
3306 ret = 0;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003307 spin_lock_irqsave(&zone->lru_lock, flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003308 if (list_empty(list)) {
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003309 spin_unlock_irqrestore(&zone->lru_lock, flags);
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003310 break;
3311 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003312 pc = list_entry(list->prev, struct page_cgroup, lru);
3313 if (busy == pc) {
3314 list_move(&pc->lru, list);
Thiago Farina648bcc72010-03-05 13:42:04 -08003315 busy = NULL;
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003316 spin_unlock_irqrestore(&zone->lru_lock, flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003317 continue;
3318 }
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003319 spin_unlock_irqrestore(&zone->lru_lock, flags);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003320
KAMEZAWA Hiroyuki2c26fdd2009-01-07 18:08:10 -08003321 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003322 if (ret == -ENOMEM)
3323 break;
3324
3325 if (ret == -EBUSY || ret == -EINVAL) {
3326 /* found lock contention or "pc" is obsolete. */
3327 busy = pc;
3328 cond_resched();
3329 } else
3330 busy = NULL;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003331 }
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003332
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003333 if (!ret && !list_empty(list))
3334 return -EBUSY;
3335 return ret;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003336}
3337
3338/*
3339 * Make the mem_cgroup's charge 0 if there are no tasks.
3340 * This enables deleting this mem_cgroup.
3341 */
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003342static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003343{
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003344 int ret;
3345 int node, zid, shrink;
3346 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003347 struct cgroup *cgrp = mem->css.cgroup;
Hugh Dickins8869b8f2008-03-04 14:29:09 -08003348
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003349 css_get(&mem->css);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003350
3351 shrink = 0;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003352 /* should free all ? */
3353 if (free_all)
3354 goto try_to_free;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003355move_account:
Daisuke Nishimurafce66472010-01-15 17:01:30 -08003356 do {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003357 ret = -EBUSY;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003358 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003359 goto out;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003360 ret = -EINTR;
3361 if (signal_pending(current))
3362 goto out;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003363	 /* This is for making sure all *used* pages are on an LRU. */
3364 lru_add_drain_all();
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08003365 drain_all_stock_sync();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003366 ret = 0;
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07003367 mem_cgroup_start_move(mem);
KAMEZAWA Hiroyuki299b4ea2009-01-29 14:25:17 -08003368 for_each_node_state(node, N_HIGH_MEMORY) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003369 for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
Christoph Lameterb69408e2008-10-18 20:26:14 -07003370 enum lru_list l;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003371 for_each_lru(l) {
3372 ret = mem_cgroup_force_empty_list(mem,
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003373 node, zid, l);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003374 if (ret)
3375 break;
3376 }
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08003377 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003378 if (ret)
3379 break;
3380 }
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07003381 mem_cgroup_end_move(mem);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07003382 memcg_oom_recover(mem);
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003383 /* it seems parent cgroup doesn't have enough mem */
3384 if (ret == -ENOMEM)
3385 goto try_to_free;
KAMEZAWA Hiroyuki52d4b9a2008-10-18 20:28:16 -07003386 cond_resched();
Daisuke Nishimurafce66472010-01-15 17:01:30 -08003387 /* "ret" should also be checked to ensure all lists are empty. */
3388 } while (mem->res.usage > 0 || ret);
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003389out:
3390 css_put(&mem->css);
3391 return ret;
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003392
3393try_to_free:
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003394 /* returns EBUSY if there is a task or if we come here twice. */
3395 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003396 ret = -EBUSY;
3397 goto out;
3398 }
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003399	 /* we call try-to-free pages to make this cgroup empty */
3400 lru_add_drain_all();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003401 /* try to free all pages in this cgroup */
3402 shrink = 1;
3403 while (nr_retries && mem->res.usage > 0) {
3404 int progress;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003405
3406 if (signal_pending(current)) {
3407 ret = -EINTR;
3408 goto out;
3409 }
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003410 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3411 false, get_swappiness(mem));
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003412 if (!progress) {
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003413 nr_retries--;
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003414 /* maybe some writeback is necessary */
Jens Axboe8aa7e842009-07-09 14:52:32 +02003415 congestion_wait(BLK_RW_ASYNC, HZ/10);
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003416 }
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003417
3418 }
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08003419 lru_add_drain();
KAMEZAWA Hiroyukif817ed42009-01-07 18:07:53 -08003420 /* try move_account...there may be some *locked* pages. */
Daisuke Nishimurafce66472010-01-15 17:01:30 -08003421 goto move_account;
KAMEZAWA Hiroyukicc847582008-02-07 00:14:16 -08003422}
3423
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08003424int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3425{
3426 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3427}
3428
3429
Balbir Singh18f59ea2009-01-07 18:08:07 -08003430static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3431{
3432 return mem_cgroup_from_cont(cont)->use_hierarchy;
3433}
3434
3435static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3436 u64 val)
3437{
3438 int retval = 0;
3439 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3440 struct cgroup *parent = cont->parent;
3441 struct mem_cgroup *parent_mem = NULL;
3442
3443 if (parent)
3444 parent_mem = mem_cgroup_from_cont(parent);
3445
3446 cgroup_lock();
3447 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003448 * If parent's use_hierarchy is set, we can't make any modifications
Balbir Singh18f59ea2009-01-07 18:08:07 -08003449 * in the child subtrees. If it is unset, then the change can
3450 * occur, provided the current cgroup has no children.
3451 *
3452	 * For the root cgroup, parent_mem is NULL; we allow the value to
3453	 * be set if there are no children.
3454 */
3455 if ((!parent_mem || !parent_mem->use_hierarchy) &&
3456 (val == 1 || val == 0)) {
3457 if (list_empty(&cont->children))
3458 mem->use_hierarchy = val;
3459 else
3460 retval = -EBUSY;
3461 } else
3462 retval = -EINVAL;
3463 cgroup_unlock();
3464
3465 return retval;
3466}
3467
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003468
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003469static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
3470 enum mem_cgroup_stat_index idx)
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003471{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003472 struct mem_cgroup *iter;
3473 s64 val = 0;
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003474
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003475	 /* each per-cpu value can be negative, so use s64 */
3476 for_each_mem_cgroup_tree(iter, mem)
3477 val += mem_cgroup_read_stat(iter, idx);
3478
3479 if (val < 0) /* race ? */
3480 val = 0;
3481 return val;
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003482}
3483
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003484static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3485{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003486 u64 val;
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003487
3488 if (!mem_cgroup_is_root(mem)) {
3489 if (!swap)
3490 return res_counter_read_u64(&mem->res, RES_USAGE);
3491 else
3492 return res_counter_read_u64(&mem->memsw, RES_USAGE);
3493 }
3494
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003495 val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE);
3496 val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS);
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003497
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003498 if (swap)
3499 val += mem_cgroup_get_recursive_idx_stat(mem,
3500 MEM_CGROUP_STAT_SWAPOUT);
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003501
3502 return val << PAGE_SHIFT;
3503}
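
/*
 * Worked example of the root-cgroup path above (illustrative numbers):
 * with 4KiB pages, cache = 300 pages and rss = 200 pages summed over the
 * hierarchy give (300 + 200) << PAGE_SHIFT = 500 * 4096 = 2048000 bytes.
 */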
3504
Paul Menage2c3daa72008-04-29 00:59:58 -07003505static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003506{
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003507 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003508 u64 val;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003509 int type, name;
3510
3511 type = MEMFILE_TYPE(cft->private);
3512 name = MEMFILE_ATTR(cft->private);
3513 switch (type) {
3514 case _MEM:
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003515 if (name == RES_USAGE)
3516 val = mem_cgroup_usage(mem, false);
3517 else
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003518 val = res_counter_read_u64(&mem->res, name);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003519 break;
3520 case _MEMSWAP:
Kirill A. Shutemov104f3922010-03-10 15:22:21 -08003521 if (name == RES_USAGE)
3522 val = mem_cgroup_usage(mem, true);
3523 else
Balbir Singh0c3e73e2009-09-23 15:56:42 -07003524 val = res_counter_read_u64(&mem->memsw, name);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003525 break;
3526 default:
3527 BUG();
3528 break;
3529 }
3530 return val;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003531}
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003532/*
3533 * The user of this function is...
3534 * RES_LIMIT.
3535 */
Paul Menage856c13a2008-07-25 01:47:04 -07003536static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3537 const char *buffer)
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003538{
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003539 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003540 int type, name;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003541 unsigned long long val;
3542 int ret;
3543
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003544 type = MEMFILE_TYPE(cft->private);
3545 name = MEMFILE_ATTR(cft->private);
3546 switch (name) {
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003547 case RES_LIMIT:
Balbir Singh4b3bde42009-09-23 15:56:32 -07003548 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3549 ret = -EINVAL;
3550 break;
3551 }
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003552 /* This function does all necessary parse...reuse it */
3553 ret = res_counter_memparse_write_strategy(buffer, &val);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003554 if (ret)
3555 break;
3556 if (type == _MEM)
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003557 ret = mem_cgroup_resize_limit(memcg, val);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003558 else
3559 ret = mem_cgroup_resize_memsw_limit(memcg, val);
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003560 break;
Balbir Singh296c81d2009-09-23 15:56:36 -07003561 case RES_SOFT_LIMIT:
3562 ret = res_counter_memparse_write_strategy(buffer, &val);
3563 if (ret)
3564 break;
3565 /*
3566 * For memsw, soft limits are hard to implement in terms
3567	 * of semantics. For now, we support soft limits only for
3568	 * memory control without swap.
3569 */
3570 if (type == _MEM)
3571 ret = res_counter_set_soft_limit(&memcg->res, val);
3572 else
3573 ret = -EINVAL;
3574 break;
KAMEZAWA Hiroyuki628f4232008-07-25 01:47:20 -07003575 default:
3576 ret = -EINVAL; /* should be BUG() ? */
3577 break;
3578 }
3579 return ret;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08003580}
3581
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08003582static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3583 unsigned long long *mem_limit, unsigned long long *memsw_limit)
3584{
3585 struct cgroup *cgroup;
3586 unsigned long long min_limit, min_memsw_limit, tmp;
3587
3588 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3589 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3590 cgroup = memcg->css.cgroup;
3591 if (!memcg->use_hierarchy)
3592 goto out;
3593
3594 while (cgroup->parent) {
3595 cgroup = cgroup->parent;
3596 memcg = mem_cgroup_from_cont(cgroup);
3597 if (!memcg->use_hierarchy)
3598 break;
3599 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3600 min_limit = min(min_limit, tmp);
3601 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3602 min_memsw_limit = min(min_memsw_limit, tmp);
3603 }
3604out:
3605 *mem_limit = min_limit;
3606 *memsw_limit = min_memsw_limit;
3607 return;
3608}
3609
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003610static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003611{
3612 struct mem_cgroup *mem;
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003613 int type, name;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003614
3615 mem = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003616 type = MEMFILE_TYPE(event);
3617 name = MEMFILE_ATTR(event);
3618 switch (name) {
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003619 case RES_MAX_USAGE:
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003620 if (type == _MEM)
3621 res_counter_reset_max(&mem->res);
3622 else
3623 res_counter_reset_max(&mem->memsw);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003624 break;
3625 case RES_FAILCNT:
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08003626 if (type == _MEM)
3627 res_counter_reset_failcnt(&mem->res);
3628 else
3629 res_counter_reset_failcnt(&mem->memsw);
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07003630 break;
3631 }
Balbir Singhf64c3f52009-09-23 15:56:37 -07003632
Pavel Emelyanov85cc59d2008-04-29 01:00:20 -07003633 return 0;
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07003634}
3635
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003636static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3637 struct cftype *cft)
3638{
3639 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3640}
3641
Daisuke Nishimura02491442010-03-10 15:22:17 -08003642#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003643static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3644 struct cftype *cft, u64 val)
3645{
3646 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3647
3648 if (val >= (1 << NR_MOVE_TYPE))
3649 return -EINVAL;
3650 /*
3651 * We check this value several times in both in can_attach() and
3652 * attach(), so we need cgroup lock to prevent this value from being
3653 * inconsistent.
3654 */
3655 cgroup_lock();
3656 mem->move_charge_at_immigrate = val;
3657 cgroup_unlock();
3658
3659 return 0;
3660}
Daisuke Nishimura02491442010-03-10 15:22:17 -08003661#else
3662static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3663 struct cftype *cft, u64 val)
3664{
3665 return -ENOSYS;
3666}
3667#endif
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08003668
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003669
3670/* For read statistics */
3671enum {
3672 MCS_CACHE,
3673 MCS_RSS,
KAMEZAWA Hiroyukid8046582009-12-15 16:47:09 -08003674 MCS_FILE_MAPPED,
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003675 MCS_PGPGIN,
3676 MCS_PGPGOUT,
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003677 MCS_SWAP,
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003678 MCS_INACTIVE_ANON,
3679 MCS_ACTIVE_ANON,
3680 MCS_INACTIVE_FILE,
3681 MCS_ACTIVE_FILE,
3682 MCS_UNEVICTABLE,
3683 NR_MCS_STAT,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003684};
3685
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003686struct mcs_total_stat {
3687 s64 stat[NR_MCS_STAT];
3688};
3689
3690struct {
3691 char *local_name;
3692 char *total_name;
3693} memcg_stat_strings[NR_MCS_STAT] = {
3694 {"cache", "total_cache"},
3695 {"rss", "total_rss"},
Balbir Singhd69b0422009-06-17 16:26:34 -07003696 {"mapped_file", "total_mapped_file"},
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003697 {"pgpgin", "total_pgpgin"},
3698 {"pgpgout", "total_pgpgout"},
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003699 {"swap", "total_swap"},
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003700 {"inactive_anon", "total_inactive_anon"},
3701 {"active_anon", "total_active_anon"},
3702 {"inactive_file", "total_inactive_file"},
3703 {"active_file", "total_active_file"},
3704 {"unevictable", "total_unevictable"}
3705};
3706
3707
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003708static void
3709mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003710{
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003711 s64 val;
3712
3713 /* per cpu stat */
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08003714 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003715 s->stat[MCS_CACHE] += val * PAGE_SIZE;
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08003716 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003717 s->stat[MCS_RSS] += val * PAGE_SIZE;
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08003718 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
KAMEZAWA Hiroyukid8046582009-12-15 16:47:09 -08003719 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08003720 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003721 s->stat[MCS_PGPGIN] += val;
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08003722 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003723 s->stat[MCS_PGPGOUT] += val;
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003724 if (do_swap_account) {
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08003725 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003726 s->stat[MCS_SWAP] += val * PAGE_SIZE;
3727 }
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003728
3729 /* per zone stat */
3730 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
3731 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
3732 val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
3733 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
3734 val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
3735 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
3736 val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
3737 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
3738 val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
3739 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003740}
3741
3742static void
3743mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3744{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003745 struct mem_cgroup *iter;
3746
3747 for_each_mem_cgroup_tree(iter, mem)
3748 mem_cgroup_get_local_stat(iter, s);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003749}
3750
Paul Menagec64745c2008-04-29 01:00:02 -07003751static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3752 struct cgroup_map_cb *cb)
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003753{
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003754 struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003755 struct mcs_total_stat mystat;
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003756 int i;
3757
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003758 memset(&mystat, 0, sizeof(mystat));
3759 mem_cgroup_get_local_stat(mem_cont, &mystat);
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003760
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003761 for (i = 0; i < NR_MCS_STAT; i++) {
3762 if (i == MCS_SWAP && !do_swap_account)
3763 continue;
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003764 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003765 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08003766
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003767 /* Hierarchical information */
KAMEZAWA Hiroyukifee7b542009-01-07 18:08:26 -08003768 {
3769 unsigned long long limit, memsw_limit;
3770 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
3771 cb->fill(cb, "hierarchical_memory_limit", limit);
3772 if (do_swap_account)
3773 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
3774 }
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003775
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003776 memset(&mystat, 0, sizeof(mystat));
3777 mem_cgroup_get_total_stat(mem_cont, &mystat);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003778 for (i = 0; i < NR_MCS_STAT; i++) {
3779 if (i == MCS_SWAP && !do_swap_account)
3780 continue;
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003781 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
Daisuke Nishimura1dd3a272009-09-23 15:56:43 -07003782 }
KAMEZAWA Hiroyuki14067bb2009-04-02 16:57:35 -07003783
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003784#ifdef CONFIG_DEBUG_VM
KOSAKI Motohiroc772be92009-01-07 18:08:25 -08003785 cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
KOSAKI Motohiro7f016ee2009-01-07 18:08:22 -08003786
3787 {
3788 int nid, zid;
3789 struct mem_cgroup_per_zone *mz;
3790 unsigned long recent_rotated[2] = {0, 0};
3791 unsigned long recent_scanned[2] = {0, 0};
3792
3793 for_each_online_node(nid)
3794 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3795 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
3796
3797 recent_rotated[0] +=
3798 mz->reclaim_stat.recent_rotated[0];
3799 recent_rotated[1] +=
3800 mz->reclaim_stat.recent_rotated[1];
3801 recent_scanned[0] +=
3802 mz->reclaim_stat.recent_scanned[0];
3803 recent_scanned[1] +=
3804 mz->reclaim_stat.recent_scanned[1];
3805 }
3806 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
3807 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
3808 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
3809 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
3810 }
3811#endif
3812
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08003813 return 0;
3814}
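/*
 * Rough shape of the memory.stat output produced above (values are purely
 * illustrative): the local counters come first, then the hierarchical
 * limits, then the total_* counters summed over the subtree, and finally
 * the CONFIG_DEBUG_VM-only reclaim statistics. The swap and
 * hierarchical_memsw_limit lines only appear when swap accounting is on.
 *
 *	cache 4096
 *	rss 8192
 *	mapped_file 0
 *	pgpgin 3
 *	pgpgout 1
 *	swap 0
 *	inactive_anon 0
 *	active_anon 8192
 *	inactive_file 4096
 *	active_file 0
 *	unevictable 0
 *	hierarchical_memory_limit 268435456
 *	hierarchical_memsw_limit 536870912
 *	total_cache 4096
 *	...
 */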
3815
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003816static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
3817{
3818 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3819
3820 return get_swappiness(memcg);
3821}
3822
3823static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
3824 u64 val)
3825{
3826 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3827 struct mem_cgroup *parent;
Li Zefan068b38c2009-01-15 13:51:26 -08003828
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003829 if (val > 100)
3830 return -EINVAL;
3831
3832 if (cgrp->parent == NULL)
3833 return -EINVAL;
3834
3835 parent = mem_cgroup_from_cont(cgrp->parent);
Li Zefan068b38c2009-01-15 13:51:26 -08003836
3837 cgroup_lock();
3838
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003839 /* If under hierarchy, only empty-root can set this value */
3840 if ((parent->use_hierarchy) ||
Li Zefan068b38c2009-01-15 13:51:26 -08003841 (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3842 cgroup_unlock();
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003843 return -EINVAL;
Li Zefan068b38c2009-01-15 13:51:26 -08003844 }
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003845
3846 spin_lock(&memcg->reclaim_param_lock);
3847 memcg->swappiness = val;
3848 spin_unlock(&memcg->reclaim_param_lock);
3849
Li Zefan068b38c2009-01-15 13:51:26 -08003850 cgroup_unlock();
3851
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08003852 return 0;
3853}
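/*
 * Usage sketch for the swappiness knob above (values 0-100; as checked
 * above, the write is rejected for the root cgroup and, when use_hierarchy
 * is in effect, for any group that is not an empty subtree root):
 *
 *	echo 10 > memory.swappiness
 *	cat memory.swappiness
 */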
3854
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003855static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3856{
3857 struct mem_cgroup_threshold_ary *t;
3858 u64 usage;
3859 int i;
3860
3861 rcu_read_lock();
3862 if (!swap)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003863 t = rcu_dereference(memcg->thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003864 else
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003865 t = rcu_dereference(memcg->memsw_thresholds.primary);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003866
3867 if (!t)
3868 goto unlock;
3869
3870 usage = mem_cgroup_usage(memcg, swap);
3871
3872 /*
3873	 * current_threshold points to the threshold just below usage.
3874	 * If this is no longer true, a threshold was crossed after the
3875	 * last call of __mem_cgroup_threshold().
3876 */
Phil Carmody5407a562010-05-26 14:42:42 -07003877 i = t->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003878
3879 /*
3880	 * Iterate backward over the array of thresholds starting from
3881	 * current_threshold and check if a threshold is crossed.
3882	 * If none of the thresholds below usage is crossed, we read
3883	 * only one element of the array here.
3884 */
3885 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3886 eventfd_signal(t->entries[i].eventfd, 1);
3887
3888 /* i = current_threshold + 1 */
3889 i++;
3890
3891 /*
3892	 * Iterate forward over the array of thresholds starting from
3893	 * current_threshold+1 and check if a threshold is crossed.
3894	 * If none of the thresholds above usage is crossed, we read
3895	 * only one element of the array here.
3896 */
3897 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3898 eventfd_signal(t->entries[i].eventfd, 1);
3899
3900 /* Update current_threshold */
Phil Carmody5407a562010-05-26 14:42:42 -07003901 t->current_threshold = i - 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003902unlock:
3903 rcu_read_unlock();
3904}
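/*
 * Worked example of the scan above (hypothetical numbers): with thresholds
 * {4M, 8M, 16M} and current_threshold == 1 (usage was ~10M), a drop in
 * usage to 3M makes the backward loop signal the 8M and 4M eventfds and
 * leaves current_threshold == -1; a rise to 20M instead makes the forward
 * loop signal the 16M eventfd and leaves current_threshold == 2.
 */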
3905
3906static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3907{
Kirill A. Shutemovad4ca5f2010-10-07 12:59:27 -07003908 while (memcg) {
3909 __mem_cgroup_threshold(memcg, false);
3910 if (do_swap_account)
3911 __mem_cgroup_threshold(memcg, true);
3912
3913 memcg = parent_mem_cgroup(memcg);
3914 }
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003915}
3916
3917static int compare_thresholds(const void *a, const void *b)
3918{
3919 const struct mem_cgroup_threshold *_a = a;
3920 const struct mem_cgroup_threshold *_b = b;
3921
3922 return _a->threshold - _b->threshold;
3923}
3924
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003925static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003926{
3927 struct mem_cgroup_eventfd_list *ev;
3928
3929 list_for_each_entry(ev, &mem->oom_notify, list)
3930 eventfd_signal(ev->eventfd, 1);
3931 return 0;
3932}
3933
3934static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
3935{
KAMEZAWA Hiroyuki7d74b062010-10-27 15:33:41 -07003936 struct mem_cgroup *iter;
3937
3938 for_each_mem_cgroup_tree(iter, mem)
3939 mem_cgroup_oom_notify_cb(iter);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07003940}
3941
3942static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
3943 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003944{
3945 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003946 struct mem_cgroup_thresholds *thresholds;
3947 struct mem_cgroup_threshold_ary *new;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003948 int type = MEMFILE_TYPE(cft->private);
3949 u64 threshold, usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003950 int i, size, ret;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003951
3952 ret = res_counter_memparse_write_strategy(args, &threshold);
3953 if (ret)
3954 return ret;
3955
3956 mutex_lock(&memcg->thresholds_lock);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003957
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003958 if (type == _MEM)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003959 thresholds = &memcg->thresholds;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003960 else if (type == _MEMSWAP)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003961 thresholds = &memcg->memsw_thresholds;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003962 else
3963 BUG();
3964
3965 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3966
3967	/* Check if a threshold was crossed before adding a new one */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003968 if (thresholds->primary)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003969 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3970
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003971 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003972
3973 /* Allocate memory for new array of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003974 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003975 GFP_KERNEL);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003976 if (!new) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003977 ret = -ENOMEM;
3978 goto unlock;
3979 }
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003980 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003981
3982 /* Copy thresholds (if any) to new array */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003983 if (thresholds->primary) {
3984 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003985 sizeof(struct mem_cgroup_threshold));
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003986 }
3987
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003988 /* Add new threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003989 new->entries[size - 1].eventfd = eventfd;
3990 new->entries[size - 1].threshold = threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003991
3992	/* Sort thresholds. Registering a new threshold isn't time-critical */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003993 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003994 compare_thresholds, NULL);
3995
3996 /* Find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003997 new->current_threshold = -1;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08003998 for (i = 0; i < size; i++) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07003999 if (new->entries[i].threshold < usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004000 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004001 * new->current_threshold will not be used until
4002 * rcu_assign_pointer(), so it's safe to increment
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004003 * it here.
4004 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004005 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004006 }
4007 }
4008
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004009 /* Free old spare buffer and save old primary buffer as spare */
4010 kfree(thresholds->spare);
4011 thresholds->spare = thresholds->primary;
4012
4013 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004014
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004015 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004016 synchronize_rcu();
4017
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004018unlock:
4019 mutex_unlock(&memcg->thresholds_lock);
4020
4021 return ret;
4022}
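/*
 * Userspace registration sketch for the handler above, via the generic
 * cgroup.event_control interface (error handling omitted; the file paths,
 * threshold value and helper calls are illustrative only):
 *
 *	int efd = eventfd(0, 0);
 *	int ufd = open("memory.usage_in_bytes", O_RDONLY);
 *	int cfd = open("cgroup.event_control", O_WRONLY);
 *	uint64_t cnt;
 *
 *	dprintf(cfd, "%d %d %llu", efd, ufd, 8388608ULL);
 *	read(efd, &cnt, sizeof(cnt));	// blocks until a threshold is crossed
 */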
4023
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004024static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004025 struct cftype *cft, struct eventfd_ctx *eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004026{
4027 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004028 struct mem_cgroup_thresholds *thresholds;
4029 struct mem_cgroup_threshold_ary *new;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004030 int type = MEMFILE_TYPE(cft->private);
4031 u64 usage;
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004032 int i, j, size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004033
4034 mutex_lock(&memcg->thresholds_lock);
4035 if (type == _MEM)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004036 thresholds = &memcg->thresholds;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004037 else if (type == _MEMSWAP)
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004038 thresholds = &memcg->memsw_thresholds;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004039 else
4040 BUG();
4041
4042 /*
4043	 * Something went wrong if we are trying to unregister a threshold
4044	 * when we don't have any thresholds.
4045 */
4046 BUG_ON(!thresholds);
4047
4048 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
4049
4050	/* Check if a threshold was crossed before removing */
4051 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4052
4053	/* Calculate the new number of thresholds */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004054 size = 0;
4055 for (i = 0; i < thresholds->primary->size; i++) {
4056 if (thresholds->primary->entries[i].eventfd != eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004057 size++;
4058 }
4059
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004060 new = thresholds->spare;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004061
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004062 /* Set thresholds array to NULL if we don't have thresholds */
4063 if (!size) {
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004064 kfree(new);
4065 new = NULL;
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004066 goto swap_buffers;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004067 }
4068
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004069 new->size = size;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004070
4071 /* Copy thresholds and find current threshold */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004072 new->current_threshold = -1;
4073 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4074 if (thresholds->primary->entries[i].eventfd == eventfd)
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004075 continue;
4076
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004077 new->entries[j] = thresholds->primary->entries[i];
4078 if (new->entries[j].threshold < usage) {
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004079 /*
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004080 * new->current_threshold will not be used
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004081 * until rcu_assign_pointer(), so it's safe to increment
4082 * it here.
4083 */
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004084 ++new->current_threshold;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004085 }
4086 j++;
4087 }
4088
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004089swap_buffers:
Kirill A. Shutemov2c488db2010-05-26 14:42:47 -07004090 /* Swap primary and spare array */
4091 thresholds->spare = thresholds->primary;
4092 rcu_assign_pointer(thresholds->primary, new);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004093
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004094 /* To be sure that nobody uses thresholds */
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004095 synchronize_rcu();
4096
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004097 mutex_unlock(&memcg->thresholds_lock);
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004098}
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004099
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004100static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
4101 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
4102{
4103 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
4104 struct mem_cgroup_eventfd_list *event;
4105 int type = MEMFILE_TYPE(cft->private);
4106
4107 BUG_ON(type != _OOM_TYPE);
4108 event = kmalloc(sizeof(*event), GFP_KERNEL);
4109 if (!event)
4110 return -ENOMEM;
4111
4112 mutex_lock(&memcg_oom_mutex);
4113
4114 event->eventfd = eventfd;
4115 list_add(&event->list, &memcg->oom_notify);
4116
4117 /* already in OOM ? */
4118 if (atomic_read(&memcg->oom_lock))
4119 eventfd_signal(eventfd, 1);
4120 mutex_unlock(&memcg_oom_mutex);
4121
4122 return 0;
4123}
4124
Kirill A. Shutemov907860e2010-05-26 14:42:46 -07004125static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004126 struct cftype *cft, struct eventfd_ctx *eventfd)
4127{
4128 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4129 struct mem_cgroup_eventfd_list *ev, *tmp;
4130 int type = MEMFILE_TYPE(cft->private);
4131
4132 BUG_ON(type != _OOM_TYPE);
4133
4134 mutex_lock(&memcg_oom_mutex);
4135
4136 list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
4137 if (ev->eventfd == eventfd) {
4138 list_del(&ev->list);
4139 kfree(ev);
4140 }
4141 }
4142
4143 mutex_unlock(&memcg_oom_mutex);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004144}
4145
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004146static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
4147 struct cftype *cft, struct cgroup_map_cb *cb)
4148{
4149 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4150
4151 cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
4152
4153 if (atomic_read(&mem->oom_lock))
4154 cb->fill(cb, "under_oom", 1);
4155 else
4156 cb->fill(cb, "under_oom", 0);
4157 return 0;
4158}
4159
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004160static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4161 struct cftype *cft, u64 val)
4162{
4163 struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
4164 struct mem_cgroup *parent;
4165
4166 /* cannot set to root cgroup and only 0 and 1 are allowed */
4167 if (!cgrp->parent || !((val == 0) || (val == 1)))
4168 return -EINVAL;
4169
4170 parent = mem_cgroup_from_cont(cgrp->parent);
4171
4172 cgroup_lock();
4173	/* oom-kill-disable is a flag for the whole subhierarchy. */
4174 if ((parent->use_hierarchy) ||
4175 (mem->use_hierarchy && !list_empty(&cgrp->children))) {
4176 cgroup_unlock();
4177 return -EINVAL;
4178 }
4179 mem->oom_kill_disable = val;
KAMEZAWA Hiroyuki4d845eb2010-06-29 15:05:18 -07004180 if (!val)
4181 memcg_oom_recover(mem);
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004182 cgroup_unlock();
4183 return 0;
4184}
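/*
 * Usage sketch for memory.oom_control (as implemented above, the write is
 * rejected for the root group and for groups under hierarchy with children):
 *
 *	echo 1 > memory.oom_control	# disable the OOM killer for this group
 *	cat memory.oom_control		# shows oom_kill_disable and under_oom
 *
 * OOM notifications can be requested the same way as usage thresholds, by
 * writing "<eventfd> <fd of memory.oom_control>" to cgroup.event_control.
 */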
4185
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004186static struct cftype mem_cgroup_files[] = {
4187 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004188 .name = "usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004189 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
Paul Menage2c3daa72008-04-29 00:59:58 -07004190 .read_u64 = mem_cgroup_read,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004191 .register_event = mem_cgroup_usage_register_event,
4192 .unregister_event = mem_cgroup_usage_unregister_event,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004193 },
4194 {
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004195 .name = "max_usage_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004196 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07004197 .trigger = mem_cgroup_reset,
Pavel Emelyanovc84872e2008-04-29 01:00:17 -07004198 .read_u64 = mem_cgroup_read,
4199 },
4200 {
Balbir Singh0eea1032008-02-07 00:13:57 -08004201 .name = "limit_in_bytes",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004202 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
Paul Menage856c13a2008-07-25 01:47:04 -07004203 .write_string = mem_cgroup_write,
Paul Menage2c3daa72008-04-29 00:59:58 -07004204 .read_u64 = mem_cgroup_read,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004205 },
4206 {
Balbir Singh296c81d2009-09-23 15:56:36 -07004207 .name = "soft_limit_in_bytes",
4208 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4209 .write_string = mem_cgroup_write,
4210 .read_u64 = mem_cgroup_read,
4211 },
4212 {
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004213 .name = "failcnt",
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004214 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
Pavel Emelyanov29f2a4d2008-04-29 01:00:21 -07004215 .trigger = mem_cgroup_reset,
Paul Menage2c3daa72008-04-29 00:59:58 -07004216 .read_u64 = mem_cgroup_read,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004217 },
Balbir Singh8697d332008-02-07 00:13:59 -08004218 {
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004219 .name = "stat",
Paul Menagec64745c2008-04-29 01:00:02 -07004220 .read_map = mem_control_stat_show,
KAMEZAWA Hiroyukid2ceb9b2008-02-07 00:14:25 -08004221 },
KAMEZAWA Hiroyukic1e862c2009-01-07 18:07:55 -08004222 {
4223 .name = "force_empty",
4224 .trigger = mem_cgroup_force_empty_write,
4225 },
Balbir Singh18f59ea2009-01-07 18:08:07 -08004226 {
4227 .name = "use_hierarchy",
4228 .write_u64 = mem_cgroup_hierarchy_write,
4229 .read_u64 = mem_cgroup_hierarchy_read,
4230 },
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004231 {
4232 .name = "swappiness",
4233 .read_u64 = mem_cgroup_swappiness_read,
4234 .write_u64 = mem_cgroup_swappiness_write,
4235 },
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004236 {
4237 .name = "move_charge_at_immigrate",
4238 .read_u64 = mem_cgroup_move_charge_read,
4239 .write_u64 = mem_cgroup_move_charge_write,
4240 },
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004241 {
4242 .name = "oom_control",
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004243 .read_map = mem_cgroup_oom_control_read,
4244 .write_u64 = mem_cgroup_oom_control_write,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004245 .register_event = mem_cgroup_oom_register_event,
4246 .unregister_event = mem_cgroup_oom_unregister_event,
4247 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4248 },
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004249};
4250
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004251#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4252static struct cftype memsw_cgroup_files[] = {
4253 {
4254 .name = "memsw.usage_in_bytes",
4255 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4256 .read_u64 = mem_cgroup_read,
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004257 .register_event = mem_cgroup_usage_register_event,
4258 .unregister_event = mem_cgroup_usage_unregister_event,
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004259 },
4260 {
4261 .name = "memsw.max_usage_in_bytes",
4262 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4263 .trigger = mem_cgroup_reset,
4264 .read_u64 = mem_cgroup_read,
4265 },
4266 {
4267 .name = "memsw.limit_in_bytes",
4268 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4269 .write_string = mem_cgroup_write,
4270 .read_u64 = mem_cgroup_read,
4271 },
4272 {
4273 .name = "memsw.failcnt",
4274 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4275 .trigger = mem_cgroup_reset,
4276 .read_u64 = mem_cgroup_read,
4277 },
4278};
4279
4280static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4281{
4282 if (!do_swap_account)
4283 return 0;
4284 return cgroup_add_files(cont, ss, memsw_cgroup_files,
4285 ARRAY_SIZE(memsw_cgroup_files));
4286};
4287#else
4288static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4289{
4290 return 0;
4291}
4292#endif
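/*
 * When swap accounting is active (do_swap_account), the files above appear
 * alongside the plain memory.* files, e.g. memory.memsw.usage_in_bytes and
 * memory.memsw.limit_in_bytes, and account memory+swap rather than memory
 * alone.
 */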
4293
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004294static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4295{
4296 struct mem_cgroup_per_node *pn;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004297 struct mem_cgroup_per_zone *mz;
Christoph Lameterb69408e2008-10-18 20:26:14 -07004298 enum lru_list l;
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004299 int zone, tmp = node;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004300 /*
4301	 * This routine is called against possible nodes.
4302	 * But it's a BUG to call kmalloc() against an offline node.
4303	 *
4304	 * TODO: this routine can waste a lot of memory for nodes which will
4305	 * never be onlined. It's better to use a memory hotplug callback
4306	 * function.
4307 */
KAMEZAWA Hiroyuki41e33552008-04-08 17:41:54 -07004308 if (!node_state(node, N_NORMAL_MEMORY))
4309 tmp = -1;
Jesper Juhl17295c82011-01-13 15:47:42 -08004310 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004311 if (!pn)
4312 return 1;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004313
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004314 mem->info.nodeinfo[node] = pn;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004315 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4316 mz = &pn->zoneinfo[zone];
Christoph Lameterb69408e2008-10-18 20:26:14 -07004317 for_each_lru(l)
4318 INIT_LIST_HEAD(&mz->lists[l]);
Balbir Singhf64c3f52009-09-23 15:56:37 -07004319 mz->usage_in_excess = 0;
Balbir Singh4e416952009-09-23 15:56:39 -07004320 mz->on_tree = false;
4321 mz->mem = mem;
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004322 }
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004323 return 0;
4324}
4325
KAMEZAWA Hiroyuki1ecaab22008-02-07 00:14:38 -08004326static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4327{
4328 kfree(mem->info.nodeinfo[node]);
4329}
4330
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004331static struct mem_cgroup *mem_cgroup_alloc(void)
4332{
4333 struct mem_cgroup *mem;
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08004334 int size = sizeof(struct mem_cgroup);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004335
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08004336 /* Can be very big if MAX_NUMNODES is very big */
Jan Blunckc8dad2b2009-01-07 18:07:53 -08004337 if (size < PAGE_SIZE)
Jesper Juhl17295c82011-01-13 15:47:42 -08004338 mem = kzalloc(size, GFP_KERNEL);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004339 else
Jesper Juhl17295c82011-01-13 15:47:42 -08004340 mem = vzalloc(size);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004341
Dan Carpentere7bbcdf2010-03-23 13:35:12 -07004342 if (!mem)
4343 return NULL;
4344
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08004345 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004346 if (!mem->stat)
4347 goto out_free;
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07004348 spin_lock_init(&mem->pcp_counter_lock);
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004349 return mem;
Dan Carpenterd2e61b82010-11-11 14:05:12 -08004350
4351out_free:
4352 if (size < PAGE_SIZE)
4353 kfree(mem);
4354 else
4355 vfree(mem);
4356 return NULL;
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004357}
4358
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004359/*
4360 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4361 * (scanning them all at force_empty is too costly...)
4362 *
4363 * Instead of clearing all references at force_empty, we remember
4364 * the number of references from swap_cgroup and free the mem_cgroup when
4365 * it goes down to 0.
4366 *
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004367 * Removal of cgroup itself succeeds regardless of refs from swap.
4368 */
4369
KAMEZAWA Hiroyukia7ba0ee2009-01-07 18:08:32 -08004370static void __mem_cgroup_free(struct mem_cgroup *mem)
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004371{
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004372 int node;
4373
Balbir Singhf64c3f52009-09-23 15:56:37 -07004374 mem_cgroup_remove_from_trees(mem);
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004375 free_css_id(&mem_cgroup_subsys, &mem->css);
4376
KAMEZAWA Hiroyuki08e552c2009-01-07 18:08:01 -08004377 for_each_node_state(node, N_POSSIBLE)
4378 free_mem_cgroup_per_zone_info(mem, node);
4379
KAMEZAWA Hiroyukic62b1a32010-03-10 15:22:29 -08004380 free_percpu(mem->stat);
4381 if (sizeof(struct mem_cgroup) < PAGE_SIZE)
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004382 kfree(mem);
4383 else
4384 vfree(mem);
4385}
4386
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004387static void mem_cgroup_get(struct mem_cgroup *mem)
4388{
4389 atomic_inc(&mem->refcnt);
4390}
4391
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004392static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004393{
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004394 if (atomic_sub_and_test(count, &mem->refcnt)) {
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004395 struct mem_cgroup *parent = parent_mem_cgroup(mem);
KAMEZAWA Hiroyukia7ba0ee2009-01-07 18:08:32 -08004396 __mem_cgroup_free(mem);
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004397 if (parent)
4398 mem_cgroup_put(parent);
4399 }
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004400}
4401
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004402static void mem_cgroup_put(struct mem_cgroup *mem)
4403{
4404 __mem_cgroup_put(mem, 1);
4405}
4406
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004407/*
4408 * Returns the parent mem_cgroup in the memcg hierarchy when use_hierarchy is enabled.
4409 */
4410static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
4411{
4412 if (!mem->res.parent)
4413 return NULL;
4414 return mem_cgroup_from_res_counter(mem->res.parent, res);
4415}
KAMEZAWA Hiroyuki33327942008-04-29 01:00:24 -07004416
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08004417#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4418static void __init enable_swap_cgroup(void)
4419{
Hirokazu Takahashif8d665422009-01-07 18:08:02 -08004420 if (!mem_cgroup_disabled() && really_do_swap_account)
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08004421 do_swap_account = 1;
4422}
4423#else
4424static void __init enable_swap_cgroup(void)
4425{
4426}
4427#endif
4428
Balbir Singhf64c3f52009-09-23 15:56:37 -07004429static int mem_cgroup_soft_limit_tree_init(void)
4430{
4431 struct mem_cgroup_tree_per_node *rtpn;
4432 struct mem_cgroup_tree_per_zone *rtpz;
4433 int tmp, node, zone;
4434
4435 for_each_node_state(node, N_POSSIBLE) {
4436 tmp = node;
4437 if (!node_state(node, N_NORMAL_MEMORY))
4438 tmp = -1;
4439 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4440 if (!rtpn)
4441 return 1;
4442
4443 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4444
4445 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4446 rtpz = &rtpn->rb_tree_per_zone[zone];
4447 rtpz->rb_root = RB_ROOT;
4448 spin_lock_init(&rtpz->lock);
4449 }
4450 }
4451 return 0;
4452}
4453
Li Zefan0eb253e2009-01-15 13:51:25 -08004454static struct cgroup_subsys_state * __ref
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004455mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4456{
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004457 struct mem_cgroup *mem, *parent;
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004458 long error = -ENOMEM;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004459 int node;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004460
Jan Blunckc8dad2b2009-01-07 18:07:53 -08004461 mem = mem_cgroup_alloc();
4462 if (!mem)
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004463 return ERR_PTR(error);
Pavel Emelianov78fb7462008-02-07 00:13:51 -08004464
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004465 for_each_node_state(node, N_POSSIBLE)
4466 if (alloc_mem_cgroup_per_zone_info(mem, node))
4467 goto free_out;
Balbir Singhf64c3f52009-09-23 15:56:37 -07004468
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08004469 /* root ? */
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004470 if (cont->parent == NULL) {
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08004471 int cpu;
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08004472 enable_swap_cgroup();
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004473 parent = NULL;
Balbir Singh4b3bde42009-09-23 15:56:32 -07004474 root_mem_cgroup = mem;
Balbir Singhf64c3f52009-09-23 15:56:37 -07004475 if (mem_cgroup_soft_limit_tree_init())
4476 goto free_out;
KAMEZAWA Hiroyukicdec2e42009-12-15 16:47:08 -08004477 for_each_possible_cpu(cpu) {
4478 struct memcg_stock_pcp *stock =
4479 &per_cpu(memcg_stock, cpu);
4480 INIT_WORK(&stock->work, drain_local_stock);
4481 }
KAMEZAWA Hiroyuki711d3d22010-10-27 15:33:42 -07004482 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004483 } else {
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004484 parent = mem_cgroup_from_cont(cont->parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004485 mem->use_hierarchy = parent->use_hierarchy;
KAMEZAWA Hiroyuki3c11ecf2010-05-26 14:42:37 -07004486 mem->oom_kill_disable = parent->oom_kill_disable;
Balbir Singh18f59ea2009-01-07 18:08:07 -08004487 }
Balbir Singh28dbc4b2009-01-07 18:08:05 -08004488
Balbir Singh18f59ea2009-01-07 18:08:07 -08004489 if (parent && parent->use_hierarchy) {
4490 res_counter_init(&mem->res, &parent->res);
4491 res_counter_init(&mem->memsw, &parent->memsw);
Daisuke Nishimura7bcc1bb2009-01-29 14:25:11 -08004492 /*
4493		 * We increment the refcnt of the parent to ensure that we can
4494		 * safely access it on res_counter_charge/uncharge.
4495		 * This refcnt will be decremented when freeing this
4496		 * mem_cgroup (see mem_cgroup_put).
4497 */
4498 mem_cgroup_get(parent);
Balbir Singh18f59ea2009-01-07 18:08:07 -08004499 } else {
4500 res_counter_init(&mem->res, NULL);
4501 res_counter_init(&mem->memsw, NULL);
4502 }
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004503 mem->last_scanned_child = 0;
KOSAKI Motohiro2733c062009-01-07 18:08:23 -08004504 spin_lock_init(&mem->reclaim_param_lock);
KAMEZAWA Hiroyuki9490ff22010-05-26 14:42:36 -07004505 INIT_LIST_HEAD(&mem->oom_notify);
Balbir Singh6d61ef42009-01-07 18:08:06 -08004506
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08004507 if (parent)
4508 mem->swappiness = get_swappiness(parent);
KAMEZAWA Hiroyukia7ba0ee2009-01-07 18:08:32 -08004509 atomic_set(&mem->refcnt, 1);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004510 mem->move_charge_at_immigrate = 0;
Kirill A. Shutemov2e72b632010-03-10 15:22:24 -08004511 mutex_init(&mem->thresholds_lock);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004512 return &mem->css;
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08004513free_out:
KAMEZAWA Hiroyukia7ba0ee2009-01-07 18:08:32 -08004514 __mem_cgroup_free(mem);
Balbir Singh4b3bde42009-09-23 15:56:32 -07004515 root_mem_cgroup = NULL;
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07004516 return ERR_PTR(error);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004517}
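/*
 * Note on the res_counter setup above: when the parent uses hierarchy,
 * res_counter_init() links the child's res/memsw counters to the parent's,
 * so every charge against the child is also propagated to (and limited by)
 * its ancestors; otherwise the counters are independent.
 */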
4518
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07004519static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004520 struct cgroup *cont)
4521{
4522 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
KAMEZAWA Hiroyukiec64f512009-04-02 16:57:26 -07004523
4524 return mem_cgroup_force_empty(mem, false);
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08004525}
4526
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004527static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4528 struct cgroup *cont)
4529{
Daisuke Nishimurac268e992009-01-15 13:51:13 -08004530 struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
Daisuke Nishimurac268e992009-01-15 13:51:13 -08004531
Daisuke Nishimurac268e992009-01-15 13:51:13 -08004532 mem_cgroup_put(mem);
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004533}
4534
4535static int mem_cgroup_populate(struct cgroup_subsys *ss,
4536 struct cgroup *cont)
4537{
KAMEZAWA Hiroyuki8c7c6e342009-01-07 18:08:00 -08004538 int ret;
4539
4540 ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4541 ARRAY_SIZE(mem_cgroup_files));
4542
4543 if (!ret)
4544 ret = register_memsw_files(cont, ss);
4545 return ret;
Balbir Singh8cdea7c2008-02-07 00:13:50 -08004546}
4547
Daisuke Nishimura02491442010-03-10 15:22:17 -08004548#ifdef CONFIG_MMU
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004549/* Handlers for move charge at task migration. */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004550#define PRECHARGE_COUNT_AT_ONCE 256
4551static int mem_cgroup_do_precharge(unsigned long count)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004552{
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004553 int ret = 0;
4554 int batch_count = PRECHARGE_COUNT_AT_ONCE;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004555 struct mem_cgroup *mem = mc.to;
4556
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004557 if (mem_cgroup_is_root(mem)) {
4558 mc.precharge += count;
4559 /* we don't need css_get for root */
4560 return ret;
4561 }
4562 /* try to charge at once */
4563 if (count > 1) {
4564 struct res_counter *dummy;
4565 /*
4566 * "mem" cannot be under rmdir() because we've already checked
4567 * by cgroup_lock_live_cgroup() that it is not removed and we
4568 * are still under the same cgroup_mutex. So we can postpone
4569 * css_get().
4570 */
4571 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4572 goto one_by_one;
4573 if (do_swap_account && res_counter_charge(&mem->memsw,
4574 PAGE_SIZE * count, &dummy)) {
4575 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
4576 goto one_by_one;
4577 }
4578 mc.precharge += count;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004579 return ret;
4580 }
4581one_by_one:
4582 /* fall back to one by one charge */
4583 while (count--) {
4584 if (signal_pending(current)) {
4585 ret = -EINTR;
4586 break;
4587 }
4588 if (!batch_count--) {
4589 batch_count = PRECHARGE_COUNT_AT_ONCE;
4590 cond_resched();
4591 }
Andrea Arcangeliec168512011-01-13 15:46:56 -08004592 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
4593 PAGE_SIZE);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004594 if (ret || !mem)
4595 /* mem_cgroup_clear_mc() will do uncharge later */
4596 return -ENOMEM;
4597 mc.precharge++;
4598 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004599 return ret;
4600}
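/*
 * A note on the strategy above: for count > 1 the whole precharge is first
 * attempted as a single res_counter charge of PAGE_SIZE * count; only if
 * the res (or, with swap accounting, memsw) charge fails do we fall back to
 * charging one page at a time, rescheduling every PRECHARGE_COUNT_AT_ONCE
 * iterations and bailing out on a pending signal.
 */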
4601
4602/**
4603 * is_target_pte_for_mc - check whether a pte is a valid target for move charge
4604 * @vma: the vma the pte to be checked belongs to
4605 * @addr: the address corresponding to the pte to be checked
4606 * @ptent: the pte to be checked
Daisuke Nishimura02491442010-03-10 15:22:17 -08004607 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004608 *
4609 * Returns
4610 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
4611 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4612 *     move charge. If @target is not NULL, the page is stored in target->page
4613 *     with an extra refcount taken (callers should handle it).
Daisuke Nishimura02491442010-03-10 15:22:17 -08004614 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4615 *     target for charge migration. If @target is not NULL, the entry is stored
4616 *     in target->ent.
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004617 *
4618 * Called with pte lock held.
4619 */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004620union mc_target {
4621 struct page *page;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004622 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004623};
4624
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004625enum mc_target_type {
4626 MC_TARGET_NONE, /* not used */
4627 MC_TARGET_PAGE,
Daisuke Nishimura02491442010-03-10 15:22:17 -08004628 MC_TARGET_SWAP,
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004629};
4630
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004631static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4632 unsigned long addr, pte_t ptent)
4633{
4634 struct page *page = vm_normal_page(vma, addr, ptent);
4635
4636 if (!page || !page_mapped(page))
4637 return NULL;
4638 if (PageAnon(page)) {
4639 /* we don't move shared anon */
4640 if (!move_anon() || page_mapcount(page) > 2)
4641 return NULL;
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004642 } else if (!move_file())
4643 /* we ignore mapcount for file pages */
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004644 return NULL;
4645 if (!get_page_unless_zero(page))
4646 return NULL;
4647
4648 return page;
4649}
4650
4651static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4652 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4653{
4654 int usage_count;
4655 struct page *page = NULL;
4656 swp_entry_t ent = pte_to_swp_entry(ptent);
4657
4658 if (!move_anon() || non_swap_entry(ent))
4659 return NULL;
4660 usage_count = mem_cgroup_count_swap_user(ent, &page);
4661 if (usage_count > 1) { /* we don't move shared anon */
4662 if (page)
4663 put_page(page);
4664 return NULL;
4665 }
4666 if (do_swap_account)
4667 entry->val = ent.val;
4668
4669 return page;
4670}
4671
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004672static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4673 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4674{
4675 struct page *page = NULL;
4676 struct inode *inode;
4677 struct address_space *mapping;
4678 pgoff_t pgoff;
4679
4680 if (!vma->vm_file) /* anonymous vma */
4681 return NULL;
4682 if (!move_file())
4683 return NULL;
4684
4685 inode = vma->vm_file->f_path.dentry->d_inode;
4686 mapping = vma->vm_file->f_mapping;
4687 if (pte_none(ptent))
4688 pgoff = linear_page_index(vma, addr);
4689 else /* pte_file(ptent) is true */
4690 pgoff = pte_to_pgoff(ptent);
4691
4692	/* The page is moved even if it's not RSS of this task (page-faulted). */
4693 if (!mapping_cap_swap_backed(mapping)) { /* normal file */
4694 page = find_get_page(mapping, pgoff);
4695 } else { /* shmem/tmpfs file. we should take account of swap too. */
4696 swp_entry_t ent;
4697 mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
4698 if (do_swap_account)
4699 entry->val = ent.val;
4700 }
4701
4702 return page;
4703}
4704
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004705static int is_target_pte_for_mc(struct vm_area_struct *vma,
4706 unsigned long addr, pte_t ptent, union mc_target *target)
4707{
Daisuke Nishimura02491442010-03-10 15:22:17 -08004708 struct page *page = NULL;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004709 struct page_cgroup *pc;
4710 int ret = 0;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004711 swp_entry_t ent = { .val = 0 };
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004712
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004713 if (pte_present(ptent))
4714 page = mc_handle_present_pte(vma, addr, ptent);
4715 else if (is_swap_pte(ptent))
4716 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
Daisuke Nishimura87946a72010-05-26 14:42:39 -07004717 else if (pte_none(ptent) || pte_file(ptent))
4718 page = mc_handle_file_pte(vma, addr, ptent, &ent);
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004719
4720 if (!page && !ent.val)
Daisuke Nishimura02491442010-03-10 15:22:17 -08004721 return 0;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004722 if (page) {
4723 pc = lookup_page_cgroup(page);
4724 /*
4725		 * Do only a loose check without the page_cgroup lock.
4726		 * mem_cgroup_move_account() checks whether the pc is valid
4727		 * under the lock.
4728 */
4729 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
4730 ret = MC_TARGET_PAGE;
4731 if (target)
4732 target->page = page;
4733 }
4734 if (!ret || !target)
4735 put_page(page);
4736 }
Daisuke Nishimura90254a62010-05-26 14:42:38 -07004737	/* There is a swap entry and the page doesn't exist or isn't charged */
4738 if (ent.val && !ret &&
KAMEZAWA Hiroyuki7f0f1542010-05-11 14:06:58 -07004739 css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4740 ret = MC_TARGET_SWAP;
4741 if (target)
4742 target->ent = ent;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004743 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004744 return ret;
4745}
4746
4747static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4748 unsigned long addr, unsigned long end,
4749 struct mm_walk *walk)
4750{
4751 struct vm_area_struct *vma = walk->private;
4752 pte_t *pte;
4753 spinlock_t *ptl;
4754
Dave Hansen03319322011-03-22 16:32:56 -07004755 split_huge_page_pmd(walk->mm, pmd);
4756
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004757 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4758 for (; addr != end; pte++, addr += PAGE_SIZE)
4759 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
4760 mc.precharge++; /* increment precharge temporarily */
4761 pte_unmap_unlock(pte - 1, ptl);
4762 cond_resched();
4763
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004764 return 0;
4765}
4766
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004767static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4768{
4769 unsigned long precharge;
4770 struct vm_area_struct *vma;
4771
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004772 down_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004773 for (vma = mm->mmap; vma; vma = vma->vm_next) {
4774 struct mm_walk mem_cgroup_count_precharge_walk = {
4775 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4776 .mm = mm,
4777 .private = vma,
4778 };
4779 if (is_vm_hugetlb_page(vma))
4780 continue;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004781 walk_page_range(vma->vm_start, vma->vm_end,
4782 &mem_cgroup_count_precharge_walk);
4783 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004784 up_read(&mm->mmap_sem);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004785
4786 precharge = mc.precharge;
4787 mc.precharge = 0;
4788
4789 return precharge;
4790}
4791
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004792static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4793{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004794 unsigned long precharge = mem_cgroup_count_precharge(mm);
4795
4796 VM_BUG_ON(mc.moving_task);
4797 mc.moving_task = current;
4798 return mem_cgroup_do_precharge(precharge);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004799}
4800
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004801/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4802static void __mem_cgroup_clear_mc(void)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004803{
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004804 struct mem_cgroup *from = mc.from;
4805 struct mem_cgroup *to = mc.to;
4806
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004807 /* we must uncharge all the leftover precharges from mc.to */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004808 if (mc.precharge) {
4809 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
4810 mc.precharge = 0;
4811 }
4812 /*
4813 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4814 * we must uncharge here.
4815 */
4816 if (mc.moved_charge) {
4817 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4818 mc.moved_charge = 0;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004819 }
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004820 /* we must fixup refcnts and charges */
4821 if (mc.moved_swap) {
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004822 /* uncharge swap account from the old cgroup */
4823 if (!mem_cgroup_is_root(mc.from))
4824 res_counter_uncharge(&mc.from->memsw,
4825 PAGE_SIZE * mc.moved_swap);
4826 __mem_cgroup_put(mc.from, mc.moved_swap);
4827
4828 if (!mem_cgroup_is_root(mc.to)) {
4829 /*
4830 * we charged both to->res and to->memsw, so we should
4831 * uncharge to->res.
4832 */
4833 res_counter_uncharge(&mc.to->res,
4834 PAGE_SIZE * mc.moved_swap);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004835 }
4836 /* we've already done mem_cgroup_get(mc.to) */
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004837 mc.moved_swap = 0;
4838 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004839 memcg_oom_recover(from);
4840 memcg_oom_recover(to);
4841 wake_up_all(&mc.waitq);
4842}
4843
4844static void mem_cgroup_clear_mc(void)
4845{
4846 struct mem_cgroup *from = mc.from;
4847
4848 /*
4849 * we must clear moving_task before waking up waiters at the end of
4850 * task migration.
4851 */
4852 mc.moving_task = NULL;
4853 __mem_cgroup_clear_mc();
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004854 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004855 mc.from = NULL;
4856 mc.to = NULL;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004857 spin_unlock(&mc.lock);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07004858 mem_cgroup_end_move(from);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004859}
4860
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004861static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4862 struct cgroup *cgroup,
4863 struct task_struct *p,
4864 bool threadgroup)
4865{
4866 int ret = 0;
4867 struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
4868
4869 if (mem->move_charge_at_immigrate) {
4870 struct mm_struct *mm;
4871 struct mem_cgroup *from = mem_cgroup_from_task(p);
4872
4873 VM_BUG_ON(from == mem);
4874
4875 mm = get_task_mm(p);
4876 if (!mm)
4877 return 0;
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004878		/* We move charges only when we move the owner of the mm */
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004879 if (mm->owner == p) {
4880 VM_BUG_ON(mc.from);
4881 VM_BUG_ON(mc.to);
4882 VM_BUG_ON(mc.precharge);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004883 VM_BUG_ON(mc.moved_charge);
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004884 VM_BUG_ON(mc.moved_swap);
KAMEZAWA Hiroyuki32047e22010-10-27 15:33:40 -07004885 mem_cgroup_start_move(from);
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004886 spin_lock(&mc.lock);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004887 mc.from = from;
4888 mc.to = mem;
KAMEZAWA Hiroyuki2bd9bb22010-08-10 18:02:58 -07004889 spin_unlock(&mc.lock);
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004890 /* We set mc.moving_task later */
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004891
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004892 ret = mem_cgroup_precharge_mc(mm);
4893 if (ret)
4894 mem_cgroup_clear_mc();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004895 }
4896 mmput(mm);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004897 }
4898 return ret;
4899}
4900
4901static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4902 struct cgroup *cgroup,
4903 struct task_struct *p,
4904 bool threadgroup)
4905{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004906 mem_cgroup_clear_mc();
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004907}
4908
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004909static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4910 unsigned long addr, unsigned long end,
4911 struct mm_walk *walk)
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08004912{
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004913 int ret = 0;
4914 struct vm_area_struct *vma = walk->private;
4915 pte_t *pte;
4916 spinlock_t *ptl;
4917
Dave Hansen03319322011-03-22 16:32:56 -07004918 split_huge_page_pmd(walk->mm, pmd);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004919retry:
4920 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4921 for (; addr != end; addr += PAGE_SIZE) {
4922 pte_t ptent = *(pte++);
4923 union mc_target target;
4924 int type;
4925 struct page *page;
4926 struct page_cgroup *pc;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004927 swp_entry_t ent;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004928
4929 if (!mc.precharge)
4930 break;
4931
4932 type = is_target_pte_for_mc(vma, addr, ptent, &target);
4933 switch (type) {
4934 case MC_TARGET_PAGE:
4935 page = target.page;
4936 if (isolate_lru_page(page))
4937 goto put;
4938 pc = lookup_page_cgroup(page);
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004939 if (!mem_cgroup_move_account(pc,
KAMEZAWA Hiroyuki987eba62011-01-20 14:44:25 -08004940 mc.from, mc.to, false, PAGE_SIZE)) {
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004941 mc.precharge--;
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004942 /* we uncharge from mc.from later. */
4943 mc.moved_charge++;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004944 }
4945 putback_lru_page(page);
4946put: /* is_target_pte_for_mc() gets the page */
4947 put_page(page);
4948 break;
Daisuke Nishimura02491442010-03-10 15:22:17 -08004949 case MC_TARGET_SWAP:
4950 ent = target.ent;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004951 if (!mem_cgroup_move_swap_account(ent,
4952 mc.from, mc.to, false)) {
Daisuke Nishimura02491442010-03-10 15:22:17 -08004953 mc.precharge--;
Daisuke Nishimura483c30b2010-03-10 15:22:18 -08004954 /* we fixup refcnts and charges later. */
4955 mc.moved_swap++;
4956 }
Daisuke Nishimura02491442010-03-10 15:22:17 -08004957 break;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004958 default:
4959 break;
4960 }
4961 }
4962 pte_unmap_unlock(pte - 1, ptl);
4963 cond_resched();
4964
4965 if (addr != end) {
4966 /*
4967 * We have consumed all precharges we got in can_attach().
4968		 * We try charging one by one, but don't do any additional
4969		 * charges to mc.to if we have already failed to charge once
4970		 * in the attach() phase.
4971 */
Daisuke Nishimura854ffa82010-03-10 15:22:15 -08004972 ret = mem_cgroup_do_precharge(1);
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004973 if (!ret)
4974 goto retry;
4975 }
4976
4977 return ret;
4978}
4979
4980static void mem_cgroup_move_charge(struct mm_struct *mm)
4981{
4982 struct vm_area_struct *vma;
4983
4984 lru_add_drain_all();
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08004985retry:
4986 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
4987 /*
4988 * Someone who are holding the mmap_sem might be waiting in
4989 * waitq. So we cancel all extra charges, wake up all waiters,
4990 * and retry. Because we cancel precharges, we might not be able
4991 * to move enough charges, but moving charge is a best-effort
4992 * feature anyway, so it wouldn't be a big problem.
4993 */
4994 __mem_cgroup_clear_mc();
4995 cond_resched();
4996 goto retry;
4997 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08004998 for (vma = mm->mmap; vma; vma = vma->vm_next) {
4999 int ret;
5000 struct mm_walk mem_cgroup_move_charge_walk = {
5001 .pmd_entry = mem_cgroup_move_charge_pte_range,
5002 .mm = mm,
5003 .private = vma,
5004 };
5005 if (is_vm_hugetlb_page(vma))
5006 continue;
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005007 ret = walk_page_range(vma->vm_start, vma->vm_end,
5008 &mem_cgroup_move_charge_walk);
5009 if (ret)
5010 /*
5011 * This means we have consumed all precharges and failed to
5012 * make an additional charge. Just abandon here.
5013 */
5014 break;
5015 }
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005016 up_read(&mm->mmap_sem);
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005017}
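/*
 * A minimal sketch of the lock-avoidance pattern used above: instead of
 * sleeping on mmap_sem while holding precharges that a charging task may be
 * waiting for, the pending charges are cancelled (which wakes any waiters)
 * and the trylock is simply repeated.
 */
#if 0	/* illustrative sketch, never built */
	while (!down_read_trylock(&mm->mmap_sem)) {
		__mem_cgroup_clear_mc();	/* drop precharges, wake up waiters */
		cond_resched();			/* let the mmap_sem holder make progress */
	}
	/* ... walk the VMAs and move charges ... */
	up_read(&mm->mmap_sem);
#endif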
5018
Balbir Singh67e465a2008-02-07 00:13:54 -08005019static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5020 struct cgroup *cont,
5021 struct cgroup *old_cont,
Ben Blumbe367d02009-09-23 15:56:31 -07005022 struct task_struct *p,
5023 bool threadgroup)
Balbir Singh67e465a2008-02-07 00:13:54 -08005024{
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005025 struct mm_struct *mm;
5026
5027 if (!mc.to)
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005028 /* no need to move charge */
5029 return;
5030
Daisuke Nishimuradfe076b2011-01-13 15:47:41 -08005031 mm = get_task_mm(p);
5032 if (mm) {
5033 mem_cgroup_move_charge(mm);
5034 mmput(mm);
5035 }
Daisuke Nishimura4ffef5f2010-03-10 15:22:14 -08005036 mem_cgroup_clear_mc();
Balbir Singh67e465a2008-02-07 00:13:54 -08005037}
Daisuke Nishimura5cfb80a2010-03-23 13:35:11 -07005038#else /* !CONFIG_MMU */
5039static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
5040 struct cgroup *cgroup,
5041 struct task_struct *p,
5042 bool threadgroup)
5043{
5044 return 0;
5045}
5046static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
5047 struct cgroup *cgroup,
5048 struct task_struct *p,
5049 bool threadgroup)
5050{
5051}
5052static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5053 struct cgroup *cont,
5054 struct cgroup *old_cont,
5055 struct task_struct *p,
5056 bool threadgroup)
5057{
5058}
5059#endif
Balbir Singh67e465a2008-02-07 00:13:54 -08005060
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005061struct cgroup_subsys mem_cgroup_subsys = {
5062 .name = "memory",
5063 .subsys_id = mem_cgroup_subsys_id,
5064 .create = mem_cgroup_create,
KAMEZAWA Hiroyukidf878fb2008-02-07 00:14:28 -08005065 .pre_destroy = mem_cgroup_pre_destroy,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005066 .destroy = mem_cgroup_destroy,
5067 .populate = mem_cgroup_populate,
Daisuke Nishimura7dc74be2010-03-10 15:22:13 -08005068 .can_attach = mem_cgroup_can_attach,
5069 .cancel_attach = mem_cgroup_cancel_attach,
Balbir Singh67e465a2008-02-07 00:13:54 -08005070 .attach = mem_cgroup_move_task,
KAMEZAWA Hiroyuki6d12e2d2008-02-07 00:14:31 -08005071 .early_init = 0,
KAMEZAWA Hiroyuki04046e12009-04-02 16:57:33 -07005072 .use_id = 1,
Balbir Singh8cdea7c2008-02-07 00:13:50 -08005073};
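/*
 * Attach ordering, as wired up above (assuming the cgroup core behaviour of
 * this kernel version): .can_attach precharges the destination memcg,
 * .cancel_attach undoes that if the attach is aborted, and .attach
 * (mem_cgroup_move_task) performs the actual charge move once the task has
 * been migrated, then clears the move context.
 */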
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005074
5075#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
Michal Hockoa42c3902010-11-24 12:57:08 -08005076static int __init enable_swap_account(char *s)
5077{
5078 /* consider enabled if no parameter or 1 is given */
Michal Hockofceda1b2011-02-01 15:52:30 -08005079 if (!(*s) || !strcmp(s, "=1"))
Michal Hockoa42c3902010-11-24 12:57:08 -08005080 really_do_swap_account = 1;
Michal Hockofceda1b2011-02-01 15:52:30 -08005081 else if (!strcmp(s, "=0"))
Michal Hockoa42c3902010-11-24 12:57:08 -08005082 really_do_swap_account = 0;
5083 return 1;
5084}
5085__setup("swapaccount", enable_swap_account);
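/*
 * Boot-time usage: "swapaccount" or "swapaccount=1" enables swap accounting,
 * "swapaccount=0" disables it.  __setup() matches the bare "swapaccount"
 * prefix, so the handler receives the remainder of the option ("", "=1" or
 * "=0"), which is why the comparisons above include the '=' sign.
 */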
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005086
5087static int __init disable_swap_account(char *s)
5088{
Michal Hocko552b3722011-02-01 15:52:31 -08005089 printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
Michal Hockofceda1b2011-02-01 15:52:30 -08005090 enable_swap_account("=0");
KAMEZAWA Hiroyukic0777192009-01-07 18:07:57 -08005091 return 1;
5092}
5093__setup("noswapaccount", disable_swap_account);
5094#endif