/*
 *  linux/mm/vmscan.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95, Stephen Tweedie.
 *  kswapd added: 7.1.96  sct
 *  Removed kswapd_ctl limits, and swap out as many pages as needed
 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
 *  Multiqueue VM started 5.8.00, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int order;

	/* Scan (total_size >> priority) pages at once */
	int priority;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t *nodemask;
};

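/*
 * Example for ->priority above (annotation, not in the original source):
 * with DEF_PRIORITY == 12, the first reclaim pass scans total_size >> 12,
 * i.e. 1/4096th of the list; each unsuccessful pass lowers the priority
 * by one and so doubles the scan window, until priority 0 scans the
 * whole list.
 */
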
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}
#endif

static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	if (!mem_cgroup_disabled())
		return mem_cgroup_get_lru_size(lruvec, lru);

	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
}

/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	atomic_long_set(&shrinker->nr_in_batch, 0);
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

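/*
 * Illustrative sketch (not part of this file): a minimal user of the
 * ->shrink() interface aged by shrink_slab() below.  A query with
 * sc->nr_to_scan == 0 must report the object count; a non-zero count asks
 * the callback to free that many objects and report how many remain, or
 * return -1 if it cannot make progress (e.g. under GFP_NOFS).  The
 * my_cache_* helpers are assumed; my_cache_free() frees up to n objects
 * and returns the number remaining.
 *
 *	static int my_cache_shrink(struct shrinker *s,
 *				   struct shrink_control *sc)
 *	{
 *		if (!sc->nr_to_scan)
 *			return my_cache_count();
 *		return my_cache_free(sc->nr_to_scan);
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.shrink	= my_cache_shrink,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&my_shrinker);
 */
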
/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);

static inline int do_shrinker_shrink(struct shrinker *shrinker,
				     struct shrink_control *sc,
				     unsigned long nr_to_scan)
{
	sc->nr_to_scan = nr_to_scan;
	return (*shrinker->shrink)(shrinker, sc);
}

#define SHRINK_BATCH 128
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object. With this in mind we age equal
 * percentages of the lru and ageable caches. This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure
 * on slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for
 * balancing slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (nr_pages_scanned == 0)
		nr_pages_scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem)) {
		/* Assume we'll be able to shrink next time */
		ret = 1;
		goto out;
	}

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		long total_scan;
		long max_pass;
		int shrink_ret = 0;
		long nr;
		long new_nr;
		long batch_size = shrinker->batch ? shrinker->batch
						  : SHRINK_BATCH;

		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
		if (max_pass <= 0)
			continue;

		/*
		 * copy the current shrinker scan count into a local variable
		 * and zero it so that other concurrent shrinker invocations
		 * don't also do this scanning work.
		 */
		nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);

		total_scan = nr;
		delta = (4 * nr_pages_scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
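		/*
		 * Worked example with assumed numbers (not from the source):
		 * nr_pages_scanned = 1024, ->seeks = DEFAULT_SEEKS (2),
		 * max_pass = 10000 objects and lru_pages = 100000 gives
		 * delta = (4 * 1024 / 2) * 10000 / 100001 ~= 204, i.e. the
		 * cache is asked to age in proportion to the LRU scan rate.
		 */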
		total_scan += delta;
		if (total_scan < 0) {
			printk(KERN_ERR "shrink_slab: %pF negative objects to "
			       "delete nr=%ld\n",
			       shrinker->shrink, total_scan);
			total_scan = max_pass;
		}

		/*
		 * We need to avoid excessive windup on filesystem shrinkers
		 * due to large numbers of GFP_NOFS allocations causing the
		 * shrinkers to return -1 all the time. This results in a
		 * large nr being built up, so when a shrink that can do some
		 * work comes along it empties the entire cache due to
		 * nr >>> max_pass.  This is bad for sustaining a working set
		 * in memory.
		 *
		 * Hence only allow the shrinker to scan the entire cache when
		 * a large delta change is calculated directly.
		 */
		if (delta < max_pass / 4)
			total_scan = min(total_scan, max_pass / 2);

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimated number of
		 * freeable entries.
		 */
		if (total_scan > max_pass * 2)
			total_scan = max_pass * 2;

		trace_mm_shrink_slab_start(shrinker, shrink, nr,
					nr_pages_scanned, lru_pages,
					max_pass, delta, total_scan);

		while (total_scan >= batch_size) {
			int nr_before;

			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
			shrink_ret = do_shrinker_shrink(shrinker, shrink,
							batch_size);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, batch_size);
			total_scan -= batch_size;

			cond_resched();
		}

		/*
		 * move the unused scan count back into the shrinker in a
		 * manner that handles concurrent updates. If we exhausted the
		 * scan, there is no need to do an update.
		 */
		if (total_scan > 0)
			new_nr = atomic_long_add_return(total_scan,
					&shrinker->nr_in_batch);
		else
			new_nr = atomic_long_read(&shrinker->nr_in_batch);

		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
	}
	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return ret;
}

static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}

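/*
 * Example (annotation, not in the original source): an isolated, clean
 * pagecache page with buffer heads attached has page_count() == 3
 * (isolating caller + radix tree + buffers) and page_has_private() == 1,
 * so is_page_cache_freeable() above still computes 2 and reports it
 * freeable.
 */
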
static int may_write_to_queue(struct backing_dev_info *bdi,
			      struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a
 * subsequent fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping:
 * nothing prevents it from being freed up.  But we have a ref on the page
 * and once that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the
 * caller has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking, to prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}

/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non-racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;

		__delete_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page, NULL)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock or AS_UNEVICTABLE clearing
		 * (page is unlocked) make sure that if the other thread
		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_pages,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked() or shmem_lock().
		 */
		smp_mb();
	}

	/*
	 * page's status can change while we move it among lru. If an
	 * evictable page is on the unevictable list, it will never be
	 * freed. To avoid that, check again after we have added it to
	 * the list.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/* This means someone else dropped this page from LRU
		 * So, it will be freed or putback to LRU again. There is
		 * nothing to do here.
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

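/*
 * Summary of the policy implemented below (annotation, not in the
 * original source):
 *
 *	VM_LOCKED vma				-> PAGEREF_RECLAIM (culled
 *						   via try_to_unmap())
 *	referenced pte, swap-backed page	-> PAGEREF_ACTIVATE
 *	referenced pte + PG_referenced, or
 *	  several ptes, or executable file page	-> PAGEREF_ACTIVATE
 *	referenced pte, first use of file page	-> PAGEREF_KEEP (marked)
 *	no pte, PG_referenced file page		-> PAGEREF_RECLAIM_CLEAN
 *	otherwise				-> PAGEREF_RECLAIM
 */
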
static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageSwapBacked(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list. Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct zone *zone,
				      struct scan_control *sc,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_writeback)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_writeback = 0;

	cond_resched();

	while (!list_empty(page_list)) {
		enum page_references references;
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(page_zone(page) != zone);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page, NULL)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			nr_writeback++;
			unlock_page(page);
			goto keep;
		}

		references = page_check_references(page, sc);
		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, TTU_UNMAP)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			nr_dirty++;

			/*
			 * Only kswapd can writeback filesystem pages to
			 * avoid risk of stack overflow, but do not writeback
			 * unless under significant pressure.
			 */
			if (page_is_file_cache(page) &&
					(!current_is_kswapd() ||
					 sc->priority >= DEF_PRIORITY - 2)) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty.
				 */
				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto keep_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				nr_congested++;
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.
				 * Go ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is
		 * actually clean (all its buffers are clean).  This happens
		 * if the buffers were written out directly, with
		 * submit_bh().  ext3 will do this, as well as the blockdev
		 * mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers
		 * here and if that worked, and the page is no longer mapped
		 * into process address space (page_count == 1) it can be
		 * freed.  Otherwise, leave the page on the LRU so it is
		 * swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there a need to periodically free the page list? It
		 * would appear not, as the counts should be low.
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		putback_lru_page(page);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}

	/*
	 * Tag a zone as congested if all the dirty pages encountered were
	 * backed by a congested BDI. In this case, reclaimers should just
	 * back off and wait for congestion to clear because further reclaim
	 * will encounter the same problem.
	 */
	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
		zone_set_flag(zone, ZONE_CONGESTED);

	free_hot_cold_page_list(&free_pages, 1);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	*ret_nr_dirty += nr_dirty;
	*ret_nr_writeback += nr_writeback;
	return nr_reclaimed;
}

/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/* Do not give back unevictable pages for compaction */
	if (PageUnevictable(page))
		return ret;

	ret = -EBUSY;

	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 *
	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
	 * is used by reclaim when it cannot write to backing storage.
	 *
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants
	 * pages that can be migrated without blocking.
	 */
	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
		/* All the caller can do on PageWriteback is block */
		if (PageWriteback(page))
			return ret;

		if (PageDirty(page)) {
			struct address_space *mapping;

			/* ISOLATE_CLEAN means only clean pages */
			if (mode & ISOLATE_CLEAN)
				return ret;

			/*
			 * Only pages without mappings or that have a
			 * ->migratepage callback are possible to migrate
			 * without blocking
			 */
			mapping = page_mapping(page);
			if (mapping && !mapping->a_ops->migratepage)
				return ret;
		}
	}

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}

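/*
 * Example of how callers compose isolate_mode_t (see shrink_inactive_list()
 * below): reclaim that may not write back sets ISOLATE_CLEAN via
 * !sc->may_writepage, and reclaim that may not touch mapped pages sets
 * ISOLATE_UNMAPPED via !sc->may_unmap.
 */
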
/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @lruvec:	The LRU vector to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned.
 * @sc:		The scan_control struct for this reclaim session
 * @mode:	One of the LRU isolation modes
 * @lru:	LRU list id for isolating
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct lruvec *lruvec, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		isolate_mode_t mode, enum lru_list lru)
{
	struct list_head *src = &lruvec->lists[lru];
	unsigned long nr_taken = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		int nr_pages;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode)) {
		case 0:
			nr_pages = hpage_nr_pages(page);
			mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
			list_move(&page->lru, dst);
			nr_taken += nr_pages;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}
	}

	*nr_scanned = scan;
	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
				    nr_taken, mode, is_file_lru(lru));
	return nr_taken;
}

/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON(!page_count(page));

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;

		spin_lock_irq(&zone->lru_lock);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		if (PageLRU(page)) {
			int lru = page_lru(page);
			get_page(page);
			ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, lru);
			ret = 0;
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

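/*
 * Illustrative caller pattern (sketch, not from this file), satisfying
 * restriction (1) above.  The caller already holds a reference, e.g.
 * from get_user_pages():
 *
 *	if (!isolate_lru_page(page)) {
 *		... use the page off-LRU; isolation took its own ref ...
 *		putback_lru_page(page);		(drops the isolation ref)
 *	}
 *	put_page(page);				(drop the caller's own ref)
 */
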
/*
 * Are there way too many processes in the direct reclaim path already?
 */
static int too_many_isolated(struct zone *zone, int file,
			     struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!global_reclaim(sc))
		return 0;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	return isolated > inactive;
}

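/*
 * Illustration with assumed numbers (not from the source): if a zone's
 * NR_INACTIVE_FILE is 1000 pages and parallel direct reclaimers have
 * already isolated more than 1000 file pages, new arrivals sleep in
 * congestion_wait() (see shrink_inactive_list() below) until the
 * isolated pages are put back or reclaimed.
 */
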
Mel Gorman66635622010-08-09 17:19:30 -07001139static noinline_for_stack void
Hugh Dickins75b00af2012-05-29 15:07:09 -07001140putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
Mel Gorman66635622010-08-09 17:19:30 -07001141{
Konstantin Khlebnikov27ac81d2012-05-29 15:07:00 -07001142 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1143 struct zone *zone = lruvec_zone(lruvec);
Hugh Dickins3f797682012-01-12 17:20:07 -08001144 LIST_HEAD(pages_to_free);
Mel Gorman66635622010-08-09 17:19:30 -07001145
Mel Gorman66635622010-08-09 17:19:30 -07001146 /*
1147 * Put back any unfreeable pages.
1148 */
Mel Gorman66635622010-08-09 17:19:30 -07001149 while (!list_empty(page_list)) {
Hugh Dickins3f797682012-01-12 17:20:07 -08001150 struct page *page = lru_to_page(page_list);
Mel Gorman66635622010-08-09 17:19:30 -07001151 int lru;
Hugh Dickins3f797682012-01-12 17:20:07 -08001152
Mel Gorman66635622010-08-09 17:19:30 -07001153 VM_BUG_ON(PageLRU(page));
1154 list_del(&page->lru);
1155 if (unlikely(!page_evictable(page, NULL))) {
1156 spin_unlock_irq(&zone->lru_lock);
1157 putback_lru_page(page);
1158 spin_lock_irq(&zone->lru_lock);
1159 continue;
1160 }
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001161
1162 lruvec = mem_cgroup_page_lruvec(page, zone);
1163
Linus Torvalds7a608572011-01-17 14:42:19 -08001164 SetPageLRU(page);
Mel Gorman66635622010-08-09 17:19:30 -07001165 lru = page_lru(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001166 add_page_to_lru_list(page, lruvec, lru);
1167
Mel Gorman66635622010-08-09 17:19:30 -07001168 if (is_active_lru(lru)) {
1169 int file = is_file_lru(lru);
Rik van Riel9992af12011-01-13 15:47:13 -08001170 int numpages = hpage_nr_pages(page);
1171 reclaim_stat->recent_rotated[file] += numpages;
Mel Gorman66635622010-08-09 17:19:30 -07001172 }
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001173 if (put_page_testzero(page)) {
1174 __ClearPageLRU(page);
1175 __ClearPageActive(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001176 del_page_from_lru_list(page, lruvec, lru);
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001177
1178 if (unlikely(PageCompound(page))) {
1179 spin_unlock_irq(&zone->lru_lock);
1180 (*get_compound_page_dtor(page))(page);
1181 spin_lock_irq(&zone->lru_lock);
1182 } else
1183 list_add(&page->lru, &pages_to_free);
Mel Gorman66635622010-08-09 17:19:30 -07001184 }
1185 }
Mel Gorman66635622010-08-09 17:19:30 -07001186
Hugh Dickins3f797682012-01-12 17:20:07 -08001187 /*
1188 * To save our caller's stack, now use input list for pages to free.
1189 */
1190 list_splice(&pages_to_free, page_list);
Mel Gorman66635622010-08-09 17:19:30 -07001191}
1192
1193/*
Andrew Morton1742f192006-03-22 00:08:21 -08001194 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1195 * of reclaimed pages
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196 */
Mel Gorman66635622010-08-09 17:19:30 -07001197static noinline_for_stack unsigned long
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001198shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001199 struct scan_control *sc, enum lru_list lru)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200{
1201 LIST_HEAD(page_list);
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001202 unsigned long nr_scanned;
Andrew Morton05ff5132006-03-22 00:08:20 -08001203 unsigned long nr_reclaimed = 0;
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001204 unsigned long nr_taken;
Mel Gorman92df3a72011-10-31 17:07:56 -07001205 unsigned long nr_dirty = 0;
1206 unsigned long nr_writeback = 0;
Konstantin Khlebnikovf3fd4a62012-05-29 15:06:54 -07001207 isolate_mode_t isolate_mode = 0;
Konstantin Khlebnikov3cb99452012-05-29 15:06:53 -07001208 int file = is_file_lru(lru);
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001209 struct zone *zone = lruvec_zone(lruvec);
1210 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
KOSAKI Motohiro78dc5832009-06-16 15:31:40 -07001211
Rik van Riel35cd7812009-09-21 17:01:38 -07001212 while (unlikely(too_many_isolated(zone, file, sc))) {
KOSAKI Motohiro58355c72009-10-26 16:49:35 -07001213 congestion_wait(BLK_RW_ASYNC, HZ/10);
Rik van Riel35cd7812009-09-21 17:01:38 -07001214
1215 /* We are about to die and free our memory. Return now. */
1216 if (fatal_signal_pending(current))
1217 return SWAP_CLUSTER_MAX;
1218 }
1219
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 lru_add_drain();
Minchan Kimf80c0672011-10-31 17:06:55 -07001221
1222 if (!sc->may_unmap)
Hillf Danton61317282012-03-21 16:33:48 -07001223 isolate_mode |= ISOLATE_UNMAPPED;
Minchan Kimf80c0672011-10-31 17:06:55 -07001224 if (!sc->may_writepage)
Hillf Danton61317282012-03-21 16:33:48 -07001225 isolate_mode |= ISOLATE_CLEAN;
Minchan Kimf80c0672011-10-31 17:06:55 -07001226
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227 spin_lock_irq(&zone->lru_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228
Konstantin Khlebnikov5dc35972012-05-29 15:06:58 -07001229 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1230 &nr_scanned, sc, isolate_mode, lru);
Konstantin Khlebnikov95d918f2012-05-29 15:06:59 -07001231
1232 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1233 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1234
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001235 if (global_reclaim(sc)) {
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001236 zone->pages_scanned += nr_scanned;
KOSAKI Motohirob35ea172009-09-21 17:01:36 -07001237 if (current_is_kswapd())
Hugh Dickins75b00af2012-05-29 15:07:09 -07001238 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001239 else
Hugh Dickins75b00af2012-05-29 15:07:09 -07001240 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001241 }
Hillf Dantond563c052012-03-21 16:34:02 -07001242 spin_unlock_irq(&zone->lru_lock);
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07001243
Hillf Dantond563c052012-03-21 16:34:02 -07001244 if (nr_taken == 0)
Mel Gorman66635622010-08-09 17:19:30 -07001245 return 0;
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001246
Konstantin Khlebnikov6a18adb2012-05-29 15:06:59 -07001247 nr_reclaimed = shrink_page_list(&page_list, zone, sc,
Mel Gorman92df3a72011-10-31 17:07:56 -07001248 &nr_dirty, &nr_writeback);
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001249
Hugh Dickins3f797682012-01-12 17:20:07 -08001250 spin_lock_irq(&zone->lru_lock);
1251
Konstantin Khlebnikov95d918f2012-05-29 15:06:59 -07001252 reclaim_stat->recent_scanned[file] += nr_taken;
Hillf Dantond563c052012-03-21 16:34:02 -07001253
Ying Han904249a2012-04-25 16:01:48 -07001254 if (global_reclaim(sc)) {
1255 if (current_is_kswapd())
1256 __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
1257 nr_reclaimed);
1258 else
1259 __count_zone_vm_events(PGSTEAL_DIRECT, zone,
1260 nr_reclaimed);
1261 }
KOSAKI Motohiroe247dbc2010-08-09 17:19:28 -07001262
Konstantin Khlebnikov27ac81d2012-05-29 15:07:00 -07001263 putback_inactive_pages(lruvec, &page_list);
Hugh Dickins3f797682012-01-12 17:20:07 -08001264
Konstantin Khlebnikov95d918f2012-05-29 15:06:59 -07001265 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
Hugh Dickins3f797682012-01-12 17:20:07 -08001266
1267 spin_unlock_irq(&zone->lru_lock);
1268
1269 free_hot_cold_page_list(&page_list, 1);
Mel Gormane11da5b2010-10-26 14:21:40 -07001270
Mel Gorman92df3a72011-10-31 17:07:56 -07001271 /*
1272 * If reclaim is isolating dirty pages under writeback, it implies
1273 * that the long-lived page allocation rate is exceeding the page
1274 * laundering rate. Either the global limits are not being effective
1275 * at throttling processes due to the page distribution throughout
1276 * zones or there is heavy usage of a slow backing device. The
1277 * only option is to throttle from reclaim context which is not ideal
1278 * as there is no guarantee the dirtying process is throttled in the
1279 * same way balance_dirty_pages() manages.
1280 *
1281 * This scales the number of dirty pages that must be under writeback
1282 * before throttling depending on priority. It is a simple backoff
1283	 * function that has the most effect in the range DEF_PRIORITY to
1284	 * DEF_PRIORITY-2, the range in which reclaim is considered to be in
1285	 * trouble (see the sketch after this function).
1286 *
1287 * DEF_PRIORITY 100% isolated pages must be PageWriteback to throttle
1288 * DEF_PRIORITY-1 50% must be PageWriteback
1289 * DEF_PRIORITY-2 25% must be PageWriteback, kswapd in trouble
1290 * ...
1291 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
1292 * isolated page is PageWriteback
1293 */
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001294 if (nr_writeback && nr_writeback >=
1295 (nr_taken >> (DEF_PRIORITY - sc->priority)))
Mel Gorman92df3a72011-10-31 17:07:56 -07001296 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
1297
Mel Gormane11da5b2010-10-26 14:21:40 -07001298 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1299 zone_idx(zone),
1300 nr_scanned, nr_reclaimed,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001301 sc->priority,
Mel Gorman23b9da52012-05-29 15:06:20 -07001302 trace_shrink_flags(file));
Andrew Morton05ff5132006-03-22 00:08:20 -08001303 return nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304}
1305
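/*
 * Illustrative userspace sketch of the writeback backoff at the end of
 * shrink_inactive_list() above; not kernel code.  It assumes
 * DEF_PRIORITY == 12 and a SWAP_CLUSTER_MAX-sized batch of 32 isolated
 * pages, matching this kernel's constants.
 */
#if 0
#include <stdio.h>

#define DEF_PRIORITY 12

/* Pages that must be PageWriteback before reclaim throttles itself. */
static unsigned long writeback_throttle_threshold(unsigned long nr_taken,
						  int priority)
{
	return nr_taken >> (DEF_PRIORITY - priority);
}

int main(void)
{
	int priority;

	for (priority = DEF_PRIORITY; priority >= DEF_PRIORITY - 6; priority--)
		printf("priority %2d: throttle at %2lu writeback pages\n",
		       priority, writeback_throttle_threshold(32, priority));
	/* Prints 32, 16, 8, 4, 2, 1, 0: by priority 6, any writeback throttles. */
	return 0;
}
#endif
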
Martin Bligh3bb1a8522006-10-28 10:38:24 -07001306/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 * This moves pages from the active list to the inactive list.
1308 *
1309 * We move them the other way if the page is referenced by one or more
1310 * processes, from rmap.
1311 *
1312 * If the pages are mostly unmapped, the processing is fast and it is
1313 * appropriate to hold zone->lru_lock across the whole operation. But if
1314 * the pages are mapped, the processing is slow (page_referenced()) so we
1315 * should drop zone->lru_lock around each page. It's impossible to balance
1316 * this, so instead we remove the pages from the LRU while processing them.
1317 * It is safe to rely on PG_active against the non-LRU pages in here because
1318 * nobody will play with that bit on a non-LRU page.
1319 *
1320 * The downside is that we have to touch page->_count against each page.
1321 * But we had to alter page->flags anyway.
1322 */
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001323
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001324static void move_active_pages_to_lru(struct lruvec *lruvec,
Wu Fengguang3eb41402009-06-16 15:33:13 -07001325 struct list_head *list,
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001326 struct list_head *pages_to_free,
Wu Fengguang3eb41402009-06-16 15:33:13 -07001327 enum lru_list lru)
1328{
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001329 struct zone *zone = lruvec_zone(lruvec);
Wu Fengguang3eb41402009-06-16 15:33:13 -07001330 unsigned long pgmoved = 0;
Wu Fengguang3eb41402009-06-16 15:33:13 -07001331 struct page *page;
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001332 int nr_pages;
Wu Fengguang3eb41402009-06-16 15:33:13 -07001333
Wu Fengguang3eb41402009-06-16 15:33:13 -07001334 while (!list_empty(list)) {
1335 page = lru_to_page(list);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001336 lruvec = mem_cgroup_page_lruvec(page, zone);
Wu Fengguang3eb41402009-06-16 15:33:13 -07001337
1338 VM_BUG_ON(PageLRU(page));
1339 SetPageLRU(page);
1340
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001341 nr_pages = hpage_nr_pages(page);
1342 mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
Johannes Weiner925b7672012-01-12 17:18:15 -08001343 list_move(&page->lru, &lruvec->lists[lru]);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001344 pgmoved += nr_pages;
Wu Fengguang3eb41402009-06-16 15:33:13 -07001345
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001346 if (put_page_testzero(page)) {
1347 __ClearPageLRU(page);
1348 __ClearPageActive(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001349 del_page_from_lru_list(page, lruvec, lru);
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001350
1351 if (unlikely(PageCompound(page))) {
1352 spin_unlock_irq(&zone->lru_lock);
1353 (*get_compound_page_dtor(page))(page);
1354 spin_lock_irq(&zone->lru_lock);
1355 } else
1356 list_add(&page->lru, pages_to_free);
Wu Fengguang3eb41402009-06-16 15:33:13 -07001357 }
1358 }
1359 __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
1360 if (!is_active_lru(lru))
1361 __count_vm_events(PGDEACTIVATE, pgmoved);
1362}
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001363
Hugh Dickinsf6260122012-01-12 17:20:06 -08001364static void shrink_active_list(unsigned long nr_to_scan,
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001365 struct lruvec *lruvec,
Johannes Weinerf16015f2012-01-12 17:17:52 -08001366 struct scan_control *sc,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001367 enum lru_list lru)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368{
KOSAKI Motohiro44c241f2009-09-21 17:01:35 -07001369 unsigned long nr_taken;
Hugh Dickinsf6260122012-01-12 17:20:06 -08001370 unsigned long nr_scanned;
Wu Fengguang6fe6b7e2009-06-16 15:33:05 -07001371 unsigned long vm_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 LIST_HEAD(l_hold); /* The pages which were snipped off */
Wu Fengguang8cab4752009-06-16 15:33:12 -07001373 LIST_HEAD(l_active);
Christoph Lameterb69408e2008-10-18 20:26:14 -07001374 LIST_HEAD(l_inactive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 struct page *page;
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001376 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
KOSAKI Motohiro44c241f2009-09-21 17:01:35 -07001377 unsigned long nr_rotated = 0;
Konstantin Khlebnikovf3fd4a62012-05-29 15:06:54 -07001378 isolate_mode_t isolate_mode = 0;
Konstantin Khlebnikov3cb99452012-05-29 15:06:53 -07001379 int file = is_file_lru(lru);
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001380 struct zone *zone = lruvec_zone(lruvec);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381
1382 lru_add_drain();
Minchan Kimf80c0672011-10-31 17:06:55 -07001383
1384 if (!sc->may_unmap)
Hillf Danton61317282012-03-21 16:33:48 -07001385 isolate_mode |= ISOLATE_UNMAPPED;
Minchan Kimf80c0672011-10-31 17:06:55 -07001386 if (!sc->may_writepage)
Hillf Danton61317282012-03-21 16:33:48 -07001387 isolate_mode |= ISOLATE_CLEAN;
Minchan Kimf80c0672011-10-31 17:06:55 -07001388
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 spin_lock_irq(&zone->lru_lock);
Johannes Weiner925b7672012-01-12 17:18:15 -08001390
Konstantin Khlebnikov5dc35972012-05-29 15:06:58 -07001391 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1392 &nr_scanned, sc, isolate_mode, lru);
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001393 if (global_reclaim(sc))
Hugh Dickinsf6260122012-01-12 17:20:06 -08001394 zone->pages_scanned += nr_scanned;
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001395
Johannes Weinerb7c46d12009-09-21 17:02:56 -07001396 reclaim_stat->recent_scanned[file] += nr_taken;
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001397
Hugh Dickinsf6260122012-01-12 17:20:06 -08001398 __count_zone_vm_events(PGREFILL, zone, nr_scanned);
Konstantin Khlebnikov3cb99452012-05-29 15:06:53 -07001399 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07001400 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 spin_unlock_irq(&zone->lru_lock);
1402
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403 while (!list_empty(&l_hold)) {
1404 cond_resched();
1405 page = lru_to_page(&l_hold);
1406 list_del(&page->lru);
Rik van Riel7e9cd482008-10-18 20:26:35 -07001407
Lee Schermerhorn894bc312008-10-18 20:26:39 -07001408 if (unlikely(!page_evictable(page, NULL))) {
1409 putback_lru_page(page);
1410 continue;
1411 }
1412
Mel Gormancc715d92012-03-21 16:34:00 -07001413 if (unlikely(buffer_heads_over_limit)) {
1414 if (page_has_private(page) && trylock_page(page)) {
1415 if (page_has_private(page))
1416 try_to_release_page(page, 0);
1417 unlock_page(page);
1418 }
1419 }
1420
Johannes Weinerc3ac9a82012-05-29 15:06:25 -07001421 if (page_referenced(page, 0, sc->target_mem_cgroup,
1422 &vm_flags)) {
Rik van Riel9992af12011-01-13 15:47:13 -08001423 nr_rotated += hpage_nr_pages(page);
Wu Fengguang8cab4752009-06-16 15:33:12 -07001424 /*
1425 * Identify referenced, file-backed active pages and
1426 * give them one more trip around the active list. So
1427 * that executable code get better chances to stay in
1428 * memory under moderate memory pressure. Anon pages
1429 * are not likely to be evicted by use-once streaming
1430 * IO, plus JVM can create lots of anon VM_EXEC pages,
1431 * so we ignore them here.
1432 */
Wu Fengguang41e20982009-10-26 16:49:53 -07001433 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
Wu Fengguang8cab4752009-06-16 15:33:12 -07001434 list_add(&page->lru, &l_active);
1435 continue;
1436 }
1437 }
Rik van Riel7e9cd482008-10-18 20:26:35 -07001438
KOSAKI Motohiro5205e562009-09-21 17:01:44 -07001439 ClearPageActive(page); /* we are de-activating */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 list_add(&page->lru, &l_inactive);
1441 }
1442
Andrew Mortonb5557492009-01-06 14:40:13 -08001443 /*
Wu Fengguang8cab4752009-06-16 15:33:12 -07001444 * Move pages back to the lru list.
Andrew Mortonb5557492009-01-06 14:40:13 -08001445 */
Johannes Weiner2a1dc502008-12-01 03:00:35 +01001446 spin_lock_irq(&zone->lru_lock);
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001447 /*
Wu Fengguang8cab4752009-06-16 15:33:12 -07001448 * Count referenced pages from currently used mappings as rotated,
1449 * even though only some of them are actually re-activated. This
1450 * helps balance scan pressure between file and anonymous pages in
1451	 * get_scan_count().
Rik van Riel7e9cd482008-10-18 20:26:35 -07001452 */
Johannes Weinerb7c46d12009-09-21 17:02:56 -07001453 reclaim_stat->recent_rotated[file] += nr_rotated;
Rik van Riel556adec2008-10-18 20:26:34 -07001454
Hugh Dickinsfa9add62012-05-29 15:07:09 -07001455 move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1456 move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
KOSAKI Motohiroa7312862009-09-21 17:01:37 -07001457 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
Christoph Lameterf8891e52006-06-30 01:55:45 -07001458 spin_unlock_irq(&zone->lru_lock);
Hugh Dickins2bcf8872012-01-12 17:19:56 -08001459
1460 free_hot_cold_page_list(&l_hold, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461}
1462
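/*
 * Illustrative sketch, not kernel code: the per-page decision made in the
 * l_hold loop above, flattened into a predicate.  The three flags are
 * stand-ins for page_referenced(), page_is_file_cache() and the VM_EXEC
 * test on vm_flags.
 */
#if 0
#include <stdbool.h>

struct page_state {
	bool referenced;	/* rmap found a recent reference */
	bool file_backed;	/* page cache, not anonymous */
	bool vm_exec;		/* mapped executable somewhere */
};

/* true: one more trip around the active list; false: deactivate. */
static bool keep_active(const struct page_state *p)
{
	return p->referenced && p->vm_exec && p->file_backed;
}
#endif
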
Minchan Kim74e3f3c2010-10-26 14:21:31 -07001463#ifdef CONFIG_SWAP
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001464static int inactive_anon_is_low_global(struct zone *zone)
KOSAKI Motohirof89eb902009-01-07 18:08:14 -08001465{
1466 unsigned long active, inactive;
1467
1468 active = zone_page_state(zone, NR_ACTIVE_ANON);
1469 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1470
1471 if (inactive * zone->inactive_ratio < active)
1472 return 1;
1473
1474 return 0;
1475}
1476
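/*
 * Illustrative userspace sketch of the check above; not kernel code.  The
 * ratio value is an assumption: zone->inactive_ratio is derived at boot
 * from the zone size (roughly the integer square root of 10 * size-in-GB,
 * with a floor of 1, going by mm/page_alloc.c).
 */
#if 0
#include <stdio.h>

static int inactive_anon_low(unsigned long active, unsigned long inactive,
			     unsigned long inactive_ratio)
{
	return inactive * inactive_ratio < active;
}

int main(void)
{
	unsigned long ratio = 3;	/* roughly a 1GB zone */

	/* 600k active vs 150k inactive: 450k < 600k, so deactivate some. */
	printf("%d\n", inactive_anon_low(600000, 150000, ratio));	/* 1 */
	/* 300k active vs 150k inactive: 450k >= 300k, balanced enough. */
	printf("%d\n", inactive_anon_low(300000, 150000, ratio));	/* 0 */
	return 0;
}
#endif
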
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001477/**
1478 * inactive_anon_is_low - check if anonymous pages need to be deactivated
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001479 * @lruvec: LRU vector to check
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001480 *
1481 * Returns true if the lruvec does not have enough inactive anon pages,
1482 * meaning some active anon pages need to be deactivated.
1483 */
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001484static int inactive_anon_is_low(struct lruvec *lruvec)
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001485{
Minchan Kim74e3f3c2010-10-26 14:21:31 -07001486 /*
1487 * If we don't have swap space, anonymous page deactivation
1488 * is pointless.
1489 */
1490 if (!total_swap_pages)
1491 return 0;
1492
Hugh Dickinsc3c787e2012-05-29 15:06:52 -07001493 if (!mem_cgroup_disabled())
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001494 return mem_cgroup_inactive_anon_is_low(lruvec);
Johannes Weinerf16015f2012-01-12 17:17:52 -08001495
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001496 return inactive_anon_is_low_global(lruvec_zone(lruvec));
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001497}
Minchan Kim74e3f3c2010-10-26 14:21:31 -07001498#else
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001499static inline int inactive_anon_is_low(struct lruvec *lruvec)
Minchan Kim74e3f3c2010-10-26 14:21:31 -07001500{
1501 return 0;
1502}
1503#endif
KOSAKI Motohiro14797e22009-01-07 18:08:18 -08001504
Rik van Riel56e49d22009-06-16 15:32:28 -07001505static int inactive_file_is_low_global(struct zone *zone)
1506{
1507 unsigned long active, inactive;
1508
1509 active = zone_page_state(zone, NR_ACTIVE_FILE);
1510 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1511
1512 return (active > inactive);
1513}
1514
1515/**
1516 * inactive_file_is_low - check if file pages need to be deactivated
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001517 * @lruvec: LRU vector to check
Rik van Riel56e49d22009-06-16 15:32:28 -07001518 *
1519 * When the system is doing streaming IO, memory pressure here
1520 * ensures that active file pages get deactivated, until more
1521 * than half of the file pages are on the inactive list.
1522 *
1523 * Once we get to that situation, protect the system's working
1524 * set from being evicted by disabling active file page aging.
1525 *
1526 * This uses a different ratio than the anonymous pages, because
1527 * the page cache uses a use-once replacement algorithm.
1528 */
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001529static int inactive_file_is_low(struct lruvec *lruvec)
Rik van Riel56e49d22009-06-16 15:32:28 -07001530{
Hugh Dickinsc3c787e2012-05-29 15:06:52 -07001531 if (!mem_cgroup_disabled())
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001532 return mem_cgroup_inactive_file_is_low(lruvec);
Rik van Riel56e49d22009-06-16 15:32:28 -07001533
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001534 return inactive_file_is_low_global(lruvec_zone(lruvec));
Rik van Riel56e49d22009-06-16 15:32:28 -07001535}
1536
Hugh Dickins75b00af2012-05-29 15:07:09 -07001537static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
Rik van Rielb39415b2009-12-14 17:59:48 -08001538{
Hugh Dickins75b00af2012-05-29 15:07:09 -07001539 if (is_file_lru(lru))
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001540 return inactive_file_is_low(lruvec);
Rik van Rielb39415b2009-12-14 17:59:48 -08001541 else
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001542 return inactive_anon_is_low(lruvec);
Rik van Rielb39415b2009-12-14 17:59:48 -08001543}
1544
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001545static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001546 struct lruvec *lruvec, struct scan_control *sc)
Christoph Lameterb69408e2008-10-18 20:26:14 -07001547{
Rik van Rielb39415b2009-12-14 17:59:48 -08001548 if (is_active_lru(lru)) {
Hugh Dickins75b00af2012-05-29 15:07:09 -07001549 if (inactive_list_is_low(lruvec, lru))
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001550 shrink_active_list(nr_to_scan, lruvec, sc, lru);
Rik van Riel556adec2008-10-18 20:26:34 -07001551 return 0;
1552 }
1553
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001554 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
Christoph Lameterb69408e2008-10-18 20:26:14 -07001555}
1556
Konstantin Khlebnikov3d58ab52012-05-29 15:06:57 -07001557static int vmscan_swappiness(struct scan_control *sc)
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001558{
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001559 if (global_reclaim(sc))
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001560 return vm_swappiness;
Konstantin Khlebnikov3d58ab52012-05-29 15:06:57 -07001561 return mem_cgroup_swappiness(sc->target_mem_cgroup);
KAMEZAWA Hiroyuki1f4c0252011-07-26 16:08:21 -07001562}
1563
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564/*
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001565 * Determine how aggressively the anon and file LRU lists should be
1566 * scanned. The relative value of each set of LRU lists is determined
1567 * by looking at the fraction of the pages scanned we did rotate back
1568 * onto the active list instead of evict.
1569 *
Wanpeng Libe7bd592012-06-14 20:41:02 +08001570 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
1571 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001572 */
Konstantin Khlebnikov90126372012-05-29 15:07:01 -07001573static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001574 unsigned long *nr)
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001575{
1576 unsigned long anon, file, free;
1577 unsigned long anon_prio, file_prio;
1578 unsigned long ap, fp;
Konstantin Khlebnikov90126372012-05-29 15:07:01 -07001579 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
Shaohua Li76a33fc2010-05-24 14:32:36 -07001580 u64 fraction[2], denominator;
Hugh Dickins41113042012-01-12 17:20:01 -08001581 enum lru_list lru;
Shaohua Li76a33fc2010-05-24 14:32:36 -07001582 int noswap = 0;
Johannes Weinera4d3e9e2011-09-14 16:21:52 -07001583 bool force_scan = false;
Konstantin Khlebnikov90126372012-05-29 15:07:01 -07001584 struct zone *zone = lruvec_zone(lruvec);
KAMEZAWA Hiroyuki246e87a2011-05-26 16:25:34 -07001585
Johannes Weinerf11c0ca2011-10-31 17:07:27 -07001586 /*
1587	 * If the zone or memcg is small, nr[lru] can be 0. This
1588 * results in no scanning on this priority and a potential
1589 * priority drop. Global direct reclaim can go to the next
1590 * zone and tends to have no problems. Global kswapd is for
1591 * zone balancing and it needs to scan a minimum amount. When
1592 * reclaiming for a memcg, a priority drop can cause high
1593 * latencies, so it's better to scan a minimum amount there as
1594 * well.
1595 */
Konstantin Khlebnikov90126372012-05-29 15:07:01 -07001596 if (current_is_kswapd() && zone->all_unreclaimable)
Johannes Weinera4d3e9e2011-09-14 16:21:52 -07001597 force_scan = true;
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001598 if (!global_reclaim(sc))
Johannes Weinera4d3e9e2011-09-14 16:21:52 -07001599 force_scan = true;
Shaohua Li76a33fc2010-05-24 14:32:36 -07001600
1601 /* If we have no swap space, do not bother scanning anon pages. */
1602 if (!sc->may_swap || (nr_swap_pages <= 0)) {
1603 noswap = 1;
1604 fraction[0] = 0;
1605 fraction[1] = 1;
1606 denominator = 1;
1607 goto out;
1608 }
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001609
Hugh Dickins4d7dcca2012-05-29 15:07:08 -07001610 anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
1611 get_lru_size(lruvec, LRU_INACTIVE_ANON);
1612 file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
1613 get_lru_size(lruvec, LRU_INACTIVE_FILE);
Johannes Weinera4d3e9e2011-09-14 16:21:52 -07001614
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001615 if (global_reclaim(sc)) {
Konstantin Khlebnikov90126372012-05-29 15:07:01 -07001616 free = zone_page_state(zone, NR_FREE_PAGES);
KOSAKI Motohiroeeee9a82009-01-07 18:08:17 -08001617 /* If we have very few page cache pages,
1618 force-scan anon pages. */
Konstantin Khlebnikov90126372012-05-29 15:07:01 -07001619 if (unlikely(file + free <= high_wmark_pages(zone))) {
Shaohua Li76a33fc2010-05-24 14:32:36 -07001620 fraction[0] = 1;
1621 fraction[1] = 0;
1622 denominator = 1;
1623 goto out;
KOSAKI Motohiroeeee9a82009-01-07 18:08:17 -08001624 }
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001625 }
1626
1627 /*
KOSAKI Motohiro58c37f62010-08-09 17:19:51 -07001628 * With swappiness at 100, anonymous and file have the same priority.
1629 * This scanning priority is essentially the inverse of IO cost.
1630 */
Konstantin Khlebnikov3d58ab52012-05-29 15:06:57 -07001631 anon_prio = vmscan_swappiness(sc);
Hugh Dickins75b00af2012-05-29 15:07:09 -07001632 file_prio = 200 - anon_prio;
KOSAKI Motohiro58c37f62010-08-09 17:19:51 -07001633
1634 /*
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001635 * OK, so we have swap space and a fair amount of page cache
1636 * pages. We use the recently rotated / recently scanned
1637 * ratios to determine how valuable each cache is.
1638 *
1639 * Because workloads change over time (and to avoid overflow)
1640 * we keep these statistics as a floating average, which ends
1641 * up weighing recent references more than old ones.
1642 *
1643 * anon in [0], file in [1]
1644 */
Konstantin Khlebnikov90126372012-05-29 15:07:01 -07001645 spin_lock_irq(&zone->lru_lock);
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001646 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001647 reclaim_stat->recent_scanned[0] /= 2;
1648 reclaim_stat->recent_rotated[0] /= 2;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001649 }
1650
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001651 if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001652 reclaim_stat->recent_scanned[1] /= 2;
1653 reclaim_stat->recent_rotated[1] /= 2;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001654 }
1655
1656 /*
Rik van Riel00d80892008-11-19 15:36:44 -08001657 * The amount of pressure on anon vs file pages is inversely
1658 * proportional to the fraction of recently scanned pages on
1659 * each list that were recently referenced and in active use.
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001660 */
Satoru Moriyafe350042012-05-29 15:06:47 -07001661 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001662 ap /= reclaim_stat->recent_rotated[0] + 1;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001663
Satoru Moriyafe350042012-05-29 15:06:47 -07001664 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
KOSAKI Motohiro6e901572009-01-07 18:08:15 -08001665 fp /= reclaim_stat->recent_rotated[1] + 1;
Konstantin Khlebnikov90126372012-05-29 15:07:01 -07001666 spin_unlock_irq(&zone->lru_lock);
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001667
Shaohua Li76a33fc2010-05-24 14:32:36 -07001668 fraction[0] = ap;
1669 fraction[1] = fp;
1670 denominator = ap + fp + 1;
1671out:
Hugh Dickins41113042012-01-12 17:20:01 -08001672 for_each_evictable_lru(lru) {
1673 int file = is_file_lru(lru);
Shaohua Li76a33fc2010-05-24 14:32:36 -07001674 unsigned long scan;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001675
Hugh Dickins4d7dcca2012-05-29 15:07:08 -07001676 scan = get_lru_size(lruvec, lru);
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001677 if (sc->priority || noswap || !vmscan_swappiness(sc)) {
1678 scan >>= sc->priority;
Johannes Weinerf11c0ca2011-10-31 17:07:27 -07001679 if (!scan && force_scan)
1680 scan = SWAP_CLUSTER_MAX;
Shaohua Li76a33fc2010-05-24 14:32:36 -07001681 scan = div64_u64(scan * fraction[file], denominator);
1682 }
Hugh Dickins41113042012-01-12 17:20:01 -08001683 nr[lru] = scan;
Shaohua Li76a33fc2010-05-24 14:32:36 -07001684 }
Wu Fengguang6e08a362009-06-16 15:32:29 -07001685}
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001686
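/*
 * Illustrative userspace sketch of the ap/fp arithmetic in get_scan_count()
 * above; not kernel code.  The statistics are invented, and DEF_PRIORITY is
 * assumed to be 12 as in this kernel.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct reclaim_stat {
	unsigned long recent_scanned[2];	/* [0] = anon, [1] = file */
	unsigned long recent_rotated[2];
};

/* Scan target for one LRU list of lru_size pages at the given priority. */
static uint64_t scan_target(unsigned long lru_size, int file, int priority,
			    int swappiness, const struct reclaim_stat *rs)
{
	uint64_t anon_prio = swappiness;
	uint64_t file_prio = 200 - anon_prio;
	uint64_t ap, fp, fraction[2];

	ap = anon_prio * (rs->recent_scanned[0] + 1);
	ap /= rs->recent_rotated[0] + 1;
	fp = file_prio * (rs->recent_scanned[1] + 1);
	fp /= rs->recent_rotated[1] + 1;
	fraction[0] = ap;
	fraction[1] = fp;

	return (lru_size >> priority) * fraction[file] / (ap + fp + 1);
}

int main(void)
{
	/* Most recently rotated pages were anon: anon looks expensive. */
	struct reclaim_stat rs = {
		.recent_scanned = { 1000, 1000 },
		.recent_rotated = { 800, 100 },
	};

	/*
	 * swappiness 60: ap = 60*1001/801 = 74, fp = 140*1001/101 = 1387.
	 * A 1M-page list shifted by priority 12 gives 256 base pages, split
	 * 12 to anon and 242 to file.
	 */
	printf("anon: %llu pages\n",
	       (unsigned long long)scan_target(1 << 20, 0, 12, 60, &rs));
	printf("file: %llu pages\n",
	       (unsigned long long)scan_target(1 << 20, 1, 12, 60, &rs));
	return 0;
}
#endif
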
Mel Gorman23b9da52012-05-29 15:06:20 -07001687/* Use reclaim/compaction for costly allocs or under memory pressure */
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001688static bool in_reclaim_compaction(struct scan_control *sc)
Mel Gorman23b9da52012-05-29 15:06:20 -07001689{
1690 if (COMPACTION_BUILD && sc->order &&
1691 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001692 sc->priority < DEF_PRIORITY - 2))
Mel Gorman23b9da52012-05-29 15:06:20 -07001693 return true;
1694
1695 return false;
1696}
1697
Rik van Riel4f98a2f2008-10-18 20:26:32 -07001698/*
Mel Gorman23b9da52012-05-29 15:06:20 -07001699 * Reclaim/compaction is used for high-order allocation requests. It reclaims
1700 * order-0 pages before compacting the zone. should_continue_reclaim() returns
1701 * true if more pages should be reclaimed so that when the page allocator
1702 * calls try_to_compact_zone() it will have enough free pages to succeed.
1703 * It will give up earlier than that if there is difficulty reclaiming pages.
Mel Gorman3e7d3442011-01-13 15:45:56 -08001704 */
Konstantin Khlebnikov90bdcfa2012-05-29 15:07:02 -07001705static inline bool should_continue_reclaim(struct lruvec *lruvec,
Mel Gorman3e7d3442011-01-13 15:45:56 -08001706 unsigned long nr_reclaimed,
1707 unsigned long nr_scanned,
1708 struct scan_control *sc)
1709{
1710 unsigned long pages_for_compaction;
1711 unsigned long inactive_lru_pages;
1712
1713 /* If not in reclaim/compaction mode, stop */
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001714 if (!in_reclaim_compaction(sc))
Mel Gorman3e7d3442011-01-13 15:45:56 -08001715 return false;
1716
Mel Gorman28765922011-02-25 14:44:20 -08001717 /* Consider stopping depending on scan and reclaim activity */
1718 if (sc->gfp_mask & __GFP_REPEAT) {
1719 /*
1720 * For __GFP_REPEAT allocations, stop reclaiming if the
1721 * full LRU list has been scanned and we are still failing
1722 * to reclaim pages. This full LRU scan is potentially
1723 * expensive but a __GFP_REPEAT caller really wants to succeed
1724 */
1725 if (!nr_reclaimed && !nr_scanned)
1726 return false;
1727 } else {
1728 /*
1729 * For non-__GFP_REPEAT allocations which can presumably
1730 * fail without consequence, stop if we failed to reclaim
1731 * any pages from the last SWAP_CLUSTER_MAX number of
1732 * pages that were scanned. This will return to the
1733		 * caller faster at the risk that reclaim/compaction and
1734		 * the resulting allocation attempt fail.
1735 */
1736 if (!nr_reclaimed)
1737 return false;
1738 }
Mel Gorman3e7d3442011-01-13 15:45:56 -08001739
1740 /*
1741 * If we have not reclaimed enough pages for compaction and the
1742 * inactive lists are large enough, continue reclaiming
1743 */
1744 pages_for_compaction = (2UL << sc->order);
Hugh Dickins4d7dcca2012-05-29 15:07:08 -07001745 inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
Minchan Kim86cfd3a2012-01-10 15:08:18 -08001746 if (nr_swap_pages > 0)
Hugh Dickins4d7dcca2012-05-29 15:07:08 -07001747 inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
Mel Gorman3e7d3442011-01-13 15:45:56 -08001748 if (sc->nr_reclaimed < pages_for_compaction &&
1749 inactive_lru_pages > pages_for_compaction)
1750 return true;
1751
1752 /* If compaction would go ahead or the allocation would succeed, stop */
Konstantin Khlebnikov90bdcfa2012-05-29 15:07:02 -07001753 switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
Mel Gorman3e7d3442011-01-13 15:45:56 -08001754 case COMPACT_PARTIAL:
1755 case COMPACT_CONTINUE:
1756 return false;
1757 default:
1758 return true;
1759 }
1760}
1761
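/*
 * Illustrative sketch of the continuation target above; not kernel code.
 * Reclaim keeps going until twice the requested allocation size has been
 * freed, provided the inactive lists are still large enough for that to
 * be plausible.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool keep_reclaiming(int order, unsigned long nr_reclaimed,
			    unsigned long inactive_lru_pages)
{
	unsigned long pages_for_compaction = 2UL << order;

	return nr_reclaimed < pages_for_compaction &&
	       inactive_lru_pages > pages_for_compaction;
}

int main(void)
{
	/* order-9 (a 2MB THP with 4K pages): target is 2UL << 9 = 1024 pages. */
	printf("%d\n", keep_reclaiming(9, 512, 100000));	/* 1: continue */
	printf("%d\n", keep_reclaiming(9, 2048, 100000));	/* 0: stop */
	return 0;
}
#endif
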
1762/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1764 */
Konstantin Khlebnikovf9be23d2012-05-29 15:07:02 -07001765static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766{
Christoph Lameterb69408e2008-10-18 20:26:14 -07001767 unsigned long nr[NR_LRU_LISTS];
Christoph Lameter86959492006-03-22 00:08:18 -08001768 unsigned long nr_to_scan;
Hugh Dickins41113042012-01-12 17:20:01 -08001769 enum lru_list lru;
Johannes Weinerf0fdc5e2011-02-10 15:01:34 -08001770 unsigned long nr_reclaimed, nr_scanned;
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08001771 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
Shaohua Li3da367c2011-10-31 17:07:03 -07001772 struct blk_plug plug;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773
Mel Gorman3e7d3442011-01-13 15:45:56 -08001774restart:
1775 nr_reclaimed = 0;
Johannes Weinerf0fdc5e2011-02-10 15:01:34 -08001776 nr_scanned = sc->nr_scanned;
Konstantin Khlebnikov90126372012-05-29 15:07:01 -07001777 get_scan_count(lruvec, sc, nr);
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001778
Shaohua Li3da367c2011-10-31 17:07:03 -07001779 blk_start_plug(&plug);
Rik van Riel556adec2008-10-18 20:26:34 -07001780 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1781 nr[LRU_INACTIVE_FILE]) {
Hugh Dickins41113042012-01-12 17:20:01 -08001782 for_each_evictable_lru(lru) {
1783 if (nr[lru]) {
KOSAKI Motohiroece74b22009-12-14 17:59:14 -08001784 nr_to_scan = min_t(unsigned long,
Hugh Dickins41113042012-01-12 17:20:01 -08001785 nr[lru], SWAP_CLUSTER_MAX);
1786 nr[lru] -= nr_to_scan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787
Hugh Dickins41113042012-01-12 17:20:01 -08001788 nr_reclaimed += shrink_list(lru, nr_to_scan,
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001789 lruvec, sc);
Christoph Lameterb69408e2008-10-18 20:26:14 -07001790 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 }
Rik van Riela79311c2009-01-06 14:40:01 -08001792 /*
1793 * On large memory systems, scan >> priority can become
1794 * really large. This is fine for the starting priority;
1795 * we want to put equal scanning pressure on each zone.
1796 * However, if the VM has a harder time of freeing pages,
1797 * with multiple processes reclaiming pages, the total
1798 * freeing target can get unreasonably large.
1799 */
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001800 if (nr_reclaimed >= nr_to_reclaim &&
1801 sc->priority < DEF_PRIORITY)
Rik van Riela79311c2009-01-06 14:40:01 -08001802 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 }
Shaohua Li3da367c2011-10-31 17:07:03 -07001804 blk_finish_plug(&plug);
Mel Gorman3e7d3442011-01-13 15:45:56 -08001805 sc->nr_reclaimed += nr_reclaimed;
KOSAKI Motohiro01dbe5c2009-01-06 14:40:02 -08001806
Rik van Riel556adec2008-10-18 20:26:34 -07001807 /*
1808 * Even if we did not try to evict anon pages at all, we want to
1809 * rebalance the anon lru active/inactive ratio.
1810 */
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07001811 if (inactive_anon_is_low(lruvec))
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07001812 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001813 sc, LRU_ACTIVE_ANON);
Rik van Riel556adec2008-10-18 20:26:34 -07001814
Mel Gorman3e7d3442011-01-13 15:45:56 -08001815 /* reclaim/compaction might need reclaim to continue */
Konstantin Khlebnikov90bdcfa2012-05-29 15:07:02 -07001816 if (should_continue_reclaim(lruvec, nr_reclaimed,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001817 sc->nr_scanned - nr_scanned, sc))
Mel Gorman3e7d3442011-01-13 15:45:56 -08001818 goto restart;
1819
Andrew Morton232ea4d2007-02-28 20:13:21 -08001820 throttle_vm_writeout(sc->gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821}
1822
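/*
 * Illustrative sketch, not kernel code: the SWAP_CLUSTER_MAX batching of
 * shrink_lruvec()'s main loop above, which ages every list at a rate
 * proportional to its target instead of draining one list at a time.
 */
#if 0
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

int main(void)
{
	unsigned long nr[2] = { 100, 37 };	/* per-list scan targets */
	int pass = 0, i;

	while (nr[0] || nr[1]) {
		pass++;
		for (i = 0; i < 2; i++) {
			unsigned long nr_to_scan;

			if (!nr[i])
				continue;
			nr_to_scan = nr[i] < SWAP_CLUSTER_MAX ?
					nr[i] : SWAP_CLUSTER_MAX;
			nr[i] -= nr_to_scan;
			printf("pass %d: scan %lu from list %d\n",
			       pass, nr_to_scan, i);
		}
	}
	return 0;
}
#endif
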
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001823static void shrink_zone(struct zone *zone, struct scan_control *sc)
Johannes Weinerf16015f2012-01-12 17:17:52 -08001824{
Johannes Weiner56600482012-01-12 17:17:59 -08001825 struct mem_cgroup *root = sc->target_mem_cgroup;
1826 struct mem_cgroup_reclaim_cookie reclaim = {
Johannes Weinerf16015f2012-01-12 17:17:52 -08001827 .zone = zone,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001828 .priority = sc->priority,
Johannes Weinerf16015f2012-01-12 17:17:52 -08001829 };
Johannes Weiner56600482012-01-12 17:17:59 -08001830 struct mem_cgroup *memcg;
Johannes Weinerf16015f2012-01-12 17:17:52 -08001831
Johannes Weiner56600482012-01-12 17:17:59 -08001832 memcg = mem_cgroup_iter(root, NULL, &reclaim);
1833 do {
Konstantin Khlebnikovf9be23d2012-05-29 15:07:02 -07001834 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
Johannes Weiner56600482012-01-12 17:17:59 -08001835
Konstantin Khlebnikovf9be23d2012-05-29 15:07:02 -07001836 shrink_lruvec(lruvec, sc);
1837
Johannes Weiner56600482012-01-12 17:17:59 -08001838 /*
1839 * Limit reclaim has historically picked one memcg and
1840 * scanned it with decreasing priority levels until
1841 * nr_to_reclaim had been reclaimed. This priority
1842 * cycle is thus over after a single memcg.
Johannes Weinerb95a2f22012-01-12 17:18:06 -08001843 *
1844 * Direct reclaim and kswapd, on the other hand, have
1845 * to scan all memory cgroups to fulfill the overall
1846 * scan target for the zone.
Johannes Weiner56600482012-01-12 17:17:59 -08001847 */
1848 if (!global_reclaim(sc)) {
1849 mem_cgroup_iter_break(root, memcg);
1850 break;
1851 }
1852 memcg = mem_cgroup_iter(root, memcg, &reclaim);
1853 } while (memcg);
Johannes Weinerf16015f2012-01-12 17:17:52 -08001854}
1855
Mel Gormanfe4b1b22012-01-12 17:19:45 -08001856/* Returns true if compaction should go ahead for a high-order request */
1857static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
1858{
1859 unsigned long balance_gap, watermark;
1860 bool watermark_ok;
1861
1862 /* Do not consider compaction for orders reclaim is meant to satisfy */
1863 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
1864 return false;
1865
1866 /*
1867 * Compaction takes time to run and there are potentially other
1868 * callers using the pages just freed. Continue reclaiming until
1869 * there is a buffer of free pages available to give compaction
1870 * a reasonable chance of completing and allocating the page
1871 */
1872 balance_gap = min(low_wmark_pages(zone),
1873 (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
1874 KSWAPD_ZONE_BALANCE_GAP_RATIO);
1875 watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
1876 watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
1877
1878 /*
1879 * If compaction is deferred, reclaim up to a point where
1880 * compaction will have a chance of success when re-enabled
1881 */
Rik van Rielaff62242012-03-21 16:33:52 -07001882 if (compaction_deferred(zone, sc->order))
Mel Gormanfe4b1b22012-01-12 17:19:45 -08001883 return watermark_ok;
1884
1885 /* If compaction is not ready to start, keep reclaiming */
1886 if (!compaction_suitable(zone, sc->order))
1887 return false;
1888
1889 return watermark_ok;
1890}
1891
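/*
 * Illustrative userspace sketch of the reclaim target that
 * compaction_ready() above checks with zone_watermark_ok_safe(); not
 * kernel code.  KSWAPD_ZONE_BALANCE_GAP_RATIO is assumed to be 100 (1% of
 * the zone) as defined in this kernel, and the zone sizes are invented.
 */
#if 0
#include <stdio.h>

#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* High watermark, plus up to 1% breathing room, plus twice the request. */
static unsigned long compaction_watermark(unsigned long high_wmark,
					  unsigned long low_wmark,
					  unsigned long present_pages,
					  int order)
{
	unsigned long balance_gap;

	balance_gap = min_ul(low_wmark,
			     (present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO - 1) /
			     KSWAPD_ZONE_BALANCE_GAP_RATIO);
	return high_wmark + balance_gap + (2UL << order);
}

int main(void)
{
	/*
	 * A ~4GB zone (1M pages) with high/low watermarks of 12k/10k pages
	 * and an order-9 request: 12000 + 10000 + 1024 = 23024 pages.
	 */
	printf("%lu\n", compaction_watermark(12000, 10000, 1000000, 9));
	return 0;
}
#endif
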
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892/*
1893 * This is the direct reclaim path, for page-allocating processes. We only
1894 * try to reclaim pages from zones which will satisfy the caller's allocation
1895 * request.
1896 *
Mel Gorman41858962009-06-16 15:32:12 -07001897 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
1898 * Because:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1900 * allocation or
Mel Gorman41858962009-06-16 15:32:12 -07001901 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
1902 * must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
1903 * zone defense algorithm.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905 * If a zone is deemed to be full of pinned pages then just give it a light
1906 * scan and then give up on it.
Mel Gormane0c23272011-10-31 17:09:33 -07001907 *
1908 * This function returns true if a zone is being reclaimed for a costly
Mel Gormanfe4b1b22012-01-12 17:19:45 -08001909 * high-order allocation and compaction is ready to begin. This indicates to
Mel Gorman0cee34f2012-01-12 17:19:49 -08001910 * the caller that it should consider retrying the allocation instead of
1911 * further reclaim.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 */
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001913static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914{
Mel Gormandd1a2392008-04-28 02:12:17 -07001915 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07001916 struct zone *zone;
Ying Hand149e3b2011-05-26 16:25:27 -07001917 unsigned long nr_soft_reclaimed;
1918 unsigned long nr_soft_scanned;
Mel Gorman0cee34f2012-01-12 17:19:49 -08001919 bool aborted_reclaim = false;
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001920
Mel Gormancc715d92012-03-21 16:34:00 -07001921 /*
1922 * If the number of buffer_heads in the machine exceeds the maximum
1923 * allowed level, force direct reclaim to scan the highmem zone as
1924 * highmem pages could be pinning lowmem pages storing buffer_heads
1925 */
1926 if (buffer_heads_over_limit)
1927 sc->gfp_mask |= __GFP_HIGHMEM;
1928
Mel Gormand4debc62010-08-09 17:19:29 -07001929 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1930 gfp_zone(sc->gfp_mask), sc->nodemask) {
Con Kolivasf3fe6512006-01-06 00:11:15 -08001931 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 continue;
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001933 /*
1934		 * Take care that memory controller reclaim has only a small
1935		 * influence on the global LRU.
1936 */
Johannes Weiner89b5fae2012-01-12 17:17:50 -08001937 if (global_reclaim(sc)) {
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001938 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1939 continue;
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001940 if (zone->all_unreclaimable &&
1941 sc->priority != DEF_PRIORITY)
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001942 continue; /* Let kswapd poll it */
Rik van Riele0887c12011-10-31 17:09:31 -07001943 if (COMPACTION_BUILD) {
1944 /*
Mel Gormane0c23272011-10-31 17:09:33 -07001945 * If we already have plenty of memory free for
1946 * compaction in this zone, don't free any more.
1947 * Even though compaction is invoked for any
1948 * non-zero order, only frequent costly order
1949 * reclamation is disruptive enough to become a
Copot Alexandruc7cfa372012-03-21 16:34:10 -07001950 * noticeable problem, like transparent huge
1951 * page allocations.
Rik van Riele0887c12011-10-31 17:09:31 -07001952 */
Mel Gormanfe4b1b22012-01-12 17:19:45 -08001953 if (compaction_ready(zone, sc)) {
Mel Gorman0cee34f2012-01-12 17:19:49 -08001954 aborted_reclaim = true;
Rik van Riele0887c12011-10-31 17:09:31 -07001955 continue;
Mel Gormane0c23272011-10-31 17:09:33 -07001956 }
Rik van Riele0887c12011-10-31 17:09:31 -07001957 }
KAMEZAWA Hiroyukiac34a1a2011-06-27 16:18:12 -07001958 /*
1959 * This steals pages from memory cgroups over softlimit
1960 * and returns the number of reclaimed pages and
1961 * scanned pages. This works for global memory pressure
1962 * and balancing, not for a memcg's limit.
1963 */
1964 nr_soft_scanned = 0;
1965 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
1966 sc->order, sc->gfp_mask,
1967 &nr_soft_scanned);
1968 sc->nr_reclaimed += nr_soft_reclaimed;
1969 sc->nr_scanned += nr_soft_scanned;
1970			/* need some check to avoid calling shrink_zone() more than necessary */
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08001971 }
Nick Piggin408d8542006-09-25 23:31:27 -07001972
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07001973 shrink_zone(zone, sc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 }
Mel Gormane0c23272011-10-31 17:09:33 -07001975
Mel Gorman0cee34f2012-01-12 17:19:49 -08001976 return aborted_reclaim;
Minchan Kimd1908362010-09-22 13:05:01 -07001977}
1978
1979static bool zone_reclaimable(struct zone *zone)
1980{
1981 return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
1982}
1983
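/*
 * Illustrative sketch, not kernel code: a zone counts as reclaimable
 * until it has been scanned six times over without enough progress,
 * mirroring the factor-of-6 heuristic above.
 */
#if 0
#include <stdbool.h>

static bool zone_still_reclaimable(unsigned long pages_scanned,
				   unsigned long reclaimable_pages)
{
	return pages_scanned < reclaimable_pages * 6;
}
#endif
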
KOSAKI Motohiro929bea72011-04-14 15:22:12 -07001984/* All zones in zonelist are unreclaimable? */
Minchan Kimd1908362010-09-22 13:05:01 -07001985static bool all_unreclaimable(struct zonelist *zonelist,
1986 struct scan_control *sc)
1987{
1988 struct zoneref *z;
1989 struct zone *zone;
Minchan Kimd1908362010-09-22 13:05:01 -07001990
1991 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1992 gfp_zone(sc->gfp_mask), sc->nodemask) {
1993 if (!populated_zone(zone))
1994 continue;
1995 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1996 continue;
KOSAKI Motohiro929bea72011-04-14 15:22:12 -07001997 if (!zone->all_unreclaimable)
1998 return false;
Minchan Kimd1908362010-09-22 13:05:01 -07001999 }
2000
KOSAKI Motohiro929bea72011-04-14 15:22:12 -07002001 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002}
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002003
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004/*
2005 * This is the main entry point to direct page reclaim.
2006 *
2007 * If a full scan of the inactive list fails to free enough memory then we
2008 * are "out of memory" and something needs to be killed.
2009 *
2010 * If the caller is !__GFP_FS then the probability of a failure is reasonably
2011 * high - the zone may be full of dirty or under-writeback pages, which this
Jens Axboe5b0830c2009-09-23 19:37:09 +02002012 * caller can't do much about. We kick the writeback threads and take explicit
2013 * naps in the hope that some of these pages can be written. But if the
2014 * allocating task holds filesystem locks which prevent writeout this might not
2015 * work, and the allocation attempt will fail.
Nishanth Aravamudana41f24e2008-04-29 00:58:25 -07002016 *
2017 * returns: 0, if no pages reclaimed
2018 * else, the number of pages reclaimed
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 */
Mel Gormandac1d272008-04-28 02:12:12 -07002020static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
Ying Hana09ed5e2011-05-24 17:12:26 -07002021 struct scan_control *sc,
2022 struct shrink_control *shrink)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023{
Andrew Morton69e05942006-03-22 00:08:19 -08002024 unsigned long total_scanned = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 struct reclaim_state *reclaim_state = current->reclaim_state;
Mel Gormandd1a2392008-04-28 02:12:17 -07002026 struct zoneref *z;
Mel Gorman54a6eb52008-04-28 02:12:16 -07002027 struct zone *zone;
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08002028 unsigned long writeback_threshold;
Mel Gorman0cee34f2012-01-12 17:19:49 -08002029 bool aborted_reclaim;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030
Keika Kobayashi873b4772008-07-25 01:48:52 -07002031 delayacct_freepages_start();
2032
Johannes Weiner89b5fae2012-01-12 17:17:50 -08002033 if (global_reclaim(sc))
KAMEZAWA Hiroyuki1cfb4192008-02-07 00:14:37 -08002034 count_vm_event(ALLOCSTALL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002036 do {
Balbir Singh66e17072008-02-07 00:13:56 -08002037 sc->nr_scanned = 0;
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002038 aborted_reclaim = shrink_zones(zonelist, sc);
Mel Gormane0c23272011-10-31 17:09:33 -07002039
Balbir Singh66e17072008-02-07 00:13:56 -08002040 /*
2041 * Don't shrink slabs when reclaiming memory from
2042 * over limit cgroups
2043 */
Johannes Weiner89b5fae2012-01-12 17:17:50 -08002044 if (global_reclaim(sc)) {
KOSAKI Motohiroc6a8a8c2010-08-09 17:19:14 -07002045 unsigned long lru_pages = 0;
Mel Gormand4debc62010-08-09 17:19:29 -07002046 for_each_zone_zonelist(zone, z, zonelist,
2047 gfp_zone(sc->gfp_mask)) {
KOSAKI Motohiroc6a8a8c2010-08-09 17:19:14 -07002048 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2049 continue;
2050
2051 lru_pages += zone_reclaimable_pages(zone);
2052 }
2053
Ying Han1495f232011-05-24 17:12:27 -07002054 shrink_slab(shrink, sc->nr_scanned, lru_pages);
KAMEZAWA Hiroyuki91a45472008-02-07 00:14:29 -08002055 if (reclaim_state) {
Rik van Riela79311c2009-01-06 14:40:01 -08002056 sc->nr_reclaimed += reclaim_state->reclaimed_slab;
KAMEZAWA Hiroyuki91a45472008-02-07 00:14:29 -08002057 reclaim_state->reclaimed_slab = 0;
2058 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 }
Balbir Singh66e17072008-02-07 00:13:56 -08002060 total_scanned += sc->nr_scanned;
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07002061 if (sc->nr_reclaimed >= sc->nr_to_reclaim)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063
2064 /*
2065 * Try to write back as many pages as we just scanned. This
2066 * tends to cause slow streaming writers to write data to the
2067 * disk smoothly, at the dirtying rate, which is nice. But
2068 * that's undesirable in laptop mode, where we *want* lumpy
2069 * writeout. So in laptop mode, write out the whole world.
2070 */
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08002071 writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
2072 if (total_scanned > writeback_threshold) {
Curt Wohlgemuth0e175a12011-10-07 21:54:10 -06002073 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
2074 WB_REASON_TRY_TO_FREE_PAGES);
Balbir Singh66e17072008-02-07 00:13:56 -08002075 sc->may_writepage = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 }
2077
2078 /* Take a nap, wait for some writeback to complete */
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08002079 if (!sc->hibernation_mode && sc->nr_scanned &&
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002080 sc->priority < DEF_PRIORITY - 2) {
Mel Gorman0e093d992010-10-26 14:21:45 -07002081 struct zone *preferred_zone;
2082
2083 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
David Rientjesf33261d2011-01-25 15:07:20 -08002084 &cpuset_current_mems_allowed,
2085 &preferred_zone);
Mel Gorman0e093d992010-10-26 14:21:45 -07002086 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
2087 }
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002088 } while (--sc->priority >= 0);
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07002089
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090out:
Keika Kobayashi873b4772008-07-25 01:48:52 -07002091 delayacct_freepages_end();
2092
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07002093 if (sc->nr_reclaimed)
2094 return sc->nr_reclaimed;
2095
KOSAKI Motohiro929bea72011-04-14 15:22:12 -07002096 /*
2097	 * While hibernation is in progress, kswapd is frozen and cannot mark
2098	 * zones all_unreclaimable, so we bypass the all_unreclaimable check
2099	 * here.
2100 */
2101 if (oom_killer_disabled)
2102 return 0;
2103
Mel Gorman0cee34f2012-01-12 17:19:49 -08002104 /* Aborted reclaim to try compaction? don't OOM, then */
2105 if (aborted_reclaim)
Mel Gorman73350842012-01-12 17:19:33 -08002106 return 1;
2107
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07002108 /* top priority shrink_zones still had more to do? don't OOM, then */
Johannes Weiner89b5fae2012-01-12 17:17:50 -08002109 if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
KOSAKI Motohirobb21c7c2010-06-04 14:15:05 -07002110 return 1;
2111
2112 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113}
2114
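/*
 * Illustrative sketch, not kernel code: the flusher wakeup threshold in
 * do_try_to_free_pages() above is 1.5x the reclaim target.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long nr_to_reclaim = 32;	/* SWAP_CLUSTER_MAX */
	unsigned long writeback_threshold = nr_to_reclaim + nr_to_reclaim / 2;

	/* Writeback is only kicked once total_scanned exceeds 48 pages. */
	printf("%lu\n", writeback_threshold);
	return 0;
}
#endif
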
Mel Gorman55150612012-07-31 16:44:35 -07002115static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
2116{
2117 struct zone *zone;
2118 unsigned long pfmemalloc_reserve = 0;
2119 unsigned long free_pages = 0;
2120 int i;
2121 bool wmark_ok;
2122
2123 for (i = 0; i <= ZONE_NORMAL; i++) {
2124 zone = &pgdat->node_zones[i];
2125 pfmemalloc_reserve += min_wmark_pages(zone);
2126 free_pages += zone_page_state(zone, NR_FREE_PAGES);
2127 }
2128
2129 wmark_ok = free_pages > pfmemalloc_reserve / 2;
2130
2131 /* kswapd must be awake if processes are being throttled */
2132 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
2133 pgdat->classzone_idx = min(pgdat->classzone_idx,
2134 (enum zone_type)ZONE_NORMAL);
2135 wake_up_interruptible(&pgdat->kswapd_wait);
2136 }
2137
2138 return wmark_ok;
2139}
2140
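/*
 * Illustrative userspace sketch of the reserve test above; not kernel
 * code.  It sums the min watermarks of the zones up to ZONE_NORMAL and
 * requires free pages to exceed half of that sum.  The zone figures in
 * main() are invented.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool pfmemalloc_wmark_ok(const unsigned long *min_wmark,
				const unsigned long *free, int nr_zones)
{
	unsigned long reserve = 0, free_pages = 0;
	int i;

	for (i = 0; i < nr_zones; i++) {
		reserve += min_wmark[i];
		free_pages += free[i];
	}
	return free_pages > reserve / 2;
}

int main(void)
{
	/* Hypothetical DMA + NORMAL zones: reserve 64+4096, free 16+1500. */
	unsigned long wmark[] = { 64, 4096 };
	unsigned long free[]  = { 16, 1500 };

	/* 1516 free vs a 2080-page trigger: throttle direct reclaimers. */
	printf("%d\n", pfmemalloc_wmark_ok(wmark, free, 2));	/* 0 */
	return 0;
}
#endif
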
2141/*
2142 * Throttle direct reclaimers if backing storage is backed by the network
2143 * and the PFMEMALLOC reserve for the preferred node is getting dangerously
2144 * depleted. kswapd will continue to make progress and wake the processes
2145 * when the low watermark is reached
2146 */
2147static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
2148 nodemask_t *nodemask)
2149{
2150 struct zone *zone;
2151 int high_zoneidx = gfp_zone(gfp_mask);
2152 pg_data_t *pgdat;
2153
2154 /*
2155 * Kernel threads should not be throttled as they may be indirectly
2156 * responsible for cleaning pages necessary for reclaim to make forward
2157 * progress. kjournald for example may enter direct reclaim while
2158	 * committing a transaction where throttling it could force other
2159 * processes to block on log_wait_commit().
2160 */
2161 if (current->flags & PF_KTHREAD)
2162 return;
2163
2164 /* Check if the pfmemalloc reserves are ok */
2165 first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
2166 pgdat = zone->zone_pgdat;
2167 if (pfmemalloc_watermark_ok(pgdat))
2168 return;
2169
2170 /*
2171 * If the caller cannot enter the filesystem, it's possible that it
2172 * is due to the caller holding an FS lock or performing a journal
2173 * transaction in the case of a filesystem like ext[3|4]. In this case,
2174 * it is not safe to block on pfmemalloc_wait as kswapd could be
2175 * blocked waiting on the same lock. Instead, throttle for up to a
2176 * second before continuing.
2177 */
2178 if (!(gfp_mask & __GFP_FS)) {
2179 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
2180 pfmemalloc_watermark_ok(pgdat), HZ);
2181 return;
2182 }
2183
2184 /* Throttle until kswapd wakes the process */
2185 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
2186 pfmemalloc_watermark_ok(pgdat));
2187}
2188
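/*
 * Illustrative sketch, not kernel code: the three possible outcomes of
 * throttle_direct_reclaim() above, flattened into one decision.
 */
#if 0
#include <stdbool.h>

enum throttle_action {
	THROTTLE_NONE,		/* kernel thread, or reserves are healthy */
	THROTTLE_TIMEOUT,	/* !__GFP_FS: interruptible wait, at most 1s */
	THROTTLE_UNTIL_OK,	/* killable wait for the watermark to recover */
};

static enum throttle_action
direct_reclaim_throttle(bool is_kthread, bool gfp_fs, bool wmark_ok)
{
	if (is_kthread || wmark_ok)
		return THROTTLE_NONE;
	return gfp_fs ? THROTTLE_UNTIL_OK : THROTTLE_TIMEOUT;
}
#endif
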
Mel Gormandac1d272008-04-28 02:12:12 -07002189unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
KAMEZAWA Hiroyuki327c0e92009-03-31 15:23:31 -07002190 gfp_t gfp_mask, nodemask_t *nodemask)
Balbir Singh66e17072008-02-07 00:13:56 -08002191{
Mel Gorman33906bc2010-08-09 17:19:16 -07002192 unsigned long nr_reclaimed;
Balbir Singh66e17072008-02-07 00:13:56 -08002193 struct scan_control sc = {
2194 .gfp_mask = gfp_mask,
2195 .may_writepage = !laptop_mode,
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08002196 .nr_to_reclaim = SWAP_CLUSTER_MAX,
Johannes Weinera6dc60f82009-03-31 15:19:30 -07002197 .may_unmap = 1,
KOSAKI Motohiro2e2e4252009-04-21 12:24:57 -07002198 .may_swap = 1,
Balbir Singh66e17072008-02-07 00:13:56 -08002199 .order = order,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002200 .priority = DEF_PRIORITY,
Johannes Weinerf16015f2012-01-12 17:17:52 -08002201 .target_mem_cgroup = NULL,
KAMEZAWA Hiroyuki327c0e92009-03-31 15:23:31 -07002202 .nodemask = nodemask,
Balbir Singh66e17072008-02-07 00:13:56 -08002203 };
Ying Hana09ed5e2011-05-24 17:12:26 -07002204 struct shrink_control shrink = {
2205 .gfp_mask = sc.gfp_mask,
2206 };
Balbir Singh66e17072008-02-07 00:13:56 -08002207
Mel Gorman55150612012-07-31 16:44:35 -07002208 throttle_direct_reclaim(gfp_mask, zonelist, nodemask);
2209
2210 /*
2211 * Do not enter reclaim if fatal signal is pending. 1 is returned so
2212 * that the page allocator does not consider triggering OOM
2213 */
2214 if (fatal_signal_pending(current))
2215 return 1;
2216
Mel Gorman33906bc2010-08-09 17:19:16 -07002217 trace_mm_vmscan_direct_reclaim_begin(order,
2218 sc.may_writepage,
2219 gfp_mask);
2220
Ying Hana09ed5e2011-05-24 17:12:26 -07002221 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
Mel Gorman33906bc2010-08-09 17:19:16 -07002222
2223 trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
2224
2225 return nr_reclaimed;
Balbir Singh66e17072008-02-07 00:13:56 -08002226}
2227
Andrew Mortonc255a452012-07-31 16:43:02 -07002228#ifdef CONFIG_MEMCG
Balbir Singh66e17072008-02-07 00:13:56 -08002229
Johannes Weiner72835c82012-01-12 17:18:32 -08002230unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
Balbir Singh4e416952009-09-23 15:56:39 -07002231 gfp_t gfp_mask, bool noswap,
Ying Han0ae5e892011-05-26 16:25:25 -07002232 struct zone *zone,
2233 unsigned long *nr_scanned)
Balbir Singh4e416952009-09-23 15:56:39 -07002234{
2235 struct scan_control sc = {
Ying Han0ae5e892011-05-26 16:25:25 -07002236 .nr_scanned = 0,
KOSAKI Motohirob8f5c562010-08-10 18:03:02 -07002237 .nr_to_reclaim = SWAP_CLUSTER_MAX,
Balbir Singh4e416952009-09-23 15:56:39 -07002238 .may_writepage = !laptop_mode,
2239 .may_unmap = 1,
2240 .may_swap = !noswap,
Balbir Singh4e416952009-09-23 15:56:39 -07002241 .order = 0,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002242 .priority = 0,
Johannes Weiner72835c82012-01-12 17:18:32 -08002243 .target_mem_cgroup = memcg,
Balbir Singh4e416952009-09-23 15:56:39 -07002244 };
Konstantin Khlebnikovf9be23d2012-05-29 15:07:02 -07002245 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
Ying Han0ae5e892011-05-26 16:25:25 -07002246
Balbir Singh4e416952009-09-23 15:56:39 -07002247 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2248 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002249
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002250 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002251 sc.may_writepage,
2252 sc.gfp_mask);
2253
Balbir Singh4e416952009-09-23 15:56:39 -07002254 /*
2255 * NOTE: Although we can get the priority field, using it
2256 * here is not a good idea, since it limits the pages we can scan.
2257	 * If we don't reclaim here, the shrink_zone from balance_pgdat
2258	 * will pick up pages from other mem cgroups as well. We hack
2259 * the priority and make it zero.
2260 */
Konstantin Khlebnikovf9be23d2012-05-29 15:07:02 -07002261 shrink_lruvec(lruvec, &sc);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002262
2263 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2264
Ying Han0ae5e892011-05-26 16:25:25 -07002265 *nr_scanned = sc.nr_scanned;
Balbir Singh4e416952009-09-23 15:56:39 -07002266 return sc.nr_reclaimed;
2267}
2268
Johannes Weiner72835c82012-01-12 17:18:32 -08002269unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
KOSAKI Motohiroa7885eb2009-01-07 18:08:24 -08002270 gfp_t gfp_mask,
Johannes Weiner185efc02011-09-14 16:21:58 -07002271 bool noswap)
Balbir Singh66e17072008-02-07 00:13:56 -08002272{
Balbir Singh4e416952009-09-23 15:56:39 -07002273 struct zonelist *zonelist;
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002274 unsigned long nr_reclaimed;
Ying Han889976d2011-05-26 16:25:33 -07002275 int nid;
Balbir Singh66e17072008-02-07 00:13:56 -08002276 struct scan_control sc = {
Balbir Singh66e17072008-02-07 00:13:56 -08002277 .may_writepage = !laptop_mode,
Johannes Weinera6dc60f82009-03-31 15:19:30 -07002278 .may_unmap = 1,
KOSAKI Motohiro2e2e4252009-04-21 12:24:57 -07002279 .may_swap = !noswap,
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08002280 .nr_to_reclaim = SWAP_CLUSTER_MAX,
Balbir Singh66e17072008-02-07 00:13:56 -08002281 .order = 0,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002282 .priority = DEF_PRIORITY,
Johannes Weiner72835c82012-01-12 17:18:32 -08002283 .target_mem_cgroup = memcg,
KAMEZAWA Hiroyuki327c0e92009-03-31 15:23:31 -07002284		.nodemask = NULL, /* we don't care about placement */
Ying Hana09ed5e2011-05-24 17:12:26 -07002285 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2286 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
2287 };
2288 struct shrink_control shrink = {
2289 .gfp_mask = sc.gfp_mask,
Balbir Singh66e17072008-02-07 00:13:56 -08002290 };
Balbir Singh66e17072008-02-07 00:13:56 -08002291
Ying Han889976d2011-05-26 16:25:33 -07002292 /*
 2293	 * Unlike direct reclaim via alloc_pages(), memcg reclaim doesn't care
 2294	 * from which node we get pages. So the node where we start the
2295 * scan does not need to be the current node.
2296 */
Johannes Weiner72835c82012-01-12 17:18:32 -08002297 nid = mem_cgroup_select_victim_node(memcg);
Ying Han889976d2011-05-26 16:25:33 -07002298
2299 zonelist = NODE_DATA(nid)->node_zonelists;
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002300
2301 trace_mm_vmscan_memcg_reclaim_begin(0,
2302 sc.may_writepage,
2303 sc.gfp_mask);
2304
Ying Hana09ed5e2011-05-24 17:12:26 -07002305 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
KOSAKI Motohirobdce6d92010-08-09 17:19:56 -07002306
2307 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
2308
2309 return nr_reclaimed;
Balbir Singh66e17072008-02-07 00:13:56 -08002310}
2311#endif
2312
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002313static void age_active_anon(struct zone *zone, struct scan_control *sc)
Johannes Weinerf16015f2012-01-12 17:17:52 -08002314{
Johannes Weinerb95a2f22012-01-12 17:18:06 -08002315 struct mem_cgroup *memcg;
Johannes Weinerf16015f2012-01-12 17:17:52 -08002316
Johannes Weinerb95a2f22012-01-12 17:18:06 -08002317 if (!total_swap_pages)
2318 return;
2319
2320 memcg = mem_cgroup_iter(NULL, NULL, NULL);
2321 do {
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07002322 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
Johannes Weinerb95a2f22012-01-12 17:18:06 -08002323
Konstantin Khlebnikovc56d5c72012-05-29 15:07:00 -07002324 if (inactive_anon_is_low(lruvec))
Konstantin Khlebnikov1a93be02012-05-29 15:07:01 -07002325 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002326 sc, LRU_ACTIVE_ANON);
Johannes Weinerb95a2f22012-01-12 17:18:06 -08002327
2328 memcg = mem_cgroup_iter(NULL, memcg, NULL);
2329 } while (memcg);
Johannes Weinerf16015f2012-01-12 17:17:52 -08002330}
2331
Mel Gorman1741c872011-01-13 15:46:21 -08002332/*
2333 * pgdat_balanced is used when checking if a node is balanced for high-order
 2334	 * allocations. Only zones that meet their watermarks and are allowed
 2335	 * by the caller's classzone_idx are added to balanced_pages. The total of
2336 * balanced pages must be at least 25% of the zones allowed by classzone_idx
2337 * for the node to be considered balanced. Forcing all zones to be balanced
2338 * for high orders can cause excessive reclaim when there are imbalanced zones.
2339 * The choice of 25% is due to
2340 * o a 16M DMA zone that is balanced will not balance a zone on any
2341 * reasonable sized machine
2342 * o On all other machines, the top zone must be at least a reasonable
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002343 * percentage of the middle zones. For example, on 32-bit x86, highmem
Mel Gorman1741c872011-01-13 15:46:21 -08002344 *   would need to be at least 256M for it to balance a whole node.
2345 * Similarly, on x86-64 the Normal zone would need to be at least 1G
2346 * to balance a node on its own. These seemed like reasonable ratios.
2347 */
2348static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
2349 int classzone_idx)
2350{
2351 unsigned long present_pages = 0;
2352 int i;
2353
2354 for (i = 0; i <= classzone_idx; i++)
2355 present_pages += pgdat->node_zones[i].present_pages;
2356
Shaohua Li4746efd2011-07-19 08:49:26 -07002357	/* A special case here: if the node has no present pages, we consider it balanced */
2358 return balanced_pages >= (present_pages >> 2);
Mel Gorman1741c872011-01-13 15:46:21 -08002359}
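/*
 * Worked example for pgdat_balanced() above (numbers invented, not from
 * any real machine): a node with 16M DMA + 880M Normal + 3200M HighMem
 * zones has roughly 1M present 4K pages below classzone_idx, so
 * balanced_pages must reach about 256K pages (present_pages >> 2, i.e.
 * 25%). The HighMem zone alone meeting its watermark satisfies that;
 * the 16M DMA zone alone does not.
 */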
2360
Mel Gorman55150612012-07-31 16:44:35 -07002361/*
2362 * Prepare kswapd for sleeping. This verifies that there are no processes
2363 * waiting in throttle_direct_reclaim() and that watermarks have been met.
2364 *
2365 * Returns true if kswapd is ready to sleep
2366 */
2367static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
Mel Gormandc83edd2011-01-13 15:46:26 -08002368 int classzone_idx)
Mel Gormanf50de2d2009-12-14 17:58:53 -08002369{
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002370 int i;
Mel Gorman1741c872011-01-13 15:46:21 -08002371 unsigned long balanced = 0;
2372 bool all_zones_ok = true;
Mel Gormanf50de2d2009-12-14 17:58:53 -08002373
2374 /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
2375 if (remaining)
Mel Gorman55150612012-07-31 16:44:35 -07002376 return false;
2377
2378 /*
2379 * There is a potential race between when kswapd checks its watermarks
2380 * and a process gets throttled. There is also a potential race if
 2381	 * processes get throttled, kswapd wakes, a large process exits, thereby
 2382	 * balancing the zones, which causes kswapd to miss a wakeup. If kswapd
2383 * is going to sleep, no process should be sleeping on pfmemalloc_wait
 2384	 * so wake them now if necessary. If they still cannot make progress,
 2385	 * they will wake kswapd and get throttled again
2386 */
2387 if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
2388 wake_up(&pgdat->pfmemalloc_wait);
2389 return false;
2390 }
Mel Gormanf50de2d2009-12-14 17:58:53 -08002391
Mel Gorman0abdee22011-01-13 15:46:22 -08002392 /* Check the watermark levels */
Mel Gorman08951e52011-07-08 15:39:36 -07002393 for (i = 0; i <= classzone_idx; i++) {
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002394 struct zone *zone = pgdat->node_zones + i;
2395
2396 if (!populated_zone(zone))
2397 continue;
2398
Mel Gorman355b09c2011-01-13 15:46:24 -08002399 /*
2400 * balance_pgdat() skips over all_unreclaimable after
2401 * DEF_PRIORITY. Effectively, it considers them balanced so
2402 * they must be considered balanced here as well if kswapd
2403 * is to sleep
2404 */
2405 if (zone->all_unreclaimable) {
2406 balanced += zone->present_pages;
KOSAKI Motohirode3fab32010-01-15 17:01:25 -08002407 continue;
Mel Gorman355b09c2011-01-13 15:46:24 -08002408 }
KOSAKI Motohirode3fab32010-01-15 17:01:25 -08002409
Mel Gorman88f5acf2011-01-13 15:45:41 -08002410 if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
Mel Gormanda175d02011-07-08 15:39:39 -07002411 i, 0))
Mel Gorman1741c872011-01-13 15:46:21 -08002412 all_zones_ok = false;
2413 else
2414 balanced += zone->present_pages;
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002415 }
Mel Gormanf50de2d2009-12-14 17:58:53 -08002416
Mel Gorman1741c872011-01-13 15:46:21 -08002417 /*
2418 * For high-order requests, the balanced zones must contain at least
 2419	 * 25% of the node's pages for kswapd to sleep. For order-0, all zones
2420 * must be balanced
2421 */
2422 if (order)
Mel Gorman55150612012-07-31 16:44:35 -07002423 return pgdat_balanced(pgdat, balanced, classzone_idx);
Mel Gorman1741c872011-01-13 15:46:21 -08002424 else
Mel Gorman55150612012-07-31 16:44:35 -07002425 return all_zones_ok;
Mel Gormanf50de2d2009-12-14 17:58:53 -08002426}
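/*
 * Concrete reading of prepare_kswapd_sleep() above: for an order-0
 * wakeup every populated zone up to classzone_idx must pass its high
 * watermark before kswapd may sleep (unreclaimable zones are treated
 * as balanced), while for, say, an order-3 wakeup it is enough that
 * balanced zones hold 25% of the node's pages.
 */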
2427
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428/*
2429 * For kswapd, balance_pgdat() will work across all this node's zones until
Mel Gorman41858962009-06-16 15:32:12 -07002430 * they are all at high_wmark_pages(zone).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 *
Mel Gorman0abdee22011-01-13 15:46:22 -08002432 * Returns the final order kswapd was reclaiming at
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 *
2434 * There is special handling here for zones which are full of pinned pages.
2435 * This can happen if the pages are all mlocked, or if they are all used by
2436 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
2437 * What we do is to detect the case where all pages in the zone have been
2438 * scanned twice and there has been zero successful reclaim. Mark the zone as
2439 * dead and from now on, only perform a short scan. Basically we're polling
2440 * the zone for when the problem goes away.
2441 *
2442 * kswapd scans the zones in the highmem->normal->dma direction. It skips
Mel Gorman41858962009-06-16 15:32:12 -07002443 * zones which have free_pages > high_wmark_pages(zone), but once a zone is
2444 * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
2445 * lower zones regardless of the number of free pages in the lower zones. This
2446 * interoperates with the page allocator fallback scheme to ensure that aging
2447 * of pages is balanced across the zones.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 */
Mel Gorman99504742011-01-13 15:46:20 -08002449static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
Mel Gormandc83edd2011-01-13 15:46:26 -08002450 int *classzone_idx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 int all_zones_ok;
Mel Gorman1741c872011-01-13 15:46:21 -08002453 unsigned long balanced;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 int i;
Mel Gorman99504742011-01-13 15:46:20 -08002455 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
Andrew Morton69e05942006-03-22 00:08:19 -08002456 unsigned long total_scanned;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 struct reclaim_state *reclaim_state = current->reclaim_state;
Ying Han0ae5e892011-05-26 16:25:25 -07002458 unsigned long nr_soft_reclaimed;
2459 unsigned long nr_soft_scanned;
Andrew Morton179e9632006-03-22 00:08:18 -08002460 struct scan_control sc = {
2461 .gfp_mask = GFP_KERNEL,
Johannes Weinera6dc60f82009-03-31 15:19:30 -07002462 .may_unmap = 1,
KOSAKI Motohiro2e2e4252009-04-21 12:24:57 -07002463 .may_swap = 1,
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08002464 /*
 2465		 * kswapd doesn't want to be bailed out while reclaiming, because
2466 * we want to put equal scanning pressure on each zone.
2467 */
2468 .nr_to_reclaim = ULONG_MAX,
Andy Whitcroft5ad333e2007-07-17 04:03:16 -07002469 .order = order,
Johannes Weinerf16015f2012-01-12 17:17:52 -08002470 .target_mem_cgroup = NULL,
Andrew Morton179e9632006-03-22 00:08:18 -08002471 };
Ying Hana09ed5e2011-05-24 17:12:26 -07002472 struct shrink_control shrink = {
2473 .gfp_mask = sc.gfp_mask,
2474 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475loop_again:
2476 total_scanned = 0;
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002477 sc.priority = DEF_PRIORITY;
Rik van Riela79311c2009-01-06 14:40:01 -08002478 sc.nr_reclaimed = 0;
Christoph Lameterc0bbbc72006-06-11 15:22:26 -07002479 sc.may_writepage = !laptop_mode;
Christoph Lameterf8891e52006-06-30 01:55:45 -07002480 count_vm_event(PAGEOUTRUN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002482 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 unsigned long lru_pages = 0;
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002484 int has_under_min_watermark_zone = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485
2486 all_zones_ok = 1;
Mel Gorman1741c872011-01-13 15:46:21 -08002487 balanced = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002489 /*
2490 * Scan in the highmem->dma direction for the highest
2491 * zone which needs scanning
2492 */
2493 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
2494 struct zone *zone = pgdat->node_zones + i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002496 if (!populated_zone(zone))
2497 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002499 if (zone->all_unreclaimable &&
2500 sc.priority != DEF_PRIORITY)
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002501 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
Rik van Riel556adec2008-10-18 20:26:34 -07002503 /*
2504 * Do some background aging of the anon list, to give
2505 * pages a chance to be referenced before reclaiming.
2506 */
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002507 age_active_anon(zone, &sc);
Rik van Riel556adec2008-10-18 20:26:34 -07002508
Mel Gormancc715d92012-03-21 16:34:00 -07002509 /*
2510 * If the number of buffer_heads in the machine
2511 * exceeds the maximum allowed level and this node
2512 * has a highmem zone, force kswapd to reclaim from
2513 * it to relieve lowmem pressure.
2514 */
2515 if (buffer_heads_over_limit && is_highmem_idx(i)) {
2516 end_zone = i;
2517 break;
2518 }
2519
Mel Gorman88f5acf2011-01-13 15:45:41 -08002520 if (!zone_watermark_ok_safe(zone, order,
Mel Gorman41858962009-06-16 15:32:12 -07002521 high_wmark_pages(zone), 0, 0)) {
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002522 end_zone = i;
Andrew Mortone1dbeda2006-12-06 20:32:01 -08002523 break;
Shaohua Li439423f2011-08-25 15:59:12 -07002524 } else {
2525 /* If balanced, clear the congested flag */
2526 zone_clear_flag(zone, ZONE_CONGESTED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 }
Andrew Mortone1dbeda2006-12-06 20:32:01 -08002529 if (i < 0)
2530 goto out;
2531
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 for (i = 0; i <= end_zone; i++) {
2533 struct zone *zone = pgdat->node_zones + i;
2534
Wu Fengguangadea02a2009-09-21 17:01:42 -07002535 lru_pages += zone_reclaimable_pages(zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 }
2537
2538 /*
2539 * Now scan the zone in the dma->highmem direction, stopping
2540 * at the last zone which needs scanning.
2541 *
2542 * We do this because the page allocator works in the opposite
2543 * direction. This prevents the page allocator from allocating
2544 * pages behind kswapd's direction of progress, which would
2545 * cause too much scanning of the lower zones.
2546 */
2547 for (i = 0; i <= end_zone; i++) {
2548 struct zone *zone = pgdat->node_zones + i;
Rik van Rielfe2c2a12012-03-21 16:33:51 -07002549 int nr_slab, testorder;
Mel Gorman8afdcec2011-03-22 16:33:04 -07002550 unsigned long balance_gap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551
Con Kolivasf3fe6512006-01-06 00:11:15 -08002552 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 continue;
2554
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002555 if (zone->all_unreclaimable &&
2556 sc.priority != DEF_PRIORITY)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557 continue;
2558
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559 sc.nr_scanned = 0;
Balbir Singh4e416952009-09-23 15:56:39 -07002560
Ying Han0ae5e892011-05-26 16:25:25 -07002561 nr_soft_scanned = 0;
Balbir Singh4e416952009-09-23 15:56:39 -07002562 /*
2563 * Call soft limit reclaim before calling shrink_zone.
Balbir Singh4e416952009-09-23 15:56:39 -07002564 */
Ying Han0ae5e892011-05-26 16:25:25 -07002565 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
2566 order, sc.gfp_mask,
2567 &nr_soft_scanned);
2568 sc.nr_reclaimed += nr_soft_reclaimed;
2569 total_scanned += nr_soft_scanned;
KOSAKI Motohiro00918b62010-08-10 18:03:05 -07002570
Rik van Riel32a43302007-10-16 01:24:50 -07002571 /*
Mel Gorman8afdcec2011-03-22 16:33:04 -07002572 * We put equal pressure on every zone, unless
2573 * one zone has way too many pages free
2574 * already. The "too many pages" is defined
2575 * as the high wmark plus a "gap" where the
2576 * gap is either the low watermark or 1%
2577 * of the zone, whichever is smaller.
Rik van Riel32a43302007-10-16 01:24:50 -07002578 */
Mel Gorman8afdcec2011-03-22 16:33:04 -07002579 balance_gap = min(low_wmark_pages(zone),
2580 (zone->present_pages +
2581 KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
2582 KSWAPD_ZONE_BALANCE_GAP_RATIO);
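			/*
			 * Illustrative arithmetic (invented numbers): for a
			 * zone with 1,000,000 present pages and a low
			 * watermark of 5,000 pages, the 1% term is
			 * (1000000 + 99) / 100 = 10000 with
			 * KSWAPD_ZONE_BALANCE_GAP_RATIO == 100, so
			 * balance_gap = min(5000, 10000) = 5000 extra pages
			 * on top of the high watermark.
			 */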
Rik van Rielfe2c2a12012-03-21 16:33:51 -07002583 /*
2584 * Kswapd reclaims only single pages with compaction
2585 * enabled. Trying too hard to reclaim until contiguous
2586 * free pages have become available can hurt performance
2587 * by evicting too much useful data from memory.
2588 * Do not reclaim more than needed for compaction.
2589 */
2590 testorder = order;
2591 if (COMPACTION_BUILD && order &&
2592 compaction_suitable(zone, order) !=
2593 COMPACT_SKIPPED)
2594 testorder = 0;
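			/*
			 * E.g. for a hypothetical order-4 wakeup: when
			 * compaction_suitable() says the zone can be
			 * compacted, kswapd checks watermarks at
			 * testorder == 0 and lets compaction assemble the
			 * 16-page block rather than reclaiming that much
			 * LRU data directly.
			 */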
2595
Mel Gormancc715d92012-03-21 16:34:00 -07002596 if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
Hugh Dickins643ac9f2012-03-23 02:57:31 -07002597 !zone_watermark_ok_safe(zone, testorder,
Mel Gorman8afdcec2011-03-22 16:33:04 -07002598 high_wmark_pages(zone) + balance_gap,
Mel Gormand7868da2011-07-08 15:39:38 -07002599 end_zone, 0)) {
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002600 shrink_zone(zone, &sc);
Andrea Arcangeli5a03b052011-01-13 15:47:11 -08002601
Mel Gormand7868da2011-07-08 15:39:38 -07002602 reclaim_state->reclaimed_slab = 0;
2603 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
2604 sc.nr_reclaimed += reclaim_state->reclaimed_slab;
2605 total_scanned += sc.nr_scanned;
2606
2607 if (nr_slab == 0 && !zone_reclaimable(zone))
2608 zone->all_unreclaimable = 1;
2609 }
2610
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 /*
2612 * If we've done a decent amount of scanning and
2613 * the reclaim ratio is low, start doing writepage
2614 * even in laptop mode
2615 */
2616 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
Rik van Riela79311c2009-01-06 14:40:01 -08002617 total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618 sc.may_writepage = 1;
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002619
Mel Gorman215ddd62011-07-08 15:39:40 -07002620 if (zone->all_unreclaimable) {
2621 if (end_zone && end_zone == i)
2622 end_zone--;
Mel Gormand7868da2011-07-08 15:39:38 -07002623 continue;
Mel Gorman215ddd62011-07-08 15:39:40 -07002624 }
Mel Gormand7868da2011-07-08 15:39:38 -07002625
Rik van Rielfe2c2a12012-03-21 16:33:51 -07002626 if (!zone_watermark_ok_safe(zone, testorder,
Minchan Kim45973d72010-03-05 13:41:45 -08002627 high_wmark_pages(zone), end_zone, 0)) {
2628 all_zones_ok = 0;
2629 /*
 2630				 * We are still under the min watermark. This
2631 * means that we have a GFP_ATOMIC allocation
2632 * failure risk. Hurry up!
2633 */
Mel Gorman88f5acf2011-01-13 15:45:41 -08002634 if (!zone_watermark_ok_safe(zone, order,
Minchan Kim45973d72010-03-05 13:41:45 -08002635 min_wmark_pages(zone), end_zone, 0))
2636 has_under_min_watermark_zone = 1;
Mel Gorman0e093d992010-10-26 14:21:45 -07002637 } else {
2638 /*
2639 * If a zone reaches its high watermark,
2640 * consider it to be no longer congested. It's
2641 * possible there are dirty pages backed by
2642 * congested BDIs but as pressure is relieved,
Wanpeng Liab8704b2012-06-17 09:27:18 +08002643 * speculatively avoid congestion waits
Mel Gorman0e093d992010-10-26 14:21:45 -07002644 */
2645 zone_clear_flag(zone, ZONE_CONGESTED);
Mel Gormandc83edd2011-01-13 15:46:26 -08002646 if (i <= *classzone_idx)
Mel Gorman1741c872011-01-13 15:46:21 -08002647 balanced += zone->present_pages;
Minchan Kim45973d72010-03-05 13:41:45 -08002648 }
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002649
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650 }
Mel Gorman55150612012-07-31 16:44:35 -07002651
2652 /*
2653 * If the low watermark is met there is no need for processes
 2654		 * to be throttled on pfmemalloc_wait as they should now be
 2655		 * able to safely make forward progress. Wake them
2656 */
2657 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
2658 pfmemalloc_watermark_ok(pgdat))
2659 wake_up(&pgdat->pfmemalloc_wait);
2660
Mel Gormandc83edd2011-01-13 15:46:26 -08002661 if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002662 break; /* kswapd: all done */
2663 /*
2664 * OK, kswapd is getting into trouble. Take a nap, then take
2665 * another pass across the zones.
2666 */
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002667 if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
KOSAKI Motohirobb3ab592009-12-14 17:58:55 -08002668 if (has_under_min_watermark_zone)
2669 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2670 else
2671 congestion_wait(BLK_RW_ASYNC, HZ/10);
2672 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
2674 /*
2675 * We do this so kswapd doesn't build up large priorities for
2676 * example when it is freeing in parallel with allocators. It
2677 * matches the direct reclaim path behaviour in terms of impact
2678 * on zone->*_priority.
2679 */
Rik van Riela79311c2009-01-06 14:40:01 -08002680 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 break;
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002682 } while (--sc.priority >= 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683out:
Mel Gorman99504742011-01-13 15:46:20 -08002684
2685 /*
2686 * order-0: All zones must meet high watermark for a balanced node
Mel Gorman1741c872011-01-13 15:46:21 -08002687 * high-order: Balanced zones must make up at least 25% of the node
2688 * for the node to be balanced
Mel Gorman99504742011-01-13 15:46:20 -08002689 */
Mel Gormandc83edd2011-01-13 15:46:26 -08002690 if (!(all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 cond_resched();
Rafael J. Wysocki83573762006-12-06 20:34:18 -08002692
2693 try_to_freeze();
2694
KOSAKI Motohiro73ce02e2009-01-06 14:40:33 -08002695 /*
2696 * Fragmentation may mean that the system cannot be
2697 * rebalanced for high-order allocations in all zones.
2698 * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
2699 * it means the zones have been fully scanned and are still
2700 * not balanced. For high-order allocations, there is
2701 * little point trying all over again as kswapd may
 2702		 * loop infinitely.
2703 *
2704 * Instead, recheck all watermarks at order-0 as they
2705 * are the most important. If watermarks are ok, kswapd will go
2706 * back to sleep. High-order users can still perform direct
2707 * reclaim if they wish.
2708 */
2709 if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
2710 order = sc.order = 0;
2711
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 goto loop_again;
2713 }
2714
Mel Gorman99504742011-01-13 15:46:20 -08002715 /*
2716 * If kswapd was reclaiming at a higher order, it has the option of
2717 * sleeping without all zones being balanced. Before it does, it must
2718 * ensure that the watermarks for order-0 on *all* zones are met and
2719 * that the congestion flags are cleared. The congestion flag must
2720 * be cleared as kswapd is the only mechanism that clears the flag
2721 * and it is potentially going to sleep here.
2722 */
2723 if (order) {
Rik van Riel7be62de2012-03-21 16:33:52 -07002724 int zones_need_compaction = 1;
2725
Mel Gorman99504742011-01-13 15:46:20 -08002726 for (i = 0; i <= end_zone; i++) {
2727 struct zone *zone = pgdat->node_zones + i;
2728
2729 if (!populated_zone(zone))
2730 continue;
2731
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07002732 if (zone->all_unreclaimable &&
2733 sc.priority != DEF_PRIORITY)
Mel Gorman99504742011-01-13 15:46:20 -08002734 continue;
2735
Rik van Rielfe2c2a12012-03-21 16:33:51 -07002736 /* Would compaction fail due to lack of free memory? */
Rik van Riel496b9192012-03-24 10:26:21 -04002737 if (COMPACTION_BUILD &&
2738 compaction_suitable(zone, order) == COMPACT_SKIPPED)
Rik van Rielfe2c2a12012-03-21 16:33:51 -07002739 goto loop_again;
2740
Mel Gorman99504742011-01-13 15:46:20 -08002741 /* Confirm the zone is balanced for order-0 */
2742 if (!zone_watermark_ok(zone, 0,
2743 high_wmark_pages(zone), 0, 0)) {
2744 order = sc.order = 0;
2745 goto loop_again;
2746 }
2747
Rik van Riel7be62de2012-03-21 16:33:52 -07002748 /* Check if the memory needs to be defragmented. */
2749 if (zone_watermark_ok(zone, order,
2750 low_wmark_pages(zone), *classzone_idx, 0))
2751 zones_need_compaction = 0;
2752
Mel Gorman99504742011-01-13 15:46:20 -08002753 /* If balanced, clear the congested flag */
2754 zone_clear_flag(zone, ZONE_CONGESTED);
2755 }
Rik van Riel7be62de2012-03-21 16:33:52 -07002756
2757 if (zones_need_compaction)
2758 compact_pgdat(pgdat, order);
Mel Gorman99504742011-01-13 15:46:20 -08002759 }
2760
Mel Gorman0abdee22011-01-13 15:46:22 -08002761 /*
Mel Gorman55150612012-07-31 16:44:35 -07002762	 * Return the order we were reclaiming at so that prepare_kswapd_sleep()
Mel Gorman0abdee22011-01-13 15:46:22 -08002763	 * can base its decision on it. However,
2764 * if another caller entered the allocator slow path while kswapd
2765 * was awake, order will remain at the higher level
2766 */
Mel Gormandc83edd2011-01-13 15:46:26 -08002767 *classzone_idx = end_zone;
Mel Gorman0abdee22011-01-13 15:46:22 -08002768 return order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769}
2770
Mel Gormandc83edd2011-01-13 15:46:26 -08002771static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08002772{
2773 long remaining = 0;
2774 DEFINE_WAIT(wait);
2775
2776 if (freezing(current) || kthread_should_stop())
2777 return;
2778
2779 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2780
2781 /* Try to sleep for a short interval */
Mel Gorman55150612012-07-31 16:44:35 -07002782 if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08002783 remaining = schedule_timeout(HZ/10);
2784 finish_wait(&pgdat->kswapd_wait, &wait);
2785 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
2786 }
2787
2788 /*
2789 * After a short sleep, check if it was a premature sleep. If not, then
2790 * go fully to sleep until explicitly woken up.
2791 */
Mel Gorman55150612012-07-31 16:44:35 -07002792 if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08002793 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
2794
2795 /*
2796 * vmstat counters are not perfectly accurate and the estimated
2797 * value for counters such as NR_FREE_PAGES can deviate from the
2798 * true value by nr_online_cpus * threshold. To avoid the zone
2799 * watermarks being breached while under pressure, we reduce the
2800 * per-cpu vmstat threshold while kswapd is awake and restore
2801 * them before going back to sleep.
2802 */
2803 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
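		/*
		 * For instance (purely illustrative numbers): with 16 online
		 * CPUs and a per-cpu stat threshold of 64, NR_FREE_PAGES
		 * could be off by up to 16 * 64 = 1024 pages, enough to
		 * spuriously appear to meet a low watermark on a small zone.
		 */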
Aaditya Kumar1c7e7f62012-07-17 15:48:07 -07002804
2805 if (!kthread_should_stop())
2806 schedule();
2807
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08002808 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
2809 } else {
2810 if (remaining)
2811 count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
2812 else
2813 count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
2814 }
2815 finish_wait(&pgdat->kswapd_wait, &wait);
2816}
2817
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818/*
2819 * The background pageout daemon, started as a kernel thread
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002820 * from the init process.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 *
2822 * This basically trickles out pages so that we have _some_
2823 * free memory available even if there is no other activity
2824 * that frees anything up. This is needed for things like routing
2825 * etc, where we otherwise might have all activity going on in
2826 * asynchronous contexts that cannot page things out.
2827 *
2828 * If there are applications that are active memory-allocators
2829 * (most normal use), this basically shouldn't matter.
2830 */
2831static int kswapd(void *p)
2832{
Mel Gorman215ddd62011-07-08 15:39:40 -07002833 unsigned long order, new_order;
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002834 unsigned balanced_order;
Mel Gorman215ddd62011-07-08 15:39:40 -07002835 int classzone_idx, new_classzone_idx;
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002836 int balanced_classzone_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 pg_data_t *pgdat = (pg_data_t*)p;
2838 struct task_struct *tsk = current;
KOSAKI Motohirof0bc0a62011-01-13 15:45:50 -08002839
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 struct reclaim_state reclaim_state = {
2841 .reclaimed_slab = 0,
2842 };
Rusty Russella70f7302009-03-13 14:49:46 +10302843 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844
Nick Piggincf40bd12009-01-21 08:12:39 +01002845 lockdep_set_current_reclaim_state(GFP_KERNEL);
2846
Rusty Russell174596a2009-01-01 10:12:29 +10302847 if (!cpumask_empty(cpumask))
Mike Travisc5f59f02008-04-04 18:11:10 -07002848 set_cpus_allowed_ptr(tsk, cpumask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849 current->reclaim_state = &reclaim_state;
2850
2851 /*
2852 * Tell the memory management that we're a "memory allocator",
2853 * and that if we need more memory we should get access to it
2854 * regardless (see "__alloc_pages()"). "kswapd" should
2855 * never get caught in the normal page freeing logic.
2856 *
2857 * (Kswapd normally doesn't need memory anyway, but sometimes
2858 * you need a small amount of memory in order to be able to
2859 * page out something else, and this flag essentially protects
2860 * us from recursively trying to free more memory as we're
2861 * trying to free the first piece of memory in the first place).
2862 */
Christoph Lameter930d9152006-01-08 01:00:47 -08002863 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
Rafael J. Wysocki83144182007-07-17 04:03:35 -07002864 set_freezable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865
Mel Gorman215ddd62011-07-08 15:39:40 -07002866 order = new_order = 0;
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002867 balanced_order = 0;
Mel Gorman215ddd62011-07-08 15:39:40 -07002868 classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002869 balanced_classzone_idx = classzone_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870 for ( ; ; ) {
David Rientjes8fe23e02009-12-14 17:58:33 -08002871 int ret;
Christoph Lameter3e1d1d22005-06-24 23:13:50 -07002872
Mel Gorman215ddd62011-07-08 15:39:40 -07002873 /*
2874 * If the last balance_pgdat was unsuccessful it's unlikely a
 2875		 * new request of a similar or harder type will succeed soon,
 2876		 * so consider going to sleep based on the order we reclaimed at.
2877 */
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002878 if (balanced_classzone_idx >= new_classzone_idx &&
2879 balanced_order == new_order) {
Mel Gorman215ddd62011-07-08 15:39:40 -07002880 new_order = pgdat->kswapd_max_order;
2881 new_classzone_idx = pgdat->classzone_idx;
2882 pgdat->kswapd_max_order = 0;
2883 pgdat->classzone_idx = pgdat->nr_zones - 1;
2884 }
2885
Mel Gorman99504742011-01-13 15:46:20 -08002886 if (order < new_order || classzone_idx > new_classzone_idx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 /*
2888 * Don't sleep if someone wants a larger 'order'
Mel Gorman99504742011-01-13 15:46:20 -08002889			 * allocation or has tighter zone constraints
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 */
2891 order = new_order;
Mel Gorman99504742011-01-13 15:46:20 -08002892 classzone_idx = new_classzone_idx;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002893 } else {
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002894 kswapd_try_to_sleep(pgdat, balanced_order,
2895 balanced_classzone_idx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 order = pgdat->kswapd_max_order;
Mel Gorman99504742011-01-13 15:46:20 -08002897 classzone_idx = pgdat->classzone_idx;
Alex,Shif0dfcde2011-10-31 17:08:45 -07002898 new_order = order;
2899 new_classzone_idx = classzone_idx;
Mel Gorman4d405022011-01-13 15:46:23 -08002900 pgdat->kswapd_max_order = 0;
Mel Gorman215ddd62011-07-08 15:39:40 -07002901 pgdat->classzone_idx = pgdat->nr_zones - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903
David Rientjes8fe23e02009-12-14 17:58:33 -08002904 ret = try_to_freeze();
2905 if (kthread_should_stop())
2906 break;
2907
2908 /*
2909 * We can speed up thawing tasks if we don't call balance_pgdat
2910 * after returning from the refrigerator
2911 */
Mel Gorman33906bc2010-08-09 17:19:16 -07002912 if (!ret) {
2913 trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
Alex,Shid2ebd0f62011-10-31 17:08:39 -07002914 balanced_classzone_idx = classzone_idx;
2915 balanced_order = balance_pgdat(pgdat, order,
2916 &balanced_classzone_idx);
Mel Gorman33906bc2010-08-09 17:19:16 -07002917 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918 }
2919 return 0;
2920}
2921
2922/*
2923 * A zone is low on free memory, so wake its kswapd task to service it.
2924 */
Mel Gorman99504742011-01-13 15:46:20 -08002925void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926{
2927 pg_data_t *pgdat;
2928
Con Kolivasf3fe6512006-01-06 00:11:15 -08002929 if (!populated_zone(zone))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 return;
2931
Paul Jackson02a0e532006-12-13 00:34:25 -08002932 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 return;
Mel Gorman88f5acf2011-01-13 15:45:41 -08002934 pgdat = zone->zone_pgdat;
Mel Gorman99504742011-01-13 15:46:20 -08002935 if (pgdat->kswapd_max_order < order) {
Mel Gorman88f5acf2011-01-13 15:45:41 -08002936 pgdat->kswapd_max_order = order;
Mel Gorman99504742011-01-13 15:46:20 -08002937 pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx);
2938 }
Con Kolivas8d0986e2005-09-13 01:25:07 -07002939 if (!waitqueue_active(&pgdat->kswapd_wait))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 return;
Mel Gorman88f5acf2011-01-13 15:45:41 -08002941 if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0))
2942 return;
2943
2944 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
Con Kolivas8d0986e2005-09-13 01:25:07 -07002945 wake_up_interruptible(&pgdat->kswapd_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946}
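/*
 * Example for wakeup_kswapd() above (hypothetical): a failing order-2
 * allocation constrained to ZONE_NORMAL raises kswapd_max_order to 2,
 * clamps pgdat->classzone_idx down to ZONE_NORMAL's index and, if the
 * zone is below its low watermark, wakes kswapd.
 */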
2947
Wu Fengguangadea02a2009-09-21 17:01:42 -07002948/*
 2949 * The reclaimable count is mostly accurate.
 2950 * The less reclaimable pages are:
 2951 * - mlocked pages, which will be moved to the unevictable list when encountered
 2952 * - mapped pages, which may require several passes to be reclaimed
 2953 * - dirty pages, which are not "instantly" reclaimable
2954 */
2955unsigned long global_reclaimable_pages(void)
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002956{
Wu Fengguangadea02a2009-09-21 17:01:42 -07002957 int nr;
2958
2959 nr = global_page_state(NR_ACTIVE_FILE) +
2960 global_page_state(NR_INACTIVE_FILE);
2961
2962 if (nr_swap_pages > 0)
2963 nr += global_page_state(NR_ACTIVE_ANON) +
2964 global_page_state(NR_INACTIVE_ANON);
2965
2966 return nr;
2967}
2968
2969unsigned long zone_reclaimable_pages(struct zone *zone)
2970{
2971 int nr;
2972
2973 nr = zone_page_state(zone, NR_ACTIVE_FILE) +
2974 zone_page_state(zone, NR_INACTIVE_FILE);
2975
2976 if (nr_swap_pages > 0)
2977 nr += zone_page_state(zone, NR_ACTIVE_ANON) +
2978 zone_page_state(zone, NR_INACTIVE_ANON);
2979
2980 return nr;
Rik van Riel4f98a2f2008-10-18 20:26:32 -07002981}
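/*
 * Quick example for zone_reclaimable_pages() above (hypothetical
 * counts): 30,000 active file pages plus 10,000 inactive file pages
 * yields 40,000 with swap off; anon pages are only added once
 * nr_swap_pages > 0.
 */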
2982
Rafael J. Wysockic6f37f12009-05-24 22:16:31 +02002983#ifdef CONFIG_HIBERNATION
Linus Torvalds1da177e2005-04-16 15:20:36 -07002984/*
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08002985 * Try to free `nr_to_reclaim' pages of memory, system-wide, and return the number of
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002986 * freed pages.
2987 *
2988 * Rather than trying to age LRUs the aim is to preserve the overall
2989 * LRU order by reclaiming preferentially
2990 * inactive > active > active referenced > active mapped
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991 */
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08002992unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993{
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002994 struct reclaim_state reclaim_state;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002995 struct scan_control sc = {
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08002996 .gfp_mask = GFP_HIGHUSER_MOVABLE,
2997 .may_swap = 1,
2998 .may_unmap = 1,
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07002999 .may_writepage = 1,
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003000 .nr_to_reclaim = nr_to_reclaim,
3001 .hibernation_mode = 1,
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003002 .order = 0,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07003003 .priority = DEF_PRIORITY,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 };
Ying Hana09ed5e2011-05-24 17:12:26 -07003005 struct shrink_control shrink = {
3006 .gfp_mask = sc.gfp_mask,
3007 };
3008 struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003009 struct task_struct *p = current;
3010 unsigned long nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003012 p->flags |= PF_MEMALLOC;
3013 lockdep_set_current_reclaim_state(sc.gfp_mask);
3014 reclaim_state.reclaimed_slab = 0;
3015 p->reclaim_state = &reclaim_state;
Andrew Morton69e05942006-03-22 00:08:19 -08003016
Ying Hana09ed5e2011-05-24 17:12:26 -07003017 nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07003018
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003019 p->reclaim_state = NULL;
3020 lockdep_clear_current_reclaim_state();
3021 p->flags &= ~PF_MEMALLOC;
Rafael J. Wysockid6277db2006-06-23 02:03:18 -07003022
KOSAKI Motohiro7b517552009-12-14 17:59:12 -08003023 return nr_reclaimed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024}
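/*
 * Typical use (hibernation code): the snapshot path asks for however
 * many pages it needs to fit the suspend image, e.g.
 * shrink_all_memory(saveable - size), and may get back fewer pages
 * than requested if reclaim makes no further progress.
 */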
Rafael J. Wysockic6f37f12009-05-24 22:16:31 +02003025#endif /* CONFIG_HIBERNATION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027/* It's optimal to keep kswapds on the same CPUs as their memory, but
3028 not required for correctness. So if the last cpu in a node goes
3029 away, we get changed to run anywhere: as the first one comes back,
3030 restore their cpu bindings. */
Chandra Seetharaman9c7b2162006-06-27 02:54:07 -07003031static int __devinit cpu_callback(struct notifier_block *nfb,
Andrew Morton69e05942006-03-22 00:08:19 -08003032 unsigned long action, void *hcpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033{
Yasunori Goto58c0a4a2007-10-16 01:25:40 -07003034 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003036 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
Yasunori Goto58c0a4a2007-10-16 01:25:40 -07003037 for_each_node_state(nid, N_HIGH_MEMORY) {
Mike Travisc5f59f02008-04-04 18:11:10 -07003038 pg_data_t *pgdat = NODE_DATA(nid);
Rusty Russella70f7302009-03-13 14:49:46 +10303039 const struct cpumask *mask;
3040
3041 mask = cpumask_of_node(pgdat->node_id);
Mike Travisc5f59f02008-04-04 18:11:10 -07003042
Rusty Russell3e597942009-01-01 10:12:24 +10303043 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044 /* One of our CPUs online: restore mask */
Mike Travisc5f59f02008-04-04 18:11:10 -07003045 set_cpus_allowed_ptr(pgdat->kswapd, mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046 }
3047 }
3048 return NOTIFY_OK;
3049}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003050
Yasunori Goto3218ae12006-06-27 02:53:33 -07003051/*
3052 * This kswapd start function will be called by init and node-hot-add.
 3053 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
3054 */
3055int kswapd_run(int nid)
3056{
3057 pg_data_t *pgdat = NODE_DATA(nid);
3058 int ret = 0;
3059
3060 if (pgdat->kswapd)
3061 return 0;
3062
3063 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
3064 if (IS_ERR(pgdat->kswapd)) {
3065 /* failure at boot is fatal */
3066 BUG_ON(system_state == SYSTEM_BOOTING);
 3067		printk("Failed to start kswapd on node %d\n", nid);
3068 ret = -1;
3069 }
3070 return ret;
3071}
3072
David Rientjes8fe23e02009-12-14 17:58:33 -08003073/*
Jiang Liud8adde12012-07-11 14:01:52 -07003074 * Called by memory hotplug when all memory in a node is offlined. Caller must
3075 * hold lock_memory_hotplug().
David Rientjes8fe23e02009-12-14 17:58:33 -08003076 */
3077void kswapd_stop(int nid)
3078{
3079 struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
3080
Jiang Liud8adde12012-07-11 14:01:52 -07003081 if (kswapd) {
David Rientjes8fe23e02009-12-14 17:58:33 -08003082 kthread_stop(kswapd);
Jiang Liud8adde12012-07-11 14:01:52 -07003083 NODE_DATA(nid)->kswapd = NULL;
3084 }
David Rientjes8fe23e02009-12-14 17:58:33 -08003085}
3086
Linus Torvalds1da177e2005-04-16 15:20:36 -07003087static int __init kswapd_init(void)
3088{
Yasunori Goto3218ae12006-06-27 02:53:33 -07003089 int nid;
Andrew Morton69e05942006-03-22 00:08:19 -08003090
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 swap_setup();
Christoph Lameter9422ffb2007-10-16 01:25:31 -07003092 for_each_node_state(nid, N_HIGH_MEMORY)
Yasunori Goto3218ae12006-06-27 02:53:33 -07003093 kswapd_run(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094 hotcpu_notifier(cpu_callback, 0);
3095 return 0;
3096}
3097
3098module_init(kswapd_init)
Christoph Lameter9eeff232006-01-18 17:42:31 -08003099
3100#ifdef CONFIG_NUMA
3101/*
3102 * Zone reclaim mode
3103 *
3104 * If non-zero call zone_reclaim when the number of free pages falls below
3105 * the watermarks.
Christoph Lameter9eeff232006-01-18 17:42:31 -08003106 */
3107int zone_reclaim_mode __read_mostly;
3108
Christoph Lameter1b2ffb72006-02-01 03:05:34 -08003109#define RECLAIM_OFF 0
Fernando Luis Vazquez Cao7d034312008-07-29 22:33:41 -07003110#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
Christoph Lameter1b2ffb72006-02-01 03:05:34 -08003111#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
3112#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
3113
Christoph Lameter9eeff232006-01-18 17:42:31 -08003114/*
Christoph Lametera92f7122006-02-01 03:05:32 -08003115 * Priority for ZONE_RECLAIM. This determines the fraction of pages
 3116 * of a node considered for each zone_reclaim. Priority 4 scans 1/16th of
3117 * a zone.
3118 */
3119#define ZONE_RECLAIM_PRIORITY 4
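/*
 * The scan fraction is roughly 1/2^priority per pass, so priority 4
 * looks at about 1/16th of the zone's LRU pages on each call;
 * __zone_reclaim() still walks the priority down towards 0 if that is
 * not enough.
 */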
3120
Christoph Lameter9eeff232006-01-18 17:42:31 -08003121/*
Christoph Lameter96146342006-07-03 00:24:13 -07003122 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
3123 * occur.
3124 */
3125int sysctl_min_unmapped_ratio = 1;
3126
3127/*
Christoph Lameter0ff38492006-09-25 23:31:52 -07003128 * If the number of slab pages in a zone grows beyond this percentage then
3129 * slab reclaim needs to occur.
3130 */
3131int sysctl_min_slab_ratio = 5;
3132
Mel Gorman90afa5d2009-06-16 15:33:20 -07003133static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
3134{
3135 unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
3136 unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
3137 zone_page_state(zone, NR_ACTIVE_FILE);
3138
3139 /*
3140 * It's possible for there to be more file mapped pages than
3141 * accounted for by the pages on the file LRU lists because
3142 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
3143 */
3144 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
3145}
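/*
 * Worked example for zone_unmapped_file_pages() above (invented
 * numbers): file_lru = 50,000 and file_mapped = 20,000 yields 30,000
 * unmapped file pages. If tmpfs pushes file_mapped to 60,000, above
 * file_lru, the clamp returns 0 instead of underflowing the unsigned
 * subtraction.
 */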
3146
3147/* Work out how many page cache pages we can reclaim in this reclaim_mode */
3148static long zone_pagecache_reclaimable(struct zone *zone)
3149{
3150 long nr_pagecache_reclaimable;
3151 long delta = 0;
3152
3153 /*
3154 * If RECLAIM_SWAP is set, then all file pages are considered
3155 * potentially reclaimable. Otherwise, we have to worry about
 3156	 * pages like swapcache, and zone_unmapped_file_pages() provides
3157 * a better estimate
3158 */
3159 if (zone_reclaim_mode & RECLAIM_SWAP)
3160 nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
3161 else
3162 nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
3163
3164 /* If we can't clean pages, remove dirty pages from consideration */
3165 if (!(zone_reclaim_mode & RECLAIM_WRITE))
3166 delta += zone_page_state(zone, NR_FILE_DIRTY);
3167
3168 /* Watch for any possible underflows due to delta */
3169 if (unlikely(delta > nr_pagecache_reclaimable))
3170 delta = nr_pagecache_reclaimable;
3171
3172 return nr_pagecache_reclaimable - delta;
3173}
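/*
 * Example for zone_pagecache_reclaimable() above (made-up numbers):
 * without RECLAIM_SWAP we start from the unmapped file pages, say
 * 30,000; if 5,000 of those are dirty and RECLAIM_WRITE is clear,
 * 25,000 pagecache pages are reported reclaimable.
 */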
3174
Christoph Lameter0ff38492006-09-25 23:31:52 -07003175/*
Christoph Lameter9eeff232006-01-18 17:42:31 -08003176 * Try to free up some pages from this zone through reclaim.
3177 */
Andrew Morton179e9632006-03-22 00:08:18 -08003178static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
Christoph Lameter9eeff232006-01-18 17:42:31 -08003179{
Christoph Lameter7fb2d462006-03-22 00:08:22 -08003180 /* Minimum pages needed in order to stay on node */
Andrew Morton69e05942006-03-22 00:08:19 -08003181 const unsigned long nr_pages = 1 << order;
Christoph Lameter9eeff232006-01-18 17:42:31 -08003182 struct task_struct *p = current;
3183 struct reclaim_state reclaim_state;
Andrew Morton179e9632006-03-22 00:08:18 -08003184 struct scan_control sc = {
3185 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
Johannes Weinera6dc60f82009-03-31 15:19:30 -07003186 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
KOSAKI Motohiro2e2e4252009-04-21 12:24:57 -07003187 .may_swap = 1,
KOSAKI Motohiro22fba332009-12-14 17:59:10 -08003188 .nr_to_reclaim = max_t(unsigned long, nr_pages,
3189 SWAP_CLUSTER_MAX),
Andrew Morton179e9632006-03-22 00:08:18 -08003190 .gfp_mask = gfp_mask,
Johannes Weinerbd2f6192009-03-31 15:19:38 -07003191 .order = order,
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07003192 .priority = ZONE_RECLAIM_PRIORITY,
Andrew Morton179e9632006-03-22 00:08:18 -08003193 };
Ying Hana09ed5e2011-05-24 17:12:26 -07003194 struct shrink_control shrink = {
3195 .gfp_mask = sc.gfp_mask,
3196 };
KOSAKI Motohiro15748042010-08-09 17:19:50 -07003197 unsigned long nr_slab_pages0, nr_slab_pages1;
Christoph Lameter9eeff232006-01-18 17:42:31 -08003198
Christoph Lameter9eeff232006-01-18 17:42:31 -08003199 cond_resched();
Christoph Lameterd4f77962006-02-24 13:04:22 -08003200 /*
3201 * We need to be able to allocate from the reserves for RECLAIM_SWAP
3202 * and we also need to be able to write out pages for RECLAIM_WRITE
3203 * and RECLAIM_SWAP.
3204 */
3205 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
KOSAKI Motohiro76ca5422010-03-05 13:41:47 -08003206 lockdep_set_current_reclaim_state(gfp_mask);
Christoph Lameter9eeff232006-01-18 17:42:31 -08003207 reclaim_state.reclaimed_slab = 0;
3208 p->reclaim_state = &reclaim_state;
Christoph Lameterc84db232006-02-01 03:05:29 -08003209
Mel Gorman90afa5d2009-06-16 15:33:20 -07003210 if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
Christoph Lameter0ff38492006-09-25 23:31:52 -07003211 /*
3212 * Free memory by calling shrink zone with increasing
3213 * priorities until we have enough memory freed.
3214 */
Christoph Lameter0ff38492006-09-25 23:31:52 -07003215 do {
Konstantin Khlebnikov9e3b2f82012-05-29 15:06:57 -07003216 shrink_zone(zone, &sc);
3217 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
Christoph Lameter0ff38492006-09-25 23:31:52 -07003218 }
Christoph Lameterc84db232006-02-01 03:05:29 -08003219
KOSAKI Motohiro15748042010-08-09 17:19:50 -07003220 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3221 if (nr_slab_pages0 > zone->min_slab_pages) {
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08003222 /*
Christoph Lameter7fb2d462006-03-22 00:08:22 -08003223 * shrink_slab() does not currently allow us to determine how
Christoph Lameter0ff38492006-09-25 23:31:52 -07003224 * many pages were freed in this zone. So we take the current
3225 * number of slab pages and shake the slab until it is reduced
3226 * by the same nr_pages that we used for reclaiming unmapped
3227 * pages.
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08003228 *
Christoph Lameter0ff38492006-09-25 23:31:52 -07003229 * Note that shrink_slab will free memory on all zones and may
3230 * take a long time.
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08003231 */
KOSAKI Motohiro4dc4b3d2010-08-09 17:19:54 -07003232 for (;;) {
3233 unsigned long lru_pages = zone_reclaimable_pages(zone);
3234
3235 /* No reclaimable slab or very low memory pressure */
Ying Han1495f232011-05-24 17:12:27 -07003236 if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
KOSAKI Motohiro4dc4b3d2010-08-09 17:19:54 -07003237 break;
3238
3239 /* Freed enough memory */
3240 nr_slab_pages1 = zone_page_state(zone,
3241 NR_SLAB_RECLAIMABLE);
3242 if (nr_slab_pages1 + nr_pages <= nr_slab_pages0)
3243 break;
3244 }
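		/*
		 * E.g. (invented numbers): with nr_slab_pages0 = 12,000 and
		 * an order-2 request (nr_pages = 4), the loop above keeps
		 * calling shrink_slab() until NR_SLAB_RECLAIMABLE drops to
		 * 11,996 or shrink_slab() reports no progress.
		 */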
Christoph Lameter83e33a42006-09-25 23:31:53 -07003245
3246 /*
3247 * Update nr_reclaimed by the number of slab pages we
3248 * reclaimed from this zone.
3249 */
KOSAKI Motohiro15748042010-08-09 17:19:50 -07003250 nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
3251 if (nr_slab_pages1 < nr_slab_pages0)
3252 sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
Christoph Lameter2a16e3f2006-02-01 03:05:35 -08003253 }
3254
Christoph Lameter9eeff232006-01-18 17:42:31 -08003255 p->reclaim_state = NULL;
Christoph Lameterd4f77962006-02-24 13:04:22 -08003256 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
KOSAKI Motohiro76ca5422010-03-05 13:41:47 -08003257 lockdep_clear_current_reclaim_state();
Rik van Riela79311c2009-01-06 14:40:01 -08003258 return sc.nr_reclaimed >= nr_pages;
Christoph Lameter9eeff232006-01-18 17:42:31 -08003259}
Andrew Morton179e9632006-03-22 00:08:18 -08003260
3261int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3262{
Andrew Morton179e9632006-03-22 00:08:18 -08003263 int node_id;
David Rientjesd773ed62007-10-16 23:26:01 -07003264 int ret;
Andrew Morton179e9632006-03-22 00:08:18 -08003265
3266 /*
Christoph Lameter0ff38492006-09-25 23:31:52 -07003267 * Zone reclaim reclaims unmapped file backed pages and
3268 * slab pages if we are over the defined limits.
Christoph Lameter34aa1332006-06-30 01:55:37 -07003269 *
Christoph Lameter96146342006-07-03 00:24:13 -07003270 * A small portion of unmapped file backed pages is needed for
 3271	 * file I/O, otherwise pages read by file I/O will be immediately
3272 * thrown out if the zone is overallocated. So we do not reclaim
3273 * if less than a specified percentage of the zone is used by
3274 * unmapped file backed pages.
Andrew Morton179e9632006-03-22 00:08:18 -08003275 */
Mel Gorman90afa5d2009-06-16 15:33:20 -07003276 if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
3277 zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
Mel Gormanfa5e0842009-06-16 15:33:22 -07003278 return ZONE_RECLAIM_FULL;
Andrew Morton179e9632006-03-22 00:08:18 -08003279
KOSAKI Motohiro93e4a89a2010-03-05 13:41:55 -08003280 if (zone->all_unreclaimable)
Mel Gormanfa5e0842009-06-16 15:33:22 -07003281 return ZONE_RECLAIM_FULL;
David Rientjesd773ed62007-10-16 23:26:01 -07003282
Andrew Morton179e9632006-03-22 00:08:18 -08003283 /*
David Rientjesd773ed62007-10-16 23:26:01 -07003284 * Do not scan if the allocation should not be delayed.
Andrew Morton179e9632006-03-22 00:08:18 -08003285 */
David Rientjesd773ed62007-10-16 23:26:01 -07003286 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
Mel Gormanfa5e0842009-06-16 15:33:22 -07003287 return ZONE_RECLAIM_NOSCAN;
Andrew Morton179e9632006-03-22 00:08:18 -08003288
3289 /*
3290 * Only run zone reclaim on the local zone or on zones that do not
3291 * have associated processors. This will favor the local processor
3292 * over remote processors and spread off node memory allocations
3293 * as wide as possible.
3294 */
Christoph Lameter89fa3022006-09-25 23:31:55 -07003295 node_id = zone_to_nid(zone);
Christoph Lameter37c07082007-10-16 01:25:36 -07003296 if (node_state(node_id, N_CPU) && node_id != numa_node_id())
Mel Gormanfa5e0842009-06-16 15:33:22 -07003297 return ZONE_RECLAIM_NOSCAN;
David Rientjesd773ed62007-10-16 23:26:01 -07003298
3299 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
Mel Gormanfa5e0842009-06-16 15:33:22 -07003300 return ZONE_RECLAIM_NOSCAN;
3301
David Rientjesd773ed62007-10-16 23:26:01 -07003302 ret = __zone_reclaim(zone, gfp_mask, order);
3303 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
3304
Mel Gorman24cf725182009-06-16 15:33:23 -07003305 if (!ret)
3306 count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
3307
David Rientjesd773ed62007-10-16 23:26:01 -07003308 return ret;
Andrew Morton179e9632006-03-22 00:08:18 -08003309}
Christoph Lameter9eeff232006-01-18 17:42:31 -08003310#endif
Lee Schermerhorn894bc312008-10-18 20:26:39 -07003311
Lee Schermerhorn894bc312008-10-18 20:26:39 -07003312/*
3313 * page_evictable - test whether a page is evictable
3314 * @page: the page to test
3315 * @vma: the VMA in which the page is or will be mapped, may be NULL
3316 *
3317 * Test whether page is evictable--i.e., should be placed on active/inactive
Nick Pigginb291f002008-10-18 20:26:44 -07003318 * lists vs unevictable list. The vma argument is !NULL when called from the
3319 * fault path to determine how to instantate a new page.
Lee Schermerhorn894bc312008-10-18 20:26:39 -07003320 *
3321 * Reasons page might not be evictable:
Lee Schermerhornba9ddf42008-10-18 20:26:42 -07003322 * (1) page's mapping marked unevictable
Nick Pigginb291f002008-10-18 20:26:44 -07003323 * (2) page is part of an mlocked VMA
Lee Schermerhornba9ddf42008-10-18 20:26:42 -07003324 *
Lee Schermerhorn894bc312008-10-18 20:26:39 -07003325 */
3326int page_evictable(struct page *page, struct vm_area_struct *vma)
3327{
3328
Lee Schermerhornba9ddf42008-10-18 20:26:42 -07003329 if (mapping_unevictable(page_mapping(page)))
3330 return 0;
3331
Ying Han096a7cf2012-05-29 15:06:25 -07003332 if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
Nick Pigginb291f002008-10-18 20:26:44 -07003333 return 0;
Lee Schermerhorn894bc312008-10-18 20:26:39 -07003334
3335 return 1;
3336}
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003337
Hugh Dickins850465792012-01-20 14:34:19 -08003338#ifdef CONFIG_SHMEM
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003339/**
Hugh Dickins24513262012-01-20 14:34:21 -08003340 * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
3341 * @pages: array of pages to check
3342 * @nr_pages: number of pages to check
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003343 *
Hugh Dickins24513262012-01-20 14:34:21 -08003344 * Checks pages for evictability and moves them to the appropriate lru list.
Hugh Dickins850465792012-01-20 14:34:19 -08003345 *
3346 * This function is only used for SysV IPC SHM_UNLOCK.
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003347 */
Hugh Dickins24513262012-01-20 14:34:21 -08003348void check_move_unevictable_pages(struct page **pages, int nr_pages)
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003349{
Johannes Weiner925b7672012-01-12 17:18:15 -08003350 struct lruvec *lruvec;
Hugh Dickins24513262012-01-20 14:34:21 -08003351 struct zone *zone = NULL;
3352 int pgscanned = 0;
3353 int pgrescued = 0;
3354 int i;
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003355
Hugh Dickins24513262012-01-20 14:34:21 -08003356 for (i = 0; i < nr_pages; i++) {
3357 struct page *page = pages[i];
3358 struct zone *pagezone;
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003359
Hugh Dickins24513262012-01-20 14:34:21 -08003360 pgscanned++;
3361 pagezone = page_zone(page);
3362 if (pagezone != zone) {
3363 if (zone)
3364 spin_unlock_irq(&zone->lru_lock);
3365 zone = pagezone;
3366 spin_lock_irq(&zone->lru_lock);
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003367 }
Hugh Dickinsfa9add62012-05-29 15:07:09 -07003368 lruvec = mem_cgroup_page_lruvec(page, zone);
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003369
Hugh Dickins24513262012-01-20 14:34:21 -08003370 if (!PageLRU(page) || !PageUnevictable(page))
3371 continue;
3372
3373 if (page_evictable(page, NULL)) {
3374 enum lru_list lru = page_lru_base_type(page);
3375
3376 VM_BUG_ON(PageActive(page));
3377 ClearPageUnevictable(page);
Hugh Dickinsfa9add62012-05-29 15:07:09 -07003378 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3379 add_page_to_lru_list(page, lruvec, lru);
Hugh Dickins24513262012-01-20 14:34:21 -08003380 pgrescued++;
3381 }
Lee Schermerhorn89e004ea2008-10-18 20:26:43 -07003382 }
Hugh Dickins24513262012-01-20 14:34:21 -08003383
3384 if (zone) {
3385 __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
3386 __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
3387 spin_unlock_irq(&zone->lru_lock);
3388 }
Hugh Dickins850465792012-01-20 14:34:19 -08003389}
3390#endif /* CONFIG_SHMEM */
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003391
Johannes Weiner264e56d2011-10-31 17:09:13 -07003392static void warn_scan_unevictable_pages(void)
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003393{
Johannes Weiner264e56d2011-10-31 17:09:13 -07003394 printk_once(KERN_WARNING
KOSAKI Motohiro25bd91b2012-01-10 15:07:40 -08003395 "%s: The scan_unevictable_pages sysctl/node-interface has been "
Johannes Weiner264e56d2011-10-31 17:09:13 -07003396 "disabled for lack of a legitimate use case. If you have "
KOSAKI Motohiro25bd91b2012-01-10 15:07:40 -08003397 "one, please send an email to linux-mm@kvack.org.\n",
3398 current->comm);
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003399}
3400
3401/*
3402 * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of
3403 * all nodes' unevictable lists for evictable pages
3404 */
3405unsigned long scan_unevictable_pages;
3406
3407int scan_unevictable_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07003408 void __user *buffer,
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003409 size_t *length, loff_t *ppos)
3410{
Johannes Weiner264e56d2011-10-31 17:09:13 -07003411 warn_scan_unevictable_pages();
Alexey Dobriyan8d65af72009-09-23 15:57:19 -07003412 proc_doulongvec_minmax(table, write, buffer, length, ppos);
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003413 scan_unevictable_pages = 0;
3414 return 0;
3415}
3416
Thadeu Lima de Souza Cascardoe4455ab2010-10-26 14:21:28 -07003417#ifdef CONFIG_NUMA
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003418/*
3419 * per node 'scan_unevictable_pages' attribute. On demand re-scan of
3420 * a specified node's per zone unevictable lists for evictable pages.
3421 */
3422
Kay Sievers10fbcf42011-12-21 14:48:43 -08003423static ssize_t read_scan_unevictable_node(struct device *dev,
3424 struct device_attribute *attr,
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003425 char *buf)
3426{
Johannes Weiner264e56d2011-10-31 17:09:13 -07003427 warn_scan_unevictable_pages();
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003428 return sprintf(buf, "0\n"); /* always zero; should fit... */
3429}
3430
Kay Sievers10fbcf42011-12-21 14:48:43 -08003431static ssize_t write_scan_unevictable_node(struct device *dev,
3432 struct device_attribute *attr,
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003433 const char *buf, size_t count)
3434{
Johannes Weiner264e56d2011-10-31 17:09:13 -07003435 warn_scan_unevictable_pages();
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003436 return 1;
3437}
3438
3439
Kay Sievers10fbcf42011-12-21 14:48:43 -08003440static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003441 read_scan_unevictable_node,
3442 write_scan_unevictable_node);
3443
3444int scan_unevictable_register_node(struct node *node)
3445{
Kay Sievers10fbcf42011-12-21 14:48:43 -08003446 return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages);
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003447}
3448
3449void scan_unevictable_unregister_node(struct node *node)
3450{
Kay Sievers10fbcf42011-12-21 14:48:43 -08003451 device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
Lee Schermerhornaf936a12008-10-18 20:26:53 -07003452}
Thadeu Lima de Souza Cascardoe4455ab2010-10-26 14:21:28 -07003453#endif