Mel Gorman748446b2010-05-24 14:32:27 -07001/*
2 * linux/mm/compaction.c
3 *
4 * Memory compaction for the reduction of external fragmentation. Note that
5 * this heavily depends upon page migration to do all the real heavy
6 * lifting
7 *
8 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
9 */
Vlastimil Babka698b1b32016-03-17 14:18:08 -070010#include <linux/cpu.h>
Mel Gorman748446b2010-05-24 14:32:27 -070011#include <linux/swap.h>
12#include <linux/migrate.h>
13#include <linux/compaction.h>
14#include <linux/mm_inline.h>
15#include <linux/backing-dev.h>
Mel Gorman76ab0f52010-05-24 14:32:28 -070016#include <linux/sysctl.h>
Mel Gormaned4a6d72010-05-24 14:32:29 -070017#include <linux/sysfs.h>
Minchan Kim194159f2013-02-22 16:33:58 -080018#include <linux/page-isolation.h>
Andrey Ryabininb8c73fc2015-02-13 14:39:28 -080019#include <linux/kasan.h>
Vlastimil Babka698b1b32016-03-17 14:18:08 -070020#include <linux/kthread.h>
21#include <linux/freezer.h>
Joonsoo Kim83358ec2016-07-26 15:23:43 -070022#include <linux/page_owner.h>
Mel Gorman748446b2010-05-24 14:32:27 -070023#include "internal.h"
24
Minchan Kim010fc292012-12-20 15:05:06 -080025#ifdef CONFIG_COMPACTION
26static inline void count_compact_event(enum vm_event_item item)
27{
28 count_vm_event(item);
29}
30
31static inline void count_compact_events(enum vm_event_item item, long delta)
32{
33 count_vm_events(item, delta);
34}
35#else
36#define count_compact_event(item) do { } while (0)
37#define count_compact_events(item, delta) do { } while (0)
38#endif
39
Michal Nazarewiczff9543f2011-12-29 13:09:50 +010040#if defined CONFIG_COMPACTION || defined CONFIG_CMA
41
Mel Gormanb7aba692011-01-13 15:45:54 -080042#define CREATE_TRACE_POINTS
43#include <trace/events/compaction.h>
44
Vlastimil Babka06b66402016-05-19 17:11:48 -070045#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
46#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
47#define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
48#define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
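/*
 * Example (assuming pageblock_order == 9, i.e. 512 pages per pageblock):
 * pageblock_start_pfn(1000) == 512 and pageblock_end_pfn(1000) == 1024.
 */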
49
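/*
 * Return every page on @freelist to the buddy allocator and report the
 * highest PFN among the freed pages.
 */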
Mel Gorman748446b2010-05-24 14:32:27 -070050static unsigned long release_freepages(struct list_head *freelist)
51{
52 struct page *page, *next;
Vlastimil Babka6bace092014-12-10 15:43:31 -080053 unsigned long high_pfn = 0;
Mel Gorman748446b2010-05-24 14:32:27 -070054
55 list_for_each_entry_safe(page, next, freelist, lru) {
Vlastimil Babka6bace092014-12-10 15:43:31 -080056 unsigned long pfn = page_to_pfn(page);
Mel Gorman748446b2010-05-24 14:32:27 -070057 list_del(&page->lru);
58 __free_page(page);
Vlastimil Babka6bace092014-12-10 15:43:31 -080059 if (pfn > high_pfn)
60 high_pfn = pfn;
Mel Gorman748446b2010-05-24 14:32:27 -070061 }
62
Vlastimil Babka6bace092014-12-10 15:43:31 -080063 return high_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -070064}
65
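/*
 * "Map" the isolated free pages: run the post-allocation hook on each
 * high-order page and split it into order-0 pages, rebuilding @list so
 * that it contains only order-0 pages.
 */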
Michal Nazarewiczff9543f2011-12-29 13:09:50 +010066static void map_pages(struct list_head *list)
67{
Joonsoo Kim66c64222016-07-26 15:23:40 -070068 unsigned int i, order, nr_pages;
69 struct page *page, *next;
70 LIST_HEAD(tmp_list);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +010071
Joonsoo Kim66c64222016-07-26 15:23:40 -070072 list_for_each_entry_safe(page, next, list, lru) {
73 list_del(&page->lru);
74
75 order = page_private(page);
76 nr_pages = 1 << order;
Joonsoo Kim66c64222016-07-26 15:23:40 -070077
Joonsoo Kim46f24fd2016-07-26 15:23:58 -070078 post_alloc_hook(page, order, __GFP_MOVABLE);
Joonsoo Kim66c64222016-07-26 15:23:40 -070079 if (order)
80 split_page(page, order);
81
82 for (i = 0; i < nr_pages; i++) {
83 list_add(&page->lru, &tmp_list);
84 page++;
85 }
Michal Nazarewiczff9543f2011-12-29 13:09:50 +010086 }
Joonsoo Kim66c64222016-07-26 15:23:40 -070087
88 list_splice(&tmp_list, list);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +010089}
90
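/*
 * CMA and MOVABLE pageblocks are expected to contain mostly movable pages,
 * which makes them suitable both as migration sources for async compaction
 * and as targets for the free scanner.
 */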
Michal Nazarewicz47118af2011-12-29 13:09:50 +010091static inline bool migrate_async_suitable(int migratetype)
92{
93 return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
94}
95
Mel Gormanbb13ffe2012-10-08 16:32:41 -070096#ifdef CONFIG_COMPACTION
Joonsoo Kim24e27162015-02-11 15:27:09 -080097
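/*
 * A non-LRU page is considered movable when its driver has tagged
 * page->mapping with PAGE_MAPPING_MOVABLE and the backing address_space
 * provides an isolate_page() callback.
 */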
Minchan Kimbda807d2016-07-26 15:23:05 -070098int PageMovable(struct page *page)
99{
100 struct address_space *mapping;
101
102 VM_BUG_ON_PAGE(!PageLocked(page), page);
103 if (!__PageMovable(page))
104 return 0;
105
106 mapping = page_mapping(page);
107 if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
108 return 1;
109
110 return 0;
111}
112EXPORT_SYMBOL(PageMovable);
113
114void __SetPageMovable(struct page *page, struct address_space *mapping)
115{
116 VM_BUG_ON_PAGE(!PageLocked(page), page);
117 VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
118 page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
119}
120EXPORT_SYMBOL(__SetPageMovable);
121
122void __ClearPageMovable(struct page *page)
123{
124 VM_BUG_ON_PAGE(!PageLocked(page), page);
125 VM_BUG_ON_PAGE(!PageMovable(page), page);
126 /*
 127 * Clear the registered address_space value while keeping the PAGE_MAPPING_MOVABLE
 128 * flag, so that the VM can detect a page released by the driver after isolation.
 129 * With it, VM migration doesn't try to put the page back.
130 */
131 page->mapping = (void *)((unsigned long)page->mapping &
132 PAGE_MAPPING_MOVABLE);
133}
134EXPORT_SYMBOL(__ClearPageMovable);
135
Joonsoo Kim24e27162015-02-11 15:27:09 -0800136/* Do not skip compaction more than 64 times */
137#define COMPACT_MAX_DEFER_SHIFT 6
138
139/*
140 * Compaction is deferred when compaction fails to result in a page
141 * allocation success. 1 << compact_defer_limit compactions are skipped up
142 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
143 */
144void defer_compaction(struct zone *zone, int order)
145{
146 zone->compact_considered = 0;
147 zone->compact_defer_shift++;
148
149 if (order < zone->compact_order_failed)
150 zone->compact_order_failed = order;
151
152 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
153 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
154
155 trace_mm_compaction_defer_compaction(zone, order);
156}
157
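/*
 * Example: after three consecutive failures at a given order,
 * compact_defer_shift is 3, so compaction_deferred() keeps deferring
 * until it has been consulted 1 << 3 == 8 times for that zone.
 */
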
158/* Returns true if compaction should be skipped this time */
159bool compaction_deferred(struct zone *zone, int order)
160{
161 unsigned long defer_limit = 1UL << zone->compact_defer_shift;
162
163 if (order < zone->compact_order_failed)
164 return false;
165
166 /* Avoid possible overflow */
167 if (++zone->compact_considered > defer_limit)
168 zone->compact_considered = defer_limit;
169
170 if (zone->compact_considered >= defer_limit)
171 return false;
172
173 trace_mm_compaction_deferred(zone, order);
174
175 return true;
176}
177
178/*
179 * Update defer tracking counters after successful compaction of given order,
180 * which means an allocation either succeeded (alloc_success == true) or is
181 * expected to succeed.
182 */
183void compaction_defer_reset(struct zone *zone, int order,
184 bool alloc_success)
185{
186 if (alloc_success) {
187 zone->compact_considered = 0;
188 zone->compact_defer_shift = 0;
189 }
190 if (order >= zone->compact_order_failed)
191 zone->compact_order_failed = order + 1;
192
193 trace_mm_compaction_defer_reset(zone, order);
194}
195
196/* Returns true if restarting compaction after many failures */
197bool compaction_restarting(struct zone *zone, int order)
198{
199 if (order < zone->compact_order_failed)
200 return false;
201
202 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
203 zone->compact_considered >= 1UL << zone->compact_defer_shift;
204}
205
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700206/* Returns true if the pageblock should be scanned for pages to isolate. */
207static inline bool isolation_suitable(struct compact_control *cc,
208 struct page *page)
209{
210 if (cc->ignore_skip_hint)
211 return true;
212
213 return !get_pageblock_skip(page);
214}
215
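/*
 * Rewind the cached migration scanner positions to the start of the zone
 * and the cached free scanner position to the zone's last pageblock.
 */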
Vlastimil Babka023336412015-09-08 15:02:42 -0700216static void reset_cached_positions(struct zone *zone)
217{
218 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
219 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
Joonsoo Kim623446e2016-03-15 14:57:45 -0700220 zone->compact_cached_free_pfn =
Vlastimil Babka06b66402016-05-19 17:11:48 -0700221 pageblock_start_pfn(zone_end_pfn(zone) - 1);
Vlastimil Babka023336412015-09-08 15:02:42 -0700222}
223
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700224/*
225 * This function is called to clear all cached information on pageblocks that
226 * should be skipped for page isolation when the migrate and free page scanner
227 * meet.
228 */
Mel Gorman62997022012-10-08 16:32:47 -0700229static void __reset_isolation_suitable(struct zone *zone)
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700230{
231 unsigned long start_pfn = zone->zone_start_pfn;
Cody P Schafer108bcc92013-02-22 16:35:23 -0800232 unsigned long end_pfn = zone_end_pfn(zone);
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700233 unsigned long pfn;
234
Mel Gorman62997022012-10-08 16:32:47 -0700235 zone->compact_blockskip_flush = false;
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700236
237 /* Walk the zone and mark every pageblock as suitable for isolation */
238 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
239 struct page *page;
240
241 cond_resched();
242
243 if (!pfn_valid(pfn))
244 continue;
245
246 page = pfn_to_page(pfn);
247 if (zone != page_zone(page))
248 continue;
249
250 clear_pageblock_skip(page);
251 }
Vlastimil Babka023336412015-09-08 15:02:42 -0700252
253 reset_cached_positions(zone);
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700254}
255
Mel Gorman62997022012-10-08 16:32:47 -0700256void reset_isolation_suitable(pg_data_t *pgdat)
257{
258 int zoneid;
259
260 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
261 struct zone *zone = &pgdat->node_zones[zoneid];
262 if (!populated_zone(zone))
263 continue;
264
265 /* Only flush if a full compaction finished recently */
266 if (zone->compact_blockskip_flush)
267 __reset_isolation_suitable(zone);
268 }
269}
270
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700271/*
272 * If no pages were isolated then mark this pageblock to be skipped in the
Mel Gorman62997022012-10-08 16:32:47 -0700273 * future. The information is later cleared by __reset_isolation_suitable().
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700274 */
Mel Gormanc89511a2012-10-08 16:32:45 -0700275static void update_pageblock_skip(struct compact_control *cc,
276 struct page *page, unsigned long nr_isolated,
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700277 bool migrate_scanner)
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700278{
Mel Gormanc89511a2012-10-08 16:32:45 -0700279 struct zone *zone = cc->zone;
David Rientjes35979ef2014-06-04 16:08:27 -0700280 unsigned long pfn;
Joonsoo Kim6815bf32013-12-18 17:08:52 -0800281
282 if (cc->ignore_skip_hint)
283 return;
284
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700285 if (!page)
286 return;
287
David Rientjes35979ef2014-06-04 16:08:27 -0700288 if (nr_isolated)
289 return;
290
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700291 set_pageblock_skip(page);
Mel Gormanc89511a2012-10-08 16:32:45 -0700292
David Rientjes35979ef2014-06-04 16:08:27 -0700293 pfn = page_to_pfn(page);
294
295 /* Update where async and sync compaction should restart */
296 if (migrate_scanner) {
David Rientjes35979ef2014-06-04 16:08:27 -0700297 if (pfn > zone->compact_cached_migrate_pfn[0])
298 zone->compact_cached_migrate_pfn[0] = pfn;
David Rientjese0b9dae2014-06-04 16:08:28 -0700299 if (cc->mode != MIGRATE_ASYNC &&
300 pfn > zone->compact_cached_migrate_pfn[1])
David Rientjes35979ef2014-06-04 16:08:27 -0700301 zone->compact_cached_migrate_pfn[1] = pfn;
302 } else {
David Rientjes35979ef2014-06-04 16:08:27 -0700303 if (pfn < zone->compact_cached_free_pfn)
304 zone->compact_cached_free_pfn = pfn;
Mel Gormanc89511a2012-10-08 16:32:45 -0700305 }
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700306}
307#else
308static inline bool isolation_suitable(struct compact_control *cc,
309 struct page *page)
310{
311 return true;
312}
313
Mel Gormanc89511a2012-10-08 16:32:45 -0700314static void update_pageblock_skip(struct compact_control *cc,
315 struct page *page, unsigned long nr_isolated,
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700316 bool migrate_scanner)
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700317{
318}
319#endif /* CONFIG_COMPACTION */
320
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700321/*
322 * Compaction requires the taking of some coarse locks that are potentially
323 * very heavily contended. For async compaction, back out if the lock cannot
324 * be taken immediately. For sync compaction, spin on the lock if needed.
325 *
326 * Returns true if the lock is held
327 * Returns false if the lock is not held and compaction should abort
328 */
329static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
330 struct compact_control *cc)
Mel Gorman2a1402a2012-10-08 16:32:33 -0700331{
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700332 if (cc->mode == MIGRATE_ASYNC) {
333 if (!spin_trylock_irqsave(lock, *flags)) {
Vlastimil Babkac3486f52016-07-28 15:49:30 -0700334 cc->contended = true;
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700335 return false;
336 }
337 } else {
338 spin_lock_irqsave(lock, *flags);
339 }
Vlastimil Babka1f9efde2014-10-09 15:27:14 -0700340
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700341 return true;
Mel Gorman2a1402a2012-10-08 16:32:33 -0700342}
343
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100344/*
Mel Gormanc67fe372012-08-21 16:16:17 -0700345 * Compaction requires the taking of some coarse locks that are potentially
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700346 * very heavily contended. The lock should be periodically unlocked to avoid
347 * having disabled IRQs for a long time, even when there is nobody waiting on
348 * the lock. It might also be that allowing the IRQs will result in
349 * need_resched() becoming true. If scheduling is needed, async compaction
350 * aborts. Sync compaction schedules.
351 * Either compaction type will also abort if a fatal signal is pending.
352 * In either case if the lock was locked, it is dropped and not regained.
Mel Gormanc67fe372012-08-21 16:16:17 -0700353 *
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700354 * Returns true if compaction should abort due to fatal signal pending, or
355 * async compaction due to need_resched()
356 * Returns false when compaction can continue (sync compaction might have
357 * scheduled)
Mel Gormanc67fe372012-08-21 16:16:17 -0700358 */
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700359static bool compact_unlock_should_abort(spinlock_t *lock,
360 unsigned long flags, bool *locked, struct compact_control *cc)
Mel Gormanc67fe372012-08-21 16:16:17 -0700361{
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700362 if (*locked) {
363 spin_unlock_irqrestore(lock, flags);
364 *locked = false;
365 }
Vlastimil Babka1f9efde2014-10-09 15:27:14 -0700366
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700367 if (fatal_signal_pending(current)) {
Vlastimil Babkac3486f52016-07-28 15:49:30 -0700368 cc->contended = true;
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700369 return true;
370 }
Mel Gormanc67fe372012-08-21 16:16:17 -0700371
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700372 if (need_resched()) {
David Rientjese0b9dae2014-06-04 16:08:28 -0700373 if (cc->mode == MIGRATE_ASYNC) {
Vlastimil Babkac3486f52016-07-28 15:49:30 -0700374 cc->contended = true;
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700375 return true;
Mel Gormanc67fe372012-08-21 16:16:17 -0700376 }
Mel Gormanc67fe372012-08-21 16:16:17 -0700377 cond_resched();
Mel Gormanc67fe372012-08-21 16:16:17 -0700378 }
379
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700380 return false;
Mel Gormanc67fe372012-08-21 16:16:17 -0700381}
382
Vlastimil Babkabe976572014-06-04 16:10:41 -0700383/*
384 * Aside from avoiding lock contention, compaction also periodically checks
385 * need_resched() and either schedules in sync compaction or aborts async
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700386 * compaction. This is similar to what compact_unlock_should_abort() does, but
Vlastimil Babkabe976572014-06-04 16:10:41 -0700387 * is used where no lock is concerned.
388 *
389 * Returns false when no scheduling was needed, or sync compaction scheduled.
390 * Returns true when async compaction should abort.
391 */
392static inline bool compact_should_abort(struct compact_control *cc)
393{
394 /* async compaction aborts if contended */
395 if (need_resched()) {
396 if (cc->mode == MIGRATE_ASYNC) {
Vlastimil Babkac3486f52016-07-28 15:49:30 -0700397 cc->contended = true;
Vlastimil Babkabe976572014-06-04 16:10:41 -0700398 return true;
399 }
400
401 cond_resched();
402 }
403
404 return false;
405}
406
Mel Gormanc67fe372012-08-21 16:16:17 -0700407/*
Jerome Marchand9e4be472013-11-12 15:07:12 -0800408 * Isolate free pages onto a private freelist. If @strict is true, abort and
 409 * return 0 on any invalid PFNs or non-free pages inside the pageblock
410 * (even though it may still end up isolating some pages).
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100411 */
Mel Gormanf40d1e42012-10-08 16:32:36 -0700412static unsigned long isolate_freepages_block(struct compact_control *cc,
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700413 unsigned long *start_pfn,
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100414 unsigned long end_pfn,
415 struct list_head *freelist,
416 bool strict)
Mel Gorman748446b2010-05-24 14:32:27 -0700417{
Mel Gormanb7aba692011-01-13 15:45:54 -0800418 int nr_scanned = 0, total_isolated = 0;
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700419 struct page *cursor, *valid_page = NULL;
Xiubo Lib8b2d822014-10-09 15:28:21 -0700420 unsigned long flags = 0;
Mel Gormanf40d1e42012-10-08 16:32:36 -0700421 bool locked = false;
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700422 unsigned long blockpfn = *start_pfn;
Joonsoo Kim66c64222016-07-26 15:23:40 -0700423 unsigned int order;
Mel Gorman748446b2010-05-24 14:32:27 -0700424
Mel Gorman748446b2010-05-24 14:32:27 -0700425 cursor = pfn_to_page(blockpfn);
426
Mel Gormanf40d1e42012-10-08 16:32:36 -0700427 /* Isolate free pages. */
Mel Gorman748446b2010-05-24 14:32:27 -0700428 for (; blockpfn < end_pfn; blockpfn++, cursor++) {
Joonsoo Kim66c64222016-07-26 15:23:40 -0700429 int isolated;
Mel Gorman748446b2010-05-24 14:32:27 -0700430 struct page *page = cursor;
431
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700432 /*
433 * Periodically drop the lock (if held) regardless of its
 434 * contention, to give a chance to IRQs. Abort if a fatal signal is
 435 * pending or async compaction detects need_resched().
436 */
437 if (!(blockpfn % SWAP_CLUSTER_MAX)
438 && compact_unlock_should_abort(&cc->zone->lock, flags,
439 &locked, cc))
440 break;
441
Mel Gormanb7aba692011-01-13 15:45:54 -0800442 nr_scanned++;
Mel Gormanf40d1e42012-10-08 16:32:36 -0700443 if (!pfn_valid_within(blockpfn))
Laura Abbott2af120b2014-03-10 15:49:44 -0700444 goto isolate_fail;
445
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700446 if (!valid_page)
447 valid_page = page;
Vlastimil Babka9fcd6d22015-09-08 15:02:49 -0700448
449 /*
450 * For compound pages such as THP and hugetlbfs, we can save
451 * potentially a lot of iterations if we skip them at once.
452 * The check is racy, but we can consider only valid values
453 * and the only danger is skipping too much.
454 */
455 if (PageCompound(page)) {
456 unsigned int comp_order = compound_order(page);
457
458 if (likely(comp_order < MAX_ORDER)) {
459 blockpfn += (1UL << comp_order) - 1;
460 cursor += (1UL << comp_order) - 1;
461 }
462
463 goto isolate_fail;
464 }
465
Mel Gormanf40d1e42012-10-08 16:32:36 -0700466 if (!PageBuddy(page))
Laura Abbott2af120b2014-03-10 15:49:44 -0700467 goto isolate_fail;
Mel Gormanf40d1e42012-10-08 16:32:36 -0700468
469 /*
Vlastimil Babka69b71892014-10-09 15:27:18 -0700470 * If we already hold the lock, we can skip some rechecking.
471 * Note that if we hold the lock now, checked_pageblock was
472 * already set in some previous iteration (or strict is true),
473 * so it is correct to skip the suitable migration target
474 * recheck as well.
Mel Gormanf40d1e42012-10-08 16:32:36 -0700475 */
Vlastimil Babka69b71892014-10-09 15:27:18 -0700476 if (!locked) {
477 /*
478 * The zone lock must be held to isolate freepages.
479 * Unfortunately this is a very coarse lock and can be
480 * heavily contended if there are parallel allocations
 481 * or parallel compactions. For async compaction we do not
 482 * spin on the lock, and we acquire the lock as late as
483 * possible.
484 */
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700485 locked = compact_trylock_irqsave(&cc->zone->lock,
486 &flags, cc);
Vlastimil Babka69b71892014-10-09 15:27:18 -0700487 if (!locked)
488 break;
Mel Gormanf40d1e42012-10-08 16:32:36 -0700489
Vlastimil Babka69b71892014-10-09 15:27:18 -0700490 /* Recheck this is a buddy page under lock */
491 if (!PageBuddy(page))
492 goto isolate_fail;
493 }
Mel Gorman748446b2010-05-24 14:32:27 -0700494
Joonsoo Kim66c64222016-07-26 15:23:40 -0700495 /* Found a free page, will break it into order-0 pages */
496 order = page_order(page);
497 isolated = __isolate_free_page(page, order);
David Rientjesa4f04f22016-06-24 14:50:10 -0700498 if (!isolated)
499 break;
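		/* Stash the order so map_pages() knows how to split the page */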
Joonsoo Kim66c64222016-07-26 15:23:40 -0700500 set_page_private(page, order);
David Rientjesa4f04f22016-06-24 14:50:10 -0700501
Mel Gorman748446b2010-05-24 14:32:27 -0700502 total_isolated += isolated;
David Rientjesa4f04f22016-06-24 14:50:10 -0700503 cc->nr_freepages += isolated;
Joonsoo Kim66c64222016-07-26 15:23:40 -0700504 list_add_tail(&page->lru, freelist);
505
David Rientjesa4f04f22016-06-24 14:50:10 -0700506 if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
507 blockpfn += isolated;
508 break;
Mel Gorman748446b2010-05-24 14:32:27 -0700509 }
David Rientjesa4f04f22016-06-24 14:50:10 -0700510 /* Advance to the end of split page */
511 blockpfn += isolated - 1;
512 cursor += isolated - 1;
513 continue;
Laura Abbott2af120b2014-03-10 15:49:44 -0700514
515isolate_fail:
516 if (strict)
517 break;
518 else
519 continue;
520
Mel Gorman748446b2010-05-24 14:32:27 -0700521 }
522
David Rientjesa4f04f22016-06-24 14:50:10 -0700523 if (locked)
524 spin_unlock_irqrestore(&cc->zone->lock, flags);
525
Vlastimil Babka9fcd6d22015-09-08 15:02:49 -0700526 /*
527 * There is a tiny chance that we have read bogus compound_order(),
528 * so be careful to not go outside of the pageblock.
529 */
530 if (unlikely(blockpfn > end_pfn))
531 blockpfn = end_pfn;
532
Joonsoo Kime34d85f2015-02-11 15:27:04 -0800533 trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
534 nr_scanned, total_isolated);
535
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700536 /* Record how far we have got within the block */
537 *start_pfn = blockpfn;
538
Mel Gormanf40d1e42012-10-08 16:32:36 -0700539 /*
540 * If strict isolation is requested by CMA then check that all the
541 * pages requested were isolated. If there were any failures, 0 is
542 * returned and CMA will fail.
543 */
Laura Abbott2af120b2014-03-10 15:49:44 -0700544 if (strict && blockpfn < end_pfn)
Mel Gormanf40d1e42012-10-08 16:32:36 -0700545 total_isolated = 0;
546
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700547 /* Update the pageblock-skip if the whole pageblock was scanned */
548 if (blockpfn == end_pfn)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700549 update_pageblock_skip(cc, valid_page, total_isolated, false);
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700550
David Rientjes7f354a52017-02-22 15:44:50 -0800551 cc->total_free_scanned += nr_scanned;
Mel Gorman397487d2012-10-19 12:00:10 +0100552 if (total_isolated)
Minchan Kim010fc292012-12-20 15:05:06 -0800553 count_compact_events(COMPACTISOLATED, total_isolated);
Mel Gorman748446b2010-05-24 14:32:27 -0700554 return total_isolated;
555}
556
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100557/**
558 * isolate_freepages_range() - isolate free pages.
559 * @start_pfn: The first PFN to start isolating.
560 * @end_pfn: The one-past-last PFN.
561 *
562 * Non-free pages, invalid PFNs, or zone boundaries within the
 563 * [start_pfn, end_pfn) range are considered errors and cause the function
 564 * to undo its actions and return zero.
 565 *
 566 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 567 * pages (which may be greater than end_pfn if the end fell in the middle of
 568 * a free page).
569 */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100570unsigned long
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700571isolate_freepages_range(struct compact_control *cc,
572 unsigned long start_pfn, unsigned long end_pfn)
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100573{
Joonsoo Kime1409c32016-03-15 14:57:48 -0700574 unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100575 LIST_HEAD(freelist);
576
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700577 pfn = start_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -0700578 block_start_pfn = pageblock_start_pfn(pfn);
Joonsoo Kime1409c32016-03-15 14:57:48 -0700579 if (block_start_pfn < cc->zone->zone_start_pfn)
580 block_start_pfn = cc->zone->zone_start_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -0700581 block_end_pfn = pageblock_end_pfn(pfn);
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100582
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700583 for (; pfn < end_pfn; pfn += isolated,
Joonsoo Kime1409c32016-03-15 14:57:48 -0700584 block_start_pfn = block_end_pfn,
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700585 block_end_pfn += pageblock_nr_pages) {
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700586 /* Protect pfn from changing by isolate_freepages_block */
587 unsigned long isolate_start_pfn = pfn;
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700588
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100589 block_end_pfn = min(block_end_pfn, end_pfn);
590
Joonsoo Kim58420012014-11-13 15:19:07 -0800591 /*
 592 * pfn could pass block_end_pfn if the isolated freepage is of
 593 * more than pageblock order. In this case, we adjust the
 594 * scanning range to the correct one.
595 */
596 if (pfn >= block_end_pfn) {
Vlastimil Babka06b66402016-05-19 17:11:48 -0700597 block_start_pfn = pageblock_start_pfn(pfn);
598 block_end_pfn = pageblock_end_pfn(pfn);
Joonsoo Kim58420012014-11-13 15:19:07 -0800599 block_end_pfn = min(block_end_pfn, end_pfn);
600 }
601
Joonsoo Kime1409c32016-03-15 14:57:48 -0700602 if (!pageblock_pfn_to_page(block_start_pfn,
603 block_end_pfn, cc->zone))
Vlastimil Babka7d49d882014-10-09 15:27:11 -0700604 break;
605
Vlastimil Babkae14c7202014-10-09 15:27:20 -0700606 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
607 block_end_pfn, &freelist, true);
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100608
609 /*
610 * In strict mode, isolate_freepages_block() returns 0 if
611 * there are any holes in the block (ie. invalid PFNs or
612 * non-free pages).
613 */
614 if (!isolated)
615 break;
616
617 /*
618 * If we managed to isolate pages, it is always (1 << n) *
619 * pageblock_nr_pages for some non-negative n. (Max order
620 * page may span two pageblocks).
621 */
622 }
623
Joonsoo Kim66c64222016-07-26 15:23:40 -0700624 /* __isolate_free_page() does not map the pages */
Michal Nazarewicz85aa1252012-01-30 13:24:03 +0100625 map_pages(&freelist);
626
627 if (pfn < end_pfn) {
628 /* Loop terminated early, cleanup. */
629 release_freepages(&freelist);
630 return 0;
631 }
632
633 /* We don't use freelists for anything. */
634 return pfn;
635}
636
Mel Gorman748446b2010-05-24 14:32:27 -0700637/* Similar to reclaim, but different enough that they don't share logic */
638static bool too_many_isolated(struct zone *zone)
639{
Minchan Kimbc693042010-09-09 16:38:00 -0700640 unsigned long active, inactive, isolated;
Mel Gorman748446b2010-05-24 14:32:27 -0700641
Mel Gorman599d0c92016-07-28 15:45:31 -0700642 inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
643 node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
644 active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
645 node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
646 isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
647 node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
Mel Gorman748446b2010-05-24 14:32:27 -0700648
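	/* Throttle when isolated pages exceed half of the node's LRU pages */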
Minchan Kimbc693042010-09-09 16:38:00 -0700649 return isolated > (inactive + active) / 2;
Mel Gorman748446b2010-05-24 14:32:27 -0700650}
651
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100652/**
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700653 * isolate_migratepages_block() - isolate all migrate-able pages within
654 * a single pageblock
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100655 * @cc: Compaction control structure.
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700656 * @low_pfn: The first PFN to isolate
657 * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
658 * @isolate_mode: Isolation mode to be used.
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100659 *
660 * Isolate all pages that can be migrated from the range specified by
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700661 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 662 * Returns zero if there is a fatal signal pending; otherwise, the PFN of the
 663 * first page that was not scanned (which may be less than, equal to, or
 664 * greater than end_pfn).
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100665 *
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700666 * The pages are isolated on cc->migratepages list (not required to be empty),
667 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
668 * is neither read nor updated.
Mel Gorman748446b2010-05-24 14:32:27 -0700669 */
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700670static unsigned long
671isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
672 unsigned long end_pfn, isolate_mode_t isolate_mode)
Mel Gorman748446b2010-05-24 14:32:27 -0700673{
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700674 struct zone *zone = cc->zone;
Mel Gormanb7aba692011-01-13 15:45:54 -0800675 unsigned long nr_scanned = 0, nr_isolated = 0;
Hugh Dickinsfa9add62012-05-29 15:07:09 -0700676 struct lruvec *lruvec;
Xiubo Lib8b2d822014-10-09 15:28:21 -0700677 unsigned long flags = 0;
Mel Gorman2a1402a2012-10-08 16:32:33 -0700678 bool locked = false;
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700679 struct page *page = NULL, *valid_page = NULL;
Joonsoo Kime34d85f2015-02-11 15:27:04 -0800680 unsigned long start_pfn = low_pfn;
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700681 bool skip_on_failure = false;
682 unsigned long next_skip_pfn = 0;
Mel Gorman748446b2010-05-24 14:32:27 -0700683
Mel Gorman748446b2010-05-24 14:32:27 -0700684 /*
685 * Ensure that there are not too many pages isolated from the LRU
686 * list by either parallel reclaimers or compaction. If there are,
687 * delay for some time until fewer pages are isolated
688 */
689 while (unlikely(too_many_isolated(zone))) {
Mel Gormanf9e35b32011-06-15 15:08:52 -0700690 /* async migration should just abort */
David Rientjese0b9dae2014-06-04 16:08:28 -0700691 if (cc->mode == MIGRATE_ASYNC)
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100692 return 0;
Mel Gormanf9e35b32011-06-15 15:08:52 -0700693
Mel Gorman748446b2010-05-24 14:32:27 -0700694 congestion_wait(BLK_RW_ASYNC, HZ/10);
695
696 if (fatal_signal_pending(current))
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100697 return 0;
Mel Gorman748446b2010-05-24 14:32:27 -0700698 }
699
Vlastimil Babkabe976572014-06-04 16:10:41 -0700700 if (compact_should_abort(cc))
701 return 0;
David Rientjesaeef4b82014-06-04 16:08:31 -0700702
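	/*
	 * For async direct compaction, track failures per cc->order aligned
	 * block: if isolation fails within such a block, skip ahead to the
	 * next one instead of giving up on the whole pageblock.
	 */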
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700703 if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
704 skip_on_failure = true;
705 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
706 }
707
Mel Gorman748446b2010-05-24 14:32:27 -0700708 /* Time to isolate some pages for migration */
Mel Gorman748446b2010-05-24 14:32:27 -0700709 for (; low_pfn < end_pfn; low_pfn++) {
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700710
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700711 if (skip_on_failure && low_pfn >= next_skip_pfn) {
712 /*
713 * We have isolated all migration candidates in the
714 * previous order-aligned block, and did not skip it due
715 * to failure. We should migrate the pages now and
716 * hopefully succeed compaction.
717 */
718 if (nr_isolated)
719 break;
720
721 /*
722 * We failed to isolate in the previous order-aligned
723 * block. Set the new boundary to the end of the
724 * current block. Note we can't simply increase
725 * next_skip_pfn by 1 << order, as low_pfn might have
726 * been incremented by a higher number due to skipping
727 * a compound or a high-order buddy page in the
728 * previous loop iteration.
729 */
730 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
731 }
732
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700733 /*
734 * Periodically drop the lock (if held) regardless of its
 735 * contention, to give a chance to IRQs. Abort async compaction
736 * if contended.
737 */
738 if (!(low_pfn % SWAP_CLUSTER_MAX)
Mel Gormana52633d2016-07-28 15:45:28 -0700739 && compact_unlock_should_abort(zone_lru_lock(zone), flags,
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700740 &locked, cc))
741 break;
Mel Gormanc67fe372012-08-21 16:16:17 -0700742
Mel Gorman748446b2010-05-24 14:32:27 -0700743 if (!pfn_valid_within(low_pfn))
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700744 goto isolate_fail;
Mel Gormanb7aba692011-01-13 15:45:54 -0800745 nr_scanned++;
Mel Gorman748446b2010-05-24 14:32:27 -0700746
Mel Gorman748446b2010-05-24 14:32:27 -0700747 page = pfn_to_page(low_pfn);
Mel Gormandc908602012-02-08 17:13:38 -0800748
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700749 if (!valid_page)
750 valid_page = page;
751
Mel Gorman6c144662014-01-23 15:53:38 -0800752 /*
Vlastimil Babka99c0fd52014-10-09 15:27:23 -0700753 * Skip if free. We read page order here without zone lock
754 * which is generally unsafe, but the race window is small and
755 * the worst thing that can happen is that we skip some
756 * potential isolation targets.
Mel Gorman6c144662014-01-23 15:53:38 -0800757 */
Vlastimil Babka99c0fd52014-10-09 15:27:23 -0700758 if (PageBuddy(page)) {
759 unsigned long freepage_order = page_order_unsafe(page);
760
761 /*
762 * Without lock, we cannot be sure that what we got is
763 * a valid page order. Consider only values in the
764 * valid order range to prevent low_pfn overflow.
765 */
766 if (freepage_order > 0 && freepage_order < MAX_ORDER)
767 low_pfn += (1UL << freepage_order) - 1;
Mel Gorman748446b2010-05-24 14:32:27 -0700768 continue;
Vlastimil Babka99c0fd52014-10-09 15:27:23 -0700769 }
Mel Gorman748446b2010-05-24 14:32:27 -0700770
Mel Gorman9927af742011-01-13 15:45:59 -0800771 /*
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700772 * Regardless of being on LRU, compound pages such as THP and
773 * hugetlbfs are not to be compacted. We can potentially save
774 * a lot of iterations if we skip them at once. The check is
775 * racy, but we can consider only valid values and the only
776 * danger is skipping too much.
Andrea Arcangelibc835012011-01-13 15:47:08 -0800777 */
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700778 if (PageCompound(page)) {
779 unsigned int comp_order = compound_order(page);
780
781 if (likely(comp_order < MAX_ORDER))
782 low_pfn += (1UL << comp_order) - 1;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700783
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700784 goto isolate_fail;
Mel Gorman2a1402a2012-10-08 16:32:33 -0700785 }
786
Minchan Kimbda807d2016-07-26 15:23:05 -0700787 /*
788 * Check may be lockless but that's ok as we recheck later.
789 * It's possible to migrate LRU and non-lru movable pages.
790 * Skip any other type of page
791 */
792 if (!PageLRU(page)) {
Minchan Kimbda807d2016-07-26 15:23:05 -0700793 /*
 794 * __PageMovable() can return a false positive, so we need
 795 * to verify it under the page lock.
796 */
797 if (unlikely(__PageMovable(page)) &&
798 !PageIsolated(page)) {
799 if (locked) {
Mel Gormana52633d2016-07-28 15:45:28 -0700800 spin_unlock_irqrestore(zone_lru_lock(zone),
Minchan Kimbda807d2016-07-26 15:23:05 -0700801 flags);
802 locked = false;
803 }
804
805 if (isolate_movable_page(page, isolate_mode))
806 goto isolate_success;
807 }
808
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700809 goto isolate_fail;
Minchan Kimbda807d2016-07-26 15:23:05 -0700810 }
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700811
David Rientjes119d6d52014-04-03 14:48:00 -0700812 /*
813 * Migration will fail if an anonymous page is pinned in memory,
814 * so avoid taking lru_lock and isolating it unnecessarily in an
815 * admittedly racy check.
816 */
817 if (!page_mapping(page) &&
818 page_count(page) > page_mapcount(page))
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700819 goto isolate_fail;
David Rientjes119d6d52014-04-03 14:48:00 -0700820
Michal Hocko73e64c52016-12-14 15:04:07 -0800821 /*
 822 * Only allow migration of anonymous pages in a GFP_NOFS context,
823 * because those do not depend on fs locks.
824 */
825 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
826 goto isolate_fail;
827
Vlastimil Babka69b71892014-10-09 15:27:18 -0700828 /* If we already hold the lock, we can skip some rechecking */
829 if (!locked) {
Mel Gormana52633d2016-07-28 15:45:28 -0700830 locked = compact_trylock_irqsave(zone_lru_lock(zone),
Vlastimil Babka8b44d272014-10-09 15:27:16 -0700831 &flags, cc);
Vlastimil Babka69b71892014-10-09 15:27:18 -0700832 if (!locked)
833 break;
Mel Gorman2a1402a2012-10-08 16:32:33 -0700834
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700835 /* Recheck PageLRU and PageCompound under lock */
Vlastimil Babka69b71892014-10-09 15:27:18 -0700836 if (!PageLRU(page))
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700837 goto isolate_fail;
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700838
839 /*
 840 * The page became compound since the non-locked check,
841 * and it's on LRU. It can only be a THP so the order
842 * is safe to read and it's 0 for tail pages.
843 */
844 if (unlikely(PageCompound(page))) {
845 low_pfn += (1UL << compound_order(page)) - 1;
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700846 goto isolate_fail;
Vlastimil Babka69b71892014-10-09 15:27:18 -0700847 }
Andrea Arcangelibc835012011-01-13 15:47:08 -0800848 }
849
Mel Gorman599d0c92016-07-28 15:45:31 -0700850 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
Hugh Dickinsfa9add62012-05-29 15:07:09 -0700851
Mel Gorman748446b2010-05-24 14:32:27 -0700852 /* Try to isolate the page */
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700853 if (__isolate_lru_page(page, isolate_mode) != 0)
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700854 goto isolate_fail;
Mel Gorman748446b2010-05-24 14:32:27 -0700855
Vlastimil Babka29c0dde2015-09-08 15:02:46 -0700856 VM_BUG_ON_PAGE(PageCompound(page), page);
Andrea Arcangelibc835012011-01-13 15:47:08 -0800857
Mel Gorman748446b2010-05-24 14:32:27 -0700858 /* Successfully isolated */
Hugh Dickinsfa9add62012-05-29 15:07:09 -0700859 del_page_from_lru_list(page, lruvec, page_lru(page));
Ming Ling6afcf8e2016-12-12 16:42:26 -0800860 inc_node_page_state(page,
861 NR_ISOLATED_ANON + page_is_file_cache(page));
Joonsoo Kimb6c75012014-04-07 15:37:07 -0700862
863isolate_success:
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700864 list_add(&page->lru, &cc->migratepages);
Mel Gorman748446b2010-05-24 14:32:27 -0700865 cc->nr_migratepages++;
Mel Gormanb7aba692011-01-13 15:45:54 -0800866 nr_isolated++;
Mel Gorman748446b2010-05-24 14:32:27 -0700867
Vlastimil Babkaa34753d2016-05-19 17:11:51 -0700868 /*
869 * Record where we could have freed pages by migration and not
 870 * yet flushed them to the buddy allocator.
 871 * - this is the lowest page that was isolated and will likely
 872 * then be freed by migration.
873 */
874 if (!cc->last_migrated_pfn)
875 cc->last_migrated_pfn = low_pfn;
876
Mel Gorman748446b2010-05-24 14:32:27 -0700877 /* Avoid isolating too much */
Hillf Danton31b83842012-01-10 15:07:59 -0800878 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
879 ++low_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -0700880 break;
Hillf Danton31b83842012-01-10 15:07:59 -0800881 }
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700882
883 continue;
884isolate_fail:
885 if (!skip_on_failure)
886 continue;
887
888 /*
889 * We have isolated some pages, but then failed. Release them
890 * instead of migrating, as we cannot form the cc->order buddy
891 * page anyway.
892 */
893 if (nr_isolated) {
894 if (locked) {
Mel Gormana52633d2016-07-28 15:45:28 -0700895 spin_unlock_irqrestore(zone_lru_lock(zone), flags);
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700896 locked = false;
897 }
Vlastimil Babkafdd048e2016-05-19 17:11:55 -0700898 putback_movable_pages(&cc->migratepages);
899 cc->nr_migratepages = 0;
900 cc->last_migrated_pfn = 0;
901 nr_isolated = 0;
902 }
903
904 if (low_pfn < next_skip_pfn) {
905 low_pfn = next_skip_pfn - 1;
906 /*
907 * The check near the loop beginning would have updated
908 * next_skip_pfn too, but this is a bit simpler.
909 */
910 next_skip_pfn += 1UL << cc->order;
911 }
Mel Gorman748446b2010-05-24 14:32:27 -0700912 }
913
Vlastimil Babka99c0fd52014-10-09 15:27:23 -0700914 /*
915 * The PageBuddy() check could have potentially brought us outside
916 * the range to be scanned.
917 */
918 if (unlikely(low_pfn > end_pfn))
919 low_pfn = end_pfn;
920
Mel Gormanc67fe372012-08-21 16:16:17 -0700921 if (locked)
Mel Gormana52633d2016-07-28 15:45:28 -0700922 spin_unlock_irqrestore(zone_lru_lock(zone), flags);
Mel Gorman748446b2010-05-24 14:32:27 -0700923
Vlastimil Babka50b5b092014-01-21 15:51:10 -0800924 /*
925 * Update the pageblock-skip information and cached scanner pfn,
926 * if the whole pageblock was scanned without isolating any page.
Vlastimil Babka50b5b092014-01-21 15:51:10 -0800927 */
David Rientjes35979ef2014-06-04 16:08:27 -0700928 if (low_pfn == end_pfn)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700929 update_pageblock_skip(cc, valid_page, nr_isolated, true);
Mel Gormanbb13ffe2012-10-08 16:32:41 -0700930
Joonsoo Kime34d85f2015-02-11 15:27:04 -0800931 trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
932 nr_scanned, nr_isolated);
Mel Gormanb7aba692011-01-13 15:45:54 -0800933
David Rientjes7f354a52017-02-22 15:44:50 -0800934 cc->total_migrate_scanned += nr_scanned;
Mel Gorman397487d2012-10-19 12:00:10 +0100935 if (nr_isolated)
Minchan Kim010fc292012-12-20 15:05:06 -0800936 count_compact_events(COMPACTISOLATED, nr_isolated);
Mel Gorman397487d2012-10-19 12:00:10 +0100937
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +0100938 return low_pfn;
939}
940
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700941/**
942 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
943 * @cc: Compaction control structure.
944 * @start_pfn: The first PFN to start isolating.
945 * @end_pfn: The one-past-last PFN.
946 *
947 * Returns zero if isolation fails fatally due to e.g. pending signal.
948 * Otherwise, function returns one-past-the-last PFN of isolated page
 949 * (which may be greater than end_pfn if the end fell in the middle of a THP page).
950 */
951unsigned long
952isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
953 unsigned long end_pfn)
954{
Joonsoo Kime1409c32016-03-15 14:57:48 -0700955 unsigned long pfn, block_start_pfn, block_end_pfn;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700956
957 /* Scan block by block. First and last block may be incomplete */
958 pfn = start_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -0700959 block_start_pfn = pageblock_start_pfn(pfn);
Joonsoo Kime1409c32016-03-15 14:57:48 -0700960 if (block_start_pfn < cc->zone->zone_start_pfn)
961 block_start_pfn = cc->zone->zone_start_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -0700962 block_end_pfn = pageblock_end_pfn(pfn);
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700963
964 for (; pfn < end_pfn; pfn = block_end_pfn,
Joonsoo Kime1409c32016-03-15 14:57:48 -0700965 block_start_pfn = block_end_pfn,
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700966 block_end_pfn += pageblock_nr_pages) {
967
968 block_end_pfn = min(block_end_pfn, end_pfn);
969
Joonsoo Kime1409c32016-03-15 14:57:48 -0700970 if (!pageblock_pfn_to_page(block_start_pfn,
971 block_end_pfn, cc->zone))
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700972 continue;
973
974 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
975 ISOLATE_UNEVICTABLE);
976
Hugh Dickins14af4a52016-05-05 16:22:15 -0700977 if (!pfn)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700978 break;
Joonsoo Kim6ea41c02014-10-29 14:50:20 -0700979
980 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
981 break;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700982 }
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -0700983
984 return pfn;
985}
986
Michal Nazarewiczff9543f2011-12-29 13:09:50 +0100987#endif /* CONFIG_COMPACTION || CONFIG_CMA */
988#ifdef CONFIG_COMPACTION
Andrew Morton018e9a42015-04-15 16:15:20 -0700989
990/* Returns true if the page is within a block suitable for migration to */
Vlastimil Babka9f7e3382016-10-07 17:00:37 -0700991static bool suitable_migration_target(struct compact_control *cc,
992 struct page *page)
Andrew Morton018e9a42015-04-15 16:15:20 -0700993{
Vlastimil Babka9f7e3382016-10-07 17:00:37 -0700994 if (cc->ignore_block_suitable)
995 return true;
996
Andrew Morton018e9a42015-04-15 16:15:20 -0700997 /* If the page is a large free page, then disallow migration */
998 if (PageBuddy(page)) {
999 /*
1000 * We are checking page_order without zone->lock taken. But
1001 * the only small danger is that we skip a potentially suitable
 1002 * pageblock, so it's not worth checking the order for a valid range.
1003 */
1004 if (page_order_unsafe(page) >= pageblock_order)
1005 return false;
1006 }
1007
1008 /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1009 if (migrate_async_suitable(get_pageblock_migratetype(page)))
1010 return true;
1011
1012 /* Otherwise skip the block */
1013 return false;
1014}
1015
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001016/*
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001017 * Test whether the free scanner has reached the same or lower pageblock than
1018 * the migration scanner, and compaction should thus terminate.
1019 */
1020static inline bool compact_scanners_met(struct compact_control *cc)
1021{
1022 return (cc->free_pfn >> pageblock_order)
1023 <= (cc->migrate_pfn >> pageblock_order);
1024}
1025
1026/*
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001027 * Based on information in the current compact_control, find blocks
1028 * suitable for isolating free pages from and then isolate them.
1029 */
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001030static void isolate_freepages(struct compact_control *cc)
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001031{
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001032 struct zone *zone = cc->zone;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001033 struct page *page;
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001034 unsigned long block_start_pfn; /* start of current pageblock */
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001035 unsigned long isolate_start_pfn; /* exact pfn we start at */
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001036 unsigned long block_end_pfn; /* end of current pageblock */
1037 unsigned long low_pfn; /* lowest pfn scanner is able to scan */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001038 struct list_head *freelist = &cc->freepages;
1039
1040 /*
1041 * Initialise the free scanner. The starting point is where we last
Vlastimil Babka49e068f2014-05-06 12:50:03 -07001042 * successfully isolated from, zone-cached value, or the end of the
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001043 * zone when isolating for the first time. For looping we also need
1044 * this pfn aligned down to the pageblock boundary, because we do
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001045 * block_start_pfn -= pageblock_nr_pages in the for loop.
 1046 * For the ending point, take care when isolating in the last pageblock
 1047 * of a zone which ends in the middle of a pageblock.
Vlastimil Babka49e068f2014-05-06 12:50:03 -07001048 * The low boundary is the end of the pageblock the migration scanner
1049 * is using.
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001050 */
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001051 isolate_start_pfn = cc->free_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -07001052 block_start_pfn = pageblock_start_pfn(cc->free_pfn);
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001053 block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1054 zone_end_pfn(zone));
Vlastimil Babka06b66402016-05-19 17:11:48 -07001055 low_pfn = pageblock_end_pfn(cc->migrate_pfn);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001056
1057 /*
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001058 * Isolate free pages until enough are available to migrate the
1059 * pages on cc->migratepages. We stop searching if the migrate
1060 * and free page scanners meet or enough free pages are isolated.
1061 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001062 for (; block_start_pfn >= low_pfn;
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001063 block_end_pfn = block_start_pfn,
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001064 block_start_pfn -= pageblock_nr_pages,
1065 isolate_start_pfn = block_start_pfn) {
David Rientjesf6ea3ad2013-09-30 13:45:03 -07001066 /*
1067 * This can iterate a massively long zone without finding any
1068 * suitable migration targets, so periodically check if we need
Vlastimil Babkabe976572014-06-04 16:10:41 -07001069 * to schedule, or even abort async compaction.
David Rientjesf6ea3ad2013-09-30 13:45:03 -07001070 */
Vlastimil Babkabe976572014-06-04 16:10:41 -07001071 if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1072 && compact_should_abort(cc))
1073 break;
David Rientjesf6ea3ad2013-09-30 13:45:03 -07001074
Vlastimil Babka7d49d882014-10-09 15:27:11 -07001075 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1076 zone);
1077 if (!page)
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001078 continue;
1079
1080 /* Check the block is suitable for migration */
Vlastimil Babka9f7e3382016-10-07 17:00:37 -07001081 if (!suitable_migration_target(cc, page))
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001082 continue;
Linus Torvalds68e3e922012-06-03 20:05:57 -07001083
Mel Gormanbb13ffe2012-10-08 16:32:41 -07001084 /* If isolation recently failed, do not retry */
1085 if (!isolation_suitable(cc, page))
1086 continue;
1087
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001088 /* Found a block suitable for isolating free pages from. */
David Rientjesa46cbf32016-07-14 12:06:50 -07001089 isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
1090 freelist, false);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001091
1092 /*
David Rientjesa46cbf32016-07-14 12:06:50 -07001093 * If we isolated enough freepages, or aborted due to lock
1094 * contention, terminate.
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001095 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001096 if ((cc->nr_freepages >= cc->nr_migratepages)
1097 || cc->contended) {
David Rientjesa46cbf32016-07-14 12:06:50 -07001098 if (isolate_start_pfn >= block_end_pfn) {
1099 /*
1100 * Restart at previous pageblock if more
1101 * freepages can be isolated next time.
1102 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001103 isolate_start_pfn =
1104 block_start_pfn - pageblock_nr_pages;
David Rientjesa46cbf32016-07-14 12:06:50 -07001105 }
Vlastimil Babkabe976572014-06-04 16:10:41 -07001106 break;
David Rientjesa46cbf32016-07-14 12:06:50 -07001107 } else if (isolate_start_pfn < block_end_pfn) {
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001108 /*
David Rientjesa46cbf32016-07-14 12:06:50 -07001109 * If isolation failed early, do not continue
1110 * needlessly.
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001111 */
David Rientjesa46cbf32016-07-14 12:06:50 -07001112 break;
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001113 }
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +01001114 }
1115
Joonsoo Kim66c64222016-07-26 15:23:40 -07001116 /* __isolate_free_page() does not map the pages */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001117 map_pages(freelist);
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +01001118
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001119 /*
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001120 * Record where the free scanner will restart next time. Either we
1121 * broke from the loop and set isolate_start_pfn based on the last
1122 * call to isolate_freepages_block(), or we met the migration scanner
1123 * and the loop terminated due to isolate_start_pfn < low_pfn
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001124 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001125 cc->free_pfn = isolate_start_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -07001126}
1127
1128/*
1129 * This is a migrate-callback that "allocates" freepages by taking pages
1130 * from the isolated freelists in the block we are migrating to.
1131 */
1132static struct page *compaction_alloc(struct page *migratepage,
1133 unsigned long data,
1134 int **result)
1135{
1136 struct compact_control *cc = (struct compact_control *)data;
1137 struct page *freepage;
1138
Vlastimil Babkabe976572014-06-04 16:10:41 -07001139 /*
1140 * Isolate free pages if necessary, and if we are not aborting due to
1141 * contention.
1142 */
Mel Gorman748446b2010-05-24 14:32:27 -07001143 if (list_empty(&cc->freepages)) {
Vlastimil Babkabe976572014-06-04 16:10:41 -07001144 if (!cc->contended)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001145 isolate_freepages(cc);
Mel Gorman748446b2010-05-24 14:32:27 -07001146
1147 if (list_empty(&cc->freepages))
1148 return NULL;
1149 }
1150
1151 freepage = list_entry(cc->freepages.next, struct page, lru);
1152 list_del(&freepage->lru);
1153 cc->nr_freepages--;
1154
1155 return freepage;
1156}
1157
1158/*
David Rientjesd53aea32014-06-04 16:08:26 -07001159 * This is a migrate-callback that "frees" freepages back to the isolated
1160 * freelist. All pages on the freelist are from the same zone, so there is no
1161 * special handling needed for NUMA.
1162 */
1163static void compaction_free(struct page *page, unsigned long data)
1164{
1165 struct compact_control *cc = (struct compact_control *)data;
1166
1167 list_add(&page->lru, &cc->freepages);
1168 cc->nr_freepages++;
1169}
1170
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001171/* possible outcome of isolate_migratepages */
1172typedef enum {
1173 ISOLATE_ABORT, /* Abort compaction now */
1174 ISOLATE_NONE, /* No pages isolated, continue scanning */
1175 ISOLATE_SUCCESS, /* Pages isolated, migrate */
1176} isolate_migrate_t;
1177
1178/*
Eric B Munson5bbe3542015-04-15 16:13:20 -07001179 * Allow userspace to control policy on scanning the unevictable LRU for
1180 * compactable pages.
1181 */
1182int sysctl_compact_unevictable_allowed __read_mostly = 1;
1183
1184/*
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001185 * Isolate all pages that can be migrated from the first suitable block,
1186 * starting at the block pointed to by the migrate scanner pfn within
1187 * compact_control.
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001188 */
1189static isolate_migrate_t isolate_migratepages(struct zone *zone,
1190 struct compact_control *cc)
1191{
Joonsoo Kime1409c32016-03-15 14:57:48 -07001192 unsigned long block_start_pfn;
1193 unsigned long block_end_pfn;
1194 unsigned long low_pfn;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001195 struct page *page;
1196 const isolate_mode_t isolate_mode =
Eric B Munson5bbe3542015-04-15 16:13:20 -07001197 (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
Hugh Dickins1d2047f2016-07-28 15:48:41 -07001198 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001199
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001200 /*
1201 * Start at where we last stopped, or beginning of the zone as
1202 * initialized by compact_zone()
1203 */
1204 low_pfn = cc->migrate_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -07001205 block_start_pfn = pageblock_start_pfn(low_pfn);
Joonsoo Kime1409c32016-03-15 14:57:48 -07001206 if (block_start_pfn < zone->zone_start_pfn)
1207 block_start_pfn = zone->zone_start_pfn;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001208
1209 /* Only scan within a pageblock boundary */
Vlastimil Babka06b66402016-05-19 17:11:48 -07001210 block_end_pfn = pageblock_end_pfn(low_pfn);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001211
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001212 /*
1213 * Iterate over whole pageblocks until we find the first suitable.
1214 * Do not cross the free scanner.
1215 */
Joonsoo Kime1409c32016-03-15 14:57:48 -07001216 for (; block_end_pfn <= cc->free_pfn;
1217 low_pfn = block_end_pfn,
1218 block_start_pfn = block_end_pfn,
1219 block_end_pfn += pageblock_nr_pages) {
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001220
1221 /*
1222 * This can potentially iterate a massively long zone with
1223 * many pageblocks unsuitable, so periodically check if we
1224 * need to schedule, or even abort async compaction.
1225 */
1226 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1227 && compact_should_abort(cc))
1228 break;
1229
Joonsoo Kime1409c32016-03-15 14:57:48 -07001230 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1231 zone);
Vlastimil Babka7d49d882014-10-09 15:27:11 -07001232 if (!page)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001233 continue;
1234
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001235 /* If isolation recently failed, do not retry */
1236 if (!isolation_suitable(cc, page))
1237 continue;
1238
1239 /*
1240 * For async compaction, also only scan in MOVABLE blocks.
1241 * Async compaction is optimistic to see if the minimum amount
1242 * of work satisfies the allocation.
1243 */
1244 if (cc->mode == MIGRATE_ASYNC &&
1245 !migrate_async_suitable(get_pageblock_migratetype(page)))
1246 continue;
1247
1248 /* Perform the isolation */
Joonsoo Kime1409c32016-03-15 14:57:48 -07001249 low_pfn = isolate_migratepages_block(cc, low_pfn,
1250 block_end_pfn, isolate_mode);
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001251
Ming Ling6afcf8e2016-12-12 16:42:26 -08001252 if (!low_pfn || cc->contended)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001253 return ISOLATE_ABORT;
1254
1255 /*
1256 * Either we isolated something and proceed with migration. Or
1257 * we failed and compact_zone should decide if we should
1258 * continue or not.
1259 */
1260 break;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001261 }
1262
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001263 /* Record where migration scanner will be restarted. */
1264 cc->migrate_pfn = low_pfn;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001265
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001266 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001267}
1268
Yaowei Bai21c527a2015-11-05 18:47:20 -08001269/*
1270 * order == -1 is expected when compacting via
1271 * /proc/sys/vm/compact_memory
1272 */
1273static inline bool is_via_compact_memory(int order)
1274{
1275 return order == -1;
1276}
1277
Michal Hockoea7ab982016-05-20 16:56:38 -07001278static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc,
David Rientjes6d7ce552014-10-09 15:27:27 -07001279 const int migratetype)
Mel Gorman748446b2010-05-24 14:32:27 -07001280{
Mel Gorman8fb74b92013-01-11 14:32:16 -08001281 unsigned int order;
Andrea Arcangeli5a03b052011-01-13 15:47:11 -08001282 unsigned long watermark;
Mel Gorman56de7262010-05-24 14:32:30 -07001283
Vlastimil Babkabe976572014-06-04 16:10:41 -07001284 if (cc->contended || fatal_signal_pending(current))
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08001285 return COMPACT_CONTENDED;
Mel Gorman748446b2010-05-24 14:32:27 -07001286
Mel Gorman753341a2012-10-08 16:32:40 -07001287 /* Compaction run completes if the migrate and free scanner meet */
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001288 if (compact_scanners_met(cc)) {
Vlastimil Babka55b7c4c2014-01-21 15:51:11 -08001289 /* Let the next compaction start anew. */
Vlastimil Babka023336412015-09-08 15:02:42 -07001290 reset_cached_positions(zone);
Vlastimil Babka55b7c4c2014-01-21 15:51:11 -08001291
Mel Gorman62997022012-10-08 16:32:47 -07001292 /*
1293 * Mark that the PG_migrate_skip information should be cleared
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001294 * by kswapd when it goes to sleep. kcompactd does not set the
Mel Gorman62997022012-10-08 16:32:47 -07001295		 * flag itself, as the decision to clear it should be based
1296		 * directly on an allocation request.
1297 */
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001298 if (cc->direct_compaction)
Mel Gorman62997022012-10-08 16:32:47 -07001299 zone->compact_blockskip_flush = true;
1300
Michal Hockoc8f7de02016-05-20 16:56:47 -07001301 if (cc->whole_zone)
1302 return COMPACT_COMPLETE;
1303 else
1304 return COMPACT_PARTIAL_SKIPPED;
Mel Gormanbb13ffe2012-10-08 16:32:41 -07001305 }
Mel Gorman748446b2010-05-24 14:32:27 -07001306
Yaowei Bai21c527a2015-11-05 18:47:20 -08001307 if (is_via_compact_memory(cc->order))
Mel Gorman56de7262010-05-24 14:32:30 -07001308 return COMPACT_CONTINUE;
1309
Michal Hocko3957c772011-06-15 15:08:25 -07001310 /* Compaction run is not finished if the watermark is not met */
Vlastimil Babkaf2b82282016-10-07 16:57:50 -07001311 watermark = zone->watermark[cc->alloc_flags & ALLOC_WMARK_MASK];
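	/*
	 * The index into zone->watermark[] mirrors what the allocator itself
	 * would use for this request (ALLOC_WMARK_MIN/LOW/HIGH), so the check
	 * below agrees with the eventual allocation attempt.
	 */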
Michal Hocko3957c772011-06-15 15:08:25 -07001312
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001313 if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
1314 cc->alloc_flags))
Michal Hocko3957c772011-06-15 15:08:25 -07001315 return COMPACT_CONTINUE;
1316
Mel Gorman56de7262010-05-24 14:32:30 -07001317 /* Direct compactor: Is a suitable page free? */
Mel Gorman8fb74b92013-01-11 14:32:16 -08001318 for (order = cc->order; order < MAX_ORDER; order++) {
1319 struct free_area *area = &zone->free_area[order];
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001320 bool can_steal;
Mel Gorman56de7262010-05-24 14:32:30 -07001321
Mel Gorman8fb74b92013-01-11 14:32:16 -08001322 /* Job done if page is free of the right migratetype */
David Rientjes6d7ce552014-10-09 15:27:27 -07001323 if (!list_empty(&area->free_list[migratetype]))
Vlastimil Babkacf378312016-10-07 16:57:41 -07001324 return COMPACT_SUCCESS;
Mel Gorman8fb74b92013-01-11 14:32:16 -08001325
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001326#ifdef CONFIG_CMA
1327 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1328 if (migratetype == MIGRATE_MOVABLE &&
1329 !list_empty(&area->free_list[MIGRATE_CMA]))
Vlastimil Babkacf378312016-10-07 16:57:41 -07001330 return COMPACT_SUCCESS;
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001331#endif
1332 /*
1333 * Job done if allocation would steal freepages from
1334 * other migratetype buddy lists.
1335 */
1336 if (find_suitable_fallback(area, order, migratetype,
1337 true, &can_steal) != -1)
Vlastimil Babkacf378312016-10-07 16:57:41 -07001338 return COMPACT_SUCCESS;
Mel Gorman56de7262010-05-24 14:32:30 -07001339 }
1340
Joonsoo Kim837d0262015-02-11 15:27:06 -08001341 return COMPACT_NO_SUITABLE_PAGE;
1342}
1343
Michal Hockoea7ab982016-05-20 16:56:38 -07001344static enum compact_result compact_finished(struct zone *zone,
1345 struct compact_control *cc,
1346 const int migratetype)
Joonsoo Kim837d0262015-02-11 15:27:06 -08001347{
1348 int ret;
1349
1350 ret = __compact_finished(zone, cc, migratetype);
1351 trace_mm_compaction_finished(zone, cc->order, ret);
1352 if (ret == COMPACT_NO_SUITABLE_PAGE)
1353 ret = COMPACT_CONTINUE;
1354
1355 return ret;
Mel Gorman748446b2010-05-24 14:32:27 -07001356}
1357
Mel Gorman3e7d3442011-01-13 15:45:56 -08001358/*
1359 * compaction_suitable: Is this suitable to run compaction on this zone now?
1360 * Returns
1361 * COMPACT_SKIPPED - If there are too few free pages for compaction
Vlastimil Babkacf378312016-10-07 16:57:41 -07001362 * COMPACT_SUCCESS - If the allocation would succeed without compaction
Mel Gorman3e7d3442011-01-13 15:45:56 -08001363 * COMPACT_CONTINUE - If compaction should run now
1364 */
Michal Hockoea7ab982016-05-20 16:56:38 -07001365static enum compact_result __compaction_suitable(struct zone *zone, int order,
Mel Gormanc6038442016-05-19 17:13:38 -07001366 unsigned int alloc_flags,
Michal Hocko86a294a2016-05-20 16:57:12 -07001367 int classzone_idx,
1368 unsigned long wmark_target)
Mel Gorman3e7d3442011-01-13 15:45:56 -08001369{
Mel Gorman3e7d3442011-01-13 15:45:56 -08001370 unsigned long watermark;
1371
Yaowei Bai21c527a2015-11-05 18:47:20 -08001372 if (is_via_compact_memory(order))
Michal Hocko3957c772011-06-15 15:08:25 -07001373 return COMPACT_CONTINUE;
1374
Vlastimil Babkaf2b82282016-10-07 16:57:50 -07001375 watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001376 /*
1377 * If watermarks for high-order allocation are already met, there
1378 * should be no need for compaction at all.
1379 */
1380 if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1381 alloc_flags))
Vlastimil Babkacf378312016-10-07 16:57:41 -07001382 return COMPACT_SUCCESS;
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001383
Michal Hocko3957c772011-06-15 15:08:25 -07001384 /*
Vlastimil Babka9861a622016-10-07 16:57:53 -07001385 * Watermarks for order-0 must be met for compaction to be able to
Vlastimil Babka984fdba2016-10-07 16:57:57 -07001386 * isolate free pages for migration targets. This means that the
1387 * watermark and alloc_flags have to match, or be more pessimistic than
1388 * the check in __isolate_free_page(). We don't use the direct
1389 * compactor's alloc_flags, as they are not relevant for freepage
1390 * isolation. We however do use the direct compactor's classzone_idx to
1391 * skip over zones where lowmem reserves would prevent allocation even
1392 * if compaction succeeds.
Vlastimil Babka8348faf2016-10-07 16:58:00 -07001393	 * For costly orders, we require the low watermark instead of min for
1394	 * compaction to proceed, to increase its chances of success.
Vlastimil Babka984fdba2016-10-07 16:57:57 -07001395 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
1396 * suitable migration targets
Mel Gorman3e7d3442011-01-13 15:45:56 -08001397 */
Vlastimil Babka8348faf2016-10-07 16:58:00 -07001398 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
1399 low_wmark_pages(zone) : min_wmark_pages(zone);
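	/*
	 * compact_gap() (defined in compaction.h) adds slack of roughly twice
	 * the requested allocation size, so that enough free pages remain to
	 * serve as migration targets while the allocation itself can still be
	 * satisfied.
	 */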
1400 watermark += compact_gap(order);
Michal Hocko86a294a2016-05-20 16:57:12 -07001401 if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
Vlastimil Babka984fdba2016-10-07 16:57:57 -07001402 ALLOC_CMA, wmark_target))
Mel Gorman3e7d3442011-01-13 15:45:56 -08001403 return COMPACT_SKIPPED;
1404
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07001405 return COMPACT_CONTINUE;
1406}
1407
1408enum compact_result compaction_suitable(struct zone *zone, int order,
1409 unsigned int alloc_flags,
1410 int classzone_idx)
1411{
1412 enum compact_result ret;
1413 int fragindex;
1414
1415 ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
1416 zone_page_state(zone, NR_FREE_PAGES));
Mel Gorman3e7d3442011-01-13 15:45:56 -08001417 /*
1418 * fragmentation index determines if allocation failures are due to
1419 * low memory or external fragmentation
1420 *
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001421 * index of -1000 would imply allocations might succeed depending on
1422 * watermarks, but we already failed the high-order watermark check
Mel Gorman3e7d3442011-01-13 15:45:56 -08001423 * index towards 0 implies failure is due to lack of memory
1424 * index towards 1000 implies failure is due to fragmentation
1425 *
Vlastimil Babka20311422016-10-07 17:00:46 -07001426 * Only compact if a failure would be due to fragmentation. Also
1427 * ignore fragindex for non-costly orders where the alternative to
1428 * a successful reclaim/compaction is OOM. Fragindex and the
1429	 * vm.extfrag_threshold sysctl are meant as a heuristic to prevent
1430 * excessive compaction for costly orders, but it should not be at the
1431 * expense of system stability.
Mel Gorman3e7d3442011-01-13 15:45:56 -08001432 */
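	/*
	 * Illustrative example: with the default vm.extfrag_threshold of 500,
	 * a fragindex of 750 (failure mostly due to fragmentation) lets
	 * compaction continue, while 200 (failure mostly due to lack of free
	 * memory) yields COMPACT_NOT_SUITABLE_ZONE, which callers see as
	 * COMPACT_SKIPPED below; only the tracepoint keeps the distinction.
	 */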
Vlastimil Babka20311422016-10-07 17:00:46 -07001433 if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07001434 fragindex = fragmentation_index(zone, order);
1435 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1436 ret = COMPACT_NOT_SUITABLE_ZONE;
1437 }
Mel Gorman3e7d3442011-01-13 15:45:56 -08001438
Joonsoo Kim837d0262015-02-11 15:27:06 -08001439 trace_mm_compaction_suitable(zone, order, ret);
1440 if (ret == COMPACT_NOT_SUITABLE_ZONE)
1441 ret = COMPACT_SKIPPED;
1442
1443 return ret;
1444}
1445
Michal Hocko86a294a2016-05-20 16:57:12 -07001446bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
1447 int alloc_flags)
1448{
1449 struct zone *zone;
1450 struct zoneref *z;
1451
1452 /*
1453	 * Make sure at least one zone would pass __compaction_suitable if we
1454	 * continue retrying reclaim.
1455 */
1456 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1457 ac->nodemask) {
1458 unsigned long available;
1459 enum compact_result compact_result;
1460
1461 /*
1462 * Do not consider all the reclaimable memory because we do not
1463		 * want to thrash just for a single high-order allocation which
1464		 * is not guaranteed to succeed even if __compaction_suitable
1465		 * is happy about the watermark check.
1466 */
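		/*
		 * Best-case estimate: everything currently free plus a
		 * 1/order fraction of the reclaimable pages. The divisor is
		 * a deliberate heuristic rather than an exact bound.
		 */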
Mel Gorman5a1c84b2016-07-28 15:47:31 -07001467 available = zone_reclaimable_pages(zone) / order;
Michal Hocko86a294a2016-05-20 16:57:12 -07001468 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
1469 compact_result = __compaction_suitable(zone, order, alloc_flags,
1470 ac_classzone_idx(ac), available);
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07001471 if (compact_result != COMPACT_SKIPPED)
Michal Hocko86a294a2016-05-20 16:57:12 -07001472 return true;
1473 }
1474
1475 return false;
1476}
1477
Michal Hockoea7ab982016-05-20 16:56:38 -07001478static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
Mel Gorman748446b2010-05-24 14:32:27 -07001479{
Michal Hockoea7ab982016-05-20 16:56:38 -07001480 enum compact_result ret;
Mel Gormanc89511a2012-10-08 16:32:45 -07001481 unsigned long start_pfn = zone->zone_start_pfn;
Cody P Schafer108bcc92013-02-22 16:35:23 -08001482 unsigned long end_pfn = zone_end_pfn(zone);
David Rientjes6d7ce552014-10-09 15:27:27 -07001483 const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
David Rientjese0b9dae2014-06-04 16:08:28 -07001484 const bool sync = cc->mode != MIGRATE_ASYNC;
Mel Gorman748446b2010-05-24 14:32:27 -07001485
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001486 ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1487 cc->classzone_idx);
Michal Hockoc46649d2016-05-20 16:56:41 -07001488	/* Allocation would already succeed, or compaction is likely to fail */
Vlastimil Babkacf378312016-10-07 16:57:41 -07001489 if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
Mel Gorman3e7d3442011-01-13 15:45:56 -08001490 return ret;
Michal Hockoc46649d2016-05-20 16:56:41 -07001491
1492 /* huh, compaction_suitable is returning something unexpected */
1493 VM_BUG_ON(ret != COMPACT_CONTINUE);
Mel Gorman3e7d3442011-01-13 15:45:56 -08001494
Mel Gormanc89511a2012-10-08 16:32:45 -07001495 /*
Vlastimil Babkad3132e42014-01-21 15:51:08 -08001496 * Clear pageblock skip if there were failures recently and compaction
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001497 * is about to be retried after being deferred.
Vlastimil Babkad3132e42014-01-21 15:51:08 -08001498 */
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001499 if (compaction_restarting(zone, cc->order))
Vlastimil Babkad3132e42014-01-21 15:51:08 -08001500 __reset_isolation_suitable(zone);
1501
1502 /*
Mel Gormanc89511a2012-10-08 16:32:45 -07001503	 * Set up to move all movable pages to the end of the zone. Use cached
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001504 * information on where the scanners should start (unless we explicitly
1505 * want to compact the whole zone), but check that it is initialised
1506 * by ensuring the values are within zone boundaries.
Mel Gormanc89511a2012-10-08 16:32:45 -07001507 */
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001508 if (cc->whole_zone) {
Mel Gormanc89511a2012-10-08 16:32:45 -07001509 cc->migrate_pfn = start_pfn;
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001510 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1511 } else {
1512 cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1513 cc->free_pfn = zone->compact_cached_free_pfn;
1514 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
1515 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1516 zone->compact_cached_free_pfn = cc->free_pfn;
1517 }
1518 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
1519 cc->migrate_pfn = start_pfn;
1520 zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1521 zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1522 }
Michal Hockoc8f7de02016-05-20 16:56:47 -07001523
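		/*
		 * If the cached migrate scanner position happens to be the
		 * zone start, this run effectively covers the whole zone, so
		 * record that for __compact_finished() to report
		 * COMPACT_COMPLETE instead of COMPACT_PARTIAL_SKIPPED.
		 */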
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001524 if (cc->migrate_pfn == start_pfn)
1525 cc->whole_zone = true;
1526 }
Michal Hockoc8f7de02016-05-20 16:56:47 -07001527
Joonsoo Kim1a167182015-09-08 15:03:59 -07001528 cc->last_migrated_pfn = 0;
Mel Gorman748446b2010-05-24 14:32:27 -07001529
Joonsoo Kim16c4a092015-02-11 15:27:01 -08001530 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
1531 cc->free_pfn, end_pfn, sync);
Mel Gorman0eb927c2014-01-21 15:51:05 -08001532
Mel Gorman748446b2010-05-24 14:32:27 -07001533 migrate_prep_local();
1534
David Rientjes6d7ce552014-10-09 15:27:27 -07001535 while ((ret = compact_finished(zone, cc, migratetype)) ==
1536 COMPACT_CONTINUE) {
Minchan Kim9d502c12011-03-22 16:30:39 -07001537 int err;
Mel Gorman748446b2010-05-24 14:32:27 -07001538
Mel Gormanf9e35b32011-06-15 15:08:52 -07001539 switch (isolate_migratepages(zone, cc)) {
1540 case ISOLATE_ABORT:
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08001541 ret = COMPACT_CONTENDED;
Rafael Aquini5733c7d2012-12-11 16:02:47 -08001542 putback_movable_pages(&cc->migratepages);
Shaohua Lie64c5232012-10-08 16:32:27 -07001543 cc->nr_migratepages = 0;
Mel Gormanf9e35b32011-06-15 15:08:52 -07001544 goto out;
1545 case ISOLATE_NONE:
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001546 /*
1547 * We haven't isolated and migrated anything, but
1548 * there might still be unflushed migrations from
1549			 * the previous cc->order aligned block.
1550 */
1551 goto check_drain;
Mel Gormanf9e35b32011-06-15 15:08:52 -07001552 case ISOLATE_SUCCESS:
1553 ;
1554 }
Mel Gorman748446b2010-05-24 14:32:27 -07001555
David Rientjesd53aea32014-06-04 16:08:26 -07001556 err = migrate_pages(&cc->migratepages, compaction_alloc,
David Rientjese0b9dae2014-06-04 16:08:28 -07001557 compaction_free, (unsigned long)cc, cc->mode,
Mel Gorman7b2a2d42012-10-19 14:07:31 +01001558 MR_COMPACTION);
Mel Gorman748446b2010-05-24 14:32:27 -07001559
Vlastimil Babkaf8c93012014-06-04 16:08:32 -07001560 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1561 &cc->migratepages);
Mel Gorman748446b2010-05-24 14:32:27 -07001562
Vlastimil Babkaf8c93012014-06-04 16:08:32 -07001563 /* All pages were either migrated or will be released */
1564 cc->nr_migratepages = 0;
Minchan Kim9d502c12011-03-22 16:30:39 -07001565 if (err) {
Rafael Aquini5733c7d2012-12-11 16:02:47 -08001566 putback_movable_pages(&cc->migratepages);
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001567 /*
1568 * migrate_pages() may return -ENOMEM when scanners meet
1569 * and we want compact_finished() to detect it
1570 */
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001571 if (err == -ENOMEM && !compact_scanners_met(cc)) {
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08001572 ret = COMPACT_CONTENDED;
David Rientjes4bf2bba2012-07-11 14:02:13 -07001573 goto out;
1574 }
Vlastimil Babkafdd048e2016-05-19 17:11:55 -07001575 /*
1576 * We failed to migrate at least one page in the current
1577 * order-aligned block, so skip the rest of it.
1578 */
1579 if (cc->direct_compaction &&
1580 (cc->mode == MIGRATE_ASYNC)) {
1581 cc->migrate_pfn = block_end_pfn(
1582 cc->migrate_pfn - 1, cc->order);
1583 /* Draining pcplists is useless in this case */
1584 cc->last_migrated_pfn = 0;
1585
1586 }
Mel Gorman748446b2010-05-24 14:32:27 -07001587 }
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001588
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001589check_drain:
1590 /*
1591 * Has the migration scanner moved away from the previous
1592 * cc->order aligned block where we migrated from? If yes,
1593 * flush the pages that were freed, so that they can merge and
1594 * compact_finished() can detect immediately if allocation
1595 * would succeed.
1596 */
Joonsoo Kim1a167182015-09-08 15:03:59 -07001597 if (cc->order > 0 && cc->last_migrated_pfn) {
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001598 int cpu;
1599 unsigned long current_block_start =
Vlastimil Babka06b66402016-05-19 17:11:48 -07001600 block_start_pfn(cc->migrate_pfn, cc->order);
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001601
Joonsoo Kim1a167182015-09-08 15:03:59 -07001602 if (cc->last_migrated_pfn < current_block_start) {
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001603 cpu = get_cpu();
1604 lru_add_drain_cpu(cpu);
1605 drain_local_pages(zone);
1606 put_cpu();
1607 /* No more flushing until we migrate again */
Joonsoo Kim1a167182015-09-08 15:03:59 -07001608 cc->last_migrated_pfn = 0;
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001609 }
1610 }
1611
Mel Gorman748446b2010-05-24 14:32:27 -07001612 }
1613
Mel Gormanf9e35b32011-06-15 15:08:52 -07001614out:
Vlastimil Babka6bace092014-12-10 15:43:31 -08001615 /*
1616 * Release free pages and update where the free scanner should restart,
1617 * so we don't leave any returned pages behind in the next attempt.
1618 */
1619 if (cc->nr_freepages > 0) {
1620 unsigned long free_pfn = release_freepages(&cc->freepages);
1621
1622 cc->nr_freepages = 0;
1623 VM_BUG_ON(free_pfn == 0);
1624 /* The cached pfn is always the first in a pageblock */
Vlastimil Babka06b66402016-05-19 17:11:48 -07001625 free_pfn = pageblock_start_pfn(free_pfn);
Vlastimil Babka6bace092014-12-10 15:43:31 -08001626 /*
1627 * Only go back, not forward. The cached pfn might have been
1628 * already reset to zone end in compact_finished()
1629 */
1630 if (free_pfn > zone->compact_cached_free_pfn)
1631 zone->compact_cached_free_pfn = free_pfn;
1632 }
Mel Gorman748446b2010-05-24 14:32:27 -07001633
David Rientjes7f354a52017-02-22 15:44:50 -08001634 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
1635 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
1636
Joonsoo Kim16c4a092015-02-11 15:27:01 -08001637 trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1638 cc->free_pfn, end_pfn, sync, ret);
Mel Gorman0eb927c2014-01-21 15:51:05 -08001639
Mel Gorman748446b2010-05-24 14:32:27 -07001640 return ret;
1641}
Mel Gorman76ab0f52010-05-24 14:32:28 -07001642
Michal Hockoea7ab982016-05-20 16:56:38 -07001643static enum compact_result compact_zone_order(struct zone *zone, int order,
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001644 gfp_t gfp_mask, enum compact_priority prio,
Mel Gormanc6038442016-05-19 17:13:38 -07001645 unsigned int alloc_flags, int classzone_idx)
Mel Gorman56de7262010-05-24 14:32:30 -07001646{
Michal Hockoea7ab982016-05-20 16:56:38 -07001647 enum compact_result ret;
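	/*
	 * MIN_COMPACT_PRIORITY is the last-resort attempt: it compacts the
	 * whole zone and ignores both the pageblock skip hints and the
	 * per-block suitability check.
	 */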
Mel Gorman56de7262010-05-24 14:32:30 -07001648 struct compact_control cc = {
1649 .nr_freepages = 0,
1650 .nr_migratepages = 0,
David Rientjes7f354a52017-02-22 15:44:50 -08001651 .total_migrate_scanned = 0,
1652 .total_free_scanned = 0,
Mel Gorman56de7262010-05-24 14:32:30 -07001653 .order = order,
David Rientjes6d7ce552014-10-09 15:27:27 -07001654 .gfp_mask = gfp_mask,
Mel Gorman56de7262010-05-24 14:32:30 -07001655 .zone = zone,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07001656 .mode = (prio == COMPACT_PRIO_ASYNC) ?
1657 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001658 .alloc_flags = alloc_flags,
1659 .classzone_idx = classzone_idx,
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001660 .direct_compaction = true,
Vlastimil Babkaa8e025e2016-10-07 16:57:47 -07001661 .whole_zone = (prio == MIN_COMPACT_PRIORITY),
Vlastimil Babka9f7e3382016-10-07 17:00:37 -07001662 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
1663 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
Mel Gorman56de7262010-05-24 14:32:30 -07001664 };
1665 INIT_LIST_HEAD(&cc.freepages);
1666 INIT_LIST_HEAD(&cc.migratepages);
1667
Shaohua Lie64c5232012-10-08 16:32:27 -07001668 ret = compact_zone(zone, &cc);
1669
1670 VM_BUG_ON(!list_empty(&cc.freepages));
1671 VM_BUG_ON(!list_empty(&cc.migratepages));
1672
Shaohua Lie64c5232012-10-08 16:32:27 -07001673 return ret;
Mel Gorman56de7262010-05-24 14:32:30 -07001674}
1675
Mel Gorman5e771902010-05-24 14:32:31 -07001676int sysctl_extfrag_threshold = 500;
1677
Mel Gorman56de7262010-05-24 14:32:30 -07001678/**
1679 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
Mel Gorman56de7262010-05-24 14:32:30 -07001680 * @gfp_mask: The GFP mask of the current allocation
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08001681 * @order: The order of the current allocation
1682 * @alloc_flags: The allocation flags of the current allocation
1683 * @ac: The context of current allocation
David Rientjese0b9dae2014-06-04 16:08:28 -07001684 * @prio: Determines how hard direct compaction should try to succeed
Mel Gorman56de7262010-05-24 14:32:30 -07001685 *
1686 * This is the main entry point for direct page compaction.
1687 */
Michal Hockoea7ab982016-05-20 16:56:38 -07001688enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07001689 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001690 enum compact_priority prio)
Mel Gorman56de7262010-05-24 14:32:30 -07001691{
Mel Gorman56de7262010-05-24 14:32:30 -07001692 int may_perform_io = gfp_mask & __GFP_IO;
Mel Gorman56de7262010-05-24 14:32:30 -07001693 struct zoneref *z;
1694 struct zone *zone;
Michal Hocko1d4746d2016-05-20 16:56:44 -07001695 enum compact_result rc = COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07001696
Michal Hocko73e64c52016-12-14 15:04:07 -08001697 /*
1698	 * Check if the GFP flags allow compaction - GFP_NOIO is a really
1699	 * tricky context because the migration might require IO
1700 */
1701 if (!may_perform_io)
Vlastimil Babka53853e22014-10-09 15:27:02 -07001702 return COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07001703
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07001704 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
Joonsoo Kim837d0262015-02-11 15:27:06 -08001705
Mel Gorman56de7262010-05-24 14:32:30 -07001706 /* Compact each zone in the list */
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08001707 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1708 ac->nodemask) {
Michal Hockoea7ab982016-05-20 16:56:38 -07001709 enum compact_result status;
Mel Gorman56de7262010-05-24 14:32:30 -07001710
Vlastimil Babkaa8e025e2016-10-07 16:57:47 -07001711 if (prio > MIN_COMPACT_PRIORITY
1712 && compaction_deferred(zone, order)) {
Michal Hocko1d4746d2016-05-20 16:56:44 -07001713 rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
Vlastimil Babka53853e22014-10-09 15:27:02 -07001714 continue;
Michal Hocko1d4746d2016-05-20 16:56:44 -07001715 }
Vlastimil Babka53853e22014-10-09 15:27:02 -07001716
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07001717 status = compact_zone_order(zone, order, gfp_mask, prio,
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001718 alloc_flags, ac_classzone_idx(ac));
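		/*
		 * compact_result values are ordered so that max() keeps the
		 * most advanced outcome seen across the candidate zones, with
		 * COMPACT_SUCCESS ranking above everything else.
		 */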
Mel Gorman56de7262010-05-24 14:32:30 -07001719 rc = max(status, rc);
1720
Vlastimil Babka7ceb0092016-10-07 16:57:44 -07001721 /* The allocation should succeed, stop compacting */
1722 if (status == COMPACT_SUCCESS) {
Vlastimil Babka53853e22014-10-09 15:27:02 -07001723 /*
1724 * We think the allocation will succeed in this zone,
1725 * but it is not certain, hence the false. The caller
1726 * will repeat this with true if allocation indeed
1727 * succeeds in this zone.
1728 */
1729 compaction_defer_reset(zone, order, false);
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001730
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001731 break;
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001732 }
1733
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07001734 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001735 status == COMPACT_PARTIAL_SKIPPED))
Vlastimil Babka53853e22014-10-09 15:27:02 -07001736 /*
1737 * We think that allocation won't succeed in this zone
1738 * so we defer compaction there. If it ends up
1739 * succeeding after all, it will be reset.
1740 */
1741 defer_compaction(zone, order);
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001742
1743 /*
1744 * We might have stopped compacting due to need_resched() in
1745 * async compaction, or due to a fatal signal detected. In that
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001746 * case do not try further zones
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001747 */
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001748 if ((prio == COMPACT_PRIO_ASYNC && need_resched())
1749 || fatal_signal_pending(current))
1750 break;
Mel Gorman56de7262010-05-24 14:32:30 -07001751 }
1752
1753 return rc;
1754}
1755
1756
Mel Gorman76ab0f52010-05-24 14:32:28 -07001757/* Compact all zones within a node */
Andrew Morton7103f162013-02-22 16:32:33 -08001758static void compact_node(int nid)
Rik van Riel7be62de2012-03-21 16:33:52 -07001759{
Vlastimil Babka791cae92016-10-07 16:57:38 -07001760 pg_data_t *pgdat = NODE_DATA(nid);
1761 int zoneid;
1762 struct zone *zone;
Rik van Riel7be62de2012-03-21 16:33:52 -07001763 struct compact_control cc = {
1764 .order = -1,
David Rientjes7f354a52017-02-22 15:44:50 -08001765 .total_migrate_scanned = 0,
1766 .total_free_scanned = 0,
David Rientjese0b9dae2014-06-04 16:08:28 -07001767 .mode = MIGRATE_SYNC,
David Rientjes91ca9182014-04-03 14:47:23 -07001768 .ignore_skip_hint = true,
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001769 .whole_zone = true,
Michal Hocko73e64c52016-12-14 15:04:07 -08001770 .gfp_mask = GFP_KERNEL,
Rik van Riel7be62de2012-03-21 16:33:52 -07001771 };
1772
Vlastimil Babka791cae92016-10-07 16:57:38 -07001773
1774 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1775
1776 zone = &pgdat->node_zones[zoneid];
1777 if (!populated_zone(zone))
1778 continue;
1779
1780 cc.nr_freepages = 0;
1781 cc.nr_migratepages = 0;
1782 cc.zone = zone;
1783 INIT_LIST_HEAD(&cc.freepages);
1784 INIT_LIST_HEAD(&cc.migratepages);
1785
1786 compact_zone(zone, &cc);
1787
1788 VM_BUG_ON(!list_empty(&cc.freepages));
1789 VM_BUG_ON(!list_empty(&cc.migratepages));
1790 }
Rik van Riel7be62de2012-03-21 16:33:52 -07001791}
1792
Mel Gorman76ab0f52010-05-24 14:32:28 -07001793/* Compact all nodes in the system */
Jason Liu7964c062013-01-11 14:31:47 -08001794static void compact_nodes(void)
Mel Gorman76ab0f52010-05-24 14:32:28 -07001795{
1796 int nid;
1797
Hugh Dickins8575ec22012-03-21 16:33:53 -07001798 /* Flush pending updates to the LRU lists */
1799 lru_add_drain_all();
1800
Mel Gorman76ab0f52010-05-24 14:32:28 -07001801 for_each_online_node(nid)
1802 compact_node(nid);
Mel Gorman76ab0f52010-05-24 14:32:28 -07001803}
1804
1805/* The written value is actually unused, all memory is compacted */
1806int sysctl_compact_memory;
1807
Yaowei Baifec4eb22016-01-14 15:20:09 -08001808/*
1809 * This is the entry point for compacting all nodes via
1810 * /proc/sys/vm/compact_memory
1811 */
Mel Gorman76ab0f52010-05-24 14:32:28 -07001812int sysctl_compaction_handler(struct ctl_table *table, int write,
1813 void __user *buffer, size_t *length, loff_t *ppos)
1814{
1815 if (write)
Jason Liu7964c062013-01-11 14:31:47 -08001816 compact_nodes();
Mel Gorman76ab0f52010-05-24 14:32:28 -07001817
1818 return 0;
1819}
Mel Gormaned4a6d72010-05-24 14:32:29 -07001820
Mel Gorman5e771902010-05-24 14:32:31 -07001821int sysctl_extfrag_handler(struct ctl_table *table, int write,
1822 void __user *buffer, size_t *length, loff_t *ppos)
1823{
1824 proc_dointvec_minmax(table, write, buffer, length, ppos);
1825
1826 return 0;
1827}
1828
Mel Gormaned4a6d72010-05-24 14:32:29 -07001829#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
Rashika Kheria74e77fb2014-04-03 14:48:01 -07001830static ssize_t sysfs_compact_node(struct device *dev,
Kay Sievers10fbcf42011-12-21 14:48:43 -08001831 struct device_attribute *attr,
Mel Gormaned4a6d72010-05-24 14:32:29 -07001832 const char *buf, size_t count)
1833{
Hugh Dickins8575ec22012-03-21 16:33:53 -07001834 int nid = dev->id;
1835
1836 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1837 /* Flush pending updates to the LRU lists */
1838 lru_add_drain_all();
1839
1840 compact_node(nid);
1841 }
Mel Gormaned4a6d72010-05-24 14:32:29 -07001842
1843 return count;
1844}
Kay Sievers10fbcf42011-12-21 14:48:43 -08001845static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
Mel Gormaned4a6d72010-05-24 14:32:29 -07001846
1847int compaction_register_node(struct node *node)
1848{
Kay Sievers10fbcf42011-12-21 14:48:43 -08001849 return device_create_file(&node->dev, &dev_attr_compact);
Mel Gormaned4a6d72010-05-24 14:32:29 -07001850}
1851
1852void compaction_unregister_node(struct node *node)
1853{
Kay Sievers10fbcf42011-12-21 14:48:43 -08001854 return device_remove_file(&node->dev, &dev_attr_compact);
Mel Gormaned4a6d72010-05-24 14:32:29 -07001855}
1856#endif /* CONFIG_SYSFS && CONFIG_NUMA */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001857
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001858static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1859{
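	/*
	 * Also report "work" when a stop was requested, so kcompactd_stop()
	 * can break the wait_event_freezable() sleep even when no compaction
	 * is pending.
	 */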
Vlastimil Babka172400c2016-05-05 16:22:32 -07001860 return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001861}
1862
1863static bool kcompactd_node_suitable(pg_data_t *pgdat)
1864{
1865 int zoneid;
1866 struct zone *zone;
1867 enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1868
Chen Feng6cd9dc32016-05-20 16:59:02 -07001869 for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001870 zone = &pgdat->node_zones[zoneid];
1871
1872 if (!populated_zone(zone))
1873 continue;
1874
1875 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1876 classzone_idx) == COMPACT_CONTINUE)
1877 return true;
1878 }
1879
1880 return false;
1881}
1882
1883static void kcompactd_do_work(pg_data_t *pgdat)
1884{
1885 /*
1886	 * With no special task, compact all zones so that a page of the requested
1887	 * order is allocatable.
1888 */
1889 int zoneid;
1890 struct zone *zone;
1891 struct compact_control cc = {
1892 .order = pgdat->kcompactd_max_order,
David Rientjes7f354a52017-02-22 15:44:50 -08001893 .total_migrate_scanned = 0,
1894 .total_free_scanned = 0,
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001895 .classzone_idx = pgdat->kcompactd_classzone_idx,
1896 .mode = MIGRATE_SYNC_LIGHT,
1897 .ignore_skip_hint = true,
Michal Hocko73e64c52016-12-14 15:04:07 -08001898 .gfp_mask = GFP_KERNEL,
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001899
1900 };
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001901 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1902 cc.classzone_idx);
David Rientjes7f354a52017-02-22 15:44:50 -08001903 count_compact_event(KCOMPACTD_WAKE);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001904
Chen Feng6cd9dc32016-05-20 16:59:02 -07001905 for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001906 int status;
1907
1908 zone = &pgdat->node_zones[zoneid];
1909 if (!populated_zone(zone))
1910 continue;
1911
1912 if (compaction_deferred(zone, cc.order))
1913 continue;
1914
1915 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1916 COMPACT_CONTINUE)
1917 continue;
1918
1919 cc.nr_freepages = 0;
1920 cc.nr_migratepages = 0;
David Rientjes7f354a52017-02-22 15:44:50 -08001921 cc.total_migrate_scanned = 0;
1922 cc.total_free_scanned = 0;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001923 cc.zone = zone;
1924 INIT_LIST_HEAD(&cc.freepages);
1925 INIT_LIST_HEAD(&cc.migratepages);
1926
Vlastimil Babka172400c2016-05-05 16:22:32 -07001927 if (kthread_should_stop())
1928 return;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001929 status = compact_zone(zone, &cc);
1930
Vlastimil Babka7ceb0092016-10-07 16:57:44 -07001931 if (status == COMPACT_SUCCESS) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001932 compaction_defer_reset(zone, cc.order, false);
Michal Hockoc8f7de02016-05-20 16:56:47 -07001933 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001934 /*
1935 * We use sync migration mode here, so we defer like
1936 * sync direct compaction does.
1937 */
1938 defer_compaction(zone, cc.order);
1939 }
1940
David Rientjes7f354a52017-02-22 15:44:50 -08001941 count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
1942 cc.total_migrate_scanned);
1943 count_compact_events(KCOMPACTD_FREE_SCANNED,
1944 cc.total_free_scanned);
1945
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001946 VM_BUG_ON(!list_empty(&cc.freepages));
1947 VM_BUG_ON(!list_empty(&cc.migratepages));
1948 }
1949
1950 /*
1951 * Regardless of success, we are done until woken up next. But remember
1952 * the requested order/classzone_idx in case it was higher/tighter than
1953 * our current ones
1954 */
1955 if (pgdat->kcompactd_max_order <= cc.order)
1956 pgdat->kcompactd_max_order = 0;
1957 if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
1958 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
1959}
1960
1961void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
1962{
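	/* Order-0 allocations never benefit from compaction */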
1963 if (!order)
1964 return;
1965
1966 if (pgdat->kcompactd_max_order < order)
1967 pgdat->kcompactd_max_order = order;
1968
1969 if (pgdat->kcompactd_classzone_idx > classzone_idx)
1970 pgdat->kcompactd_classzone_idx = classzone_idx;
1971
1972 if (!waitqueue_active(&pgdat->kcompactd_wait))
1973 return;
1974
1975 if (!kcompactd_node_suitable(pgdat))
1976 return;
1977
1978 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
1979 classzone_idx);
1980 wake_up_interruptible(&pgdat->kcompactd_wait);
1981}
1982
1983/*
1984 * The background compaction daemon, started as a kernel thread
1985 * from the init process.
1986 */
1987static int kcompactd(void *p)
1988{
1989 pg_data_t *pgdat = (pg_data_t*)p;
1990 struct task_struct *tsk = current;
1991
1992 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1993
1994 if (!cpumask_empty(cpumask))
1995 set_cpus_allowed_ptr(tsk, cpumask);
1996
1997 set_freezable();
1998
1999 pgdat->kcompactd_max_order = 0;
2000 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2001
2002 while (!kthread_should_stop()) {
2003 trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2004 wait_event_freezable(pgdat->kcompactd_wait,
2005 kcompactd_work_requested(pgdat));
2006
2007 kcompactd_do_work(pgdat);
2008 }
2009
2010 return 0;
2011}
2012
2013/*
2014 * This kcompactd start function will be called by init and node-hot-add.
2015 * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
2016 */
2017int kcompactd_run(int nid)
2018{
2019 pg_data_t *pgdat = NODE_DATA(nid);
2020 int ret = 0;
2021
2022 if (pgdat->kcompactd)
2023 return 0;
2024
2025 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2026 if (IS_ERR(pgdat->kcompactd)) {
2027 pr_err("Failed to start kcompactd on node %d\n", nid);
2028 ret = PTR_ERR(pgdat->kcompactd);
2029 pgdat->kcompactd = NULL;
2030 }
2031 return ret;
2032}
2033
2034/*
2035 * Called by memory hotplug when all memory in a node is offlined. Caller must
2036 * hold mem_hotplug_begin/end().
2037 */
2038void kcompactd_stop(int nid)
2039{
2040 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2041
2042 if (kcompactd) {
2043 kthread_stop(kcompactd);
2044 NODE_DATA(nid)->kcompactd = NULL;
2045 }
2046}
2047
2048/*
2049 * It's optimal to keep kcompactd on the same CPUs as its node's memory,
2050 * but that is not required for correctness. So if the last cpu in a node
2051 * goes away, kcompactd is allowed to run anywhere; as the first one comes
2052 * back, its cpu binding is restored.
2053 */
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002054static int kcompactd_cpu_online(unsigned int cpu)
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002055{
2056 int nid;
2057
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002058 for_each_node_state(nid, N_MEMORY) {
2059 pg_data_t *pgdat = NODE_DATA(nid);
2060 const struct cpumask *mask;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002061
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002062 mask = cpumask_of_node(pgdat->node_id);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002063
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002064 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2065 /* One of our CPUs online: restore mask */
2066 set_cpus_allowed_ptr(pgdat->kcompactd, mask);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002067 }
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002068 return 0;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002069}
2070
2071static int __init kcompactd_init(void)
2072{
2073 int nid;
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002074 int ret;
2075
2076 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2077 "mm/compaction:online",
2078 kcompactd_cpu_online, NULL);
2079 if (ret < 0) {
2080 pr_err("kcompactd: failed to register hotplug callbacks.\n");
2081 return ret;
2082 }
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002083
2084 for_each_node_state(nid, N_MEMORY)
2085 kcompactd_run(nid);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002086 return 0;
2087}
2088subsys_initcall(kcompactd_init)
2089
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01002090#endif /* CONFIG_COMPACTION */