// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)

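/*
 * For illustration (assuming pageblock_order == 9, i.e. 512 pages per
 * pageblock, as on typical x86-64 configurations): for pfn 0x12345,
 * pageblock_start_pfn() yields 0x12200 and pageblock_end_pfn() yields
 * 0x12400, i.e. the half-open pfn range [0x12200, 0x12400) covering the
 * whole pageblock containing that pfn.
 */
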
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}
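
/*
 * Note: the order used above comes from page_private(), which
 * isolate_freepages_block() below sets via set_page_private(page, order)
 * when it takes a buddy page off the free list. After post_alloc_hook()
 * and split_page(), every entry on the list is an order-0 page ready to
 * be handed out by compaction_alloc().
 */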

#ifdef CONFIG_COMPACTION

int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear the registered address_space value while keeping the
	 * PAGE_MAPPING_MOVABLE flag, so that the VM can tell that the driver
	 * has released the page after isolation and migration does not try
	 * to put it back.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);
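
/*
 * In short, the tagging scheme above works as follows: a driver-owned
 * movable page stores its address_space pointer in page->mapping with the
 * low PAGE_MAPPING_MOVABLE bit set. __ClearPageMovable() strips only the
 * address_space bits and keeps the flag bit, so a page already released by
 * its driver is still recognisable as having been movable without claiming
 * a live mapping.
 */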

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
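
/*
 * Worked example of the backoff above (illustrative numbers): after the
 * first failure compact_defer_shift is 1, so compaction is attempted at
 * most once per 1 << 1 == 2 allocation-time requests for that zone and
 * order; after repeated failures the shift saturates at
 * COMPACT_MAX_DEFER_SHIFT (6), i.e. at most one attempt per 64 requests,
 * and compaction_restarting() reports true once that maximum backoff has
 * been exhausted.
 */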

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		if (zone != page_zone(page))
			continue;
		if (pageblock_skip_persistent(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = true;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}
		cond_resched();
	}

	return false;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = true;
			return true;
		}

		cond_resched();
	}

	return false;
}
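
/*
 * Usage sketch (quoting the pattern used by the scanners below, not a new
 * helper): the per-pfn loops call compact_unlock_should_abort() every
 * SWAP_CLUSTER_MAX pfns while a coarse lock may be held, e.g.
 *
 *	if (!(blockpfn % SWAP_CLUSTER_MAX)
 *	    && compact_unlock_should_abort(&cc->zone->lock, flags,
 *							&locked, cc))
 *		break;
 *
 * and the per-pageblock loops call compact_should_abort() periodically
 * when no lock is taken at all.
 */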

/*
 * Isolate free pages onto a private freelist. If @strict is true, abort
 * and return 0 on any invalid PFNs or non-free pages inside the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction we do
			 * not spin on the lock, and we acquire it as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = page_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}
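
/*
 * Note on the two callers of isolate_freepages_block() in this file:
 * isolate_freepages_range() (the CMA path) passes strict == true and treats
 * any hole in the range as a hard failure, while the compaction free
 * scanner in isolate_freepages() passes strict == false and simply stops
 * once enough free pages have been collected for cc->migratepages.
 */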

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if the isolated free page
		 * is larger than pageblock order. In this case, we adjust the
		 * scanning range to the correct pageblock.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
	active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
			node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
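
/*
 * For illustration (made-up numbers): with 400,000 inactive plus 200,000
 * active LRU pages and 350,000 pages currently isolated on the node, the
 * check above is 350,000 > (400,000 + 200,000) / 2 == 300,000, so callers
 * throttle before isolating any more pages for migration.
 */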

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within the same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise the PFN of the
 * first page that was not scanned (which may be less than, equal to or
 * greater than end_pfn).
 *
 * The pages are isolated on the cc->migratepages list (not required to be
 * empty), and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn
 * field is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated.
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			goto isolate_fail;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page.
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
				!PageIsolated(page)) {
				if (locked) {
					spin_unlock_irqrestore(zone_lru_lock(zone),
									flags);
					locked = false;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow anonymous pages to be migrated in GFP_NOFS
		 * context, because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(zone_lru_lock(zone),
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageCompound under lock */
			if (!PageLRU(page))
				goto isolate_fail;

			/*
			 * The page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page))) {
				low_pfn += (1UL << compound_order(page)) - 1;
				goto isolate_fail;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);

		/* Try to isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			goto isolate_fail;

		VM_BUG_ON_PAGE(PageCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;
isolate_fail:
		if (!skip_on_failure)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
				locked = false;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(zone_lru_lock(zone), flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}
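
/*
 * Note: this block scanner has two callers below. isolate_migratepages()
 * (the compaction path) builds isolate_mode from the sysctl setting and
 * cc->mode, while isolate_migratepages_range() (used for contiguous ranges
 * such as CMA allocations) always passes ISOLATE_UNEVICTABLE.
 */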

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		if (!pfn)
			break;

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

static bool suitable_migration_source(struct compact_control *cc,
					struct page *page)
{
	int block_mt;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth checking the order for a valid
		 * range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}
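
/*
 * Note the asymmetry between the two helpers above: the migration scanner
 * (suitable_migration_source()) is only restricted for async direct
 * compaction, where it confines itself to pageblocks compatible with the
 * allocation's migratetype, while the free scanner
 * (suitable_migration_target()) accepts only MIGRATE_MOVABLE or MIGRATE_CMA
 * blocks unless cc->ignore_block_suitable is set.
 */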
1042
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001043/*
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001044 * Test whether the free scanner has reached the same or lower pageblock than
1045 * the migration scanner, and compaction should thus terminate.
1046 */
1047static inline bool compact_scanners_met(struct compact_control *cc)
1048{
1049 return (cc->free_pfn >> pageblock_order)
1050 <= (cc->migrate_pfn >> pageblock_order);
1051}
1052
1053/*
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001054 * Based on information in the current compact_control, find blocks
1055 * suitable for isolating free pages from and then isolate them.
1056 */
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001057static void isolate_freepages(struct compact_control *cc)
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001058{
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001059 struct zone *zone = cc->zone;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001060 struct page *page;
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001061 unsigned long block_start_pfn; /* start of current pageblock */
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001062 unsigned long isolate_start_pfn; /* exact pfn we start at */
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001063 unsigned long block_end_pfn; /* end of current pageblock */
1064 unsigned long low_pfn; /* lowest pfn scanner is able to scan */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001065 struct list_head *freelist = &cc->freepages;
1066
1067 /*
1068 * Initialise the free scanner. The starting point is where we last
Vlastimil Babka49e068f2014-05-06 12:50:03 -07001069 * successfully isolated from, zone-cached value, or the end of the
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001070 * zone when isolating for the first time. For looping we also need
1071 * this pfn aligned down to the pageblock boundary, because we do
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001072 * block_start_pfn -= pageblock_nr_pages in the for loop.
1073 * For ending point, take care when isolating in last pageblock of a
1074 * a zone which ends in the middle of a pageblock.
Vlastimil Babka49e068f2014-05-06 12:50:03 -07001075 * The low boundary is the end of the pageblock the migration scanner
1076 * is using.
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001077 */
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001078 isolate_start_pfn = cc->free_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -07001079 block_start_pfn = pageblock_start_pfn(cc->free_pfn);
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001080 block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1081 zone_end_pfn(zone));
Vlastimil Babka06b66402016-05-19 17:11:48 -07001082 low_pfn = pageblock_end_pfn(cc->migrate_pfn);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001083
1084 /*
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001085 * Isolate free pages until enough are available to migrate the
1086 * pages on cc->migratepages. We stop searching if the migrate
1087 * and free page scanners meet or enough free pages are isolated.
1088 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001089 for (; block_start_pfn >= low_pfn;
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001090 block_end_pfn = block_start_pfn,
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001091 block_start_pfn -= pageblock_nr_pages,
1092 isolate_start_pfn = block_start_pfn) {
David Rientjesf6ea3ad2013-09-30 13:45:03 -07001093 /*
1094 * This can iterate a massively long zone without finding any
1095 * suitable migration targets, so periodically check if we need
Vlastimil Babkabe976572014-06-04 16:10:41 -07001096 * to schedule, or even abort async compaction.
David Rientjesf6ea3ad2013-09-30 13:45:03 -07001097 */
Vlastimil Babkabe976572014-06-04 16:10:41 -07001098 if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1099 && compact_should_abort(cc))
1100 break;
David Rientjesf6ea3ad2013-09-30 13:45:03 -07001101
Vlastimil Babka7d49d882014-10-09 15:27:11 -07001102 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1103 zone);
1104 if (!page)
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001105 continue;
1106
1107 /* Check the block is suitable for migration */
Vlastimil Babka9f7e3382016-10-07 17:00:37 -07001108 if (!suitable_migration_target(cc, page))
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001109 continue;
Linus Torvalds68e3e922012-06-03 20:05:57 -07001110
Mel Gormanbb13ffe2012-10-08 16:32:41 -07001111 /* If isolation recently failed, do not retry */
1112 if (!isolation_suitable(cc, page))
1113 continue;
1114
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001115 /* Found a block suitable for isolating free pages from. */
David Rientjesa46cbf32016-07-14 12:06:50 -07001116 isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
1117 freelist, false);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001118
1119 /*
David Rientjesa46cbf32016-07-14 12:06:50 -07001120 * If we isolated enough freepages, or aborted due to lock
1121 * contention, terminate.
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001122 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001123 if ((cc->nr_freepages >= cc->nr_migratepages)
1124 || cc->contended) {
David Rientjesa46cbf32016-07-14 12:06:50 -07001125 if (isolate_start_pfn >= block_end_pfn) {
1126 /*
1127 * Restart at previous pageblock if more
1128 * freepages can be isolated next time.
1129 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001130 isolate_start_pfn =
1131 block_start_pfn - pageblock_nr_pages;
David Rientjesa46cbf32016-07-14 12:06:50 -07001132 }
Vlastimil Babkabe976572014-06-04 16:10:41 -07001133 break;
David Rientjesa46cbf32016-07-14 12:06:50 -07001134 } else if (isolate_start_pfn < block_end_pfn) {
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001135 /*
David Rientjesa46cbf32016-07-14 12:06:50 -07001136 * If isolation failed early, do not continue
1137 * needlessly.
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001138 */
David Rientjesa46cbf32016-07-14 12:06:50 -07001139 break;
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001140 }
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +01001141 }
1142
Joonsoo Kim66c64222016-07-26 15:23:40 -07001143 /* __isolate_free_page() does not map the pages */
Mel Gorman4469ab92019-03-05 15:44:39 -08001144 split_map_pages(freelist);
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +01001145
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001146 /*
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001147 * Record where the free scanner will restart next time. Either we
1148 * broke from the loop and set isolate_start_pfn based on the last
1149 * call to isolate_freepages_block(), or we met the migration scanner
1150 * and the loop terminated due to isolate_start_pfn < low_pfn
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001151 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001152 cc->free_pfn = isolate_start_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -07001153}
1154
1155/*
1156 * This is a migrate-callback that "allocates" freepages by taking pages
1157 * from the isolated freelists in the block we are migrating to.
1158 */
1159static struct page *compaction_alloc(struct page *migratepage,
Michal Hocko666feb22018-04-10 16:30:03 -07001160 unsigned long data)
Mel Gorman748446b2010-05-24 14:32:27 -07001161{
1162 struct compact_control *cc = (struct compact_control *)data;
1163 struct page *freepage;
1164
Vlastimil Babkabe976572014-06-04 16:10:41 -07001165 /*
1166 * Isolate free pages if necessary, and if we are not aborting due to
1167 * contention.
1168 */
Mel Gorman748446b2010-05-24 14:32:27 -07001169 if (list_empty(&cc->freepages)) {
Vlastimil Babkabe976572014-06-04 16:10:41 -07001170 if (!cc->contended)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001171 isolate_freepages(cc);
Mel Gorman748446b2010-05-24 14:32:27 -07001172
1173 if (list_empty(&cc->freepages))
1174 return NULL;
1175 }
1176
1177 freepage = list_entry(cc->freepages.next, struct page, lru);
1178 list_del(&freepage->lru);
1179 cc->nr_freepages--;
1180
1181 return freepage;
1182}
1183
1184/*
David Rientjesd53aea32014-06-04 16:08:26 -07001185 * This is a migrate-callback that "frees" freepages back to the isolated
1186 * freelist. All pages on the freelist are from the same zone, so there is no
1187 * special handling needed for NUMA.
1188 */
1189static void compaction_free(struct page *page, unsigned long data)
1190{
1191 struct compact_control *cc = (struct compact_control *)data;
1192
1193 list_add(&page->lru, &cc->freepages);
1194 cc->nr_freepages++;
1195}
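/*
 * Note on how the two callbacks above fit together: compact_zone() hands
 * compaction_alloc()/compaction_free() to migrate_pages() as the target
 * allocation and undo hooks (see the migrate_pages() call further down).
 * A sketch of that wiring, with the surrounding error handling omitted:
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);
 *
 * Target pages that migrate_pages() does not end up consuming are handed
 * back through compaction_free() and stay on cc->freepages for the next
 * migration batch.
 */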
1196
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001197/* possible outcome of isolate_migratepages */
1198typedef enum {
1199 ISOLATE_ABORT, /* Abort compaction now */
1200 ISOLATE_NONE, /* No pages isolated, continue scanning */
1201 ISOLATE_SUCCESS, /* Pages isolated, migrate */
1202} isolate_migrate_t;
1203
1204/*
Eric B Munson5bbe3542015-04-15 16:13:20 -07001205 * Allow userspace to control policy on scanning the unevictable LRU for
1206 * compactable pages.
1207 */
1208int sysctl_compact_unevictable_allowed __read_mostly = 1;
1209
1210/*
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001211 * Isolate all pages that can be migrated from the first suitable block,
1212 * starting at the block pointed to by the migrate scanner pfn within
1213 * compact_control.
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001214 */
1215static isolate_migrate_t isolate_migratepages(struct zone *zone,
1216 struct compact_control *cc)
1217{
Joonsoo Kime1409c32016-03-15 14:57:48 -07001218 unsigned long block_start_pfn;
1219 unsigned long block_end_pfn;
1220 unsigned long low_pfn;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001221 struct page *page;
1222 const isolate_mode_t isolate_mode =
Eric B Munson5bbe3542015-04-15 16:13:20 -07001223 (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
Hugh Dickins1d2047f2016-07-28 15:48:41 -07001224 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001225
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001226 /*
1227 * Start at where we last stopped, or beginning of the zone as
1228 * initialized by compact_zone()
1229 */
1230 low_pfn = cc->migrate_pfn;
Vlastimil Babka06b66402016-05-19 17:11:48 -07001231 block_start_pfn = pageblock_start_pfn(low_pfn);
Joonsoo Kime1409c32016-03-15 14:57:48 -07001232 if (block_start_pfn < zone->zone_start_pfn)
1233 block_start_pfn = zone->zone_start_pfn;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001234
1235 /* Only scan within a pageblock boundary */
Vlastimil Babka06b66402016-05-19 17:11:48 -07001236 block_end_pfn = pageblock_end_pfn(low_pfn);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001237
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001238 /*
1239 * Iterate over whole pageblocks until we find the first suitable.
1240 * Do not cross the free scanner.
1241 */
Joonsoo Kime1409c32016-03-15 14:57:48 -07001242 for (; block_end_pfn <= cc->free_pfn;
1243 low_pfn = block_end_pfn,
1244 block_start_pfn = block_end_pfn,
1245 block_end_pfn += pageblock_nr_pages) {
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001246
1247 /*
1248 * This can potentially iterate a massively long zone with
1249 * many pageblocks unsuitable, so periodically check if we
1250 * need to schedule, or even abort async compaction.
1251 */
1252 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1253 && compact_should_abort(cc))
1254 break;
1255
Joonsoo Kime1409c32016-03-15 14:57:48 -07001256 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1257 zone);
Vlastimil Babka7d49d882014-10-09 15:27:11 -07001258 if (!page)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001259 continue;
1260
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001261 /* If isolation recently failed, do not retry */
1262 if (!isolation_suitable(cc, page))
1263 continue;
1264
1265 /*
1266 * For async compaction, also only scan in MOVABLE blocks.
1267 * Async compaction is optimistic to see if the minimum amount
1268 * of work satisfies the allocation.
1269 */
Vlastimil Babkab682deb2017-05-08 15:54:43 -07001270 if (!suitable_migration_source(cc, page))
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001271 continue;
1272
1273 /* Perform the isolation */
Joonsoo Kime1409c32016-03-15 14:57:48 -07001274 low_pfn = isolate_migratepages_block(cc, low_pfn,
1275 block_end_pfn, isolate_mode);
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001276
Ming Ling6afcf8e2016-12-12 16:42:26 -08001277 if (!low_pfn || cc->contended)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001278 return ISOLATE_ABORT;
1279
1280 /*
1281 * Either we isolated something and proceed with migration. Or
1282 * we failed and compact_zone should decide if we should
1283 * continue or not.
1284 */
1285 break;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001286 }
1287
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001288 /* Record where migration scanner will be restarted. */
1289 cc->migrate_pfn = low_pfn;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001290
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001291 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001292}
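/*
 * The migration scanner above walks pageblocks upwards from cc->migrate_pfn,
 * while isolate_freepages() walks downwards from cc->free_pfn at the other
 * end of the zone. The "block_end_pfn <= cc->free_pfn" loop condition here
 * and compact_scanners_met() in __compact_finished() are what terminate a
 * compaction run once the two scanners cross.
 */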
1293
Yaowei Bai21c527a2015-11-05 18:47:20 -08001294/*
1295 * order == -1 is expected when compacting via
1296 * /proc/sys/vm/compact_memory
1297 */
1298static inline bool is_via_compact_memory(int order)
1299{
1300 return order == -1;
1301}
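/*
 * compact_node() below sets order == -1 for the /proc/sys/vm/compact_memory
 * and per-node sysfs triggers. In that case both __compaction_suitable()
 * and __compact_finished() return COMPACT_CONTINUE early, so whole zones
 * are compacted regardless of watermarks or already-free pages of the
 * requested order.
 */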
1302
Mel Gorman40cacbc2019-03-05 15:44:36 -08001303static enum compact_result __compact_finished(struct compact_control *cc)
Mel Gorman748446b2010-05-24 14:32:27 -07001304{
Mel Gorman8fb74b92013-01-11 14:32:16 -08001305 unsigned int order;
Vlastimil Babkad39773a2017-05-08 15:54:46 -07001306 const int migratetype = cc->migratetype;
Mel Gorman56de7262010-05-24 14:32:30 -07001307
Vlastimil Babkabe976572014-06-04 16:10:41 -07001308 if (cc->contended || fatal_signal_pending(current))
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08001309 return COMPACT_CONTENDED;
Mel Gorman748446b2010-05-24 14:32:27 -07001310
Mel Gorman753341a2012-10-08 16:32:40 -07001311 /* Compaction run completes if the migrate and free scanner meet */
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001312 if (compact_scanners_met(cc)) {
Vlastimil Babka55b7c4c2014-01-21 15:51:11 -08001313 /* Let the next compaction start anew. */
Mel Gorman40cacbc2019-03-05 15:44:36 -08001314 reset_cached_positions(cc->zone);
Vlastimil Babka55b7c4c2014-01-21 15:51:11 -08001315
Mel Gorman62997022012-10-08 16:32:47 -07001316 /*
1317 * Mark that the PG_migrate_skip information should be cleared
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001318 * by kswapd when it goes to sleep. kcompactd does not set the
Mel Gorman62997022012-10-08 16:32:47 -07001319 * flag itself as the decision to clear it should be based
1320 * directly on an allocation request.
1321 */
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001322 if (cc->direct_compaction)
Mel Gorman40cacbc2019-03-05 15:44:36 -08001323 cc->zone->compact_blockskip_flush = true;
Mel Gorman62997022012-10-08 16:32:47 -07001324
Michal Hockoc8f7de02016-05-20 16:56:47 -07001325 if (cc->whole_zone)
1326 return COMPACT_COMPLETE;
1327 else
1328 return COMPACT_PARTIAL_SKIPPED;
Mel Gormanbb13ffe2012-10-08 16:32:41 -07001329 }
Mel Gorman748446b2010-05-24 14:32:27 -07001330
Yaowei Bai21c527a2015-11-05 18:47:20 -08001331 if (is_via_compact_memory(cc->order))
Mel Gorman56de7262010-05-24 14:32:30 -07001332 return COMPACT_CONTINUE;
1333
Vlastimil Babkabaf6a9a2017-05-08 15:54:52 -07001334 if (cc->finishing_block) {
1335 /*
1336 * We have finished the pageblock, but better check again that
1337 * we really succeeded.
1338 */
1339 if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
1340 cc->finishing_block = false;
1341 else
1342 return COMPACT_CONTINUE;
1343 }
1344
Mel Gorman56de7262010-05-24 14:32:30 -07001345 /* Direct compactor: Is a suitable page free? */
Mel Gorman8fb74b92013-01-11 14:32:16 -08001346 for (order = cc->order; order < MAX_ORDER; order++) {
Mel Gorman40cacbc2019-03-05 15:44:36 -08001347 struct free_area *area = &cc->zone->free_area[order];
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001348 bool can_steal;
Mel Gorman56de7262010-05-24 14:32:30 -07001349
Mel Gorman8fb74b92013-01-11 14:32:16 -08001350 /* Job done if page is free of the right migratetype */
David Rientjes6d7ce552014-10-09 15:27:27 -07001351 if (!list_empty(&area->free_list[migratetype]))
Vlastimil Babkacf378312016-10-07 16:57:41 -07001352 return COMPACT_SUCCESS;
Mel Gorman8fb74b92013-01-11 14:32:16 -08001353
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001354#ifdef CONFIG_CMA
1355 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1356 if (migratetype == MIGRATE_MOVABLE &&
1357 !list_empty(&area->free_list[MIGRATE_CMA]))
Vlastimil Babkacf378312016-10-07 16:57:41 -07001358 return COMPACT_SUCCESS;
Joonsoo Kim2149cda2015-04-14 15:45:21 -07001359#endif
1360 /*
1361 * Job done if allocation would steal freepages from
1362 * other migratetype buddy lists.
1363 */
1364 if (find_suitable_fallback(area, order, migratetype,
Vlastimil Babkabaf6a9a2017-05-08 15:54:52 -07001365 true, &can_steal) != -1) {
1366
1367 /* movable pages are OK in any pageblock */
1368 if (migratetype == MIGRATE_MOVABLE)
1369 return COMPACT_SUCCESS;
1370
1371 /*
1372 * We are stealing for a non-movable allocation. Make
1373 * sure we finish compacting the current pageblock
1374 * first so it is as free as possible and we won't
1375 * have to steal another one soon. This only applies
1376 * to sync compaction, as async compaction operates
1377 * on pageblocks of the same migratetype.
1378 */
1379 if (cc->mode == MIGRATE_ASYNC ||
1380 IS_ALIGNED(cc->migrate_pfn,
1381 pageblock_nr_pages)) {
1382 return COMPACT_SUCCESS;
1383 }
1384
1385 cc->finishing_block = true;
1386 return COMPACT_CONTINUE;
1387 }
Mel Gorman56de7262010-05-24 14:32:30 -07001388 }
1389
Joonsoo Kim837d0262015-02-11 15:27:06 -08001390 return COMPACT_NO_SUITABLE_PAGE;
1391}
1392
Mel Gorman40cacbc2019-03-05 15:44:36 -08001393static enum compact_result compact_finished(struct compact_control *cc)
Joonsoo Kim837d0262015-02-11 15:27:06 -08001394{
1395 int ret;
1396
Mel Gorman40cacbc2019-03-05 15:44:36 -08001397 ret = __compact_finished(cc);
1398 trace_mm_compaction_finished(cc->zone, cc->order, ret);
Joonsoo Kim837d0262015-02-11 15:27:06 -08001399 if (ret == COMPACT_NO_SUITABLE_PAGE)
1400 ret = COMPACT_CONTINUE;
1401
1402 return ret;
Mel Gorman748446b2010-05-24 14:32:27 -07001403}
1404
Mel Gorman3e7d3442011-01-13 15:45:56 -08001405/*
1406 * compaction_suitable: Is this suitable to run compaction on this zone now?
1407 * Returns
1408 * COMPACT_SKIPPED - If there are too few free pages for compaction
Vlastimil Babkacf378312016-10-07 16:57:41 -07001409 * COMPACT_SUCCESS - If the allocation would succeed without compaction
Mel Gorman3e7d3442011-01-13 15:45:56 -08001410 * COMPACT_CONTINUE - If compaction should run now
1411 */
Michal Hockoea7ab982016-05-20 16:56:38 -07001412static enum compact_result __compaction_suitable(struct zone *zone, int order,
Mel Gormanc6038442016-05-19 17:13:38 -07001413 unsigned int alloc_flags,
Michal Hocko86a294a2016-05-20 16:57:12 -07001414 int classzone_idx,
1415 unsigned long wmark_target)
Mel Gorman3e7d3442011-01-13 15:45:56 -08001416{
Mel Gorman3e7d3442011-01-13 15:45:56 -08001417 unsigned long watermark;
1418
Yaowei Bai21c527a2015-11-05 18:47:20 -08001419 if (is_via_compact_memory(order))
Michal Hocko3957c772011-06-15 15:08:25 -07001420 return COMPACT_CONTINUE;
1421
Mel Gormana9214442018-12-28 00:35:44 -08001422 watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001423 /*
1424 * If watermarks for high-order allocation are already met, there
1425 * should be no need for compaction at all.
1426 */
1427 if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1428 alloc_flags))
Vlastimil Babkacf378312016-10-07 16:57:41 -07001429 return COMPACT_SUCCESS;
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001430
Michal Hocko3957c772011-06-15 15:08:25 -07001431 /*
Vlastimil Babka9861a622016-10-07 16:57:53 -07001432 * Watermarks for order-0 must be met for compaction to be able to
Vlastimil Babka984fdba2016-10-07 16:57:57 -07001433 * isolate free pages for migration targets. This means that the
1434 * watermark and alloc_flags have to match, or be more pessimistic than
1435 * the check in __isolate_free_page(). We don't use the direct
1436 * compactor's alloc_flags, as they are not relevant for freepage
1437 * isolation. We however do use the direct compactor's classzone_idx to
1438 * skip over zones where lowmem reserves would prevent allocation even
1439 * if compaction succeeds.
Vlastimil Babka8348faf2016-10-07 16:58:00 -07001440 * For costly orders, we require low watermark instead of min for
1441 * compaction to proceed to increase its chances.
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09001442 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
1443 * suitable migration targets
Mel Gorman3e7d3442011-01-13 15:45:56 -08001444 */
Vlastimil Babka8348faf2016-10-07 16:58:00 -07001445 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
1446 low_wmark_pages(zone) : min_wmark_pages(zone);
1447 watermark += compact_gap(order);
Michal Hocko86a294a2016-05-20 16:57:12 -07001448 if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09001449 ALLOC_CMA, wmark_target))
Mel Gorman3e7d3442011-01-13 15:45:56 -08001450 return COMPACT_SKIPPED;
1451
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07001452 return COMPACT_CONTINUE;
1453}
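/*
 * Worked example for the order-0 check above, assuming the usual
 * compact_gap() definition of 2UL << order in mm/internal.h and a
 * PAGE_ALLOC_COSTLY_ORDER of 3: an order-9 request (a 2MB THP with 4KB
 * base pages) needs low_wmark + 1024 free order-0 pages (about 4MB of
 * headroom) before compaction is allowed to proceed, while a non-costly
 * order-3 request only needs min_wmark + 16 pages.
 */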
1454
1455enum compact_result compaction_suitable(struct zone *zone, int order,
1456 unsigned int alloc_flags,
1457 int classzone_idx)
1458{
1459 enum compact_result ret;
1460 int fragindex;
1461
1462 ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
1463 zone_page_state(zone, NR_FREE_PAGES));
Mel Gorman3e7d3442011-01-13 15:45:56 -08001464 /*
1465 * fragmentation index determines if allocation failures are due to
1466 * low memory or external fragmentation
1467 *
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001468 * index of -1000 would imply allocations might succeed depending on
1469 * watermarks, but we already failed the high-order watermark check
Mel Gorman3e7d3442011-01-13 15:45:56 -08001470 * index towards 0 implies failure is due to lack of memory
1471 * index towards 1000 implies failure is due to fragmentation
1472 *
Vlastimil Babka20311422016-10-07 17:00:46 -07001473 * Only compact if a failure would be due to fragmentation. Also
1474 * ignore fragindex for non-costly orders where the alternative to
1475 * a successful reclaim/compaction is OOM. Fragindex and the
1476 * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
1477 * excessive compaction for costly orders, but it should not be at the
1478 * expense of system stability.
Mel Gorman3e7d3442011-01-13 15:45:56 -08001479 */
Vlastimil Babka20311422016-10-07 17:00:46 -07001480 if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07001481 fragindex = fragmentation_index(zone, order);
1482 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1483 ret = COMPACT_NOT_SUITABLE_ZONE;
1484 }
Mel Gorman3e7d3442011-01-13 15:45:56 -08001485
Joonsoo Kim837d0262015-02-11 15:27:06 -08001486 trace_mm_compaction_suitable(zone, order, ret);
1487 if (ret == COMPACT_NOT_SUITABLE_ZONE)
1488 ret = COMPACT_SKIPPED;
1489
1490 return ret;
1491}
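/*
 * For costly orders the fragindex check above works against
 * sysctl_extfrag_threshold, which defaults to 500 (defined further down and
 * typically tuned via /proc/sys/vm/extfrag_threshold): a fragindex of, say,
 * 300 means the failure looks more like a genuine shortage of memory than
 * fragmentation, so compaction is skipped and reclaim is left to make
 * progress instead.
 */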
1492
Michal Hocko86a294a2016-05-20 16:57:12 -07001493bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
1494 int alloc_flags)
1495{
1496 struct zone *zone;
1497 struct zoneref *z;
1498
1499 /*
1500 * Make sure at least one zone would pass __compaction_suitable if we continue
1501 * retrying the reclaim.
1502 */
1503 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1504 ac->nodemask) {
1505 unsigned long available;
1506 enum compact_result compact_result;
1507
1508 /*
1509 * Do not consider all the reclaimable memory because we do not
1510 * want to thrash just for a single high-order allocation which
1511 * is not guaranteed to appear even if __compaction_suitable
1512 * is happy about the watermark check.
1513 */
Mel Gorman5a1c84b2016-07-28 15:47:31 -07001514 available = zone_reclaimable_pages(zone) / order;
Michal Hocko86a294a2016-05-20 16:57:12 -07001515 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
1516 compact_result = __compaction_suitable(zone, order, alloc_flags,
1517 ac_classzone_idx(ac), available);
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07001518 if (compact_result != COMPACT_SKIPPED)
Michal Hocko86a294a2016-05-20 16:57:12 -07001519 return true;
1520 }
1521
1522 return false;
1523}
1524
Mel Gorman40cacbc2019-03-05 15:44:36 -08001525static enum compact_result compact_zone(struct compact_control *cc)
Mel Gorman748446b2010-05-24 14:32:27 -07001526{
Michal Hockoea7ab982016-05-20 16:56:38 -07001527 enum compact_result ret;
Mel Gorman40cacbc2019-03-05 15:44:36 -08001528 unsigned long start_pfn = cc->zone->zone_start_pfn;
1529 unsigned long end_pfn = zone_end_pfn(cc->zone);
Mel Gorman566e54e2019-03-05 15:44:32 -08001530 unsigned long last_migrated_pfn;
David Rientjese0b9dae2014-06-04 16:08:28 -07001531 const bool sync = cc->mode != MIGRATE_ASYNC;
Mel Gorman748446b2010-05-24 14:32:27 -07001532
Vlastimil Babkad39773a2017-05-08 15:54:46 -07001533 cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
Mel Gorman40cacbc2019-03-05 15:44:36 -08001534 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001535 cc->classzone_idx);
Michal Hockoc46649d2016-05-20 16:56:41 -07001536 /* Compaction is likely to fail */
Vlastimil Babkacf378312016-10-07 16:57:41 -07001537 if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
Mel Gorman3e7d3442011-01-13 15:45:56 -08001538 return ret;
Michal Hockoc46649d2016-05-20 16:56:41 -07001539
1540 /* huh, compaction_suitable is returning something unexpected */
1541 VM_BUG_ON(ret != COMPACT_CONTINUE);
Mel Gorman3e7d3442011-01-13 15:45:56 -08001542
Mel Gormanc89511a2012-10-08 16:32:45 -07001543 /*
Vlastimil Babkad3132e42014-01-21 15:51:08 -08001544 * Clear pageblock skip if there were failures recently and compaction
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001545 * is about to be retried after being deferred.
Vlastimil Babkad3132e42014-01-21 15:51:08 -08001546 */
Mel Gorman40cacbc2019-03-05 15:44:36 -08001547 if (compaction_restarting(cc->zone, cc->order))
1548 __reset_isolation_suitable(cc->zone);
Vlastimil Babkad3132e42014-01-21 15:51:08 -08001549
1550 /*
Mel Gormanc89511a2012-10-08 16:32:45 -07001551 * Setup to move all movable pages to the end of the zone. Used cached
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001552 * information on where the scanners should start (unless we explicitly
1553 * want to compact the whole zone), but check that it is initialised
1554 * by ensuring the values are within zone boundaries.
Mel Gormanc89511a2012-10-08 16:32:45 -07001555 */
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001556 if (cc->whole_zone) {
Mel Gormanc89511a2012-10-08 16:32:45 -07001557 cc->migrate_pfn = start_pfn;
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001558 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1559 } else {
Mel Gorman40cacbc2019-03-05 15:44:36 -08001560 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
1561 cc->free_pfn = cc->zone->compact_cached_free_pfn;
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001562 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
1563 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
Mel Gorman40cacbc2019-03-05 15:44:36 -08001564 cc->zone->compact_cached_free_pfn = cc->free_pfn;
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001565 }
1566 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
1567 cc->migrate_pfn = start_pfn;
Mel Gorman40cacbc2019-03-05 15:44:36 -08001568 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1569 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001570 }
Michal Hockoc8f7de02016-05-20 16:56:47 -07001571
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001572 if (cc->migrate_pfn == start_pfn)
1573 cc->whole_zone = true;
1574 }
Michal Hockoc8f7de02016-05-20 16:56:47 -07001575
Mel Gorman566e54e2019-03-05 15:44:32 -08001576 last_migrated_pfn = 0;
Mel Gorman748446b2010-05-24 14:32:27 -07001577
Joonsoo Kim16c4a092015-02-11 15:27:01 -08001578 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
1579 cc->free_pfn, end_pfn, sync);
Mel Gorman0eb927c2014-01-21 15:51:05 -08001580
Mel Gorman748446b2010-05-24 14:32:27 -07001581 migrate_prep_local();
1582
Mel Gorman40cacbc2019-03-05 15:44:36 -08001583 while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
Minchan Kim9d502c12011-03-22 16:30:39 -07001584 int err;
Mel Gorman566e54e2019-03-05 15:44:32 -08001585 unsigned long start_pfn = cc->migrate_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -07001586
Mel Gorman40cacbc2019-03-05 15:44:36 -08001587 switch (isolate_migratepages(cc->zone, cc)) {
Mel Gormanf9e35b32011-06-15 15:08:52 -07001588 case ISOLATE_ABORT:
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08001589 ret = COMPACT_CONTENDED;
Rafael Aquini5733c7d2012-12-11 16:02:47 -08001590 putback_movable_pages(&cc->migratepages);
Shaohua Lie64c5232012-10-08 16:32:27 -07001591 cc->nr_migratepages = 0;
Mel Gorman566e54e2019-03-05 15:44:32 -08001592 last_migrated_pfn = 0;
Mel Gormanf9e35b32011-06-15 15:08:52 -07001593 goto out;
1594 case ISOLATE_NONE:
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001595 /*
1596 * We haven't isolated and migrated anything, but
1597 * there might still be unflushed migrations from
1598 * the previous cc->order aligned block.
1599 */
1600 goto check_drain;
Mel Gormanf9e35b32011-06-15 15:08:52 -07001601 case ISOLATE_SUCCESS:
Mel Gorman566e54e2019-03-05 15:44:32 -08001602 last_migrated_pfn = start_pfn;
Mel Gormanf9e35b32011-06-15 15:08:52 -07001603 ;
1604 }
Mel Gorman748446b2010-05-24 14:32:27 -07001605
David Rientjesd53aea32014-06-04 16:08:26 -07001606 err = migrate_pages(&cc->migratepages, compaction_alloc,
David Rientjese0b9dae2014-06-04 16:08:28 -07001607 compaction_free, (unsigned long)cc, cc->mode,
Mel Gorman7b2a2d42012-10-19 14:07:31 +01001608 MR_COMPACTION);
Mel Gorman748446b2010-05-24 14:32:27 -07001609
Vlastimil Babkaf8c93012014-06-04 16:08:32 -07001610 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1611 &cc->migratepages);
Mel Gorman748446b2010-05-24 14:32:27 -07001612
Vlastimil Babkaf8c93012014-06-04 16:08:32 -07001613 /* All pages were either migrated or will be released */
1614 cc->nr_migratepages = 0;
Minchan Kim9d502c12011-03-22 16:30:39 -07001615 if (err) {
Rafael Aquini5733c7d2012-12-11 16:02:47 -08001616 putback_movable_pages(&cc->migratepages);
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001617 /*
1618 * migrate_pages() may return -ENOMEM when scanners meet
1619 * and we want compact_finished() to detect it
1620 */
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001621 if (err == -ENOMEM && !compact_scanners_met(cc)) {
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08001622 ret = COMPACT_CONTENDED;
David Rientjes4bf2bba2012-07-11 14:02:13 -07001623 goto out;
1624 }
Vlastimil Babkafdd048e2016-05-19 17:11:55 -07001625 /*
1626 * We failed to migrate at least one page in the current
1627 * order-aligned block, so skip the rest of it.
1628 */
1629 if (cc->direct_compaction &&
1630 (cc->mode == MIGRATE_ASYNC)) {
1631 cc->migrate_pfn = block_end_pfn(
1632 cc->migrate_pfn - 1, cc->order);
1633 /* Draining pcplists is useless in this case */
Mel Gorman566e54e2019-03-05 15:44:32 -08001634 last_migrated_pfn = 0;
Vlastimil Babkafdd048e2016-05-19 17:11:55 -07001635 }
Mel Gorman748446b2010-05-24 14:32:27 -07001636 }
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001637
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001638check_drain:
1639 /*
1640 * Has the migration scanner moved away from the previous
1641 * cc->order aligned block where we migrated from? If yes,
1642 * flush the pages that were freed, so that they can merge and
1643 * compact_finished() can detect immediately if allocation
1644 * would succeed.
1645 */
Mel Gorman566e54e2019-03-05 15:44:32 -08001646 if (cc->order > 0 && last_migrated_pfn) {
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001647 int cpu;
1648 unsigned long current_block_start =
Vlastimil Babka06b66402016-05-19 17:11:48 -07001649 block_start_pfn(cc->migrate_pfn, cc->order);
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001650
Mel Gorman566e54e2019-03-05 15:44:32 -08001651 if (last_migrated_pfn < current_block_start) {
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001652 cpu = get_cpu();
1653 lru_add_drain_cpu(cpu);
Mel Gorman40cacbc2019-03-05 15:44:36 -08001654 drain_local_pages(cc->zone);
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001655 put_cpu();
1656 /* No more flushing until we migrate again */
Mel Gorman566e54e2019-03-05 15:44:32 -08001657 last_migrated_pfn = 0;
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08001658 }
1659 }
1660
Mel Gorman748446b2010-05-24 14:32:27 -07001661 }
1662
Mel Gormanf9e35b32011-06-15 15:08:52 -07001663out:
Vlastimil Babka6bace092014-12-10 15:43:31 -08001664 /*
1665 * Release free pages and update where the free scanner should restart,
1666 * so we don't leave any returned pages behind in the next attempt.
1667 */
1668 if (cc->nr_freepages > 0) {
1669 unsigned long free_pfn = release_freepages(&cc->freepages);
1670
1671 cc->nr_freepages = 0;
1672 VM_BUG_ON(free_pfn == 0);
1673 /* The cached pfn is always the first in a pageblock */
Vlastimil Babka06b66402016-05-19 17:11:48 -07001674 free_pfn = pageblock_start_pfn(free_pfn);
Vlastimil Babka6bace092014-12-10 15:43:31 -08001675 /*
1676 * Only go back, not forward. The cached pfn might have been
1677 * already reset to zone end in compact_finished()
1678 */
Mel Gorman40cacbc2019-03-05 15:44:36 -08001679 if (free_pfn > cc->zone->compact_cached_free_pfn)
1680 cc->zone->compact_cached_free_pfn = free_pfn;
Vlastimil Babka6bace092014-12-10 15:43:31 -08001681 }
Mel Gorman748446b2010-05-24 14:32:27 -07001682
David Rientjes7f354a52017-02-22 15:44:50 -08001683 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
1684 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
1685
Joonsoo Kim16c4a092015-02-11 15:27:01 -08001686 trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1687 cc->free_pfn, end_pfn, sync, ret);
Mel Gorman0eb927c2014-01-21 15:51:05 -08001688
Mel Gorman748446b2010-05-24 14:32:27 -07001689 return ret;
1690}
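/*
 * compact_zone() above is the core loop shared by all three entry points in
 * this file: direct compaction (compact_zone_order() below), the sysctl and
 * sysfs triggers (compact_node()), and the kcompactd thread. Each iteration
 * isolates a batch of source pages, migrates them onto the isolated free
 * pages, and occasionally drains the local pcplists so that freed pages can
 * merge and satisfy the watermark check in __compact_finished().
 */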
Mel Gorman76ab0f52010-05-24 14:32:28 -07001691
Michal Hockoea7ab982016-05-20 16:56:38 -07001692static enum compact_result compact_zone_order(struct zone *zone, int order,
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001693 gfp_t gfp_mask, enum compact_priority prio,
Mel Gormanc6038442016-05-19 17:13:38 -07001694 unsigned int alloc_flags, int classzone_idx)
Mel Gorman56de7262010-05-24 14:32:30 -07001695{
Michal Hockoea7ab982016-05-20 16:56:38 -07001696 enum compact_result ret;
Mel Gorman56de7262010-05-24 14:32:30 -07001697 struct compact_control cc = {
1698 .nr_freepages = 0,
1699 .nr_migratepages = 0,
David Rientjes7f354a52017-02-22 15:44:50 -08001700 .total_migrate_scanned = 0,
1701 .total_free_scanned = 0,
Mel Gorman56de7262010-05-24 14:32:30 -07001702 .order = order,
David Rientjes6d7ce552014-10-09 15:27:27 -07001703 .gfp_mask = gfp_mask,
Mel Gorman56de7262010-05-24 14:32:30 -07001704 .zone = zone,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07001705 .mode = (prio == COMPACT_PRIO_ASYNC) ?
1706 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
Vlastimil Babkaebff3982014-12-10 15:43:22 -08001707 .alloc_flags = alloc_flags,
1708 .classzone_idx = classzone_idx,
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07001709 .direct_compaction = true,
Vlastimil Babkaa8e025e2016-10-07 16:57:47 -07001710 .whole_zone = (prio == MIN_COMPACT_PRIORITY),
Vlastimil Babka9f7e3382016-10-07 17:00:37 -07001711 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
1712 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
Mel Gorman56de7262010-05-24 14:32:30 -07001713 };
1714 INIT_LIST_HEAD(&cc.freepages);
1715 INIT_LIST_HEAD(&cc.migratepages);
1716
Mel Gorman40cacbc2019-03-05 15:44:36 -08001717 ret = compact_zone(&cc);
Shaohua Lie64c5232012-10-08 16:32:27 -07001718
1719 VM_BUG_ON(!list_empty(&cc.freepages));
1720 VM_BUG_ON(!list_empty(&cc.migratepages));
1721
Shaohua Lie64c5232012-10-08 16:32:27 -07001722 return ret;
Mel Gorman56de7262010-05-24 14:32:30 -07001723}
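/*
 * For reference, how compact_priority maps onto the control fields above:
 * COMPACT_PRIO_ASYNC uses MIGRATE_ASYNC migration, every other priority
 * uses MIGRATE_SYNC_LIGHT, and only MIN_COMPACT_PRIORITY additionally
 * forces whole_zone scanning while ignoring both the pageblock skip hints
 * and the block suitability checks.
 */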
1724
Mel Gorman5e771902010-05-24 14:32:31 -07001725int sysctl_extfrag_threshold = 500;
1726
Mel Gorman56de7262010-05-24 14:32:30 -07001727/**
1728 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
Mel Gorman56de7262010-05-24 14:32:30 -07001729 * @gfp_mask: The GFP mask of the current allocation
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08001730 * @order: The order of the current allocation
1731 * @alloc_flags: The allocation flags of the current allocation
1732 * @ac: The context of current allocation
Yang Shi112d2d22018-01-31 16:20:23 -08001733 * @prio: Determines how hard direct compaction should try to succeed
Mel Gorman56de7262010-05-24 14:32:30 -07001734 *
1735 * This is the main entry point for direct page compaction.
1736 */
Michal Hockoea7ab982016-05-20 16:56:38 -07001737enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07001738 unsigned int alloc_flags, const struct alloc_context *ac,
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001739 enum compact_priority prio)
Mel Gorman56de7262010-05-24 14:32:30 -07001740{
Mel Gorman56de7262010-05-24 14:32:30 -07001741 int may_perform_io = gfp_mask & __GFP_IO;
Mel Gorman56de7262010-05-24 14:32:30 -07001742 struct zoneref *z;
1743 struct zone *zone;
Michal Hocko1d4746d2016-05-20 16:56:44 -07001744 enum compact_result rc = COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07001745
Michal Hocko73e64c52016-12-14 15:04:07 -08001746 /*
1747 * Check if the GFP flags allow compaction - GFP_NOIO is really
1748 * tricky context because the migration might require IO
1749 */
1750 if (!may_perform_io)
Vlastimil Babka53853e22014-10-09 15:27:02 -07001751 return COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07001752
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07001753 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
Joonsoo Kim837d0262015-02-11 15:27:06 -08001754
Mel Gorman56de7262010-05-24 14:32:30 -07001755 /* Compact each zone in the list */
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08001756 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1757 ac->nodemask) {
Michal Hockoea7ab982016-05-20 16:56:38 -07001758 enum compact_result status;
Mel Gorman56de7262010-05-24 14:32:30 -07001759
Vlastimil Babkaa8e025e2016-10-07 16:57:47 -07001760 if (prio > MIN_COMPACT_PRIORITY
1761 && compaction_deferred(zone, order)) {
Michal Hocko1d4746d2016-05-20 16:56:44 -07001762 rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
Vlastimil Babka53853e22014-10-09 15:27:02 -07001763 continue;
Michal Hocko1d4746d2016-05-20 16:56:44 -07001764 }
Vlastimil Babka53853e22014-10-09 15:27:02 -07001765
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07001766 status = compact_zone_order(zone, order, gfp_mask, prio,
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001767 alloc_flags, ac_classzone_idx(ac));
Mel Gorman56de7262010-05-24 14:32:30 -07001768 rc = max(status, rc);
1769
Vlastimil Babka7ceb0092016-10-07 16:57:44 -07001770 /* The allocation should succeed, stop compacting */
1771 if (status == COMPACT_SUCCESS) {
Vlastimil Babka53853e22014-10-09 15:27:02 -07001772 /*
1773 * We think the allocation will succeed in this zone,
1774 * but it is not certain, hence the false. The caller
1775 * will repeat this with true if allocation indeed
1776 * succeeds in this zone.
1777 */
1778 compaction_defer_reset(zone, order, false);
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001779
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001780 break;
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001781 }
1782
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07001783 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001784 status == COMPACT_PARTIAL_SKIPPED))
Vlastimil Babka53853e22014-10-09 15:27:02 -07001785 /*
1786 * We think that allocation won't succeed in this zone
1787 * so we defer compaction there. If it ends up
1788 * succeeding after all, it will be reset.
1789 */
1790 defer_compaction(zone, order);
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001791
1792 /*
1793 * We might have stopped compacting due to need_resched() in
1794 * async compaction, or due to a fatal signal detected. In that
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001795 * case do not try further zones
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07001796 */
Vlastimil Babkac3486f52016-07-28 15:49:30 -07001797 if ((prio == COMPACT_PRIO_ASYNC && need_resched())
1798 || fatal_signal_pending(current))
1799 break;
Mel Gorman56de7262010-05-24 14:32:30 -07001800 }
1801
1802 return rc;
1803}
1804
1805
Mel Gorman76ab0f52010-05-24 14:32:28 -07001806/* Compact all zones within a node */
Andrew Morton7103f162013-02-22 16:32:33 -08001807static void compact_node(int nid)
Rik van Riel7be62de2012-03-21 16:33:52 -07001808{
Vlastimil Babka791cae92016-10-07 16:57:38 -07001809 pg_data_t *pgdat = NODE_DATA(nid);
1810 int zoneid;
1811 struct zone *zone;
Rik van Riel7be62de2012-03-21 16:33:52 -07001812 struct compact_control cc = {
1813 .order = -1,
David Rientjes7f354a52017-02-22 15:44:50 -08001814 .total_migrate_scanned = 0,
1815 .total_free_scanned = 0,
David Rientjese0b9dae2014-06-04 16:08:28 -07001816 .mode = MIGRATE_SYNC,
David Rientjes91ca9182014-04-03 14:47:23 -07001817 .ignore_skip_hint = true,
Vlastimil Babka06ed2992016-10-07 16:57:35 -07001818 .whole_zone = true,
Michal Hocko73e64c52016-12-14 15:04:07 -08001819 .gfp_mask = GFP_KERNEL,
Rik van Riel7be62de2012-03-21 16:33:52 -07001820 };
1821
Vlastimil Babka791cae92016-10-07 16:57:38 -07001822
1823 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1824
1825 zone = &pgdat->node_zones[zoneid];
1826 if (!populated_zone(zone))
1827 continue;
1828
1829 cc.nr_freepages = 0;
1830 cc.nr_migratepages = 0;
1831 cc.zone = zone;
1832 INIT_LIST_HEAD(&cc.freepages);
1833 INIT_LIST_HEAD(&cc.migratepages);
1834
Mel Gorman40cacbc2019-03-05 15:44:36 -08001835 compact_zone(&cc);
Vlastimil Babka791cae92016-10-07 16:57:38 -07001836
1837 VM_BUG_ON(!list_empty(&cc.freepages));
1838 VM_BUG_ON(!list_empty(&cc.migratepages));
1839 }
Rik van Riel7be62de2012-03-21 16:33:52 -07001840}
1841
Mel Gorman76ab0f52010-05-24 14:32:28 -07001842/* Compact all nodes in the system */
Jason Liu7964c062013-01-11 14:31:47 -08001843static void compact_nodes(void)
Mel Gorman76ab0f52010-05-24 14:32:28 -07001844{
1845 int nid;
1846
Hugh Dickins8575ec22012-03-21 16:33:53 -07001847 /* Flush pending updates to the LRU lists */
1848 lru_add_drain_all();
1849
Mel Gorman76ab0f52010-05-24 14:32:28 -07001850 for_each_online_node(nid)
1851 compact_node(nid);
Mel Gorman76ab0f52010-05-24 14:32:28 -07001852}
1853
1854/* The written value is actually unused, all memory is compacted */
1855int sysctl_compact_memory;
1856
Yaowei Baifec4eb22016-01-14 15:20:09 -08001857/*
1858 * This is the entry point for compacting all nodes via
1859 * /proc/sys/vm/compact_memory
1860 */
Mel Gorman76ab0f52010-05-24 14:32:28 -07001861int sysctl_compaction_handler(struct ctl_table *table, int write,
1862 void __user *buffer, size_t *length, loff_t *ppos)
1863{
1864 if (write)
Jason Liu7964c062013-01-11 14:31:47 -08001865 compact_nodes();
Mel Gorman76ab0f52010-05-24 14:32:28 -07001866
1867 return 0;
1868}
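/*
 * Usage sketch: any write to the sysctl triggers a full, synchronous
 * compaction of every online node; the written value itself is ignored,
 * e.g.
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 */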
Mel Gormaned4a6d72010-05-24 14:32:29 -07001869
1870#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
Rashika Kheria74e77fb2014-04-03 14:48:01 -07001871static ssize_t sysfs_compact_node(struct device *dev,
Kay Sievers10fbcf42011-12-21 14:48:43 -08001872 struct device_attribute *attr,
Mel Gormaned4a6d72010-05-24 14:32:29 -07001873 const char *buf, size_t count)
1874{
Hugh Dickins8575ec22012-03-21 16:33:53 -07001875 int nid = dev->id;
1876
1877 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1878 /* Flush pending updates to the LRU lists */
1879 lru_add_drain_all();
1880
1881 compact_node(nid);
1882 }
Mel Gormaned4a6d72010-05-24 14:32:29 -07001883
1884 return count;
1885}
Joe Perches0825a6f2018-06-14 15:27:58 -07001886static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node);
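/*
 * The attribute registered below is the per-node analogue of the sysctl
 * above; it normally shows up as /sys/devices/system/node/nodeN/compact,
 * and any write to it compacts just that node, e.g.
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */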
Mel Gormaned4a6d72010-05-24 14:32:29 -07001887
1888int compaction_register_node(struct node *node)
1889{
Kay Sievers10fbcf42011-12-21 14:48:43 -08001890 return device_create_file(&node->dev, &dev_attr_compact);
Mel Gormaned4a6d72010-05-24 14:32:29 -07001891}
1892
1893void compaction_unregister_node(struct node *node)
1894{
Kay Sievers10fbcf42011-12-21 14:48:43 -08001895 return device_remove_file(&node->dev, &dev_attr_compact);
Mel Gormaned4a6d72010-05-24 14:32:29 -07001896}
1897#endif /* CONFIG_SYSFS && CONFIG_NUMA */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001898
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001899static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1900{
Vlastimil Babka172400c2016-05-05 16:22:32 -07001901 return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001902}
1903
1904static bool kcompactd_node_suitable(pg_data_t *pgdat)
1905{
1906 int zoneid;
1907 struct zone *zone;
1908 enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1909
Chen Feng6cd9dc32016-05-20 16:59:02 -07001910 for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001911 zone = &pgdat->node_zones[zoneid];
1912
1913 if (!populated_zone(zone))
1914 continue;
1915
1916 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1917 classzone_idx) == COMPACT_CONTINUE)
1918 return true;
1919 }
1920
1921 return false;
1922}
1923
1924static void kcompactd_do_work(pg_data_t *pgdat)
1925{
1926 /*
1927 * Nothing special here: compact all of the node's zones so that a page
1928 * of the requested order becomes allocatable.
1929 */
1930 int zoneid;
1931 struct zone *zone;
1932 struct compact_control cc = {
1933 .order = pgdat->kcompactd_max_order,
David Rientjes7f354a52017-02-22 15:44:50 -08001934 .total_migrate_scanned = 0,
1935 .total_free_scanned = 0,
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001936 .classzone_idx = pgdat->kcompactd_classzone_idx,
1937 .mode = MIGRATE_SYNC_LIGHT,
David Rientjesa0647dc2017-11-17 15:26:27 -08001938 .ignore_skip_hint = false,
Michal Hocko73e64c52016-12-14 15:04:07 -08001939 .gfp_mask = GFP_KERNEL,
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001940 };
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001941 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1942 cc.classzone_idx);
David Rientjes7f354a52017-02-22 15:44:50 -08001943 count_compact_event(KCOMPACTD_WAKE);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001944
Chen Feng6cd9dc32016-05-20 16:59:02 -07001945 for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001946 int status;
1947
1948 zone = &pgdat->node_zones[zoneid];
1949 if (!populated_zone(zone))
1950 continue;
1951
1952 if (compaction_deferred(zone, cc.order))
1953 continue;
1954
1955 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1956 COMPACT_CONTINUE)
1957 continue;
1958
1959 cc.nr_freepages = 0;
1960 cc.nr_migratepages = 0;
David Rientjes7f354a52017-02-22 15:44:50 -08001961 cc.total_migrate_scanned = 0;
1962 cc.total_free_scanned = 0;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001963 cc.zone = zone;
1964 INIT_LIST_HEAD(&cc.freepages);
1965 INIT_LIST_HEAD(&cc.migratepages);
1966
Vlastimil Babka172400c2016-05-05 16:22:32 -07001967 if (kthread_should_stop())
1968 return;
Mel Gorman40cacbc2019-03-05 15:44:36 -08001969 status = compact_zone(&cc);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001970
Vlastimil Babka7ceb0092016-10-07 16:57:44 -07001971 if (status == COMPACT_SUCCESS) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001972 compaction_defer_reset(zone, cc.order, false);
Michal Hockoc8f7de02016-05-20 16:56:47 -07001973 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001974 /*
David Rientjesbc3106b2018-04-05 16:24:02 -07001975 * Buddy pages may become stranded on pcps that could
1976 * otherwise coalesce on the zone's free area for
1977 * order >= cc.order. This is ratelimited by the
1978 * upcoming deferral.
1979 */
1980 drain_all_pages(zone);
1981
1982 /*
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001983 * We use sync migration mode here, so we defer like
1984 * sync direct compaction does.
1985 */
1986 defer_compaction(zone, cc.order);
1987 }
1988
David Rientjes7f354a52017-02-22 15:44:50 -08001989 count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
1990 cc.total_migrate_scanned);
1991 count_compact_events(KCOMPACTD_FREE_SCANNED,
1992 cc.total_free_scanned);
1993
Vlastimil Babka698b1b32016-03-17 14:18:08 -07001994 VM_BUG_ON(!list_empty(&cc.freepages));
1995 VM_BUG_ON(!list_empty(&cc.migratepages));
1996 }
1997
1998 /*
1999 * Regardless of success, we are done until woken up next. But remember
2000 * the requested order/classzone_idx in case it was higher/tighter than
2001 * our current ones
2002 */
2003 if (pgdat->kcompactd_max_order <= cc.order)
2004 pgdat->kcompactd_max_order = 0;
2005 if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
2006 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2007}
2008
2009void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
2010{
2011 if (!order)
2012 return;
2013
2014 if (pgdat->kcompactd_max_order < order)
2015 pgdat->kcompactd_max_order = order;
2016
2017 if (pgdat->kcompactd_classzone_idx > classzone_idx)
2018 pgdat->kcompactd_classzone_idx = classzone_idx;
2019
Davidlohr Bueso68186002017-10-03 16:15:03 -07002020 /*
2021 * Pairs with implicit barrier in wait_event_freezable()
2022 * such that wakeups are not missed.
2023 */
2024 if (!wq_has_sleeper(&pgdat->kcompactd_wait))
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002025 return;
2026
2027 if (!kcompactd_node_suitable(pgdat))
2028 return;
2029
2030 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
2031 classzone_idx);
2032 wake_up_interruptible(&pgdat->kcompactd_wait);
2033}
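/*
 * Outside of this file, wakeup_kcompactd() is expected to be driven from
 * the kswapd code in mm/vmscan.c (at the time of writing), typically when
 * kswapd is about to sleep, so background compaction follows background
 * reclaim instead of competing with it for the same node.
 */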
2034
2035/*
2036 * The background compaction daemon, started as a kernel thread
2037 * from the init process.
2038 */
2039static int kcompactd(void *p)
2040{
2041 pg_data_t *pgdat = (pg_data_t*)p;
2042 struct task_struct *tsk = current;
2043
2044 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2045
2046 if (!cpumask_empty(cpumask))
2047 set_cpus_allowed_ptr(tsk, cpumask);
2048
2049 set_freezable();
2050
2051 pgdat->kcompactd_max_order = 0;
2052 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2053
2054 while (!kthread_should_stop()) {
Johannes Weinereb414682018-10-26 15:06:27 -07002055 unsigned long pflags;
2056
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002057 trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2058 wait_event_freezable(pgdat->kcompactd_wait,
2059 kcompactd_work_requested(pgdat));
2060
Johannes Weinereb414682018-10-26 15:06:27 -07002061 psi_memstall_enter(&pflags);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002062 kcompactd_do_work(pgdat);
Johannes Weinereb414682018-10-26 15:06:27 -07002063 psi_memstall_leave(&pflags);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002064 }
2065
2066 return 0;
2067}
2068
2069/*
2070 * This kcompactd start function will be called by init and node-hot-add.
2071 * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
2072 */
2073int kcompactd_run(int nid)
2074{
2075 pg_data_t *pgdat = NODE_DATA(nid);
2076 int ret = 0;
2077
2078 if (pgdat->kcompactd)
2079 return 0;
2080
2081 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2082 if (IS_ERR(pgdat->kcompactd)) {
2083 pr_err("Failed to start kcompactd on node %d\n", nid);
2084 ret = PTR_ERR(pgdat->kcompactd);
2085 pgdat->kcompactd = NULL;
2086 }
2087 return ret;
2088}
2089
2090/*
2091 * Called by memory hotplug when all memory in a node is offlined. Caller must
2092 * hold mem_hotplug_begin/end().
2093 */
2094void kcompactd_stop(int nid)
2095{
2096 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2097
2098 if (kcompactd) {
2099 kthread_stop(kcompactd);
2100 NODE_DATA(nid)->kcompactd = NULL;
2101 }
2102}
2103
2104/*
2105 * It's optimal to keep kcompactd on the same CPUs as their memory, but
2106 * not required for correctness. So if the last cpu in a node goes
2107 * away, we get changed to run anywhere: as the first one comes back,
2108 * restore their cpu bindings.
2109 */
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002110static int kcompactd_cpu_online(unsigned int cpu)
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002111{
2112 int nid;
2113
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002114 for_each_node_state(nid, N_MEMORY) {
2115 pg_data_t *pgdat = NODE_DATA(nid);
2116 const struct cpumask *mask;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002117
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002118 mask = cpumask_of_node(pgdat->node_id);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002119
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002120 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2121 /* One of our CPUs online: restore mask */
2122 set_cpus_allowed_ptr(pgdat->kcompactd, mask);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002123 }
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002124 return 0;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002125}
2126
2127static int __init kcompactd_init(void)
2128{
2129 int nid;
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002130 int ret;
2131
2132 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2133 "mm/compaction:online",
2134 kcompactd_cpu_online, NULL);
2135 if (ret < 0) {
2136 pr_err("kcompactd: failed to register hotplug callbacks.\n");
2137 return ret;
2138 }
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002139
2140 for_each_node_state(nid, N_MEMORY)
2141 kcompactd_run(nid);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002142 return 0;
2143}
2144subsys_initcall(kcompactd_init)
2145
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01002146#endif /* CONFIG_COMPACTION */