// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)

/*
 * Fragmentation score check interval for proactive compaction purposes.
 */
static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;

/*
 * Page order with respect to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
 */
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#endif

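/*
 * Free all pages on @freelist and return the highest PFN that was released,
 * so callers can use it as a restart hint for the free scanner.
 */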
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

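/*
 * Split the isolated high-order free pages on @list into order-0 pages,
 * running the post-allocation hook on each, and return them all on the
 * same list.
 */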
static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION

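/*
 * A page is "movable" beyond the LRU if its address_space provides an
 * isolate_page() callback. The page must be locked by the caller.
 */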
int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear the registered address_space value while keeping the
	 * PAGE_MAPPING_MOVABLE flag, so that the VM can notice when the
	 * driver has released the page after isolation. With the flag
	 * kept, migration won't try to put the page back.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. The number of compactions skipped doubles with each
 * failure (1 << compact_defer_shift), up to a limit of
 * 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;
		return false;
	}

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
static bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

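/* Reset the cached scanner positions to the opposite ends of the zone. */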
static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
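	/* Sample one page per 1 << PAGE_ALLOC_COSTLY_ORDER pages to keep the walk cheap. */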
	do {
		if (pfn_valid_within(pfn)) {
			if (check_source && PageLRU(page)) {
				clear_pageblock_skip(page);
				return true;
			}

			if (check_target && PageBuddy(page)) {
				clear_pageblock_skip(page);
				return true;
			}
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. The source
	 * scanner looks for PageLRU while the target scanner looks for
	 * PageBuddy. Once a restart point has been found for a scanner,
	 * both PageBuddy and PageLRU are checked as the pageblock is
	 * suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for readers/writers. Returns true if it was already
 * set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	bool skip;

	/* Do not update if the skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

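/*
 * Advance the cached migrate scanner restart PFNs to the end of this
 * pageblock: the async position always, the sync position only for
 * non-async compaction.
 */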
static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	pfn = pageblock_end_pfn(pfn);

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	set_pageblock_skip(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 * async compaction due to need_resched().
 * Returns false when compaction can continue (sync compaction might have
 * scheduled).
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort if a fatal
		 * signal is pending or async compaction detects
		 * need_resched().
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * page (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass block_end_pfn if an isolated free page
		 * is of higher order than a pageblock. In this case,
		 * adjust the scanning range to the block pfn now falls in.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(pg_data_t *pgdat)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be less than, equal to, or
 * greater than end_pfn).
 *
 * The pages are isolated on the cc->migratepages list (not required to be
 * empty), and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn
 * field is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	struct lruvec *locked = NULL;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated.
	 */
	while (unlikely(too_many_isolated(pgdat))) {
		/* stop isolation if there are still pages not migrated */
		if (cc->nr_migratepages)
			return 0;

		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}

			if (fatal_signal_pending(current)) {
				cc->contended = true;

				low_pfn = 0;
				goto fatal_pending;
			}

			cond_resched();
		}

		if (!pfn_valid_within(low_pfn))
			goto isolate_fail;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the aligned PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
				low_pfn = end_pfn;
				page = NULL;
				goto isolate_abort;
			}
			valid_page = page;
		}

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted unless we are attempting
		 * an allocation much larger than the huge page size (eg CMA).
		 * We can potentially save a lot of iterations if we skip them
		 * at once. The check is racy, but we can consider only valid
		 * values and the only danger is skipping too much.
		 */
		if (PageCompound(page) && !cc->alloc_contig) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-LRU movable pages.
		 * Skip any other type of page.
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					unlock_page_lruvec_irqrestore(locked, flags);
					locked = NULL;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
				page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow migration of anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		if (unlikely(!get_page_unless_zero(page)))
			goto isolate_fail;

		if (!__isolate_lru_page_prepare(page, isolate_mode))
			goto isolate_fail_put;

		/* Try to isolate the page */
		if (!TestClearPageLRU(page))
			goto isolate_fail_put;

		lruvec = mem_cgroup_page_lruvec(page, pgdat);

		/* If we already hold the lock, we can skip some rechecking */
		if (lruvec != locked) {
			if (locked)
				unlock_page_lruvec_irqrestore(locked, flags);

			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
			locked = lruvec;

			lruvec_memcg_debug(lruvec, page);

			/* Try to get exclusive access under lock */
			if (!skip_updated) {
				skip_updated = true;
				if (test_and_set_skip(cc, page, low_pfn))
					goto isolate_abort;
			}

			/*
			 * Page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
				low_pfn += compound_nr(page) - 1;
				SetPageLRU(page);
				goto isolate_fail_put;
			}
		}

		/* The whole page is taken off the LRU; skip the tail pages. */
		if (PageCompound(page))
			low_pfn += compound_nr(page) - 1;

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec);
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				thp_nr_pages(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
		cc->nr_migratepages += compound_nr(page);
		nr_isolated += compound_nr(page);

		/*
		 * Avoid isolating too much unless this block is being
		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
		 * or a lock is contended. For contention, isolate quickly to
		 * potentially remove one source of contention.
		 */
		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
		    !cc->rescan && !cc->contended) {
			++low_pfn;
			break;
		}

		continue;

isolate_fail_put:
		/* Avoid potential deadlock in freeing page under lru_lock */
		if (locked) {
			unlock_page_lruvec_irqrestore(locked, flags);
			locked = NULL;
		}
		put_page(page);

isolate_fail:
		if (!skip_on_failure)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	page = NULL;

isolate_abort:
	if (locked)
		unlock_page_lruvec_irqrestore(locked, flags);
	if (page) {
		SetPageLRU(page);
		put_page(page);
	}

	/*
	 * Update the cached scanner pfn once the pageblock has been scanned.
	 * Pages will either be migrated in which case there is no point
	 * scanning in the near future, or migration failed in which case the
	 * failure reason may persist. The block is marked for skipping if
	 * there were no pages isolated in the block or if the block is
	 * rescanned twice in a row.
	 */
	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
		if (valid_page && !skip_updated)
			set_pageblock_skip(valid_page);
		update_cached_migrate(cc, low_pfn);
	}

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

fatal_pending:
	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * page (which may be greater than end_pfn if the end fell in the middle of
 * a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		if (!pfn)
			break;

		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
			break;
	}

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

static bool suitable_migration_source(struct compact_control *cc,
							struct page *page)
{
	int block_mt;

	if (pageblock_skip_persistent(page))
		return false;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth checking that the order is
		 * within the valid range.
		 */
		if (buddy_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

Mel Gorman70b44592019-03-05 15:44:54 -08001231static inline unsigned int
1232freelist_scan_limit(struct compact_control *cc)
1233{
Qian Caidd7ef7b2019-05-13 17:17:38 -07001234 unsigned short shift = BITS_PER_LONG - 1;
1235
1236 return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
Mel Gorman70b44592019-03-05 15:44:54 -08001237}
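/*
 * For example, assuming COMPACT_CLUSTER_MAX == SWAP_CLUSTER_MAX == 32:
 * a fast_search_fail of 0 yields 33, 1 yields 17, 3 yields 5, 5 yields 2,
 * and any count of 6 or more yields 1, so repeated failures rapidly
 * shrink how many free-list entries each search is willing to touch.
 */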
1238
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001239/*
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001240 * Test whether the free scanner has reached the same or lower pageblock than
1241 * the migration scanner, and compaction should thus terminate.
1242 */
1243static inline bool compact_scanners_met(struct compact_control *cc)
1244{
1245 return (cc->free_pfn >> pageblock_order)
1246 <= (cc->migrate_pfn >> pageblock_order);
1247}
1248
Mel Gorman5a811882019-03-05 15:45:01 -08001249/*
1250 * Used when scanning for a suitable migration target which scans freelists
1251	 * in reverse. Reorders the list so that the unscanned pages are scanned
1252 * first on the next iteration of the free scanner
1253 */
1254static void
1255move_freelist_head(struct list_head *freelist, struct page *freepage)
1256{
1257 LIST_HEAD(sublist);
1258
1259 if (!list_is_last(freelist, &freepage->lru)) {
1260 list_cut_before(&sublist, freelist, &freepage->lru);
1261 if (!list_empty(&sublist))
1262 list_splice_tail(&sublist, freelist);
1263 }
1264}
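/*
 * For example, given head->A->B->C->D with freepage == C, the entries
 * before C are cut out and spliced to the tail, leaving head->C->D->A->B;
 * the next reverse (tail-first) scan then visits the unscanned B and A
 * before revisiting D and C.
 */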
1265
1266/*
1267 * Similar to move_freelist_head except used by the migration scanner
1268 * when scanning forward. It's possible for these list operations to
1269 * move against each other if they search the free list exactly in
1270 * lockstep.
1271 */
Mel Gorman70b44592019-03-05 15:44:54 -08001272static void
1273move_freelist_tail(struct list_head *freelist, struct page *freepage)
1274{
1275 LIST_HEAD(sublist);
1276
1277 if (!list_is_first(freelist, &freepage->lru)) {
1278 list_cut_position(&sublist, freelist, &freepage->lru);
1279 if (!list_empty(&sublist))
1280 list_splice_tail(&sublist, freelist);
1281 }
1282}
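/*
 * For example, given head->A->B->C->D with freepage == B, the entries
 * from the start through B are cut out and spliced to the tail, leaving
 * head->C->D->A->B; the next forward scan then begins with the
 * unscanned C.
 */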
1283
Mel Gorman5a811882019-03-05 15:45:01 -08001284static void
1285fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
1286{
1287 unsigned long start_pfn, end_pfn;
1288 struct page *page = pfn_to_page(pfn);
1289
1290 /* Do not search around if there are enough pages already */
1291 if (cc->nr_freepages >= cc->nr_migratepages)
1292 return;
1293
1294 /* Minimise scanning during async compaction */
1295 if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
1296 return;
1297
1298 /* Pageblock boundaries */
1299 start_pfn = pageblock_start_pfn(pfn);
Mel Gorman60fce362019-05-17 14:31:41 -07001300 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
Mel Gorman5a811882019-03-05 15:45:01 -08001301
1302 /* Scan before */
1303 if (start_pfn != pfn) {
Mel Gorman4fca9732019-03-05 15:45:34 -08001304 isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
Mel Gorman5a811882019-03-05 15:45:01 -08001305 if (cc->nr_freepages >= cc->nr_migratepages)
1306 return;
1307 }
1308
1309 /* Scan after */
1310 start_pfn = pfn + nr_isolated;
Mel Gorman60fce362019-05-17 14:31:41 -07001311 if (start_pfn < end_pfn)
Mel Gorman4fca9732019-03-05 15:45:34 -08001312 isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
Mel Gorman5a811882019-03-05 15:45:01 -08001313
1314 /* Skip this pageblock in the future as it's full or nearly full */
1315 if (cc->nr_freepages < cc->nr_migratepages)
1316 set_pageblock_skip(page);
1317}
1318
Mel Gormandbe2d4e2019-03-05 15:45:31 -08001319/* Search orders in round-robin fashion */
1320static int next_search_order(struct compact_control *cc, int order)
1321{
1322 order--;
1323 if (order < 0)
1324 order = cc->order - 1;
1325
1326 /* Search wrapped around? */
1327 if (order == cc->search_order) {
1328 cc->search_order--;
1329 if (cc->search_order < 0)
1330 cc->search_order = cc->order - 1;
1331 return -1;
1332 }
1333
1334 return order;
1335}
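/*
 * For example, with cc->order == 4 and cc->search_order == 2, the
 * caller's loop visits orders 2, 1, 0 and then wraps around to 3; the
 * following call detects the wrap back to search_order, decrements
 * search_order for the next attempt and returns -1 to end the scan.
 */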
1336
Mel Gorman5a811882019-03-05 15:45:01 -08001337static unsigned long
1338fast_isolate_freepages(struct compact_control *cc)
1339{
1340 unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
1341 unsigned int nr_scanned = 0;
Rokudo Yan74e21482021-02-04 18:32:20 -08001342 unsigned long low_pfn, min_pfn, highest = 0;
Mel Gorman5a811882019-03-05 15:45:01 -08001343 unsigned long nr_isolated = 0;
1344 unsigned long distance;
1345 struct page *page = NULL;
1346 bool scan_start = false;
1347 int order;
1348
1349 /* Full compaction passes in a negative order */
1350 if (cc->order <= 0)
1351 return cc->free_pfn;
1352
1353 /*
1354 * If starting the scan, use a deeper search and use the highest
1355 * PFN found if a suitable one is not found.
1356 */
Mel Gormane332f742019-03-05 15:45:38 -08001357 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
Mel Gorman5a811882019-03-05 15:45:01 -08001358 limit = pageblock_nr_pages >> 1;
1359 scan_start = true;
1360 }
1361
1362 /*
1363 * Preferred point is in the top quarter of the scan space but take
1364 * a pfn from the top half if the search is problematic.
1365 */
1366 distance = (cc->free_pfn - cc->migrate_pfn);
1367 low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
1368 min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));
1369
1370 if (WARN_ON_ONCE(min_pfn > low_pfn))
1371 low_pfn = min_pfn;
1372
Mel Gormandbe2d4e2019-03-05 15:45:31 -08001373 /*
1374 * Search starts from the last successful isolation order or the next
1375 * order to search after a previous failure
1376 */
1377 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
1378
1379 for (order = cc->search_order;
1380 !page && order >= 0;
1381 order = next_search_order(cc, order)) {
Mel Gorman5a811882019-03-05 15:45:01 -08001382 struct free_area *area = &cc->zone->free_area[order];
1383 struct list_head *freelist;
1384 struct page *freepage;
1385 unsigned long flags;
1386 unsigned int order_scanned = 0;
Rokudo Yan74e21482021-02-04 18:32:20 -08001387 unsigned long high_pfn = 0;
Mel Gorman5a811882019-03-05 15:45:01 -08001388
1389 if (!area->nr_free)
1390 continue;
1391
1392 spin_lock_irqsave(&cc->zone->lock, flags);
1393 freelist = &area->free_list[MIGRATE_MOVABLE];
1394 list_for_each_entry_reverse(freepage, freelist, lru) {
1395 unsigned long pfn;
1396
1397 order_scanned++;
1398 nr_scanned++;
1399 pfn = page_to_pfn(freepage);
1400
1401 if (pfn >= highest)
1402 highest = pageblock_start_pfn(pfn);
1403
1404 if (pfn >= low_pfn) {
1405 cc->fast_search_fail = 0;
Mel Gormandbe2d4e2019-03-05 15:45:31 -08001406 cc->search_order = order;
Mel Gorman5a811882019-03-05 15:45:01 -08001407 page = freepage;
1408 break;
1409 }
1410
1411 if (pfn >= min_pfn && pfn > high_pfn) {
1412 high_pfn = pfn;
1413
1414 /* Shorten the scan if a candidate is found */
1415 limit >>= 1;
1416 }
1417
1418 if (order_scanned >= limit)
1419 break;
1420 }
1421
1422 /* Use a minimum pfn if a preferred one was not found */
1423 if (!page && high_pfn) {
1424 page = pfn_to_page(high_pfn);
1425
1426 /* Update freepage for the list reorder below */
1427 freepage = page;
1428 }
1429
1430		/* Reorder so that a future search skips recent pages */
1431 move_freelist_head(freelist, freepage);
1432
1433 /* Isolate the page if available */
1434 if (page) {
1435 if (__isolate_free_page(page, order)) {
1436 set_page_private(page, order);
1437 nr_isolated = 1 << order;
1438 cc->nr_freepages += nr_isolated;
1439 list_add_tail(&page->lru, &cc->freepages);
1440 count_compact_events(COMPACTISOLATED, nr_isolated);
1441 } else {
1442 /* If isolation fails, abort the search */
Qian Cai5b56d992019-04-04 11:54:41 +01001443 order = cc->search_order + 1;
Mel Gorman5a811882019-03-05 15:45:01 -08001444 page = NULL;
1445 }
1446 }
1447
1448 spin_unlock_irqrestore(&cc->zone->lock, flags);
1449
1450 /*
1451		 * Smaller scan on next order so the total scan is related
1452 * to freelist_scan_limit.
1453 */
1454 if (order_scanned >= limit)
1455			limit = max(1U, limit >> 1);
1456 }
1457
1458 if (!page) {
1459 cc->fast_search_fail++;
1460 if (scan_start) {
1461 /*
1462 * Use the highest PFN found above min. If one was
Ethon Paulf3867752020-06-04 16:49:13 -07001463 * not found, be pessimistic for direct compaction
Mel Gorman5a811882019-03-05 15:45:01 -08001464 * and use the min mark.
1465 */
1466 if (highest) {
1467 page = pfn_to_page(highest);
1468 cc->free_pfn = highest;
1469 } else {
Suzuki K Poulosee577c8b2019-05-31 22:30:59 -07001470 if (cc->direct_compaction && pfn_valid(min_pfn)) {
Baoquan He73a6e472020-06-03 15:57:55 -07001471 page = pageblock_pfn_to_page(min_pfn,
1472 pageblock_end_pfn(min_pfn),
1473 cc->zone);
Mel Gorman5a811882019-03-05 15:45:01 -08001474 cc->free_pfn = min_pfn;
1475 }
1476 }
1477 }
1478 }
1479
Mel Gormand097a6f2019-03-05 15:45:28 -08001480 if (highest && highest >= cc->zone->compact_cached_free_pfn) {
1481 highest -= pageblock_nr_pages;
Mel Gorman5a811882019-03-05 15:45:01 -08001482 cc->zone->compact_cached_free_pfn = highest;
Mel Gormand097a6f2019-03-05 15:45:28 -08001483 }
Mel Gorman5a811882019-03-05 15:45:01 -08001484
1485 cc->total_free_scanned += nr_scanned;
1486 if (!page)
1487 return cc->free_pfn;
1488
1489 low_pfn = page_to_pfn(page);
1490 fast_isolate_around(cc, low_pfn, nr_isolated);
1491 return low_pfn;
1492}
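/*
 * The fallback ladder above: prefer a free page at or above low_pfn (the
 * top quarter of the scan space), then the best candidate at or above
 * min_pfn (the top half); if nothing was isolated on the initial scan,
 * fall back to the highest pageblock seen or, for direct compaction
 * only, to the min_pfn pageblock.
 */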
1493
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001494/*
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001495 * Based on information in the current compact_control, find blocks
1496 * suitable for isolating free pages from and then isolate them.
1497 */
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001498static void isolate_freepages(struct compact_control *cc)
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001499{
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001500 struct zone *zone = cc->zone;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001501 struct page *page;
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001502 unsigned long block_start_pfn; /* start of current pageblock */
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001503 unsigned long isolate_start_pfn; /* exact pfn we start at */
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001504 unsigned long block_end_pfn; /* end of current pageblock */
1505 unsigned long low_pfn; /* lowest pfn scanner is able to scan */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001506 struct list_head *freelist = &cc->freepages;
Mel Gorman4fca9732019-03-05 15:45:34 -08001507 unsigned int stride;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001508
Mel Gorman5a811882019-03-05 15:45:01 -08001509 /* Try a small search of the free lists for a candidate */
1510 isolate_start_pfn = fast_isolate_freepages(cc);
1511 if (cc->nr_freepages)
1512 goto splitmap;
1513
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001514 /*
1515 * Initialise the free scanner. The starting point is where we last
Vlastimil Babka49e068f2014-05-06 12:50:03 -07001516 * successfully isolated from, zone-cached value, or the end of the
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001517 * zone when isolating for the first time. For looping we also need
1518 * this pfn aligned down to the pageblock boundary, because we do
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001519 * block_start_pfn -= pageblock_nr_pages in the for loop.
1520 * For ending point, take care when isolating in last pageblock of a
Randy Dunlapa1c1dbe2020-08-11 18:32:49 -07001521 * zone which ends in the middle of a pageblock.
Vlastimil Babka49e068f2014-05-06 12:50:03 -07001522 * The low boundary is the end of the pageblock the migration scanner
1523 * is using.
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001524 */
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001525 isolate_start_pfn = cc->free_pfn;
Mel Gorman5a811882019-03-05 15:45:01 -08001526 block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001527 block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1528 zone_end_pfn(zone));
Vlastimil Babka06b66402016-05-19 17:11:48 -07001529 low_pfn = pageblock_end_pfn(cc->migrate_pfn);
Mel Gorman4fca9732019-03-05 15:45:34 -08001530 stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001531
1532 /*
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001533 * Isolate free pages until enough are available to migrate the
1534 * pages on cc->migratepages. We stop searching if the migrate
1535 * and free page scanners meet or enough free pages are isolated.
1536 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001537 for (; block_start_pfn >= low_pfn;
Vlastimil Babkac96b9e52014-06-04 16:07:26 -07001538 block_end_pfn = block_start_pfn,
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001539 block_start_pfn -= pageblock_nr_pages,
1540 isolate_start_pfn = block_start_pfn) {
Mel Gorman4fca9732019-03-05 15:45:34 -08001541 unsigned long nr_isolated;
1542
David Rientjesf6ea3ad2013-09-30 13:45:03 -07001543 /*
1544 * This can iterate a massively long zone without finding any
Mel Gormancb810ad2019-03-05 15:45:21 -08001545 * suitable migration targets, so periodically check resched.
David Rientjesf6ea3ad2013-09-30 13:45:03 -07001546 */
Mel Gormancb810ad2019-03-05 15:45:21 -08001547 if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
Mel Gormancf66f072019-03-05 15:45:24 -08001548 cond_resched();
David Rientjesf6ea3ad2013-09-30 13:45:03 -07001549
Vlastimil Babka7d49d882014-10-09 15:27:11 -07001550 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1551 zone);
1552 if (!page)
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001553 continue;
1554
1555 /* Check the block is suitable for migration */
Vlastimil Babka9f7e3382016-10-07 17:00:37 -07001556 if (!suitable_migration_target(cc, page))
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001557 continue;
Linus Torvalds68e3e922012-06-03 20:05:57 -07001558
Mel Gormanbb13ffe2012-10-08 16:32:41 -07001559 /* If isolation recently failed, do not retry */
1560 if (!isolation_suitable(cc, page))
1561 continue;
1562
Vlastimil Babkae14c7202014-10-09 15:27:20 -07001563 /* Found a block suitable for isolating free pages from. */
Mel Gorman4fca9732019-03-05 15:45:34 -08001564 nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
1565 block_end_pfn, freelist, stride, false);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001566
Mel Gormand097a6f2019-03-05 15:45:28 -08001567 /* Update the skip hint if the full pageblock was scanned */
1568 if (isolate_start_pfn == block_end_pfn)
1569 update_pageblock_skip(cc, page, block_start_pfn);
1570
Mel Gormancb2dcaf2019-03-05 15:45:11 -08001571 /* Are enough freepages isolated? */
1572 if (cc->nr_freepages >= cc->nr_migratepages) {
David Rientjesa46cbf32016-07-14 12:06:50 -07001573 if (isolate_start_pfn >= block_end_pfn) {
1574 /*
1575 * Restart at previous pageblock if more
1576 * freepages can be isolated next time.
1577 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001578 isolate_start_pfn =
1579 block_start_pfn - pageblock_nr_pages;
David Rientjesa46cbf32016-07-14 12:06:50 -07001580 }
Vlastimil Babkabe976572014-06-04 16:10:41 -07001581 break;
David Rientjesa46cbf32016-07-14 12:06:50 -07001582 } else if (isolate_start_pfn < block_end_pfn) {
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001583 /*
David Rientjesa46cbf32016-07-14 12:06:50 -07001584 * If isolation failed early, do not continue
1585 * needlessly.
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001586 */
David Rientjesa46cbf32016-07-14 12:06:50 -07001587 break;
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001588 }
Mel Gorman4fca9732019-03-05 15:45:34 -08001589
1590 /* Adjust stride depending on isolation */
1591 if (nr_isolated) {
1592 stride = 1;
1593 continue;
1594 }
1595 stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
Michal Nazarewicz2fe86e02012-01-30 13:16:26 +01001596 }
1597
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001598 /*
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001599 * Record where the free scanner will restart next time. Either we
1600 * broke from the loop and set isolate_start_pfn based on the last
1601 * call to isolate_freepages_block(), or we met the migration scanner
1602 * and the loop terminated due to isolate_start_pfn < low_pfn
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08001603 */
Vlastimil Babkaf5f61a32015-09-08 15:02:39 -07001604 cc->free_pfn = isolate_start_pfn;
Mel Gorman5a811882019-03-05 15:45:01 -08001605
1606splitmap:
1607 /* __isolate_free_page() does not map the pages */
1608 split_map_pages(freelist);
Mel Gorman748446b2010-05-24 14:32:27 -07001609}
1610
1611/*
1612 * This is a migrate-callback that "allocates" freepages by taking pages
1613 * from the isolated freelists in the block we are migrating to.
1614 */
1615static struct page *compaction_alloc(struct page *migratepage,
Michal Hocko666feb22018-04-10 16:30:03 -07001616 unsigned long data)
Mel Gorman748446b2010-05-24 14:32:27 -07001617{
1618 struct compact_control *cc = (struct compact_control *)data;
1619 struct page *freepage;
1620
Mel Gorman748446b2010-05-24 14:32:27 -07001621 if (list_empty(&cc->freepages)) {
Mel Gormancb2dcaf2019-03-05 15:45:11 -08001622 isolate_freepages(cc);
Mel Gorman748446b2010-05-24 14:32:27 -07001623
1624 if (list_empty(&cc->freepages))
1625 return NULL;
1626 }
1627
1628 freepage = list_entry(cc->freepages.next, struct page, lru);
1629 list_del(&freepage->lru);
1630 cc->nr_freepages--;
1631
1632 return freepage;
1633}
1634
1635/*
David Rientjesd53aea32014-06-04 16:08:26 -07001636 * This is a migrate-callback that "frees" freepages back to the isolated
1637 * freelist. All pages on the freelist are from the same zone, so there is no
1638 * special handling needed for NUMA.
1639 */
1640static void compaction_free(struct page *page, unsigned long data)
1641{
1642 struct compact_control *cc = (struct compact_control *)data;
1643
1644 list_add(&page->lru, &cc->freepages);
1645 cc->nr_freepages++;
1646}
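/*
 * compaction_alloc() and compaction_free() form the get_new_page /
 * put_new_page pair handed to migrate_pages() from compact_zone(), so a
 * target page whose migration attempt fails is returned to cc->freepages
 * rather than to the buddy allocator.
 */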
1647
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001648/* possible outcome of isolate_migratepages */
1649typedef enum {
1650 ISOLATE_ABORT, /* Abort compaction now */
1651 ISOLATE_NONE, /* No pages isolated, continue scanning */
1652 ISOLATE_SUCCESS, /* Pages isolated, migrate */
1653} isolate_migrate_t;
1654
1655/*
Eric B Munson5bbe3542015-04-15 16:13:20 -07001656 * Allow userspace to control policy on scanning the unevictable LRU for
1657 * compactable pages.
1658 */
Sebastian Andrzej Siewior6923aa02020-04-01 21:10:42 -07001659#ifdef CONFIG_PREEMPT_RT
1660int sysctl_compact_unevictable_allowed __read_mostly = 0;
1661#else
Eric B Munson5bbe3542015-04-15 16:13:20 -07001662int sysctl_compact_unevictable_allowed __read_mostly = 1;
Sebastian Andrzej Siewior6923aa02020-04-01 21:10:42 -07001663#endif
Eric B Munson5bbe3542015-04-15 16:13:20 -07001664
Mel Gorman70b44592019-03-05 15:44:54 -08001665static inline void
1666update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
1667{
1668 if (cc->fast_start_pfn == ULONG_MAX)
1669 return;
1670
1671 if (!cc->fast_start_pfn)
1672 cc->fast_start_pfn = pfn;
1673
1674 cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
1675}
1676
1677static inline unsigned long
1678reinit_migrate_pfn(struct compact_control *cc)
1679{
1680 if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
1681 return cc->migrate_pfn;
1682
1683 cc->migrate_pfn = cc->fast_start_pfn;
1684 cc->fast_start_pfn = ULONG_MAX;
1685
1686 return cc->migrate_pfn;
1687}
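/*
 * Lifecycle of cc->fast_start_pfn: compact_zone() resets it to 0, each
 * successful fast search records the lowest free-page PFN found via
 * update_fast_start_pfn(), and when a later fast search fails,
 * reinit_migrate_pfn() restarts the linear migration scan from that
 * cached PFN and sets ULONG_MAX so the value is left alone for the rest
 * of this compaction run.
 */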
1688
1689/*
1690 * Briefly search the free lists for a migration source that already has
1691 * some free pages to reduce the number of pages that need migration
1692 * before a pageblock is free.
1693 */
1694static unsigned long fast_find_migrateblock(struct compact_control *cc)
1695{
1696 unsigned int limit = freelist_scan_limit(cc);
1697 unsigned int nr_scanned = 0;
1698 unsigned long distance;
1699 unsigned long pfn = cc->migrate_pfn;
1700 unsigned long high_pfn;
1701 int order;
1702
1703 /* Skip hints are relied on to avoid repeats on the fast search */
1704 if (cc->ignore_skip_hint)
1705 return pfn;
1706
1707 /*
1708 * If the migrate_pfn is not at the start of a zone or the start
1709 * of a pageblock then assume this is a continuation of a previous
1710 * scan restarted due to COMPACT_CLUSTER_MAX.
1711 */
1712 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
1713 return pfn;
1714
1715 /*
1716 * For smaller orders, just linearly scan as the number of pages
1717 * to migrate should be relatively small and does not necessarily
1718 * justify freeing up a large block for a small allocation.
1719 */
1720 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
1721 return pfn;
1722
1723 /*
1724 * Only allow kcompactd and direct requests for movable pages to
1725 * quickly clear out a MOVABLE pageblock for allocation. This
1726 * reduces the risk that a large movable pageblock is freed for
1727 * an unmovable/reclaimable small allocation.
1728 */
1729 if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
1730 return pfn;
1731
1732 /*
1733 * When starting the migration scanner, pick any pageblock within the
1734	 * first half of the search space. Otherwise try to pick a pageblock
1735 * within the first eighth to reduce the chances that a migration
1736 * target later becomes a source.
1737 */
1738 distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
1739 if (cc->migrate_pfn != cc->zone->zone_start_pfn)
1740 distance >>= 2;
1741 high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
1742
1743 for (order = cc->order - 1;
1744 order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
1745 order--) {
1746 struct free_area *area = &cc->zone->free_area[order];
1747 struct list_head *freelist;
1748 unsigned long flags;
1749 struct page *freepage;
1750
1751 if (!area->nr_free)
1752 continue;
1753
1754 spin_lock_irqsave(&cc->zone->lock, flags);
1755 freelist = &area->free_list[MIGRATE_MOVABLE];
1756 list_for_each_entry(freepage, freelist, lru) {
1757 unsigned long free_pfn;
1758
1759 nr_scanned++;
1760 free_pfn = page_to_pfn(freepage);
1761 if (free_pfn < high_pfn) {
Mel Gorman70b44592019-03-05 15:44:54 -08001762 /*
1763 * Avoid if skipped recently. Ideally it would
1764 * move to the tail but even safe iteration of
1765 * the list assumes an entry is deleted, not
1766 * reordered.
1767 */
1768 if (get_pageblock_skip(freepage)) {
1769 if (list_is_last(freelist, &freepage->lru))
1770 break;
1771
1772 continue;
1773 }
1774
1775			/* Reorder so that a future search skips recent pages */
1776 move_freelist_tail(freelist, freepage);
1777
Mel Gormane380beb2019-03-05 15:44:58 -08001778 update_fast_start_pfn(cc, free_pfn);
Mel Gorman70b44592019-03-05 15:44:54 -08001779 pfn = pageblock_start_pfn(free_pfn);
1780 cc->fast_search_fail = 0;
1781 set_pageblock_skip(freepage);
1782 break;
1783 }
1784
1785 if (nr_scanned >= limit) {
1786 cc->fast_search_fail++;
1787 move_freelist_tail(freelist, freepage);
1788 break;
1789 }
1790 }
1791 spin_unlock_irqrestore(&cc->zone->lock, flags);
1792 }
1793
1794 cc->total_migrate_scanned += nr_scanned;
1795
1796 /*
1797 * If fast scanning failed then use a cached entry for a page block
1798 * that had free pages as the basis for starting a linear scan.
1799 */
1800 if (pfn == cc->migrate_pfn)
1801 pfn = reinit_migrate_pfn(cc);
1802
1803 return pfn;
1804}
1805
Eric B Munson5bbe3542015-04-15 16:13:20 -07001806/*
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001807 * Isolate all pages that can be migrated from the first suitable block,
1808 * starting at the block pointed to by the migrate scanner pfn within
1809 * compact_control.
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001810 */
Pengfei Li32aaf052019-09-23 15:36:58 -07001811static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001812{
Joonsoo Kime1409c32016-03-15 14:57:48 -07001813 unsigned long block_start_pfn;
1814 unsigned long block_end_pfn;
1815 unsigned long low_pfn;
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001816 struct page *page;
1817 const isolate_mode_t isolate_mode =
Eric B Munson5bbe3542015-04-15 16:13:20 -07001818 (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
Hugh Dickins1d2047f2016-07-28 15:48:41 -07001819 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
Mel Gorman70b44592019-03-05 15:44:54 -08001820 bool fast_find_block;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001821
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001822 /*
1823 * Start at where we last stopped, or beginning of the zone as
Mel Gorman70b44592019-03-05 15:44:54 -08001824 * initialized by compact_zone(). The first failure will use
1825 * the lowest PFN as the starting point for linear scanning.
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001826 */
Mel Gorman70b44592019-03-05 15:44:54 -08001827 low_pfn = fast_find_migrateblock(cc);
Vlastimil Babka06b66402016-05-19 17:11:48 -07001828 block_start_pfn = pageblock_start_pfn(low_pfn);
Pengfei Li32aaf052019-09-23 15:36:58 -07001829 if (block_start_pfn < cc->zone->zone_start_pfn)
1830 block_start_pfn = cc->zone->zone_start_pfn;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001831
Mel Gorman70b44592019-03-05 15:44:54 -08001832 /*
1833	 * fast_find_migrateblock() marks the pageblock it returns as
1834	 * skipped, so to avoid the isolation_suitable() check below rejecting
1835	 * it, track whether the fast search was successful.
1836 */
1837 fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
1838
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001839 /* Only scan within a pageblock boundary */
Vlastimil Babka06b66402016-05-19 17:11:48 -07001840 block_end_pfn = pageblock_end_pfn(low_pfn);
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001841
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001842 /*
1843 * Iterate over whole pageblocks until we find the first suitable.
1844 * Do not cross the free scanner.
1845 */
Joonsoo Kime1409c32016-03-15 14:57:48 -07001846 for (; block_end_pfn <= cc->free_pfn;
Mel Gorman70b44592019-03-05 15:44:54 -08001847 fast_find_block = false,
Joonsoo Kime1409c32016-03-15 14:57:48 -07001848 low_pfn = block_end_pfn,
1849 block_start_pfn = block_end_pfn,
1850 block_end_pfn += pageblock_nr_pages) {
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001851
1852 /*
1853 * This can potentially iterate a massively long zone with
1854 * many pageblocks unsuitable, so periodically check if we
Mel Gormancb810ad2019-03-05 15:45:21 -08001855 * need to schedule.
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001856 */
Mel Gormancb810ad2019-03-05 15:45:21 -08001857 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
Mel Gormancf66f072019-03-05 15:45:24 -08001858 cond_resched();
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001859
Pengfei Li32aaf052019-09-23 15:36:58 -07001860 page = pageblock_pfn_to_page(block_start_pfn,
1861 block_end_pfn, cc->zone);
Vlastimil Babka7d49d882014-10-09 15:27:11 -07001862 if (!page)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001863 continue;
1864
Mel Gormane380beb2019-03-05 15:44:58 -08001865 /*
1866 * If isolation recently failed, do not retry. Only check the
1867 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
1868 * to be visited multiple times. Assume skip was checked
1869 * before making it "skip" so other compaction instances do
1870 * not scan the same block.
1871 */
1872 if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
1873 !fast_find_block && !isolation_suitable(cc, page))
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001874 continue;
1875
1876 /*
Mel Gorman9bebefd2019-03-05 15:45:14 -08001877 * For async compaction, also only scan in MOVABLE blocks
1878 * without huge pages. Async compaction is optimistic to see
1879 * if the minimum amount of work satisfies the allocation.
1880 * The cached PFN is updated as it's possible that all
1881 * remaining blocks between source and target are unsuitable
1882 * and the compaction scanners fail to meet.
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001883 */
Mel Gorman9bebefd2019-03-05 15:45:14 -08001884 if (!suitable_migration_source(cc, page)) {
1885 update_cached_migrate(cc, block_end_pfn);
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001886 continue;
Mel Gorman9bebefd2019-03-05 15:45:14 -08001887 }
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001888
1889 /* Perform the isolation */
Joonsoo Kime1409c32016-03-15 14:57:48 -07001890 low_pfn = isolate_migratepages_block(cc, low_pfn,
1891 block_end_pfn, isolate_mode);
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001892
Mel Gormancb2dcaf2019-03-05 15:45:11 -08001893 if (!low_pfn)
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001894 return ISOLATE_ABORT;
1895
1896 /*
1897 * Either we isolated something and proceed with migration. Or
1898 * we failed and compact_zone should decide if we should
1899 * continue or not.
1900 */
1901 break;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001902 }
1903
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001904 /* Record where migration scanner will be restarted. */
1905 cc->migrate_pfn = low_pfn;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001906
Vlastimil Babkaedc2ca62014-10-09 15:27:09 -07001907 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01001908}
1909
Yaowei Bai21c527a2015-11-05 18:47:20 -08001910/*
1911 * order == -1 is expected when compacting via
1912 * /proc/sys/vm/compact_memory
1913 */
1914static inline bool is_via_compact_memory(int order)
1915{
1916 return order == -1;
1917}
1918
Nitin Guptafacdaa92020-08-11 18:31:00 -07001919static bool kswapd_is_running(pg_data_t *pgdat)
1920{
1921 return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING);
1922}
1923
1924/*
1925 * A zone's fragmentation score is the external fragmentation with respect to the
1926 * COMPACTION_HPAGE_ORDER scaled by the zone's size. It returns a value
1927 * in the range [0, 100].
1928 *
1929 * The scaling factor ensures that proactive compaction focuses on larger
1930 * zones like ZONE_NORMAL, rather than smaller, specialized zones like
1931 * ZONE_DMA32. For smaller zones, the score value remains close to zero,
1932 * and thus never exceeds the high threshold for proactive compaction.
1933 */
Nitin Guptad34c0a72020-08-11 18:31:07 -07001934static unsigned int fragmentation_score_zone(struct zone *zone)
Nitin Guptafacdaa92020-08-11 18:31:00 -07001935{
1936 unsigned long score;
1937
1938 score = zone->present_pages *
1939 extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
1940 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
1941}
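/*
 * For example, a zone holding half of the node's present pages with an
 * extfrag_for_order() value of 60 contributes a score of about 30, while
 * a zone holding 1% of the node's pages contributes at most 1 even when
 * fully fragmented.
 */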
1942
1943/*
1944 * The per-node proactive (background) compaction process is started by its
1945 * corresponding kcompactd thread when the node's fragmentation score
1946 * exceeds the high threshold. The compaction process remains active till
1947 * the node's score falls below the low threshold, or one of the back-off
1948 * conditions is met.
1949 */
Nitin Guptad34c0a72020-08-11 18:31:07 -07001950static unsigned int fragmentation_score_node(pg_data_t *pgdat)
Nitin Guptafacdaa92020-08-11 18:31:00 -07001951{
Nitin Guptad34c0a72020-08-11 18:31:07 -07001952 unsigned int score = 0;
Nitin Guptafacdaa92020-08-11 18:31:00 -07001953 int zoneid;
1954
1955 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1956 struct zone *zone;
1957
1958 zone = &pgdat->node_zones[zoneid];
1959 score += fragmentation_score_zone(zone);
1960 }
1961
1962 return score;
1963}
1964
Nitin Guptad34c0a72020-08-11 18:31:07 -07001965static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
Nitin Guptafacdaa92020-08-11 18:31:00 -07001966{
Nitin Guptad34c0a72020-08-11 18:31:07 -07001967 unsigned int wmark_low;
Nitin Guptafacdaa92020-08-11 18:31:00 -07001968
1969 /*
1970	 * Cap the low watermark to avoid excessive compaction
1971	 * activity in case a user sets the proactiveness tunable
1972 * close to 100 (maximum).
1973 */
Nitin Guptad34c0a72020-08-11 18:31:07 -07001974 wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
1975 return low ? wmark_low : min(wmark_low + 10, 100U);
Nitin Guptafacdaa92020-08-11 18:31:00 -07001976}
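/*
 * For example, with sysctl_compaction_proactiveness at its default of 20,
 * wmark_low is 80 and wmark_high is 90: proactive compaction starts once
 * the node's score exceeds 90 and backs off when the score falls to 80 or
 * below. A proactiveness above 95 is capped so wmark_low never drops
 * below 5.
 */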
1977
1978static bool should_proactive_compact_node(pg_data_t *pgdat)
1979{
1980 int wmark_high;
1981
1982 if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat))
1983 return false;
1984
1985 wmark_high = fragmentation_score_wmark(pgdat, false);
1986 return fragmentation_score_node(pgdat) > wmark_high;
1987}
1988
Mel Gorman40cacbc2019-03-05 15:44:36 -08001989static enum compact_result __compact_finished(struct compact_control *cc)
Mel Gorman748446b2010-05-24 14:32:27 -07001990{
Mel Gorman8fb74b92013-01-11 14:32:16 -08001991 unsigned int order;
Vlastimil Babkad39773a2017-05-08 15:54:46 -07001992 const int migratetype = cc->migratetype;
Mel Gormancb2dcaf2019-03-05 15:45:11 -08001993 int ret;
Mel Gorman748446b2010-05-24 14:32:27 -07001994
Mel Gorman753341a2012-10-08 16:32:40 -07001995 /* Compaction run completes if the migrate and free scanner meet */
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07001996 if (compact_scanners_met(cc)) {
Vlastimil Babka55b7c4c2014-01-21 15:51:11 -08001997 /* Let the next compaction start anew. */
Mel Gorman40cacbc2019-03-05 15:44:36 -08001998 reset_cached_positions(cc->zone);
Vlastimil Babka55b7c4c2014-01-21 15:51:11 -08001999
Mel Gorman62997022012-10-08 16:32:47 -07002000 /*
2001 * Mark that the PG_migrate_skip information should be cleared
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07002002 * by kswapd when it goes to sleep. kcompactd does not set the
Mel Gorman62997022012-10-08 16:32:47 -07002003 * flag itself as the decision to be clear should be directly
2004 * based on an allocation request.
2005 */
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07002006 if (cc->direct_compaction)
Mel Gorman40cacbc2019-03-05 15:44:36 -08002007 cc->zone->compact_blockskip_flush = true;
Mel Gorman62997022012-10-08 16:32:47 -07002008
Michal Hockoc8f7de02016-05-20 16:56:47 -07002009 if (cc->whole_zone)
2010 return COMPACT_COMPLETE;
2011 else
2012 return COMPACT_PARTIAL_SKIPPED;
Mel Gormanbb13ffe2012-10-08 16:32:41 -07002013 }
Mel Gorman748446b2010-05-24 14:32:27 -07002014
Nitin Guptafacdaa92020-08-11 18:31:00 -07002015 if (cc->proactive_compaction) {
2016 int score, wmark_low;
2017 pg_data_t *pgdat;
2018
2019 pgdat = cc->zone->zone_pgdat;
2020 if (kswapd_is_running(pgdat))
2021 return COMPACT_PARTIAL_SKIPPED;
2022
2023 score = fragmentation_score_zone(cc->zone);
2024 wmark_low = fragmentation_score_wmark(pgdat, true);
2025
2026 if (score > wmark_low)
2027 ret = COMPACT_CONTINUE;
2028 else
2029 ret = COMPACT_SUCCESS;
2030
2031 goto out;
2032 }
2033
Yaowei Bai21c527a2015-11-05 18:47:20 -08002034 if (is_via_compact_memory(cc->order))
Mel Gorman56de7262010-05-24 14:32:30 -07002035 return COMPACT_CONTINUE;
2036
Mel Gormanefe771c2019-03-05 15:44:46 -08002037 /*
2038 * Always finish scanning a pageblock to reduce the possibility of
2039 * fallbacks in the future. This is particularly important when
2040	 * the migration source is unmovable/reclaimable, but it's not worth
2041 * special casing.
2042 */
2043 if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
2044 return COMPACT_CONTINUE;
Vlastimil Babkabaf6a9a2017-05-08 15:54:52 -07002045
Mel Gorman56de7262010-05-24 14:32:30 -07002046 /* Direct compactor: Is a suitable page free? */
Mel Gormancb2dcaf2019-03-05 15:45:11 -08002047 ret = COMPACT_NO_SUITABLE_PAGE;
Mel Gorman8fb74b92013-01-11 14:32:16 -08002048 for (order = cc->order; order < MAX_ORDER; order++) {
Mel Gorman40cacbc2019-03-05 15:44:36 -08002049 struct free_area *area = &cc->zone->free_area[order];
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002050 bool can_steal;
Mel Gorman56de7262010-05-24 14:32:30 -07002051
Mel Gorman8fb74b92013-01-11 14:32:16 -08002052 /* Job done if page is free of the right migratetype */
Dan Williamsb03641a2019-05-14 15:41:32 -07002053 if (!free_area_empty(area, migratetype))
Vlastimil Babkacf378312016-10-07 16:57:41 -07002054 return COMPACT_SUCCESS;
Mel Gorman8fb74b92013-01-11 14:32:16 -08002055
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002056#ifdef CONFIG_CMA
2057 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
2058 if (migratetype == MIGRATE_MOVABLE &&
Dan Williamsb03641a2019-05-14 15:41:32 -07002059 !free_area_empty(area, MIGRATE_CMA))
Vlastimil Babkacf378312016-10-07 16:57:41 -07002060 return COMPACT_SUCCESS;
Joonsoo Kim2149cda2015-04-14 15:45:21 -07002061#endif
2062 /*
2063 * Job done if allocation would steal freepages from
2064 * other migratetype buddy lists.
2065 */
2066 if (find_suitable_fallback(area, order, migratetype,
Vlastimil Babkabaf6a9a2017-05-08 15:54:52 -07002067 true, &can_steal) != -1) {
2068
2069 /* movable pages are OK in any pageblock */
2070 if (migratetype == MIGRATE_MOVABLE)
2071 return COMPACT_SUCCESS;
2072
2073 /*
2074 * We are stealing for a non-movable allocation. Make
2075 * sure we finish compacting the current pageblock
2076 * first so it is as free as possible and we won't
2077 * have to steal another one soon. This only applies
2078 * to sync compaction, as async compaction operates
2079 * on pageblocks of the same migratetype.
2080 */
2081 if (cc->mode == MIGRATE_ASYNC ||
2082 IS_ALIGNED(cc->migrate_pfn,
2083 pageblock_nr_pages)) {
2084 return COMPACT_SUCCESS;
2085 }
2086
Mel Gormancb2dcaf2019-03-05 15:45:11 -08002087 ret = COMPACT_CONTINUE;
2088 break;
Vlastimil Babkabaf6a9a2017-05-08 15:54:52 -07002089 }
Mel Gorman56de7262010-05-24 14:32:30 -07002090 }
2091
Nitin Guptafacdaa92020-08-11 18:31:00 -07002092out:
Mel Gormancb2dcaf2019-03-05 15:45:11 -08002093 if (cc->contended || fatal_signal_pending(current))
2094 ret = COMPACT_CONTENDED;
2095
2096 return ret;
Joonsoo Kim837d0262015-02-11 15:27:06 -08002097}
2098
Mel Gorman40cacbc2019-03-05 15:44:36 -08002099static enum compact_result compact_finished(struct compact_control *cc)
Joonsoo Kim837d0262015-02-11 15:27:06 -08002100{
2101 int ret;
2102
Mel Gorman40cacbc2019-03-05 15:44:36 -08002103 ret = __compact_finished(cc);
2104 trace_mm_compaction_finished(cc->zone, cc->order, ret);
Joonsoo Kim837d0262015-02-11 15:27:06 -08002105 if (ret == COMPACT_NO_SUITABLE_PAGE)
2106 ret = COMPACT_CONTINUE;
2107
2108 return ret;
Mel Gorman748446b2010-05-24 14:32:27 -07002109}
2110
Michal Hockoea7ab982016-05-20 16:56:38 -07002111static enum compact_result __compaction_suitable(struct zone *zone, int order,
Mel Gormanc6038442016-05-19 17:13:38 -07002112 unsigned int alloc_flags,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002113 int highest_zoneidx,
Michal Hocko86a294a2016-05-20 16:57:12 -07002114 unsigned long wmark_target)
Mel Gorman3e7d3442011-01-13 15:45:56 -08002115{
Mel Gorman3e7d3442011-01-13 15:45:56 -08002116 unsigned long watermark;
2117
Yaowei Bai21c527a2015-11-05 18:47:20 -08002118 if (is_via_compact_memory(order))
Michal Hocko3957c772011-06-15 15:08:25 -07002119 return COMPACT_CONTINUE;
2120
Mel Gormana9214442018-12-28 00:35:44 -08002121 watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
Vlastimil Babkaebff3982014-12-10 15:43:22 -08002122 /*
2123 * If watermarks for high-order allocation are already met, there
2124 * should be no need for compaction at all.
2125 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002126 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
Vlastimil Babkaebff3982014-12-10 15:43:22 -08002127 alloc_flags))
Vlastimil Babkacf378312016-10-07 16:57:41 -07002128 return COMPACT_SUCCESS;
Vlastimil Babkaebff3982014-12-10 15:43:22 -08002129
Michal Hocko3957c772011-06-15 15:08:25 -07002130 /*
Vlastimil Babka9861a622016-10-07 16:57:53 -07002131 * Watermarks for order-0 must be met for compaction to be able to
Vlastimil Babka984fdba2016-10-07 16:57:57 -07002132 * isolate free pages for migration targets. This means that the
2133 * watermark and alloc_flags have to match, or be more pessimistic than
2134 * the check in __isolate_free_page(). We don't use the direct
2135 * compactor's alloc_flags, as they are not relevant for freepage
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002136 * isolation. We however do use the direct compactor's highest_zoneidx
2137 * to skip over zones where lowmem reserves would prevent allocation
2138 * even if compaction succeeds.
Vlastimil Babka8348faf2016-10-07 16:58:00 -07002139 * For costly orders, we require low watermark instead of min for
2140 * compaction to proceed to increase its chances.
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09002141 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
2142 * suitable migration targets
Mel Gorman3e7d3442011-01-13 15:45:56 -08002143 */
Vlastimil Babka8348faf2016-10-07 16:58:00 -07002144 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
2145 low_wmark_pages(zone) : min_wmark_pages(zone);
2146 watermark += compact_gap(order);
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002147 if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
Joonsoo Kimd883c6c2018-05-23 10:18:21 +09002148 ALLOC_CMA, wmark_target))
Mel Gorman3e7d3442011-01-13 15:45:56 -08002149 return COMPACT_SKIPPED;
2150
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07002151 return COMPACT_CONTINUE;
2152}
2153
Hui Su2b1a20c2020-12-14 19:12:42 -08002154/*
2155 * compaction_suitable: Is this suitable to run compaction on this zone now?
2156 * Returns
2157 * COMPACT_SKIPPED - If there are too few free pages for compaction
2158 * COMPACT_SUCCESS - If the allocation would succeed without compaction
2159 * COMPACT_CONTINUE - If compaction should run now
2160 */
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07002161enum compact_result compaction_suitable(struct zone *zone, int order,
2162 unsigned int alloc_flags,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002163 int highest_zoneidx)
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07002164{
2165 enum compact_result ret;
2166 int fragindex;
2167
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002168 ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07002169 zone_page_state(zone, NR_FREE_PAGES));
Mel Gorman3e7d3442011-01-13 15:45:56 -08002170 /*
2171 * fragmentation index determines if allocation failures are due to
2172 * low memory or external fragmentation
2173 *
Vlastimil Babkaebff3982014-12-10 15:43:22 -08002174 * index of -1000 would imply allocations might succeed depending on
2175 * watermarks, but we already failed the high-order watermark check
Mel Gorman3e7d3442011-01-13 15:45:56 -08002176 * index towards 0 implies failure is due to lack of memory
2177 * index towards 1000 implies failure is due to fragmentation
2178 *
Vlastimil Babka20311422016-10-07 17:00:46 -07002179 * Only compact if a failure would be due to fragmentation. Also
2180 * ignore fragindex for non-costly orders where the alternative to
2181 * a successful reclaim/compaction is OOM. Fragindex and the
2182 * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
2183 * excessive compaction for costly orders, but it should not be at the
2184 * expense of system stability.
Mel Gorman3e7d3442011-01-13 15:45:56 -08002185 */
Vlastimil Babka20311422016-10-07 17:00:46 -07002186 if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07002187 fragindex = fragmentation_index(zone, order);
2188 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
2189 ret = COMPACT_NOT_SUITABLE_ZONE;
2190 }
Mel Gorman3e7d3442011-01-13 15:45:56 -08002191
Joonsoo Kim837d0262015-02-11 15:27:06 -08002192 trace_mm_compaction_suitable(zone, order, ret);
2193 if (ret == COMPACT_NOT_SUITABLE_ZONE)
2194 ret = COMPACT_SKIPPED;
2195
2196 return ret;
2197}
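/*
 * For example, for a costly order (above PAGE_ALLOC_COSTLY_ORDER, i.e.
 * order 4 and up) with the default sysctl_extfrag_threshold of 500, a
 * fragmentation index of 400 suggests failure due to low memory, so
 * COMPACT_CONTINUE is downgraded to COMPACT_SKIPPED, while an index of
 * 700 lets compaction proceed.
 */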
2198
Michal Hocko86a294a2016-05-20 16:57:12 -07002199bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
2200 int alloc_flags)
2201{
2202 struct zone *zone;
2203 struct zoneref *z;
2204
2205 /*
2206 * Make sure at least one zone would pass __compaction_suitable if we continue
2207 * retrying the reclaim.
2208 */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002209 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2210 ac->highest_zoneidx, ac->nodemask) {
Michal Hocko86a294a2016-05-20 16:57:12 -07002211 unsigned long available;
2212 enum compact_result compact_result;
2213
2214 /*
2215 * Do not consider all the reclaimable memory because we do not
2216		 * want to thrash just for a single high order allocation which
2217		 * is not even guaranteed to appear even if __compaction_suitable
2218 * is happy about the watermark check.
2219 */
Mel Gorman5a1c84b2016-07-28 15:47:31 -07002220 available = zone_reclaimable_pages(zone) / order;
Michal Hocko86a294a2016-05-20 16:57:12 -07002221 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
2222 compact_result = __compaction_suitable(zone, order, alloc_flags,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002223 ac->highest_zoneidx, available);
Vlastimil Babkacc5c9f02016-10-07 17:00:43 -07002224 if (compact_result != COMPACT_SKIPPED)
Michal Hocko86a294a2016-05-20 16:57:12 -07002225 return true;
2226 }
2227
2228 return false;
2229}
2230
Mel Gorman5e1f0f02019-03-05 15:45:41 -08002231static enum compact_result
2232compact_zone(struct compact_control *cc, struct capture_control *capc)
Mel Gorman748446b2010-05-24 14:32:27 -07002233{
Michal Hockoea7ab982016-05-20 16:56:38 -07002234 enum compact_result ret;
Mel Gorman40cacbc2019-03-05 15:44:36 -08002235 unsigned long start_pfn = cc->zone->zone_start_pfn;
2236 unsigned long end_pfn = zone_end_pfn(cc->zone);
Mel Gorman566e54e2019-03-05 15:44:32 -08002237 unsigned long last_migrated_pfn;
David Rientjese0b9dae2014-06-04 16:08:28 -07002238 const bool sync = cc->mode != MIGRATE_ASYNC;
Mel Gorman8854c552019-03-05 15:45:18 -08002239 bool update_cached;
Mel Gorman748446b2010-05-24 14:32:27 -07002240
Yafang Shaoa94b5252019-09-23 15:36:54 -07002241 /*
2242 * These counters track activities during zone compaction. Initialize
2243 * them before compacting a new zone.
2244 */
2245 cc->total_migrate_scanned = 0;
2246 cc->total_free_scanned = 0;
2247 cc->nr_migratepages = 0;
2248 cc->nr_freepages = 0;
2249 INIT_LIST_HEAD(&cc->freepages);
2250 INIT_LIST_HEAD(&cc->migratepages);
2251
Wei Yang01c0bfe2020-06-03 15:59:08 -07002252 cc->migratetype = gfp_migratetype(cc->gfp_mask);
Mel Gorman40cacbc2019-03-05 15:44:36 -08002253 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002254 cc->highest_zoneidx);
Michal Hockoc46649d2016-05-20 16:56:41 -07002255 /* Compaction is likely to fail */
Vlastimil Babkacf378312016-10-07 16:57:41 -07002256 if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
Mel Gorman3e7d3442011-01-13 15:45:56 -08002257 return ret;
Michal Hockoc46649d2016-05-20 16:56:41 -07002258
2259 /* huh, compaction_suitable is returning something unexpected */
2260 VM_BUG_ON(ret != COMPACT_CONTINUE);
Mel Gorman3e7d3442011-01-13 15:45:56 -08002261
Mel Gormanc89511a2012-10-08 16:32:45 -07002262 /*
Vlastimil Babkad3132e42014-01-21 15:51:08 -08002263 * Clear pageblock skip if there were failures recently and compaction
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07002264 * is about to be retried after being deferred.
Vlastimil Babkad3132e42014-01-21 15:51:08 -08002265 */
Mel Gorman40cacbc2019-03-05 15:44:36 -08002266 if (compaction_restarting(cc->zone, cc->order))
2267 __reset_isolation_suitable(cc->zone);
Vlastimil Babkad3132e42014-01-21 15:51:08 -08002268
2269 /*
Mel Gormanc89511a2012-10-08 16:32:45 -07002270 * Setup to move all movable pages to the end of the zone. Used cached
Vlastimil Babka06ed2992016-10-07 16:57:35 -07002271 * information on where the scanners should start (unless we explicitly
2272 * want to compact the whole zone), but check that it is initialised
2273 * by ensuring the values are within zone boundaries.
Mel Gormanc89511a2012-10-08 16:32:45 -07002274 */
Mel Gorman70b44592019-03-05 15:44:54 -08002275 cc->fast_start_pfn = 0;
Vlastimil Babka06ed2992016-10-07 16:57:35 -07002276 if (cc->whole_zone) {
Mel Gormanc89511a2012-10-08 16:32:45 -07002277 cc->migrate_pfn = start_pfn;
Vlastimil Babka06ed2992016-10-07 16:57:35 -07002278 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
2279 } else {
Mel Gorman40cacbc2019-03-05 15:44:36 -08002280 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
2281 cc->free_pfn = cc->zone->compact_cached_free_pfn;
Vlastimil Babka06ed2992016-10-07 16:57:35 -07002282 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
2283 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
Mel Gorman40cacbc2019-03-05 15:44:36 -08002284 cc->zone->compact_cached_free_pfn = cc->free_pfn;
Vlastimil Babka06ed2992016-10-07 16:57:35 -07002285 }
2286 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
2287 cc->migrate_pfn = start_pfn;
Mel Gorman40cacbc2019-03-05 15:44:36 -08002288 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
2289 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
Vlastimil Babka06ed2992016-10-07 16:57:35 -07002290 }
Michal Hockoc8f7de02016-05-20 16:56:47 -07002291
Mel Gormane332f742019-03-05 15:45:38 -08002292 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
Vlastimil Babka06ed2992016-10-07 16:57:35 -07002293 cc->whole_zone = true;
2294 }
Michal Hockoc8f7de02016-05-20 16:56:47 -07002295
Mel Gorman566e54e2019-03-05 15:44:32 -08002296 last_migrated_pfn = 0;
Mel Gorman748446b2010-05-24 14:32:27 -07002297
Mel Gorman8854c552019-03-05 15:45:18 -08002298 /*
2299 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
2300 * the basis that some migrations will fail in ASYNC mode. However,
2301 * if the cached PFNs match and pageblocks are skipped due to having
2302 * no isolation candidates, then the sync state does not matter.
2303 * Until a pageblock with isolation candidates is found, keep the
2304 * cached PFNs in sync to avoid revisiting the same blocks.
2305 */
2306 update_cached = !sync &&
2307 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
2308
Joonsoo Kim16c4a092015-02-11 15:27:01 -08002309 trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
2310 cc->free_pfn, end_pfn, sync);
Mel Gorman0eb927c2014-01-21 15:51:05 -08002311
Mel Gorman748446b2010-05-24 14:32:27 -07002312 migrate_prep_local();
2313
Mel Gorman40cacbc2019-03-05 15:44:36 -08002314 while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
Minchan Kim9d502c12011-03-22 16:30:39 -07002315 int err;
Yanfei Xu19d3cf92020-12-14 19:12:39 -08002316 unsigned long iteration_start_pfn = cc->migrate_pfn;
Mel Gorman748446b2010-05-24 14:32:27 -07002317
Mel Gorman804d3122019-03-05 15:45:07 -08002318 /*
2319 * Avoid multiple rescans which can happen if a page cannot be
2320 * isolated (dirty/writeback in async mode) or if the migrated
2321 * pages are being allocated before the pageblock is cleared.
2322 * The first rescan will capture the entire pageblock for
2323 * migration. If it fails, it'll be marked skip and scanning
2324 * will proceed as normal.
2325 */
2326 cc->rescan = false;
2327 if (pageblock_start_pfn(last_migrated_pfn) ==
Yanfei Xu19d3cf92020-12-14 19:12:39 -08002328 pageblock_start_pfn(iteration_start_pfn)) {
Mel Gorman804d3122019-03-05 15:45:07 -08002329 cc->rescan = true;
2330 }
2331
Pengfei Li32aaf052019-09-23 15:36:58 -07002332 switch (isolate_migratepages(cc)) {
Mel Gormanf9e35b32011-06-15 15:08:52 -07002333 case ISOLATE_ABORT:
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08002334 ret = COMPACT_CONTENDED;
Rafael Aquini5733c7d2012-12-11 16:02:47 -08002335 putback_movable_pages(&cc->migratepages);
Shaohua Lie64c5232012-10-08 16:32:27 -07002336 cc->nr_migratepages = 0;
Mel Gormanf9e35b32011-06-15 15:08:52 -07002337 goto out;
2338 case ISOLATE_NONE:
Mel Gorman8854c552019-03-05 15:45:18 -08002339 if (update_cached) {
2340 cc->zone->compact_cached_migrate_pfn[1] =
2341 cc->zone->compact_cached_migrate_pfn[0];
2342 }
2343
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08002344 /*
2345 * We haven't isolated and migrated anything, but
2346 * there might still be unflushed migrations from
2347 * previous cc->order aligned block.
2348 */
2349 goto check_drain;
Mel Gormanf9e35b32011-06-15 15:08:52 -07002350 case ISOLATE_SUCCESS:
Mel Gorman8854c552019-03-05 15:45:18 -08002351 update_cached = false;
Yanfei Xu19d3cf92020-12-14 19:12:39 -08002352 last_migrated_pfn = iteration_start_pfn;
Mel Gormanf9e35b32011-06-15 15:08:52 -07002353 }
Mel Gorman748446b2010-05-24 14:32:27 -07002354
David Rientjesd53aea32014-06-04 16:08:26 -07002355 err = migrate_pages(&cc->migratepages, compaction_alloc,
David Rientjese0b9dae2014-06-04 16:08:28 -07002356 compaction_free, (unsigned long)cc, cc->mode,
Mel Gorman7b2a2d42012-10-19 14:07:31 +01002357 MR_COMPACTION);
Mel Gorman748446b2010-05-24 14:32:27 -07002358
Vlastimil Babkaf8c93012014-06-04 16:08:32 -07002359 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
2360 &cc->migratepages);
Mel Gorman748446b2010-05-24 14:32:27 -07002361
Vlastimil Babkaf8c93012014-06-04 16:08:32 -07002362 /* All pages were either migrated or will be released */
2363 cc->nr_migratepages = 0;
Minchan Kim9d502c12011-03-22 16:30:39 -07002364 if (err) {
Rafael Aquini5733c7d2012-12-11 16:02:47 -08002365 putback_movable_pages(&cc->migratepages);
Vlastimil Babka7ed695e2014-01-21 15:51:09 -08002366 /*
2367 * migrate_pages() may return -ENOMEM when scanners meet
2368 * and we want compact_finished() to detect it
2369 */
Vlastimil Babkaf2849aa2015-09-08 15:02:36 -07002370 if (err == -ENOMEM && !compact_scanners_met(cc)) {
Vlastimil Babka2d1e1042015-11-05 18:48:02 -08002371 ret = COMPACT_CONTENDED;
David Rientjes4bf2bba2012-07-11 14:02:13 -07002372 goto out;
2373 }
Vlastimil Babkafdd048e2016-05-19 17:11:55 -07002374 /*
2375 * We failed to migrate at least one page in the current
2376 * order-aligned block, so skip the rest of it.
2377 */
2378 if (cc->direct_compaction &&
2379 (cc->mode == MIGRATE_ASYNC)) {
2380 cc->migrate_pfn = block_end_pfn(
2381 cc->migrate_pfn - 1, cc->order);
2382 /* Draining pcplists is useless in this case */
Mel Gorman566e54e2019-03-05 15:44:32 -08002383 last_migrated_pfn = 0;
Vlastimil Babkafdd048e2016-05-19 17:11:55 -07002384 }
Mel Gorman748446b2010-05-24 14:32:27 -07002385 }
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08002386
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08002387check_drain:
2388 /*
2389 * Has the migration scanner moved away from the previous
2390 * cc->order aligned block where we migrated from? If yes,
2391 * flush the pages that were freed, so that they can merge and
2392 * compact_finished() can detect immediately if allocation
2393 * would succeed.
2394 */
Mel Gorman566e54e2019-03-05 15:44:32 -08002395 if (cc->order > 0 && last_migrated_pfn) {
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08002396 unsigned long current_block_start =
Vlastimil Babka06b66402016-05-19 17:11:48 -07002397 block_start_pfn(cc->migrate_pfn, cc->order);
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08002398
Mel Gorman566e54e2019-03-05 15:44:32 -08002399 if (last_migrated_pfn < current_block_start) {
Ingo Molnarb01b2142020-05-27 22:11:15 +02002400 lru_add_drain_cpu_zone(cc->zone);
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08002401 /* No more flushing until we migrate again */
Mel Gorman566e54e2019-03-05 15:44:32 -08002402 last_migrated_pfn = 0;
Vlastimil Babkafdaf7f52014-12-10 15:43:34 -08002403 }
2404 }
2405
Mel Gorman5e1f0f02019-03-05 15:45:41 -08002406 /* Stop if a page has been captured */
2407 if (capc && capc->page) {
2408 ret = COMPACT_SUCCESS;
2409 break;
2410 }
Mel Gorman748446b2010-05-24 14:32:27 -07002411 }
2412
Mel Gormanf9e35b32011-06-15 15:08:52 -07002413out:
Vlastimil Babka6bace092014-12-10 15:43:31 -08002414 /*
2415 * Release free pages and update where the free scanner should restart,
2416 * so we don't leave any returned pages behind in the next attempt.
2417 */
2418 if (cc->nr_freepages > 0) {
2419 unsigned long free_pfn = release_freepages(&cc->freepages);
2420
2421 cc->nr_freepages = 0;
2422 VM_BUG_ON(free_pfn == 0);
2423 /* The cached pfn is always the first in a pageblock */
Vlastimil Babka06b66402016-05-19 17:11:48 -07002424 free_pfn = pageblock_start_pfn(free_pfn);
Vlastimil Babka6bace092014-12-10 15:43:31 -08002425 /*
2426 * Only go back, not forward. The cached pfn might have been
2427 * already reset to zone end in compact_finished()
2428 */
Mel Gorman40cacbc2019-03-05 15:44:36 -08002429 if (free_pfn > cc->zone->compact_cached_free_pfn)
2430 cc->zone->compact_cached_free_pfn = free_pfn;
Vlastimil Babka6bace092014-12-10 15:43:31 -08002431 }
Mel Gorman748446b2010-05-24 14:32:27 -07002432
David Rientjes7f354a52017-02-22 15:44:50 -08002433 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
2434 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
2435
Joonsoo Kim16c4a092015-02-11 15:27:01 -08002436 trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
2437 cc->free_pfn, end_pfn, sync, ret);
Mel Gorman0eb927c2014-01-21 15:51:05 -08002438
Mel Gorman748446b2010-05-24 14:32:27 -07002439 return ret;
2440}
Mel Gorman76ab0f52010-05-24 14:32:28 -07002441
Michal Hockoea7ab982016-05-20 16:56:38 -07002442static enum compact_result compact_zone_order(struct zone *zone, int order,
Vlastimil Babkac3486f52016-07-28 15:49:30 -07002443 gfp_t gfp_mask, enum compact_priority prio,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002444 unsigned int alloc_flags, int highest_zoneidx,
Mel Gorman5e1f0f02019-03-05 15:45:41 -08002445 struct page **capture)
Mel Gorman56de7262010-05-24 14:32:30 -07002446{
Michal Hockoea7ab982016-05-20 16:56:38 -07002447 enum compact_result ret;
Mel Gorman56de7262010-05-24 14:32:30 -07002448 struct compact_control cc = {
Mel Gorman56de7262010-05-24 14:32:30 -07002449 .order = order,
Mel Gormandbe2d4e2019-03-05 15:45:31 -08002450 .search_order = order,
David Rientjes6d7ce552014-10-09 15:27:27 -07002451 .gfp_mask = gfp_mask,
Mel Gorman56de7262010-05-24 14:32:30 -07002452 .zone = zone,
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07002453 .mode = (prio == COMPACT_PRIO_ASYNC) ?
2454 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
Vlastimil Babkaebff3982014-12-10 15:43:22 -08002455 .alloc_flags = alloc_flags,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002456 .highest_zoneidx = highest_zoneidx,
Vlastimil Babkaaccf6242016-03-17 14:18:15 -07002457 .direct_compaction = true,
Vlastimil Babkaa8e025e2016-10-07 16:57:47 -07002458 .whole_zone = (prio == MIN_COMPACT_PRIORITY),
Vlastimil Babka9f7e3382016-10-07 17:00:37 -07002459 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
2460 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
Mel Gorman56de7262010-05-24 14:32:30 -07002461 };
Mel Gorman5e1f0f02019-03-05 15:45:41 -08002462 struct capture_control capc = {
2463 .cc = &cc,
2464 .page = NULL,
2465 };
2466
Vlastimil Babkab9e20f02020-06-25 20:29:24 -07002467 /*
2468 * Make sure the structs are really initialized before we expose the
2469 * capture control, in case we are interrupted and the interrupt handler
2470 * frees a page.
2471 */
2472 barrier();
2473 WRITE_ONCE(current->capture_control, &capc);
Mel Gorman56de7262010-05-24 14:32:30 -07002474
Mel Gorman5e1f0f02019-03-05 15:45:41 -08002475 ret = compact_zone(&cc, &capc);
Shaohua Lie64c5232012-10-08 16:32:27 -07002476
2477 VM_BUG_ON(!list_empty(&cc.freepages));
2478 VM_BUG_ON(!list_empty(&cc.migratepages));
2479
Vlastimil Babkab9e20f02020-06-25 20:29:24 -07002480 /*
2481 * Make sure we hide capture control first before we read the captured
2482 * page pointer, otherwise an interrupt could free and capture a page
2483 * and we would leak it.
2484 */
2485 WRITE_ONCE(current->capture_control, NULL);
2486 *capture = READ_ONCE(capc.page);
Mel Gorman5e1f0f02019-03-05 15:45:41 -08002487
Shaohua Lie64c5232012-10-08 16:32:27 -07002488 return ret;
Mel Gorman56de7262010-05-24 14:32:30 -07002489}
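/*
 * Simplified, hedged sketch of the allocator side of page capture (the
 * real implementation is compaction_capture() in mm/page_alloc.c, run
 * under the zone lock when a page is freed):
 *
 *	struct capture_control *capc = current->capture_control;
 *
 *	if (capc && capc->cc->order == order) {
 *		capc->page = page;	// hand it straight to the compactor
 *		return true;		// bypass the free lists
 *	}
 *	return false;
 *
 * Because an interrupt can free pages at any moment, compact_zone_order()
 * must fully initialize capc before publishing it and hide it again before
 * reading capc->page - hence the barrier() and the WRITE_ONCE()/READ_ONCE()
 * pairs above.
 */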
2490
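/*
 * Exposed as /proc/sys/vm/extfrag_threshold. Direct compaction is skipped
 * when a zone's fragmentation index is at or below this value, i.e. when
 * an allocation failure looks like a genuine lack of free memory rather
 * than external fragmentation. Illustrative userspace usage:
 *
 *	# sysctl vm.extfrag_threshold
 *	vm.extfrag_threshold = 500
 *	# sysctl -w vm.extfrag_threshold=600
 */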
Mel Gorman5e771902010-05-24 14:32:31 -07002491int sysctl_extfrag_threshold = 500;
2492
Mel Gorman56de7262010-05-24 14:32:30 -07002493/**
2494 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
Mel Gorman56de7262010-05-24 14:32:30 -07002495 * @gfp_mask: The GFP mask of the current allocation
Vlastimil Babka1a6d53a2015-02-11 15:25:44 -08002496 * @order: The order of the current allocation
2497 * @alloc_flags: The allocation flags of the current allocation
 2498 * @ac: The context of the current allocation
Yang Shi112d2d22018-01-31 16:20:23 -08002499 * @prio: Determines how hard direct compaction should try to succeed
Vlastimil Babka64675522020-04-01 21:10:35 -07002500 * @capture: Where a pointer to the free page created by compaction is stored
Mel Gorman56de7262010-05-24 14:32:30 -07002501 *
2502 * This is the main entry point for direct page compaction.
2503 */
Michal Hockoea7ab982016-05-20 16:56:38 -07002504enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
Mel Gormanc6038442016-05-19 17:13:38 -07002505 unsigned int alloc_flags, const struct alloc_context *ac,
Mel Gorman5e1f0f02019-03-05 15:45:41 -08002506 enum compact_priority prio, struct page **capture)
Mel Gorman56de7262010-05-24 14:32:30 -07002507{
Mel Gorman56de7262010-05-24 14:32:30 -07002508 int may_perform_io = gfp_mask & __GFP_IO;
Mel Gorman56de7262010-05-24 14:32:30 -07002509 struct zoneref *z;
2510 struct zone *zone;
Michal Hocko1d4746d2016-05-20 16:56:44 -07002511 enum compact_result rc = COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07002512
Michal Hocko73e64c52016-12-14 15:04:07 -08002513 /*
 2514	 * Check if the GFP flags allow compaction - GFP_NOIO is a really
 2515	 * tricky context because the migration might require IO.
2516 */
2517 if (!may_perform_io)
Vlastimil Babka53853e22014-10-09 15:27:02 -07002518 return COMPACT_SKIPPED;
Mel Gorman56de7262010-05-24 14:32:30 -07002519
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07002520 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
Joonsoo Kim837d0262015-02-11 15:27:06 -08002521
Mel Gorman56de7262010-05-24 14:32:30 -07002522 /* Compact each zone in the list */
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002523 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
2524 ac->highest_zoneidx, ac->nodemask) {
Michal Hockoea7ab982016-05-20 16:56:38 -07002525 enum compact_result status;
Mel Gorman56de7262010-05-24 14:32:30 -07002526
Vlastimil Babkaa8e025e2016-10-07 16:57:47 -07002527 if (prio > MIN_COMPACT_PRIORITY
2528 && compaction_deferred(zone, order)) {
Michal Hocko1d4746d2016-05-20 16:56:44 -07002529 rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
Vlastimil Babka53853e22014-10-09 15:27:02 -07002530 continue;
Michal Hocko1d4746d2016-05-20 16:56:44 -07002531 }
Vlastimil Babka53853e22014-10-09 15:27:02 -07002532
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07002533 status = compact_zone_order(zone, order, gfp_mask, prio,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002534 alloc_flags, ac->highest_zoneidx, capture);
Mel Gorman56de7262010-05-24 14:32:30 -07002535 rc = max(status, rc);
2536
Vlastimil Babka7ceb0092016-10-07 16:57:44 -07002537 /* The allocation should succeed, stop compacting */
2538 if (status == COMPACT_SUCCESS) {
Vlastimil Babka53853e22014-10-09 15:27:02 -07002539 /*
2540 * We think the allocation will succeed in this zone,
2541 * but it is not certain, hence the false. The caller
2542 * will repeat this with true if allocation indeed
2543 * succeeds in this zone.
2544 */
2545 compaction_defer_reset(zone, order, false);
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07002546
Vlastimil Babkac3486f52016-07-28 15:49:30 -07002547 break;
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07002548 }
2549
Vlastimil Babkaa5508cd2016-07-28 15:49:28 -07002550 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
Vlastimil Babkac3486f52016-07-28 15:49:30 -07002551 status == COMPACT_PARTIAL_SKIPPED))
Vlastimil Babka53853e22014-10-09 15:27:02 -07002552 /*
2553 * We think that allocation won't succeed in this zone
2554 * so we defer compaction there. If it ends up
2555 * succeeding after all, it will be reset.
2556 */
2557 defer_compaction(zone, order);
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07002558
2559 /*
2560 * We might have stopped compacting due to need_resched() in
 2561	 * async compaction, or due to a pending fatal signal. In that
Vlastimil Babkac3486f52016-07-28 15:49:30 -07002562	 * case do not try further zones.
Vlastimil Babka1f9efde2014-10-09 15:27:14 -07002563 */
Vlastimil Babkac3486f52016-07-28 15:49:30 -07002564 if ((prio == COMPACT_PRIO_ASYNC && need_resched())
2565 || fatal_signal_pending(current))
2566 break;
Mel Gorman56de7262010-05-24 14:32:30 -07002567 }
2568
2569 return rc;
2570}
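/*
 * Hedged summary of the priority semantics used above: compact_zone_order()
 * maps COMPACT_PRIO_ASYNC to MIGRATE_ASYNC and every stricter priority to
 * MIGRATE_SYNC_LIGHT, and only MIN_COMPACT_PRIORITY ignores the skip hints
 * and pageblock suitability checks. The page allocator is expected to start
 * at a light priority and escalate toward MIN_COMPACT_PRIORITY as repeated
 * attempts fail.
 */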
2571
Nitin Guptafacdaa92020-08-11 18:31:00 -07002572/*
 2573 * Compact all zones within a node until each zone's fragmentation score
 2574 * drops within the proactive compaction thresholds (as determined by the
 2575 * proactiveness tunable).
 2576 *
 2577 * It is possible that the function returns before reaching the score targets
 2578 * due to various back-off conditions, such as contention on per-node or
2579 * per-zone locks.
2580 */
2581static void proactive_compact_node(pg_data_t *pgdat)
2582{
2583 int zoneid;
2584 struct zone *zone;
2585 struct compact_control cc = {
2586 .order = -1,
2587 .mode = MIGRATE_SYNC_LIGHT,
2588 .ignore_skip_hint = true,
2589 .whole_zone = true,
2590 .gfp_mask = GFP_KERNEL,
2591 .proactive_compaction = true,
2592 };
2593
2594 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
2595 zone = &pgdat->node_zones[zoneid];
2596 if (!populated_zone(zone))
2597 continue;
2598
2599 cc.zone = zone;
2600
2601 compact_zone(&cc, NULL);
2602
2603 VM_BUG_ON(!list_empty(&cc.freepages));
2604 VM_BUG_ON(!list_empty(&cc.migratepages));
2605 }
2606}
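/*
 * Hedged sketch of the score this loop drives down (the real helpers,
 * fragmentation_score_zone() and fragmentation_score_node(), are defined
 * earlier in this file): each zone contributes its external fragmentation
 * at COMPACTION_HPAGE_ORDER, weighted by its share of the node's memory,
 * roughly
 *
 *	score_zone = extfrag(zone, COMPACTION_HPAGE_ORDER) *
 *		     zone->present_pages / pgdat->node_present_pages;
 *	score_node = sum of score_zone over all populated zones;
 *
 * so 0 means no fragmentation and 100 means fully fragmented with respect
 * to huge page allocations.
 */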
Mel Gorman56de7262010-05-24 14:32:30 -07002607
Mel Gorman76ab0f52010-05-24 14:32:28 -07002608/* Compact all zones within a node */
Andrew Morton7103f162013-02-22 16:32:33 -08002609static void compact_node(int nid)
Rik van Riel7be62de2012-03-21 16:33:52 -07002610{
Vlastimil Babka791cae92016-10-07 16:57:38 -07002611 pg_data_t *pgdat = NODE_DATA(nid);
2612 int zoneid;
2613 struct zone *zone;
Rik van Riel7be62de2012-03-21 16:33:52 -07002614 struct compact_control cc = {
2615 .order = -1,
David Rientjese0b9dae2014-06-04 16:08:28 -07002616 .mode = MIGRATE_SYNC,
David Rientjes91ca9182014-04-03 14:47:23 -07002617 .ignore_skip_hint = true,
Vlastimil Babka06ed2992016-10-07 16:57:35 -07002618 .whole_zone = true,
Michal Hocko73e64c52016-12-14 15:04:07 -08002619 .gfp_mask = GFP_KERNEL,
Rik van Riel7be62de2012-03-21 16:33:52 -07002620 };
2621
Vlastimil Babka791cae92016-10-07 16:57:38 -07002622
2623 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
2624
2625 zone = &pgdat->node_zones[zoneid];
2626 if (!populated_zone(zone))
2627 continue;
2628
Vlastimil Babka791cae92016-10-07 16:57:38 -07002629 cc.zone = zone;
Vlastimil Babka791cae92016-10-07 16:57:38 -07002630
Mel Gorman5e1f0f02019-03-05 15:45:41 -08002631 compact_zone(&cc, NULL);
Vlastimil Babka791cae92016-10-07 16:57:38 -07002632
2633 VM_BUG_ON(!list_empty(&cc.freepages));
2634 VM_BUG_ON(!list_empty(&cc.migratepages));
2635 }
Rik van Riel7be62de2012-03-21 16:33:52 -07002636}
2637
Mel Gorman76ab0f52010-05-24 14:32:28 -07002638/* Compact all nodes in the system */
Jason Liu7964c062013-01-11 14:31:47 -08002639static void compact_nodes(void)
Mel Gorman76ab0f52010-05-24 14:32:28 -07002640{
2641 int nid;
2642
Hugh Dickins8575ec22012-03-21 16:33:53 -07002643 /* Flush pending updates to the LRU lists */
2644 lru_add_drain_all();
2645
Mel Gorman76ab0f52010-05-24 14:32:28 -07002646 for_each_online_node(nid)
2647 compact_node(nid);
Mel Gorman76ab0f52010-05-24 14:32:28 -07002648}
2649
 2650/* The written value is actually unused; all memory is compacted */
2651int sysctl_compact_memory;
2652
Yaowei Baifec4eb22016-01-14 15:20:09 -08002653/*
Nitin Guptafacdaa92020-08-11 18:31:00 -07002654 * Tunable for proactive compaction. It determines how
2655 * aggressively the kernel should compact memory in the
2656 * background. It takes values in the range [0, 100].
2657 */
Nitin Guptad34c0a72020-08-11 18:31:07 -07002658unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
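/*
 * Exposed as /proc/sys/vm/compaction_proactiveness. A hedged worked
 * example, assuming the watermark helper earlier in this file computes
 * low = max(100 - proactiveness, 5) and high = low + 10: with the default
 * of 20, kcompactd starts proactive compaction once a node's fragmentation
 * score exceeds 90 and works until it falls back to 80. Illustrative
 * userspace usage:
 *
 *	# echo 30 > /proc/sys/vm/compaction_proactiveness
 */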
Nitin Guptafacdaa92020-08-11 18:31:00 -07002659
2660/*
Yaowei Baifec4eb22016-01-14 15:20:09 -08002661 * This is the entry point for compacting all nodes via
2662 * /proc/sys/vm/compact_memory
2663 */
Mel Gorman76ab0f52010-05-24 14:32:28 -07002664int sysctl_compaction_handler(struct ctl_table *table, int write,
Christoph Hellwig32927392020-04-24 08:43:38 +02002665 void *buffer, size_t *length, loff_t *ppos)
Mel Gorman76ab0f52010-05-24 14:32:28 -07002666{
2667 if (write)
Jason Liu7964c062013-01-11 14:31:47 -08002668 compact_nodes();
Mel Gorman76ab0f52010-05-24 14:32:28 -07002669
2670 return 0;
2671}
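/*
 * Illustrative userspace usage - the written value does not matter, any
 * write triggers a full compaction of every online node:
 *
 *	# echo 1 > /proc/sys/vm/compact_memory
 */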
Mel Gormaned4a6d72010-05-24 14:32:29 -07002672
2673#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
Rashika Kheria74e77fb2014-04-03 14:48:01 -07002674static ssize_t sysfs_compact_node(struct device *dev,
Kay Sievers10fbcf42011-12-21 14:48:43 -08002675 struct device_attribute *attr,
Mel Gormaned4a6d72010-05-24 14:32:29 -07002676 const char *buf, size_t count)
2677{
Hugh Dickins8575ec22012-03-21 16:33:53 -07002678 int nid = dev->id;
2679
2680 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
2681 /* Flush pending updates to the LRU lists */
2682 lru_add_drain_all();
2683
2684 compact_node(nid);
2685 }
Mel Gormaned4a6d72010-05-24 14:32:29 -07002686
2687 return count;
2688}
Joe Perches0825a6f2018-06-14 15:27:58 -07002689static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node);
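/*
 * Illustrative userspace usage - compact a single node through sysfs,
 * here node 0:
 *
 *	# echo 1 > /sys/devices/system/node/node0/compact
 */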
Mel Gormaned4a6d72010-05-24 14:32:29 -07002690
2691int compaction_register_node(struct node *node)
2692{
Kay Sievers10fbcf42011-12-21 14:48:43 -08002693 return device_create_file(&node->dev, &dev_attr_compact);
Mel Gormaned4a6d72010-05-24 14:32:29 -07002694}
2695
2696void compaction_unregister_node(struct node *node)
2697{
Kay Sievers10fbcf42011-12-21 14:48:43 -08002698 return device_remove_file(&node->dev, &dev_attr_compact);
Mel Gormaned4a6d72010-05-24 14:32:29 -07002699}
2700#endif /* CONFIG_SYSFS && CONFIG_NUMA */
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01002701
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002702static inline bool kcompactd_work_requested(pg_data_t *pgdat)
2703{
Vlastimil Babka172400c2016-05-05 16:22:32 -07002704 return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002705}
2706
2707static bool kcompactd_node_suitable(pg_data_t *pgdat)
2708{
2709 int zoneid;
2710 struct zone *zone;
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002711 enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002712
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002713 for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002714 zone = &pgdat->node_zones[zoneid];
2715
2716 if (!populated_zone(zone))
2717 continue;
2718
2719 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002720 highest_zoneidx) == COMPACT_CONTINUE)
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002721 return true;
2722 }
2723
2724 return false;
2725}
2726
2727static void kcompactd_do_work(pg_data_t *pgdat)
2728{
2729 /*
 2730	 * With no special task, compact all zones so that a page of the requested
2731 * order is allocatable.
2732 */
2733 int zoneid;
2734 struct zone *zone;
2735 struct compact_control cc = {
2736 .order = pgdat->kcompactd_max_order,
Mel Gormandbe2d4e2019-03-05 15:45:31 -08002737 .search_order = pgdat->kcompactd_max_order,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002738 .highest_zoneidx = pgdat->kcompactd_highest_zoneidx,
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002739 .mode = MIGRATE_SYNC_LIGHT,
David Rientjesa0647dc2017-11-17 15:26:27 -08002740 .ignore_skip_hint = false,
Michal Hocko73e64c52016-12-14 15:04:07 -08002741 .gfp_mask = GFP_KERNEL,
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002742 };
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002743 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002744 cc.highest_zoneidx);
David Rientjes7f354a52017-02-22 15:44:50 -08002745 count_compact_event(KCOMPACTD_WAKE);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002746
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002747 for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002748 int status;
2749
2750 zone = &pgdat->node_zones[zoneid];
2751 if (!populated_zone(zone))
2752 continue;
2753
2754 if (compaction_deferred(zone, cc.order))
2755 continue;
2756
2757 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
2758 COMPACT_CONTINUE)
2759 continue;
2760
Vlastimil Babka172400c2016-05-05 16:22:32 -07002761 if (kthread_should_stop())
2762 return;
Yafang Shaoa94b5252019-09-23 15:36:54 -07002763
2764 cc.zone = zone;
Mel Gorman5e1f0f02019-03-05 15:45:41 -08002765 status = compact_zone(&cc, NULL);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002766
Vlastimil Babka7ceb0092016-10-07 16:57:44 -07002767 if (status == COMPACT_SUCCESS) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002768 compaction_defer_reset(zone, cc.order, false);
Michal Hockoc8f7de02016-05-20 16:56:47 -07002769 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002770 /*
David Rientjesbc3106b2018-04-05 16:24:02 -07002771 * Buddy pages may become stranded on pcps that could
2772 * otherwise coalesce on the zone's free area for
2773 * order >= cc.order. This is ratelimited by the
2774 * upcoming deferral.
2775 */
2776 drain_all_pages(zone);
2777
2778 /*
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002779 * We use sync migration mode here, so we defer like
2780 * sync direct compaction does.
2781 */
2782 defer_compaction(zone, cc.order);
2783 }
2784
David Rientjes7f354a52017-02-22 15:44:50 -08002785 count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
2786 cc.total_migrate_scanned);
2787 count_compact_events(KCOMPACTD_FREE_SCANNED,
2788 cc.total_free_scanned);
2789
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002790 VM_BUG_ON(!list_empty(&cc.freepages));
2791 VM_BUG_ON(!list_empty(&cc.migratepages));
2792 }
2793
2794 /*
2795 * Regardless of success, we are done until woken up next. But remember
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002796 * the requested order/highest_zoneidx in case it was higher/tighter
 2797	 * than our current ones.
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002798 */
2799 if (pgdat->kcompactd_max_order <= cc.order)
2800 pgdat->kcompactd_max_order = 0;
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002801 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx)
2802 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002803}
2804
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002805void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002806{
2807 if (!order)
2808 return;
2809
2810 if (pgdat->kcompactd_max_order < order)
2811 pgdat->kcompactd_max_order = order;
2812
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002813 if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx)
2814 pgdat->kcompactd_highest_zoneidx = highest_zoneidx;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002815
Davidlohr Bueso68186002017-10-03 16:15:03 -07002816 /*
2817 * Pairs with implicit barrier in wait_event_freezable()
2818 * such that wakeups are not missed.
2819 */
2820 if (!wq_has_sleeper(&pgdat->kcompactd_wait))
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002821 return;
2822
2823 if (!kcompactd_node_suitable(pgdat))
2824 return;
2825
2826 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002827 highest_zoneidx);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002828 wake_up_interruptible(&pgdat->kcompactd_wait);
2829}
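/*
 * Hedged usage sketch: the expected caller is kswapd (mm/vmscan.c), which
 * pokes kcompactd before sleeping so that reclaimed pages can be
 * defragmented for the order that originally woke kswapd, roughly
 *
 *	wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
 */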
2830
2831/*
2832 * The background compaction daemon, started as a kernel thread
2833 * from the init process.
2834 */
2835static int kcompactd(void *p)
2836{
2837 pg_data_t *pgdat = (pg_data_t*)p;
2838 struct task_struct *tsk = current;
Nitin Guptafacdaa92020-08-11 18:31:00 -07002839 unsigned int proactive_defer = 0;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002840
2841 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2842
2843 if (!cpumask_empty(cpumask))
2844 set_cpus_allowed_ptr(tsk, cpumask);
2845
2846 set_freezable();
2847
2848 pgdat->kcompactd_max_order = 0;
Joonsoo Kim97a225e2020-06-03 15:59:01 -07002849 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002850
2851 while (!kthread_should_stop()) {
Johannes Weinereb414682018-10-26 15:06:27 -07002852 unsigned long pflags;
2853
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002854 trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
Nitin Guptafacdaa92020-08-11 18:31:00 -07002855 if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
2856 kcompactd_work_requested(pgdat),
2857 msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC))) {
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002858
Nitin Guptafacdaa92020-08-11 18:31:00 -07002859 psi_memstall_enter(&pflags);
2860 kcompactd_do_work(pgdat);
2861 psi_memstall_leave(&pflags);
2862 continue;
2863 }
2864
2865 /* kcompactd wait timeout */
2866 if (should_proactive_compact_node(pgdat)) {
2867 unsigned int prev_score, score;
2868
2869 if (proactive_defer) {
2870 proactive_defer--;
2871 continue;
2872 }
2873 prev_score = fragmentation_score_node(pgdat);
2874 proactive_compact_node(pgdat);
2875 score = fragmentation_score_node(pgdat);
2876 /*
2877 * Defer proactive compaction if the fragmentation
 2878			 * score did not go down, i.e. no progress was made.
2879 */
2880 proactive_defer = score < prev_score ?
2881 0 : 1 << COMPACT_MAX_DEFER_SHIFT;
2882 }
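		/*
		 * Illustrative arithmetic (assuming COMPACT_MAX_DEFER_SHIFT
		 * is 6, as defined in <linux/compaction.h>): a no-progress
		 * run defers the next proactive attempt for 1 << 6 = 64
		 * timeouts, i.e. about 64 * 500ms = 32 seconds with the
		 * default HPAGE_FRAG_CHECK_INTERVAL_MSEC.
		 */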
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002883 }
2884
2885 return 0;
2886}
2887
2888/*
2889 * This kcompactd start function will be called by init and node-hot-add.
 2890 * On node-hot-add, kcompactd will be moved to the proper CPUs if CPUs are hot-added.
2891 */
2892int kcompactd_run(int nid)
2893{
2894 pg_data_t *pgdat = NODE_DATA(nid);
2895 int ret = 0;
2896
2897 if (pgdat->kcompactd)
2898 return 0;
2899
2900 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2901 if (IS_ERR(pgdat->kcompactd)) {
2902 pr_err("Failed to start kcompactd on node %d\n", nid);
2903 ret = PTR_ERR(pgdat->kcompactd);
2904 pgdat->kcompactd = NULL;
2905 }
2906 return ret;
2907}
2908
2909/*
2910 * Called by memory hotplug when all memory in a node is offlined. Caller must
2911 * hold mem_hotplug_begin/end().
2912 */
2913void kcompactd_stop(int nid)
2914{
2915 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2916
2917 if (kcompactd) {
2918 kthread_stop(kcompactd);
2919 NODE_DATA(nid)->kcompactd = NULL;
2920 }
2921}
2922
2923/*
 2924 * It's optimal to keep each kcompactd thread on the same CPUs as its
 2925 * node's memory, but that is not required for correctness. If the last
 2926 * CPU in a node goes away, the thread may run anywhere; when the first
 2927 * one comes back, restore its CPU binding.
2928 */
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002929static int kcompactd_cpu_online(unsigned int cpu)
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002930{
2931 int nid;
2932
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002933 for_each_node_state(nid, N_MEMORY) {
2934 pg_data_t *pgdat = NODE_DATA(nid);
2935 const struct cpumask *mask;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002936
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002937 mask = cpumask_of_node(pgdat->node_id);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002938
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002939 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2940 /* One of our CPUs online: restore mask */
2941 set_cpus_allowed_ptr(pgdat->kcompactd, mask);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002942 }
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002943 return 0;
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002944}
2945
2946static int __init kcompactd_init(void)
2947{
2948 int nid;
Anna-Maria Gleixnere46b1db2016-11-27 00:13:42 +01002949 int ret;
2950
2951 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2952 "mm/compaction:online",
2953 kcompactd_cpu_online, NULL);
2954 if (ret < 0) {
2955 pr_err("kcompactd: failed to register hotplug callbacks.\n");
2956 return ret;
2957 }
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002958
2959 for_each_node_state(nid, N_MEMORY)
2960 kcompactd_run(nid);
Vlastimil Babka698b1b32016-03-17 14:18:08 -07002961 return 0;
2962}
2963subsys_initcall(kcompactd_init)
2964
Michal Nazarewiczff9543f2011-12-29 13:09:50 +01002965#endif /* CONFIG_COMPACTION */