// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
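/*
 * For example, assuming pageblock_order == 9 (512-page / 2MB pageblocks,
 * the common x86-64 configuration), pageblock_start_pfn(0x12345) is 0x12200
 * and pageblock_end_pfn(0x12345) is 0x12400 (one past the block's last PFN).
 */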

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

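/*
 * Pages arrive here from isolate_freepages_block() with their original free
 * order stashed in page_private(); each one is split into order-0 pages and
 * put back on the list in place of the original high-order page.
 */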
static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION

int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear the registered address_space value while keeping the
	 * PAGE_MAPPING_MOVABLE flag, so that the VM can tell the driver
	 * released the page after isolation and migration won't try to
	 * put it back.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
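/*
 * For example, after four consecutive failed compactions for a given order,
 * compact_defer_shift is 4, so roughly 16 further attempts are skipped
 * before compaction is retried; the cap above bounds this at 64.
 */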

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
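 * A 2MB THP with 4K base pages, for instance, spans exactly one pageblock
 * on the usual configuration where pageblock_order equals HPAGE_PMD_ORDER.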
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
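	/*
	 * The loop below samples one page every 1 << PAGE_ALLOC_COSTLY_ORDER
	 * PFNs (every 8th page with the usual value of 3) rather than every
	 * page, trading a little accuracy for a much cheaper pageblock walk.
	 */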
	do {
		if (pfn_valid_within(pfn)) {
			if (check_source && PageLRU(page)) {
				clear_pageblock_skip(page);
				return true;
			}

			if (check_target && PageBuddy(page)) {
				clear_pageblock_skip(page);
				return true;
			}
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. When the scanner
	 * is found, both PageBuddy and PageLRU are checked as the pageblock
	 * is suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for read/writers. Returns true if it was already set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	bool skip;

	/* Do not update if skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

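/*
 * Advance the cached migration-scanner restart points past the pageblock
 * containing @pfn. Slot 0 of compact_cached_migrate_pfn is used by async
 * compaction and slot 1 by sync compaction, so an async pass only ever
 * advances its own restart point.
 */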
static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	pfn = pageblock_end_pfn(pfn);

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	set_pageblock_skip(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 * async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 * scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = page_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * pages (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
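 *
 * Currently this is used in strict mode from alloc_contig_range() (e.g. for
 * CMA allocations), which is why any hole in the range fails the whole call.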
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass block_end_pfn if an isolated free page
		 * is of higher order than a pageblock. In this case,
		 * adjust the scanning range to the correct block.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(pg_data_t *pgdat)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);

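	/*
	 * For instance, with 1000 active and 1000 inactive pages on this
	 * node, throttle as soon as more than 1000 pages sit isolated off
	 * the LRU lists.
	 */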
Minchan Kim | bc69304 | 2010-09-09 16:38:00 -0700 | [diff] [blame] | 760 | return isolated > (inactive + active) / 2; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 761 | } |
| 762 | |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 763 | /** |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 764 | * isolate_migratepages_block() - isolate all migrate-able pages within |
| 765 | * a single pageblock |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 766 | * @cc: Compaction control structure. |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 767 | * @low_pfn: The first PFN to isolate |
| 768 | * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock |
| 769 | * @isolate_mode: Isolation mode to be used. |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 770 | * |
| 771 | * Isolate all pages that can be migrated from the range specified by |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 772 | * [low_pfn, end_pfn). The range is expected to be within same pageblock. |
| 773 | * Returns zero if there is a fatal signal pending, otherwise PFN of the |
| 774 | * first page that was not scanned (which may be both less, equal to or more |
| 775 | * than end_pfn). |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 776 | * |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 777 | * The pages are isolated on cc->migratepages list (not required to be empty), |
| 778 | * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field |
| 779 | * is neither read nor updated. |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 780 | */ |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 781 | static unsigned long |
| 782 | isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, |
| 783 | unsigned long end_pfn, isolate_mode_t isolate_mode) |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 784 | { |
Andrey Ryabinin | 5f438ee | 2019-03-05 15:49:42 -0800 | [diff] [blame] | 785 | pg_data_t *pgdat = cc->zone->zone_pgdat; |
Mel Gorman | b7aba69 | 2011-01-13 15:45:54 -0800 | [diff] [blame] | 786 | unsigned long nr_scanned = 0, nr_isolated = 0; |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 787 | struct lruvec *lruvec; |
Xiubo Li | b8b2d82 | 2014-10-09 15:28:21 -0700 | [diff] [blame] | 788 | unsigned long flags = 0; |
Mel Gorman | 2a1402a | 2012-10-08 16:32:33 -0700 | [diff] [blame] | 789 | bool locked = false; |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 790 | struct page *page = NULL, *valid_page = NULL; |
Joonsoo Kim | e34d85f | 2015-02-11 15:27:04 -0800 | [diff] [blame] | 791 | unsigned long start_pfn = low_pfn; |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 792 | bool skip_on_failure = false; |
| 793 | unsigned long next_skip_pfn = 0; |
Mel Gorman | e380beb | 2019-03-05 15:44:58 -0800 | [diff] [blame] | 794 | bool skip_updated = false; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 795 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 796 | /* |
| 797 | * Ensure that there are not too many pages isolated from the LRU |
| 798 | * list by either parallel reclaimers or compaction. If there are, |
| 799 | * delay for some time until fewer pages are isolated |
| 800 | */ |
Andrey Ryabinin | 5f438ee | 2019-03-05 15:49:42 -0800 | [diff] [blame] | 801 | while (unlikely(too_many_isolated(pgdat))) { |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 802 | /* async migration should just abort */ |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 803 | if (cc->mode == MIGRATE_ASYNC) |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 804 | return 0; |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 805 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 806 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
| 807 | |
| 808 | if (fatal_signal_pending(current)) |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 809 | return 0; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 810 | } |
| 811 | |
Mel Gorman | cf66f07 | 2019-03-05 15:45:24 -0800 | [diff] [blame] | 812 | cond_resched(); |
David Rientjes | aeef4b8 | 2014-06-04 16:08:31 -0700 | [diff] [blame] | 813 | |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 814 | if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { |
| 815 | skip_on_failure = true; |
| 816 | next_skip_pfn = block_end_pfn(low_pfn, cc->order); |
| 817 | } |
| 818 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 819 | /* Time to isolate some pages for migration */ |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 820 | for (; low_pfn < end_pfn; low_pfn++) { |
Vlastimil Babka | 29c0dde | 2015-09-08 15:02:46 -0700 | [diff] [blame] | 821 | |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 822 | if (skip_on_failure && low_pfn >= next_skip_pfn) { |
| 823 | /* |
| 824 | * We have isolated all migration candidates in the |
| 825 | * previous order-aligned block, and did not skip it due |
| 826 | * to failure. We should migrate the pages now and |
| 827 | * hopefully succeed compaction. |
| 828 | */ |
| 829 | if (nr_isolated) |
| 830 | break; |
| 831 | |
| 832 | /* |
| 833 | * We failed to isolate in the previous order-aligned |
| 834 | * block. Set the new boundary to the end of the |
| 835 | * current block. Note we can't simply increase |
| 836 | * next_skip_pfn by 1 << order, as low_pfn might have |
| 837 | * been incremented by a higher number due to skipping |
| 838 | * a compound or a high-order buddy page in the |
| 839 | * previous loop iteration. |
| 840 | */ |
| 841 | next_skip_pfn = block_end_pfn(low_pfn, cc->order); |
| 842 | } |
| 843 | |
Vlastimil Babka | 8b44d27 | 2014-10-09 15:27:16 -0700 | [diff] [blame] | 844 | /* |
| 845 | * Periodically drop the lock (if held) regardless of its |
Mel Gorman | 670105a | 2019-08-02 21:48:51 -0700 | [diff] [blame] | 846 | * contention, to give chance to IRQs. Abort completely if |
| 847 | * a fatal signal is pending. |
Vlastimil Babka | 8b44d27 | 2014-10-09 15:27:16 -0700 | [diff] [blame] | 848 | */ |
| 849 | if (!(low_pfn % SWAP_CLUSTER_MAX) |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 850 | && compact_unlock_should_abort(&pgdat->lru_lock, |
Mel Gorman | 670105a | 2019-08-02 21:48:51 -0700 | [diff] [blame] | 851 | flags, &locked, cc)) { |
| 852 | low_pfn = 0; |
| 853 | goto fatal_pending; |
| 854 | } |
Mel Gorman | c67fe37 | 2012-08-21 16:16:17 -0700 | [diff] [blame] | 855 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 856 | if (!pfn_valid_within(low_pfn)) |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 857 | goto isolate_fail; |
Mel Gorman | b7aba69 | 2011-01-13 15:45:54 -0800 | [diff] [blame] | 858 | nr_scanned++; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 859 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 860 | page = pfn_to_page(low_pfn); |
Mel Gorman | dc90860 | 2012-02-08 17:13:38 -0800 | [diff] [blame] | 861 | |
Mel Gorman | e380beb | 2019-03-05 15:44:58 -0800 | [diff] [blame] | 862 | /* |
| 863 | * Check if the pageblock has already been marked skipped. |
| 864 | * Only the aligned PFN is checked as the caller isolates |
| 865 | * COMPACT_CLUSTER_MAX at a time so the second call must |
| 866 | * not falsely conclude that the block should be skipped. |
| 867 | */ |
| 868 | if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) { |
| 869 | if (!cc->ignore_skip_hint && get_pageblock_skip(page)) { |
| 870 | low_pfn = end_pfn; |
| 871 | goto isolate_abort; |
| 872 | } |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 873 | valid_page = page; |
Mel Gorman | e380beb | 2019-03-05 15:44:58 -0800 | [diff] [blame] | 874 | } |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 875 | |
Mel Gorman | 6c14466 | 2014-01-23 15:53:38 -0800 | [diff] [blame] | 876 | /* |
Vlastimil Babka | 99c0fd5 | 2014-10-09 15:27:23 -0700 | [diff] [blame] | 877 | * Skip if free. We read page order here without zone lock |
| 878 | * which is generally unsafe, but the race window is small and |
| 879 | * the worst thing that can happen is that we skip some |
| 880 | * potential isolation targets. |
Mel Gorman | 6c14466 | 2014-01-23 15:53:38 -0800 | [diff] [blame] | 881 | */ |
Vlastimil Babka | 99c0fd5 | 2014-10-09 15:27:23 -0700 | [diff] [blame] | 882 | if (PageBuddy(page)) { |
| 883 | unsigned long freepage_order = page_order_unsafe(page); |
| 884 | |
| 885 | /* |
| 886 | * Without lock, we cannot be sure that what we got is |
| 887 | * a valid page order. Consider only values in the |
| 888 | * valid order range to prevent low_pfn overflow. |
| 889 | */ |
| 890 | if (freepage_order > 0 && freepage_order < MAX_ORDER) |
| 891 | low_pfn += (1UL << freepage_order) - 1; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 892 | continue; |
Vlastimil Babka | 99c0fd5 | 2014-10-09 15:27:23 -0700 | [diff] [blame] | 893 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 894 | |
Mel Gorman | 9927af74 | 2011-01-13 15:45:59 -0800 | [diff] [blame] | 895 | /* |
Vlastimil Babka | 29c0dde | 2015-09-08 15:02:46 -0700 | [diff] [blame] | 896 | * Regardless of being on LRU, compound pages such as THP and |
Rik van Riel | 1da2f32 | 2020-04-01 21:10:31 -0700 | [diff] [blame] | 897 | * hugetlbfs are not to be compacted unless we are attempting |
| 898 | * an allocation much larger than the huge page size (eg CMA). |
| 899 | * We can potentially save a lot of iterations if we skip them |
| 900 | * at once. The check is racy, but we can consider only valid |
| 901 | * values and the only danger is skipping too much. |
Andrea Arcangeli | bc83501 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 902 | */ |
Rik van Riel | 1da2f32 | 2020-04-01 21:10:31 -0700 | [diff] [blame] | 903 | if (PageCompound(page) && !cc->alloc_contig) { |
David Rientjes | 21dc7e0 | 2017-11-17 15:26:30 -0800 | [diff] [blame] | 904 | const unsigned int order = compound_order(page); |
Vlastimil Babka | 29c0dde | 2015-09-08 15:02:46 -0700 | [diff] [blame] | 905 | |
Vlastimil Babka | d3c85ba | 2017-11-17 15:26:41 -0800 | [diff] [blame] | 906 | if (likely(order < MAX_ORDER)) |
David Rientjes | 21dc7e0 | 2017-11-17 15:26:30 -0800 | [diff] [blame] | 907 | low_pfn += (1UL << order) - 1; |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 908 | goto isolate_fail; |
Mel Gorman | 2a1402a | 2012-10-08 16:32:33 -0700 | [diff] [blame] | 909 | } |
| 910 | |
Minchan Kim | bda807d | 2016-07-26 15:23:05 -0700 | [diff] [blame] | 911 | /* |
| 912 | * Check may be lockless but that's ok as we recheck later. |
| 913 | * It's possible to migrate LRU and non-lru movable pages. |
| 914 | * Skip any other type of page |
| 915 | */ |
| 916 | if (!PageLRU(page)) { |
Minchan Kim | bda807d | 2016-07-26 15:23:05 -0700 | [diff] [blame] | 917 | /* |
| 918 | * __PageMovable can return false positive so we need |
| 919 | * to verify it under page_lock. |
| 920 | */ |
| 921 | if (unlikely(__PageMovable(page)) && |
| 922 | !PageIsolated(page)) { |
| 923 | if (locked) { |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 924 | spin_unlock_irqrestore(&pgdat->lru_lock, |
Minchan Kim | bda807d | 2016-07-26 15:23:05 -0700 | [diff] [blame] | 925 | flags); |
| 926 | locked = false; |
| 927 | } |
| 928 | |
Yisheng Xie | 9e5bcd6 | 2017-02-24 14:57:29 -0800 | [diff] [blame] | 929 | if (!isolate_movable_page(page, isolate_mode)) |
Minchan Kim | bda807d | 2016-07-26 15:23:05 -0700 | [diff] [blame] | 930 | goto isolate_success; |
| 931 | } |
| 932 | |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 933 | goto isolate_fail; |
Minchan Kim | bda807d | 2016-07-26 15:23:05 -0700 | [diff] [blame] | 934 | } |
Vlastimil Babka | 29c0dde | 2015-09-08 15:02:46 -0700 | [diff] [blame] | 935 | |
David Rientjes | 119d6d5 | 2014-04-03 14:48:00 -0700 | [diff] [blame] | 936 | /* |
| 937 | * Migration will fail if an anonymous page is pinned in memory, |
| 938 | * so avoid taking lru_lock and isolating it unnecessarily in an |
| 939 | * admittedly racy check. |
| 940 | */ |
| 941 | if (!page_mapping(page) && |
| 942 | page_count(page) > page_mapcount(page)) |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 943 | goto isolate_fail; |
David Rientjes | 119d6d5 | 2014-04-03 14:48:00 -0700 | [diff] [blame] | 944 | |
Michal Hocko | 73e64c5 | 2016-12-14 15:04:07 -0800 | [diff] [blame] | 945 | /* |
| 946 | * Only allow migration of anonymous pages in a GFP_NOFS context |
| 947 | * because those do not depend on fs locks. |
| 948 | */ |
| 949 | if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) |
| 950 | goto isolate_fail; |
| 951 | |
Vlastimil Babka | 69b7189 | 2014-10-09 15:27:18 -0700 | [diff] [blame] | 952 | /* If we already hold the lock, we can skip some rechecking */ |
| 953 | if (!locked) { |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 954 | locked = compact_lock_irqsave(&pgdat->lru_lock, |
Vlastimil Babka | 8b44d27 | 2014-10-09 15:27:16 -0700 | [diff] [blame] | 955 | &flags, cc); |
Mel Gorman | e380beb | 2019-03-05 15:44:58 -0800 | [diff] [blame] | 956 | |
Mel Gorman | e380beb | 2019-03-05 15:44:58 -0800 | [diff] [blame] | 957 | /* Try get exclusive access under lock */ |
| 958 | if (!skip_updated) { |
| 959 | skip_updated = true; |
| 960 | if (test_and_set_skip(cc, page, low_pfn)) |
| 961 | goto isolate_abort; |
| 962 | } |
Mel Gorman | 2a1402a | 2012-10-08 16:32:33 -0700 | [diff] [blame] | 963 | |
Vlastimil Babka | 29c0dde | 2015-09-08 15:02:46 -0700 | [diff] [blame] | 964 | /* Recheck PageLRU and PageCompound under lock */ |
Vlastimil Babka | 69b7189 | 2014-10-09 15:27:18 -0700 | [diff] [blame] | 965 | if (!PageLRU(page)) |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 966 | goto isolate_fail; |
Vlastimil Babka | 29c0dde | 2015-09-08 15:02:46 -0700 | [diff] [blame] | 967 | |
| 968 | /* |
| 969 | * The page became compound since the non-locked check, |
| 970 | * and it's on LRU. It can only be a THP so the order |
| 971 | * is safe to read and it's 0 for tail pages. |
| 972 | */ |
Rik van Riel | 1da2f32 | 2020-04-01 21:10:31 -0700 | [diff] [blame] | 973 | if (unlikely(PageCompound(page) && !cc->alloc_contig)) { |
Matthew Wilcox (Oracle) | d8c6546 | 2019-09-23 15:34:30 -0700 | [diff] [blame] | 974 | low_pfn += compound_nr(page) - 1; |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 975 | goto isolate_fail; |
Vlastimil Babka | 69b7189 | 2014-10-09 15:27:18 -0700 | [diff] [blame] | 976 | } |
Andrea Arcangeli | bc83501 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 977 | } |
| 978 | |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 979 | lruvec = mem_cgroup_page_lruvec(page, pgdat); |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 980 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 981 | /* Try isolate the page */ |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 982 | if (__isolate_lru_page(page, isolate_mode) != 0) |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 983 | goto isolate_fail; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 984 | |
Rik van Riel | 1da2f32 | 2020-04-01 21:10:31 -0700 | [diff] [blame] | 985 | /* The whole page is taken off the LRU; skip the tail pages. */ |
| 986 | if (PageCompound(page)) |
| 987 | low_pfn += compound_nr(page) - 1; |
Andrea Arcangeli | bc83501 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 988 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 989 | /* Successfully isolated */ |
Hugh Dickins | fa9add6 | 2012-05-29 15:07:09 -0700 | [diff] [blame] | 990 | del_page_from_lru_list(page, lruvec, page_lru(page)); |
Rik van Riel | 1da2f32 | 2020-04-01 21:10:31 -0700 | [diff] [blame] | 991 | mod_node_page_state(page_pgdat(page), |
Huang Ying | 9de4f22 | 2020-04-06 20:04:41 -0700 | [diff] [blame^] | 992 | NR_ISOLATED_ANON + page_is_file_lru(page), |
Rik van Riel | 1da2f32 | 2020-04-01 21:10:31 -0700 | [diff] [blame] | 993 | hpage_nr_pages(page)); |
Joonsoo Kim | b6c7501 | 2014-04-07 15:37:07 -0700 | [diff] [blame] | 994 | |
| 995 | isolate_success: |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 996 | list_add(&page->lru, &cc->migratepages); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 997 | cc->nr_migratepages++; |
Mel Gorman | b7aba69 | 2011-01-13 15:45:54 -0800 | [diff] [blame] | 998 | nr_isolated++; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 999 | |
Mel Gorman | 804d312 | 2019-03-05 15:45:07 -0800 | [diff] [blame] | 1000 | /* |
| 1001 | * Avoid isolating too much unless this block is being |
Mel Gorman | cb2dcaf | 2019-03-05 15:45:11 -0800 | [diff] [blame] | 1002 | * rescanned (e.g. dirty/writeback pages, parallel allocation) |
| 1003 | * or a lock is contended. For contention, isolate quickly to |
| 1004 | * potentially remove one source of contention. |
Mel Gorman | 804d312 | 2019-03-05 15:45:07 -0800 | [diff] [blame] | 1005 | */ |
Mel Gorman | cb2dcaf | 2019-03-05 15:45:11 -0800 | [diff] [blame] | 1006 | if (cc->nr_migratepages == COMPACT_CLUSTER_MAX && |
| 1007 | !cc->rescan && !cc->contended) { |
Hillf Danton | 31b8384 | 2012-01-10 15:07:59 -0800 | [diff] [blame] | 1008 | ++low_pfn; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1009 | break; |
Hillf Danton | 31b8384 | 2012-01-10 15:07:59 -0800 | [diff] [blame] | 1010 | } |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 1011 | |
| 1012 | continue; |
| 1013 | isolate_fail: |
| 1014 | if (!skip_on_failure) |
| 1015 | continue; |
| 1016 | |
| 1017 | /* |
| 1018 | * We have isolated some pages, but then failed. Release them |
| 1019 | * instead of migrating, as we cannot form the cc->order buddy |
| 1020 | * page anyway. |
| 1021 | */ |
| 1022 | if (nr_isolated) { |
| 1023 | if (locked) { |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 1024 | spin_unlock_irqrestore(&pgdat->lru_lock, flags); |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 1025 | locked = false; |
| 1026 | } |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 1027 | putback_movable_pages(&cc->migratepages); |
| 1028 | cc->nr_migratepages = 0; |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 1029 | nr_isolated = 0; |
| 1030 | } |
| 1031 | |
| 1032 | if (low_pfn < next_skip_pfn) { |
| 1033 | low_pfn = next_skip_pfn - 1; |
| 1034 | /* |
| 1035 | * The check near the loop beginning would have updated |
| 1036 | * next_skip_pfn too, but this is a bit simpler. |
| 1037 | */ |
| 1038 | next_skip_pfn += 1UL << cc->order; |
| 1039 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1040 | } |
| 1041 | |
Vlastimil Babka | 99c0fd5 | 2014-10-09 15:27:23 -0700 | [diff] [blame] | 1042 | /* |
| 1043 | * The PageBuddy() check could have potentially brought us outside |
| 1044 | * the range to be scanned. |
| 1045 | */ |
| 1046 | if (unlikely(low_pfn > end_pfn)) |
| 1047 | low_pfn = end_pfn; |
| 1048 | |
Mel Gorman | e380beb | 2019-03-05 15:44:58 -0800 | [diff] [blame] | 1049 | isolate_abort: |
Mel Gorman | c67fe37 | 2012-08-21 16:16:17 -0700 | [diff] [blame] | 1050 | if (locked) |
Andrey Ryabinin | f4b7e27 | 2019-03-05 15:49:39 -0800 | [diff] [blame] | 1051 | spin_unlock_irqrestore(&pgdat->lru_lock, flags); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1052 | |
Vlastimil Babka | 50b5b09 | 2014-01-21 15:51:10 -0800 | [diff] [blame] | 1053 | /* |
Mel Gorman | 804d312 | 2019-03-05 15:45:07 -0800 | [diff] [blame] | 1054 | * Update the cached scanner pfn once the pageblock has been scanned. |
 | 1055 | * Pages will either have been migrated, in which case there is no point |
 | 1056 | * scanning in the near future, or migration failed, in which case the |
 | 1057 | * failure reason may persist. The block is marked for skipping if |
| 1058 | * there were no pages isolated in the block or if the block is |
| 1059 | * rescanned twice in a row. |
Vlastimil Babka | 50b5b09 | 2014-01-21 15:51:10 -0800 | [diff] [blame] | 1060 | */ |
Mel Gorman | 804d312 | 2019-03-05 15:45:07 -0800 | [diff] [blame] | 1061 | if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) { |
Mel Gorman | e380beb | 2019-03-05 15:44:58 -0800 | [diff] [blame] | 1062 | if (valid_page && !skip_updated) |
| 1063 | set_pageblock_skip(valid_page); |
| 1064 | update_cached_migrate(cc, low_pfn); |
| 1065 | } |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 1066 | |
Joonsoo Kim | e34d85f | 2015-02-11 15:27:04 -0800 | [diff] [blame] | 1067 | trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, |
| 1068 | nr_scanned, nr_isolated); |
Mel Gorman | b7aba69 | 2011-01-13 15:45:54 -0800 | [diff] [blame] | 1069 | |
Mel Gorman | 670105a | 2019-08-02 21:48:51 -0700 | [diff] [blame] | 1070 | fatal_pending: |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 1071 | cc->total_migrate_scanned += nr_scanned; |
Mel Gorman | 397487d | 2012-10-19 12:00:10 +0100 | [diff] [blame] | 1072 | if (nr_isolated) |
Minchan Kim | 010fc29 | 2012-12-20 15:05:06 -0800 | [diff] [blame] | 1073 | count_compact_events(COMPACTISOLATED, nr_isolated); |
Mel Gorman | 397487d | 2012-10-19 12:00:10 +0100 | [diff] [blame] | 1074 | |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 1075 | return low_pfn; |
| 1076 | } |
| 1077 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1078 | /** |
| 1079 | * isolate_migratepages_range() - isolate migrate-able pages in a PFN range |
| 1080 | * @cc: Compaction control structure. |
| 1081 | * @start_pfn: The first PFN to start isolating. |
| 1082 | * @end_pfn: The one-past-last PFN. |
| 1083 | * |
| 1084 | * Returns zero if isolation fails fatally due to e.g. a pending signal. |
| 1085 | * Otherwise, the function returns the one-past-the-last PFN of the isolated |
| 1086 | * pages (which may be greater than end_pfn if the range ended in the middle of a THP). |
| 1087 | */ |
| 1088 | unsigned long |
| 1089 | isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, |
| 1090 | unsigned long end_pfn) |
| 1091 | { |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1092 | unsigned long pfn, block_start_pfn, block_end_pfn; |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1093 | |
| 1094 | /* Scan block by block. First and last block may be incomplete */ |
| 1095 | pfn = start_pfn; |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1096 | block_start_pfn = pageblock_start_pfn(pfn); |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1097 | if (block_start_pfn < cc->zone->zone_start_pfn) |
| 1098 | block_start_pfn = cc->zone->zone_start_pfn; |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1099 | block_end_pfn = pageblock_end_pfn(pfn); |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1100 | |
| 1101 | for (; pfn < end_pfn; pfn = block_end_pfn, |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1102 | block_start_pfn = block_end_pfn, |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1103 | block_end_pfn += pageblock_nr_pages) { |
| 1104 | |
| 1105 | block_end_pfn = min(block_end_pfn, end_pfn); |
| 1106 | |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1107 | if (!pageblock_pfn_to_page(block_start_pfn, |
| 1108 | block_end_pfn, cc->zone)) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1109 | continue; |
| 1110 | |
| 1111 | pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, |
| 1112 | ISOLATE_UNEVICTABLE); |
| 1113 | |
Hugh Dickins | 14af4a5 | 2016-05-05 16:22:15 -0700 | [diff] [blame] | 1114 | if (!pfn) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1115 | break; |
Joonsoo Kim | 6ea41c0 | 2014-10-29 14:50:20 -0700 | [diff] [blame] | 1116 | |
| 1117 | if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) |
| 1118 | break; |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1119 | } |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1120 | |
| 1121 | return pfn; |
| 1122 | } |
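/*
 * Usage note (illustrative, based on callers outside this excerpt): this
 * entry point is not used by the compaction scanners below; it serves
 * contiguous range allocators such as alloc_contig_range()/CMA, which must
 * empty an explicit PFN range rather than follow the cached scanner
 * positions.
 */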
| 1123 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1124 | #endif /* CONFIG_COMPACTION || CONFIG_CMA */ |
| 1125 | #ifdef CONFIG_COMPACTION |
Andrew Morton | 018e9a4 | 2015-04-15 16:15:20 -0700 | [diff] [blame] | 1126 | |
Vlastimil Babka | b682deb | 2017-05-08 15:54:43 -0700 | [diff] [blame] | 1127 | static bool suitable_migration_source(struct compact_control *cc, |
| 1128 | struct page *page) |
| 1129 | { |
Vlastimil Babka | 282722b | 2017-05-08 15:54:49 -0700 | [diff] [blame] | 1130 | int block_mt; |
| 1131 | |
Mel Gorman | 9bebefd | 2019-03-05 15:45:14 -0800 | [diff] [blame] | 1132 | if (pageblock_skip_persistent(page)) |
| 1133 | return false; |
| 1134 | |
Vlastimil Babka | 282722b | 2017-05-08 15:54:49 -0700 | [diff] [blame] | 1135 | if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) |
Vlastimil Babka | b682deb | 2017-05-08 15:54:43 -0700 | [diff] [blame] | 1136 | return true; |
| 1137 | |
Vlastimil Babka | 282722b | 2017-05-08 15:54:49 -0700 | [diff] [blame] | 1138 | block_mt = get_pageblock_migratetype(page); |
| 1139 | |
| 1140 | if (cc->migratetype == MIGRATE_MOVABLE) |
| 1141 | return is_migrate_movable(block_mt); |
| 1142 | else |
| 1143 | return block_mt == cc->migratetype; |
Vlastimil Babka | b682deb | 2017-05-08 15:54:43 -0700 | [diff] [blame] | 1144 | } |
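/*
 * For illustration: during async direct compaction for a MIGRATE_UNMOVABLE
 * allocation, only MIGRATE_UNMOVABLE pageblocks are accepted as migration
 * sources, while a MIGRATE_MOVABLE request also accepts MIGRATE_CMA blocks
 * because is_migrate_movable() covers both. Every other caller (sync modes,
 * kcompactd) may use any pageblock that is not persistently skipped.
 */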
| 1145 | |
Andrew Morton | 018e9a4 | 2015-04-15 16:15:20 -0700 | [diff] [blame] | 1146 | /* Returns true if the page is within a block suitable for migration to */ |
Vlastimil Babka | 9f7e338 | 2016-10-07 17:00:37 -0700 | [diff] [blame] | 1147 | static bool suitable_migration_target(struct compact_control *cc, |
| 1148 | struct page *page) |
Andrew Morton | 018e9a4 | 2015-04-15 16:15:20 -0700 | [diff] [blame] | 1149 | { |
| 1150 | /* If the page is a large free page, then disallow migration */ |
| 1151 | if (PageBuddy(page)) { |
| 1152 | /* |
| 1153 | * We are checking page_order without zone->lock taken. But |
| 1154 | * the only small danger is that we skip a potentially suitable |
| 1155 | * pageblock, so it's not worth checking the order for a valid range. |
| 1156 | */ |
| 1157 | if (page_order_unsafe(page) >= pageblock_order) |
| 1158 | return false; |
| 1159 | } |
| 1160 | |
Yisheng Xie | 1ef36db | 2017-05-03 14:53:54 -0700 | [diff] [blame] | 1161 | if (cc->ignore_block_suitable) |
| 1162 | return true; |
| 1163 | |
Andrew Morton | 018e9a4 | 2015-04-15 16:15:20 -0700 | [diff] [blame] | 1164 | /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ |
Vlastimil Babka | b682deb | 2017-05-08 15:54:43 -0700 | [diff] [blame] | 1165 | if (is_migrate_movable(get_pageblock_migratetype(page))) |
Andrew Morton | 018e9a4 | 2015-04-15 16:15:20 -0700 | [diff] [blame] | 1166 | return true; |
| 1167 | |
| 1168 | /* Otherwise skip the block */ |
| 1169 | return false; |
| 1170 | } |
| 1171 | |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1172 | static inline unsigned int |
| 1173 | freelist_scan_limit(struct compact_control *cc) |
| 1174 | { |
Qian Cai | dd7ef7b | 2019-05-13 17:17:38 -0700 | [diff] [blame] | 1175 | unsigned short shift = BITS_PER_LONG - 1; |
| 1176 | |
| 1177 | return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1178 | } |
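/*
 * Worked example (assuming COMPACT_CLUSTER_MAX == SWAP_CLUSTER_MAX == 32):
 * a fast_search_fail count of 0 gives a limit of 33, then 17, 9, 5, 3, 2,
 * and 1 for six or more failures, so repeated fast-search failures quickly
 * shrink the freelist search to a token effort before the linear scanners
 * take over.
 */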
| 1179 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1180 | /* |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1181 | * Test whether the free scanner has reached the same or lower pageblock than |
| 1182 | * the migration scanner, and compaction should thus terminate. |
| 1183 | */ |
| 1184 | static inline bool compact_scanners_met(struct compact_control *cc) |
| 1185 | { |
| 1186 | return (cc->free_pfn >> pageblock_order) |
| 1187 | <= (cc->migrate_pfn >> pageblock_order); |
| 1188 | } |
| 1189 | |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1190 | /* |
| 1191 | * Used when scanning for a suitable migration target, where the freelists |
| 1192 | * are scanned in reverse. Reorders the list so that the unscanned pages are |
| 1193 | * scanned first on the next iteration of the free scanner. |
| 1194 | */ |
| 1195 | static void |
| 1196 | move_freelist_head(struct list_head *freelist, struct page *freepage) |
| 1197 | { |
| 1198 | LIST_HEAD(sublist); |
| 1199 | |
| 1200 | if (!list_is_last(freelist, &freepage->lru)) { |
| 1201 | list_cut_before(&sublist, freelist, &freepage->lru); |
| 1202 | if (!list_empty(&sublist)) |
| 1203 | list_splice_tail(&sublist, freelist); |
| 1204 | } |
| 1205 | } |
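/*
 * Illustrative sketch: given a freelist A-B-C-D-E walked in reverse with the
 * scan stopping at C, the entries before C (A and B) are cut off and spliced
 * to the tail, giving C-D-E-A-B; the next reverse walk then visits the
 * unscanned B and A before revisiting E, D and C.
 */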
| 1206 | |
| 1207 | /* |
| 1208 | * Similar to move_freelist_head except used by the migration scanner |
| 1209 | * when scanning forward. It's possible for these list operations to |
| 1210 | * move against each other if they search the free list exactly in |
| 1211 | * lockstep. |
| 1212 | */ |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1213 | static void |
| 1214 | move_freelist_tail(struct list_head *freelist, struct page *freepage) |
| 1215 | { |
| 1216 | LIST_HEAD(sublist); |
| 1217 | |
| 1218 | if (!list_is_first(freelist, &freepage->lru)) { |
| 1219 | list_cut_position(&sublist, freelist, &freepage->lru); |
| 1220 | if (!list_empty(&sublist)) |
| 1221 | list_splice_tail(&sublist, freelist); |
| 1222 | } |
| 1223 | } |
| 1224 | |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1225 | static void |
| 1226 | fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated) |
| 1227 | { |
| 1228 | unsigned long start_pfn, end_pfn; |
| 1229 | struct page *page = pfn_to_page(pfn); |
| 1230 | |
| 1231 | /* Do not search around if there are enough pages already */ |
| 1232 | if (cc->nr_freepages >= cc->nr_migratepages) |
| 1233 | return; |
| 1234 | |
| 1235 | /* Minimise scanning during async compaction */ |
| 1236 | if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) |
| 1237 | return; |
| 1238 | |
| 1239 | /* Pageblock boundaries */ |
| 1240 | start_pfn = pageblock_start_pfn(pfn); |
Mel Gorman | 60fce36 | 2019-05-17 14:31:41 -0700 | [diff] [blame] | 1241 | end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1; |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1242 | |
| 1243 | /* Scan before */ |
| 1244 | if (start_pfn != pfn) { |
Mel Gorman | 4fca973 | 2019-03-05 15:45:34 -0800 | [diff] [blame] | 1245 | isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false); |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1246 | if (cc->nr_freepages >= cc->nr_migratepages) |
| 1247 | return; |
| 1248 | } |
| 1249 | |
| 1250 | /* Scan after */ |
| 1251 | start_pfn = pfn + nr_isolated; |
Mel Gorman | 60fce36 | 2019-05-17 14:31:41 -0700 | [diff] [blame] | 1252 | if (start_pfn < end_pfn) |
Mel Gorman | 4fca973 | 2019-03-05 15:45:34 -0800 | [diff] [blame] | 1253 | isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1254 | |
| 1255 | /* Skip this pageblock in the future as it's full or nearly full */ |
| 1256 | if (cc->nr_freepages < cc->nr_migratepages) |
| 1257 | set_pageblock_skip(page); |
| 1258 | } |
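/*
 * For illustration: if the fast search isolated nr_isolated pages at pfn P
 * in the middle of a pageblock, the remainder of the block is scanned in
 * two pieces, [block start, P) first and [P + nr_isolated, block end) only
 * if more freepages are still needed.
 */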
| 1259 | |
Mel Gorman | dbe2d4e | 2019-03-05 15:45:31 -0800 | [diff] [blame] | 1260 | /* Search orders in round-robin fashion */ |
| 1261 | static int next_search_order(struct compact_control *cc, int order) |
| 1262 | { |
| 1263 | order--; |
| 1264 | if (order < 0) |
| 1265 | order = cc->order - 1; |
| 1266 | |
| 1267 | /* Search wrapped around? */ |
| 1268 | if (order == cc->search_order) { |
| 1269 | cc->search_order--; |
| 1270 | if (cc->search_order < 0) |
| 1271 | cc->search_order = cc->order - 1; |
| 1272 | return -1; |
| 1273 | } |
| 1274 | |
| 1275 | return order; |
| 1276 | } |
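/*
 * Illustrative walk (assumed values): with cc->order == 9 and
 * cc->search_order == 4, the caller's loop visits orders 4, 3, 2, 1, 0,
 * wraps to 8, 7, 6, 5, and arriving back at 4 is treated as a full wrap
 * that ends the search with -1.
 */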
| 1277 | |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1278 | static unsigned long |
| 1279 | fast_isolate_freepages(struct compact_control *cc) |
| 1280 | { |
| 1281 | unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1); |
| 1282 | unsigned int nr_scanned = 0; |
| 1283 | unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0; |
| 1284 | unsigned long nr_isolated = 0; |
| 1285 | unsigned long distance; |
| 1286 | struct page *page = NULL; |
| 1287 | bool scan_start = false; |
| 1288 | int order; |
| 1289 | |
| 1290 | /* Full compaction passes in a negative order */ |
| 1291 | if (cc->order <= 0) |
| 1292 | return cc->free_pfn; |
| 1293 | |
| 1294 | /* |
| 1295 | * If starting the scan, use a deeper search and use the highest |
| 1296 | * PFN found if a suitable one is not found. |
| 1297 | */ |
Mel Gorman | e332f74 | 2019-03-05 15:45:38 -0800 | [diff] [blame] | 1298 | if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1299 | limit = pageblock_nr_pages >> 1; |
| 1300 | scan_start = true; |
| 1301 | } |
| 1302 | |
| 1303 | /* |
| 1304 | * Preferred point is in the top quarter of the scan space but take |
| 1305 | * a pfn from the top half if the search is problematic. |
| 1306 | */ |
| 1307 | distance = (cc->free_pfn - cc->migrate_pfn); |
| 1308 | low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); |
| 1309 | min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); |
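	/*
	 * Worked example (assumed PFNs): with migrate_pfn == 0x10000 and
	 * free_pfn == 0x90000 the distance is 0x80000, so low_pfn starts a
	 * quarter of the way back from the free scanner (around 0x70000) and
	 * min_pfn half way back (around 0x50000), each rounded down to a
	 * pageblock boundary.
	 */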
| 1310 | |
| 1311 | if (WARN_ON_ONCE(min_pfn > low_pfn)) |
| 1312 | low_pfn = min_pfn; |
| 1313 | |
Mel Gorman | dbe2d4e | 2019-03-05 15:45:31 -0800 | [diff] [blame] | 1314 | /* |
| 1315 | * Search starts from the last successful isolation order or the next |
| 1316 | * order to search after a previous failure |
| 1317 | */ |
| 1318 | cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); |
| 1319 | |
| 1320 | for (order = cc->search_order; |
| 1321 | !page && order >= 0; |
| 1322 | order = next_search_order(cc, order)) { |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1323 | struct free_area *area = &cc->zone->free_area[order]; |
| 1324 | struct list_head *freelist; |
| 1325 | struct page *freepage; |
| 1326 | unsigned long flags; |
| 1327 | unsigned int order_scanned = 0; |
| 1328 | |
| 1329 | if (!area->nr_free) |
| 1330 | continue; |
| 1331 | |
| 1332 | spin_lock_irqsave(&cc->zone->lock, flags); |
| 1333 | freelist = &area->free_list[MIGRATE_MOVABLE]; |
| 1334 | list_for_each_entry_reverse(freepage, freelist, lru) { |
| 1335 | unsigned long pfn; |
| 1336 | |
| 1337 | order_scanned++; |
| 1338 | nr_scanned++; |
| 1339 | pfn = page_to_pfn(freepage); |
| 1340 | |
| 1341 | if (pfn >= highest) |
| 1342 | highest = pageblock_start_pfn(pfn); |
| 1343 | |
| 1344 | if (pfn >= low_pfn) { |
| 1345 | cc->fast_search_fail = 0; |
Mel Gorman | dbe2d4e | 2019-03-05 15:45:31 -0800 | [diff] [blame] | 1346 | cc->search_order = order; |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1347 | page = freepage; |
| 1348 | break; |
| 1349 | } |
| 1350 | |
| 1351 | if (pfn >= min_pfn && pfn > high_pfn) { |
| 1352 | high_pfn = pfn; |
| 1353 | |
| 1354 | /* Shorten the scan if a candidate is found */ |
| 1355 | limit >>= 1; |
| 1356 | } |
| 1357 | |
| 1358 | if (order_scanned >= limit) |
| 1359 | break; |
| 1360 | } |
| 1361 | |
| 1362 | /* Use a minimum pfn if a preferred one was not found */ |
| 1363 | if (!page && high_pfn) { |
| 1364 | page = pfn_to_page(high_pfn); |
| 1365 | |
| 1366 | /* Update freepage for the list reorder below */ |
| 1367 | freepage = page; |
| 1368 | } |
| 1369 | |
| 1370 | /* Reorder so that a future search skips recently scanned pages */ |
| 1371 | move_freelist_head(freelist, freepage); |
| 1372 | |
| 1373 | /* Isolate the page if available */ |
| 1374 | if (page) { |
| 1375 | if (__isolate_free_page(page, order)) { |
| 1376 | set_page_private(page, order); |
| 1377 | nr_isolated = 1 << order; |
| 1378 | cc->nr_freepages += nr_isolated; |
| 1379 | list_add_tail(&page->lru, &cc->freepages); |
| 1380 | count_compact_events(COMPACTISOLATED, nr_isolated); |
| 1381 | } else { |
| 1382 | /* If isolation fails, abort the search */ |
Qian Cai | 5b56d99 | 2019-04-04 11:54:41 +0100 | [diff] [blame] | 1383 | order = cc->search_order + 1; |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1384 | page = NULL; |
| 1385 | } |
| 1386 | } |
| 1387 | |
| 1388 | spin_unlock_irqrestore(&cc->zone->lock, flags); |
| 1389 | |
| 1390 | /* |
| 1391 | * Smaller scan on the next order so the total scan is related |
| 1392 | * to freelist_scan_limit. |
| 1393 | */ |
| 1394 | if (order_scanned >= limit) |
| 1395 | limit = max(1U, limit >> 1); |
| 1396 | } |
| 1397 | |
| 1398 | if (!page) { |
| 1399 | cc->fast_search_fail++; |
| 1400 | if (scan_start) { |
| 1401 | /* |
| 1402 | * Use the highest PFN found above min. If one was |
| 1403 | * not found, be pessimistic for direct compaction |
| 1404 | * and use the min mark. |
| 1405 | */ |
| 1406 | if (highest) { |
| 1407 | page = pfn_to_page(highest); |
| 1408 | cc->free_pfn = highest; |
| 1409 | } else { |
Suzuki K Poulose | e577c8b | 2019-05-31 22:30:59 -0700 | [diff] [blame] | 1410 | if (cc->direct_compaction && pfn_valid(min_pfn)) { |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1411 | page = pfn_to_page(min_pfn); |
| 1412 | cc->free_pfn = min_pfn; |
| 1413 | } |
| 1414 | } |
| 1415 | } |
| 1416 | } |
| 1417 | |
Mel Gorman | d097a6f | 2019-03-05 15:45:28 -0800 | [diff] [blame] | 1418 | if (highest && highest >= cc->zone->compact_cached_free_pfn) { |
| 1419 | highest -= pageblock_nr_pages; |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1420 | cc->zone->compact_cached_free_pfn = highest; |
Mel Gorman | d097a6f | 2019-03-05 15:45:28 -0800 | [diff] [blame] | 1421 | } |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1422 | |
| 1423 | cc->total_free_scanned += nr_scanned; |
| 1424 | if (!page) |
| 1425 | return cc->free_pfn; |
| 1426 | |
| 1427 | low_pfn = page_to_pfn(page); |
| 1428 | fast_isolate_around(cc, low_pfn, nr_isolated); |
| 1429 | return low_pfn; |
| 1430 | } |
| 1431 | |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1432 | /* |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1433 | * Based on information in the current compact_control, find blocks |
| 1434 | * suitable for isolating free pages from and then isolate them. |
| 1435 | */ |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1436 | static void isolate_freepages(struct compact_control *cc) |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1437 | { |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1438 | struct zone *zone = cc->zone; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1439 | struct page *page; |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 1440 | unsigned long block_start_pfn; /* start of current pageblock */ |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1441 | unsigned long isolate_start_pfn; /* exact pfn we start at */ |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 1442 | unsigned long block_end_pfn; /* end of current pageblock */ |
| 1443 | unsigned long low_pfn; /* lowest pfn scanner is able to scan */ |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1444 | struct list_head *freelist = &cc->freepages; |
Mel Gorman | 4fca973 | 2019-03-05 15:45:34 -0800 | [diff] [blame] | 1445 | unsigned int stride; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1446 | |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1447 | /* Try a small search of the free lists for a candidate */ |
| 1448 | isolate_start_pfn = fast_isolate_freepages(cc); |
| 1449 | if (cc->nr_freepages) |
| 1450 | goto splitmap; |
| 1451 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1452 | /* |
| 1453 | * Initialise the free scanner. The starting point is where we last |
Vlastimil Babka | 49e068f | 2014-05-06 12:50:03 -0700 | [diff] [blame] | 1454 | * successfully isolated from, zone-cached value, or the end of the |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1455 | * zone when isolating for the first time. For looping we also need |
| 1456 | * this pfn aligned down to the pageblock boundary, because we do |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 1457 | * block_start_pfn -= pageblock_nr_pages in the for loop. |
| 1458 | * For ending point, take care when isolating in last pageblock of a |
| 1459 | * a zone which ends in the middle of a pageblock. |
Vlastimil Babka | 49e068f | 2014-05-06 12:50:03 -0700 | [diff] [blame] | 1460 | * The low boundary is the end of the pageblock the migration scanner |
| 1461 | * is using. |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1462 | */ |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1463 | isolate_start_pfn = cc->free_pfn; |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1464 | block_start_pfn = pageblock_start_pfn(isolate_start_pfn); |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 1465 | block_end_pfn = min(block_start_pfn + pageblock_nr_pages, |
| 1466 | zone_end_pfn(zone)); |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1467 | low_pfn = pageblock_end_pfn(cc->migrate_pfn); |
Mel Gorman | 4fca973 | 2019-03-05 15:45:34 -0800 | [diff] [blame] | 1468 | stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1469 | |
| 1470 | /* |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1471 | * Isolate free pages until enough are available to migrate the |
| 1472 | * pages on cc->migratepages. We stop searching if the migrate |
| 1473 | * and free page scanners meet or enough free pages are isolated. |
| 1474 | */ |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1475 | for (; block_start_pfn >= low_pfn; |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 1476 | block_end_pfn = block_start_pfn, |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1477 | block_start_pfn -= pageblock_nr_pages, |
| 1478 | isolate_start_pfn = block_start_pfn) { |
Mel Gorman | 4fca973 | 2019-03-05 15:45:34 -0800 | [diff] [blame] | 1479 | unsigned long nr_isolated; |
| 1480 | |
David Rientjes | f6ea3ad | 2013-09-30 13:45:03 -0700 | [diff] [blame] | 1481 | /* |
| 1482 | * This can iterate a massively long zone without finding any |
Mel Gorman | cb810ad | 2019-03-05 15:45:21 -0800 | [diff] [blame] | 1483 | * suitable migration targets, so periodically check if we need to reschedule. |
David Rientjes | f6ea3ad | 2013-09-30 13:45:03 -0700 | [diff] [blame] | 1484 | */ |
Mel Gorman | cb810ad | 2019-03-05 15:45:21 -0800 | [diff] [blame] | 1485 | if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))) |
Mel Gorman | cf66f07 | 2019-03-05 15:45:24 -0800 | [diff] [blame] | 1486 | cond_resched(); |
David Rientjes | f6ea3ad | 2013-09-30 13:45:03 -0700 | [diff] [blame] | 1487 | |
Vlastimil Babka | 7d49d88 | 2014-10-09 15:27:11 -0700 | [diff] [blame] | 1488 | page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, |
| 1489 | zone); |
| 1490 | if (!page) |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1491 | continue; |
| 1492 | |
| 1493 | /* Check the block is suitable for migration */ |
Vlastimil Babka | 9f7e338 | 2016-10-07 17:00:37 -0700 | [diff] [blame] | 1494 | if (!suitable_migration_target(cc, page)) |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1495 | continue; |
Linus Torvalds | 68e3e92 | 2012-06-03 20:05:57 -0700 | [diff] [blame] | 1496 | |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 1497 | /* If isolation recently failed, do not retry */ |
| 1498 | if (!isolation_suitable(cc, page)) |
| 1499 | continue; |
| 1500 | |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 1501 | /* Found a block suitable for isolating free pages from. */ |
Mel Gorman | 4fca973 | 2019-03-05 15:45:34 -0800 | [diff] [blame] | 1502 | nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, |
| 1503 | block_end_pfn, freelist, stride, false); |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1504 | |
Mel Gorman | d097a6f | 2019-03-05 15:45:28 -0800 | [diff] [blame] | 1505 | /* Update the skip hint if the full pageblock was scanned */ |
| 1506 | if (isolate_start_pfn == block_end_pfn) |
| 1507 | update_pageblock_skip(cc, page, block_start_pfn); |
| 1508 | |
Mel Gorman | cb2dcaf | 2019-03-05 15:45:11 -0800 | [diff] [blame] | 1509 | /* Are enough freepages isolated? */ |
| 1510 | if (cc->nr_freepages >= cc->nr_migratepages) { |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1511 | if (isolate_start_pfn >= block_end_pfn) { |
| 1512 | /* |
| 1513 | * Restart at previous pageblock if more |
| 1514 | * freepages can be isolated next time. |
| 1515 | */ |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1516 | isolate_start_pfn = |
| 1517 | block_start_pfn - pageblock_nr_pages; |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1518 | } |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 1519 | break; |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1520 | } else if (isolate_start_pfn < block_end_pfn) { |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1521 | /* |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1522 | * If isolation failed early, do not continue |
| 1523 | * needlessly. |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1524 | */ |
David Rientjes | a46cbf3 | 2016-07-14 12:06:50 -0700 | [diff] [blame] | 1525 | break; |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1526 | } |
Mel Gorman | 4fca973 | 2019-03-05 15:45:34 -0800 | [diff] [blame] | 1527 | |
| 1528 | /* Adjust stride depending on isolation */ |
| 1529 | if (nr_isolated) { |
| 1530 | stride = 1; |
| 1531 | continue; |
| 1532 | } |
| 1533 | stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1); |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 1534 | } |
| 1535 | |
Vlastimil Babka | 7ed695e | 2014-01-21 15:51:09 -0800 | [diff] [blame] | 1536 | /* |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1537 | * Record where the free scanner will restart next time. Either we |
| 1538 | * broke from the loop and set isolate_start_pfn based on the last |
| 1539 | * call to isolate_freepages_block(), or we met the migration scanner |
| 1540 | * and the loop terminated due to isolate_start_pfn < low_pfn |
Vlastimil Babka | 7ed695e | 2014-01-21 15:51:09 -0800 | [diff] [blame] | 1541 | */ |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1542 | cc->free_pfn = isolate_start_pfn; |
Mel Gorman | 5a81188 | 2019-03-05 15:45:01 -0800 | [diff] [blame] | 1543 | |
| 1544 | splitmap: |
| 1545 | /* __isolate_free_page() does not map the pages */ |
| 1546 | split_map_pages(freelist); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1547 | } |
| 1548 | |
| 1549 | /* |
| 1550 | * This is a migrate-callback that "allocates" freepages by taking pages |
| 1551 | * from the isolated freelists in the block we are migrating to. |
| 1552 | */ |
| 1553 | static struct page *compaction_alloc(struct page *migratepage, |
Michal Hocko | 666feb2 | 2018-04-10 16:30:03 -0700 | [diff] [blame] | 1554 | unsigned long data) |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1555 | { |
| 1556 | struct compact_control *cc = (struct compact_control *)data; |
| 1557 | struct page *freepage; |
| 1558 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1559 | if (list_empty(&cc->freepages)) { |
Mel Gorman | cb2dcaf | 2019-03-05 15:45:11 -0800 | [diff] [blame] | 1560 | isolate_freepages(cc); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1561 | |
| 1562 | if (list_empty(&cc->freepages)) |
| 1563 | return NULL; |
| 1564 | } |
| 1565 | |
| 1566 | freepage = list_entry(cc->freepages.next, struct page, lru); |
| 1567 | list_del(&freepage->lru); |
| 1568 | cc->nr_freepages--; |
| 1569 | |
| 1570 | return freepage; |
| 1571 | } |
| 1572 | |
| 1573 | /* |
David Rientjes | d53aea3 | 2014-06-04 16:08:26 -0700 | [diff] [blame] | 1574 | * This is a migrate-callback that "frees" freepages back to the isolated |
| 1575 | * freelist. All pages on the freelist are from the same zone, so there is no |
| 1576 | * special handling needed for NUMA. |
| 1577 | */ |
| 1578 | static void compaction_free(struct page *page, unsigned long data) |
| 1579 | { |
| 1580 | struct compact_control *cc = (struct compact_control *)data; |
| 1581 | |
| 1582 | list_add(&page->lru, &cc->freepages); |
| 1583 | cc->nr_freepages++; |
| 1584 | } |
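/*
 * Sketch of how these callbacks are wired up (the call site is in
 * compact_zone(), which is outside this excerpt, so treat the exact
 * arguments as illustrative rather than authoritative):
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);
 *
 * compaction_alloc() hands out pages gathered by the free scanner and
 * compaction_free() puts them back when an individual migration fails.
 */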
| 1585 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1586 | /* possible outcome of isolate_migratepages */ |
| 1587 | typedef enum { |
| 1588 | ISOLATE_ABORT, /* Abort compaction now */ |
| 1589 | ISOLATE_NONE, /* No pages isolated, continue scanning */ |
| 1590 | ISOLATE_SUCCESS, /* Pages isolated, migrate */ |
| 1591 | } isolate_migrate_t; |
| 1592 | |
| 1593 | /* |
Eric B Munson | 5bbe354 | 2015-04-15 16:13:20 -0700 | [diff] [blame] | 1594 | * Allow userspace to control policy on scanning the unevictable LRU for |
| 1595 | * compactable pages. |
| 1596 | */ |
Sebastian Andrzej Siewior | 6923aa0 | 2020-04-01 21:10:42 -0700 | [diff] [blame] | 1597 | #ifdef CONFIG_PREEMPT_RT |
| 1598 | int sysctl_compact_unevictable_allowed __read_mostly = 0; |
| 1599 | #else |
Eric B Munson | 5bbe354 | 2015-04-15 16:13:20 -0700 | [diff] [blame] | 1600 | int sysctl_compact_unevictable_allowed __read_mostly = 1; |
Sebastian Andrzej Siewior | 6923aa0 | 2020-04-01 21:10:42 -0700 | [diff] [blame] | 1601 | #endif |
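/*
 * The knob is exposed as /proc/sys/vm/compact_unevictable_allowed, e.g.
 * "sysctl -w vm.compact_unevictable_allowed=0" (illustrative) keeps the
 * scanners away from the unevictable LRU. PREEMPT_RT defaults to 0 so that
 * mlocked pages are not migrated and then faulted back in at unpredictable
 * times.
 */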
Eric B Munson | 5bbe354 | 2015-04-15 16:13:20 -0700 | [diff] [blame] | 1602 | |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1603 | static inline void |
| 1604 | update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) |
| 1605 | { |
| 1606 | if (cc->fast_start_pfn == ULONG_MAX) |
| 1607 | return; |
| 1608 | |
| 1609 | if (!cc->fast_start_pfn) |
| 1610 | cc->fast_start_pfn = pfn; |
| 1611 | |
| 1612 | cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); |
| 1613 | } |
| 1614 | |
| 1615 | static inline unsigned long |
| 1616 | reinit_migrate_pfn(struct compact_control *cc) |
| 1617 | { |
| 1618 | if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) |
| 1619 | return cc->migrate_pfn; |
| 1620 | |
| 1621 | cc->migrate_pfn = cc->fast_start_pfn; |
| 1622 | cc->fast_start_pfn = ULONG_MAX; |
| 1623 | |
| 1624 | return cc->migrate_pfn; |
| 1625 | } |
| 1626 | |
| 1627 | /* |
| 1628 | * Briefly search the free lists for a migration source that already has |
| 1629 | * some free pages to reduce the number of pages that need migration |
| 1630 | * before a pageblock is free. |
| 1631 | */ |
| 1632 | static unsigned long fast_find_migrateblock(struct compact_control *cc) |
| 1633 | { |
| 1634 | unsigned int limit = freelist_scan_limit(cc); |
| 1635 | unsigned int nr_scanned = 0; |
| 1636 | unsigned long distance; |
| 1637 | unsigned long pfn = cc->migrate_pfn; |
| 1638 | unsigned long high_pfn; |
| 1639 | int order; |
| 1640 | |
| 1641 | /* Skip hints are relied on to avoid repeats on the fast search */ |
| 1642 | if (cc->ignore_skip_hint) |
| 1643 | return pfn; |
| 1644 | |
| 1645 | /* |
| 1646 | * If the migrate_pfn is not at the start of a zone or the start |
| 1647 | * of a pageblock then assume this is a continuation of a previous |
| 1648 | * scan restarted due to COMPACT_CLUSTER_MAX. |
| 1649 | */ |
| 1650 | if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) |
| 1651 | return pfn; |
| 1652 | |
| 1653 | /* |
| 1654 | * For smaller orders, just linearly scan as the number of pages |
| 1655 | * to migrate should be relatively small and does not necessarily |
| 1656 | * justify freeing up a large block for a small allocation. |
| 1657 | */ |
| 1658 | if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) |
| 1659 | return pfn; |
| 1660 | |
| 1661 | /* |
| 1662 | * Only allow kcompactd and direct requests for movable pages to |
| 1663 | * quickly clear out a MOVABLE pageblock for allocation. This |
| 1664 | * reduces the risk that a large movable pageblock is freed for |
| 1665 | * an unmovable/reclaimable small allocation. |
| 1666 | */ |
| 1667 | if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) |
| 1668 | return pfn; |
| 1669 | |
| 1670 | /* |
| 1671 | * When starting the migration scanner, pick any pageblock within the |
| 1672 | * first half of the search space. Otherwise try and pick a pageblock |
| 1673 | * within the first eighth to reduce the chances that a migration |
| 1674 | * target later becomes a source. |
| 1675 | */ |
| 1676 | distance = (cc->free_pfn - cc->migrate_pfn) >> 1; |
| 1677 | if (cc->migrate_pfn != cc->zone->zone_start_pfn) |
| 1678 | distance >>= 2; |
| 1679 | high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); |
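	/*
	 * Worked example (assumed PFNs): for a zone starting at 0x10000 with
	 * free_pfn == 0x90000, the first call accepts a free page anywhere
	 * below high_pfn == 0x50000 (half of the space); later calls shrink
	 * the window to an eighth of the remaining distance so that
	 * pageblocks near the free scanner are not turned into migration
	 * sources.
	 */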
| 1680 | |
| 1681 | for (order = cc->order - 1; |
| 1682 | order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit; |
| 1683 | order--) { |
| 1684 | struct free_area *area = &cc->zone->free_area[order]; |
| 1685 | struct list_head *freelist; |
| 1686 | unsigned long flags; |
| 1687 | struct page *freepage; |
| 1688 | |
| 1689 | if (!area->nr_free) |
| 1690 | continue; |
| 1691 | |
| 1692 | spin_lock_irqsave(&cc->zone->lock, flags); |
| 1693 | freelist = &area->free_list[MIGRATE_MOVABLE]; |
| 1694 | list_for_each_entry(freepage, freelist, lru) { |
| 1695 | unsigned long free_pfn; |
| 1696 | |
| 1697 | nr_scanned++; |
| 1698 | free_pfn = page_to_pfn(freepage); |
| 1699 | if (free_pfn < high_pfn) { |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1700 | /* |
| 1701 | * Avoid if skipped recently. Ideally it would |
| 1702 | * move to the tail but even safe iteration of |
| 1703 | * the list assumes an entry is deleted, not |
| 1704 | * reordered. |
| 1705 | */ |
| 1706 | if (get_pageblock_skip(freepage)) { |
| 1707 | if (list_is_last(freelist, &freepage->lru)) |
| 1708 | break; |
| 1709 | |
| 1710 | continue; |
| 1711 | } |
| 1712 | |
| 1713 | /* Reorder so that a future search skips recently scanned pages */ |
| 1714 | move_freelist_tail(freelist, freepage); |
| 1715 | |
Mel Gorman | e380beb | 2019-03-05 15:44:58 -0800 | [diff] [blame] | 1716 | update_fast_start_pfn(cc, free_pfn); |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1717 | pfn = pageblock_start_pfn(free_pfn); |
| 1718 | cc->fast_search_fail = 0; |
| 1719 | set_pageblock_skip(freepage); |
| 1720 | break; |
| 1721 | } |
| 1722 | |
| 1723 | if (nr_scanned >= limit) { |
| 1724 | cc->fast_search_fail++; |
| 1725 | move_freelist_tail(freelist, freepage); |
| 1726 | break; |
| 1727 | } |
| 1728 | } |
| 1729 | spin_unlock_irqrestore(&cc->zone->lock, flags); |
| 1730 | } |
| 1731 | |
| 1732 | cc->total_migrate_scanned += nr_scanned; |
| 1733 | |
| 1734 | /* |
| 1735 | * If fast scanning failed then use a cached entry for a page block |
| 1736 | * that had free pages as the basis for starting a linear scan. |
| 1737 | */ |
| 1738 | if (pfn == cc->migrate_pfn) |
| 1739 | pfn = reinit_migrate_pfn(cc); |
| 1740 | |
| 1741 | return pfn; |
| 1742 | } |
| 1743 | |
Eric B Munson | 5bbe354 | 2015-04-15 16:13:20 -0700 | [diff] [blame] | 1744 | /* |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1745 | * Isolate all pages that can be migrated from the first suitable block, |
| 1746 | * starting at the block pointed to by the migrate scanner pfn within |
| 1747 | * compact_control. |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1748 | */ |
Pengfei Li | 32aaf05 | 2019-09-23 15:36:58 -0700 | [diff] [blame] | 1749 | static isolate_migrate_t isolate_migratepages(struct compact_control *cc) |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1750 | { |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1751 | unsigned long block_start_pfn; |
| 1752 | unsigned long block_end_pfn; |
| 1753 | unsigned long low_pfn; |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1754 | struct page *page; |
| 1755 | const isolate_mode_t isolate_mode = |
Eric B Munson | 5bbe354 | 2015-04-15 16:13:20 -0700 | [diff] [blame] | 1756 | (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | |
Hugh Dickins | 1d2047f | 2016-07-28 15:48:41 -0700 | [diff] [blame] | 1757 | (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1758 | bool fast_find_block; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1759 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1760 | /* |
| 1761 | * Start at where we last stopped, or beginning of the zone as |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1762 | * initialized by compact_zone(). The first failure will use |
| 1763 | * the lowest PFN as the starting point for linear scanning. |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1764 | */ |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1765 | low_pfn = fast_find_migrateblock(cc); |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1766 | block_start_pfn = pageblock_start_pfn(low_pfn); |
Pengfei Li | 32aaf05 | 2019-09-23 15:36:58 -0700 | [diff] [blame] | 1767 | if (block_start_pfn < cc->zone->zone_start_pfn) |
| 1768 | block_start_pfn = cc->zone->zone_start_pfn; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1769 | |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1770 | /* |
| 1771 | * fast_find_migrateblock() marks a pageblock as skipped, so to avoid |
| 1772 | * the isolation_suitable() check below, record whether the fast |
| 1773 | * search was successful. |
| 1774 | */ |
| 1775 | fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; |
| 1776 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1777 | /* Only scan within a pageblock boundary */ |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 1778 | block_end_pfn = pageblock_end_pfn(low_pfn); |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1779 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1780 | /* |
| 1781 | * Iterate over whole pageblocks until we find the first suitable. |
| 1782 | * Do not cross the free scanner. |
| 1783 | */ |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1784 | for (; block_end_pfn <= cc->free_pfn; |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 1785 | fast_find_block = false, |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1786 | low_pfn = block_end_pfn, |
| 1787 | block_start_pfn = block_end_pfn, |
| 1788 | block_end_pfn += pageblock_nr_pages) { |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1789 | |
| 1790 | /* |
| 1791 | * This can potentially iterate a massively long zone with |
| 1792 | * many pageblocks unsuitable, so periodically check if we |
Mel Gorman | cb810ad | 2019-03-05 15:45:21 -0800 | [diff] [blame] | 1793 | * need to schedule. |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1794 | */ |
Mel Gorman | cb810ad | 2019-03-05 15:45:21 -0800 | [diff] [blame] | 1795 | if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))) |
Mel Gorman | cf66f07 | 2019-03-05 15:45:24 -0800 | [diff] [blame] | 1796 | cond_resched(); |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1797 | |
Pengfei Li | 32aaf05 | 2019-09-23 15:36:58 -0700 | [diff] [blame] | 1798 | page = pageblock_pfn_to_page(block_start_pfn, |
| 1799 | block_end_pfn, cc->zone); |
Vlastimil Babka | 7d49d88 | 2014-10-09 15:27:11 -0700 | [diff] [blame] | 1800 | if (!page) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1801 | continue; |
| 1802 | |
Mel Gorman | e380beb | 2019-03-05 15:44:58 -0800 | [diff] [blame] | 1803 | /* |
| 1804 | * If isolation recently failed, do not retry. Only check the |
| 1805 | * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock |
| 1806 | * to be visited multiple times. Assume skip was checked |
| 1807 | * before making it "skip" so other compaction instances do |
| 1808 | * not scan the same block. |
| 1809 | */ |
| 1810 | if (IS_ALIGNED(low_pfn, pageblock_nr_pages) && |
| 1811 | !fast_find_block && !isolation_suitable(cc, page)) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1812 | continue; |
| 1813 | |
| 1814 | /* |
Mel Gorman | 9bebefd | 2019-03-05 15:45:14 -0800 | [diff] [blame] | 1815 | * For async compaction, also only scan in MOVABLE blocks |
| 1816 | * without huge pages. Async compaction is optimistic, checking |
| 1817 | * whether the minimum amount of work satisfies the allocation. |
| 1818 | * The cached PFN is updated as it's possible that all |
| 1819 | * remaining blocks between source and target are unsuitable |
| 1820 | * and the compaction scanners fail to meet. |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1821 | */ |
Mel Gorman | 9bebefd | 2019-03-05 15:45:14 -0800 | [diff] [blame] | 1822 | if (!suitable_migration_source(cc, page)) { |
| 1823 | update_cached_migrate(cc, block_end_pfn); |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1824 | continue; |
Mel Gorman | 9bebefd | 2019-03-05 15:45:14 -0800 | [diff] [blame] | 1825 | } |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1826 | |
| 1827 | /* Perform the isolation */ |
Joonsoo Kim | e1409c3 | 2016-03-15 14:57:48 -0700 | [diff] [blame] | 1828 | low_pfn = isolate_migratepages_block(cc, low_pfn, |
| 1829 | block_end_pfn, isolate_mode); |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1830 | |
Mel Gorman | cb2dcaf | 2019-03-05 15:45:11 -0800 | [diff] [blame] | 1831 | if (!low_pfn) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1832 | return ISOLATE_ABORT; |
| 1833 | |
| 1834 | /* |
| 1835 | * Either we isolated something and proceed with migration. Or |
| 1836 | * we failed and compact_zone should decide if we should |
| 1837 | * continue or not. |
| 1838 | */ |
| 1839 | break; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1840 | } |
| 1841 | |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1842 | /* Record where migration scanner will be restarted. */ |
| 1843 | cc->migrate_pfn = low_pfn; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1844 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1845 | return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1846 | } |
| 1847 | |
Yaowei Bai | 21c527a | 2015-11-05 18:47:20 -0800 | [diff] [blame] | 1848 | /* |
| 1849 | * order == -1 is expected when compacting via |
| 1850 | * /proc/sys/vm/compact_memory |
| 1851 | */ |
| 1852 | static inline bool is_via_compact_memory(int order) |
| 1853 | { |
| 1854 | return order == -1; |
| 1855 | } |
| 1856 | |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 1857 | static enum compact_result __compact_finished(struct compact_control *cc) |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1858 | { |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1859 | unsigned int order; |
Vlastimil Babka | d39773a | 2017-05-08 15:54:46 -0700 | [diff] [blame] | 1860 | const int migratetype = cc->migratetype; |
Mel Gorman | cb2dcaf | 2019-03-05 15:45:11 -0800 | [diff] [blame] | 1861 | int ret; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1862 | |
Mel Gorman | 753341a | 2012-10-08 16:32:40 -0700 | [diff] [blame] | 1863 | /* Compaction run completes if the migrate and free scanner meet */ |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1864 | if (compact_scanners_met(cc)) { |
Vlastimil Babka | 55b7c4c | 2014-01-21 15:51:11 -0800 | [diff] [blame] | 1865 | /* Let the next compaction start anew. */ |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 1866 | reset_cached_positions(cc->zone); |
Vlastimil Babka | 55b7c4c | 2014-01-21 15:51:11 -0800 | [diff] [blame] | 1867 | |
Mel Gorman | 6299702 | 2012-10-08 16:32:47 -0700 | [diff] [blame] | 1868 | /* |
| 1869 | * Mark that the PG_migrate_skip information should be cleared |
Vlastimil Babka | accf624 | 2016-03-17 14:18:15 -0700 | [diff] [blame] | 1870 | * by kswapd when it goes to sleep. kcompactd does not set the |
Mel Gorman | 6299702 | 2012-10-08 16:32:47 -0700 | [diff] [blame] | 1871 | * flag itself as the decision to be clear should be directly |
| 1872 | * based on an allocation request. |
| 1873 | */ |
Vlastimil Babka | accf624 | 2016-03-17 14:18:15 -0700 | [diff] [blame] | 1874 | if (cc->direct_compaction) |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 1875 | cc->zone->compact_blockskip_flush = true; |
Mel Gorman | 6299702 | 2012-10-08 16:32:47 -0700 | [diff] [blame] | 1876 | |
Michal Hocko | c8f7de0 | 2016-05-20 16:56:47 -0700 | [diff] [blame] | 1877 | if (cc->whole_zone) |
| 1878 | return COMPACT_COMPLETE; |
| 1879 | else |
| 1880 | return COMPACT_PARTIAL_SKIPPED; |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 1881 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1882 | |
Yaowei Bai | 21c527a | 2015-11-05 18:47:20 -0800 | [diff] [blame] | 1883 | if (is_via_compact_memory(cc->order)) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1884 | return COMPACT_CONTINUE; |
| 1885 | |
Mel Gorman | efe771c | 2019-03-05 15:44:46 -0800 | [diff] [blame] | 1886 | /* |
| 1887 | * Always finish scanning a pageblock to reduce the possibility of |
| 1888 | * fallbacks in the future. This is particularly important when |
| 1889 | * migration source is unmovable/reclaimable but it's not worth |
| 1890 | * special casing. |
| 1891 | */ |
| 1892 | if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) |
| 1893 | return COMPACT_CONTINUE; |
Vlastimil Babka | baf6a9a | 2017-05-08 15:54:52 -0700 | [diff] [blame] | 1894 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1895 | /* Direct compactor: Is a suitable page free? */ |
Mel Gorman | cb2dcaf | 2019-03-05 15:45:11 -0800 | [diff] [blame] | 1896 | ret = COMPACT_NO_SUITABLE_PAGE; |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1897 | for (order = cc->order; order < MAX_ORDER; order++) { |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 1898 | struct free_area *area = &cc->zone->free_area[order]; |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 1899 | bool can_steal; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1900 | |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1901 | /* Job done if page is free of the right migratetype */ |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 1902 | if (!free_area_empty(area, migratetype)) |
Vlastimil Babka | cf37831 | 2016-10-07 16:57:41 -0700 | [diff] [blame] | 1903 | return COMPACT_SUCCESS; |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1904 | |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 1905 | #ifdef CONFIG_CMA |
| 1906 | /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ |
| 1907 | if (migratetype == MIGRATE_MOVABLE && |
Dan Williams | b03641a | 2019-05-14 15:41:32 -0700 | [diff] [blame] | 1908 | !free_area_empty(area, MIGRATE_CMA)) |
Vlastimil Babka | cf37831 | 2016-10-07 16:57:41 -0700 | [diff] [blame] | 1909 | return COMPACT_SUCCESS; |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 1910 | #endif |
| 1911 | /* |
| 1912 | * Job done if allocation would steal freepages from |
| 1913 | * other migratetype buddy lists. |
| 1914 | */ |
| 1915 | if (find_suitable_fallback(area, order, migratetype, |
Vlastimil Babka | baf6a9a | 2017-05-08 15:54:52 -0700 | [diff] [blame] | 1916 | true, &can_steal) != -1) { |
| 1917 | |
| 1918 | /* movable pages are OK in any pageblock */ |
| 1919 | if (migratetype == MIGRATE_MOVABLE) |
| 1920 | return COMPACT_SUCCESS; |
| 1921 | |
| 1922 | /* |
| 1923 | * We are stealing for a non-movable allocation. Make |
| 1924 | * sure we finish compacting the current pageblock |
| 1925 | * first so it is as free as possible and we won't |
| 1926 | * have to steal another one soon. This only applies |
| 1927 | * to sync compaction, as async compaction operates |
| 1928 | * on pageblocks of the same migratetype. |
| 1929 | */ |
| 1930 | if (cc->mode == MIGRATE_ASYNC || |
| 1931 | IS_ALIGNED(cc->migrate_pfn, |
| 1932 | pageblock_nr_pages)) { |
| 1933 | return COMPACT_SUCCESS; |
| 1934 | } |
| 1935 | |
Mel Gorman | cb2dcaf | 2019-03-05 15:45:11 -0800 | [diff] [blame] | 1936 | ret = COMPACT_CONTINUE; |
| 1937 | break; |
Vlastimil Babka | baf6a9a | 2017-05-08 15:54:52 -0700 | [diff] [blame] | 1938 | } |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1939 | } |
| 1940 | |
Mel Gorman | cb2dcaf | 2019-03-05 15:45:11 -0800 | [diff] [blame] | 1941 | if (cc->contended || fatal_signal_pending(current)) |
| 1942 | ret = COMPACT_CONTENDED; |
| 1943 | |
| 1944 | return ret; |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1945 | } |
| 1946 | |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 1947 | static enum compact_result compact_finished(struct compact_control *cc) |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1948 | { |
| 1949 | int ret; |
| 1950 | |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 1951 | ret = __compact_finished(cc); |
| 1952 | trace_mm_compaction_finished(cc->zone, cc->order, ret); |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1953 | if (ret == COMPACT_NO_SUITABLE_PAGE) |
| 1954 | ret = COMPACT_CONTINUE; |
| 1955 | |
| 1956 | return ret; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1957 | } |
| 1958 | |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1959 | /* |
| 1960 | * compaction_suitable: Is this suitable to run compaction on this zone now? |
| 1961 | * Returns |
| 1962 | * COMPACT_SKIPPED - If there are too few free pages for compaction |
Vlastimil Babka | cf37831 | 2016-10-07 16:57:41 -0700 | [diff] [blame] | 1963 | * COMPACT_SUCCESS - If the allocation would succeed without compaction |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1964 | * COMPACT_CONTINUE - If compaction should run now |
| 1965 | */ |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 1966 | static enum compact_result __compaction_suitable(struct zone *zone, int order, |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 1967 | unsigned int alloc_flags, |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 1968 | int classzone_idx, |
| 1969 | unsigned long wmark_target) |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1970 | { |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1971 | unsigned long watermark; |
| 1972 | |
Yaowei Bai | 21c527a | 2015-11-05 18:47:20 -0800 | [diff] [blame] | 1973 | if (is_via_compact_memory(order)) |
Michal Hocko | 3957c77 | 2011-06-15 15:08:25 -0700 | [diff] [blame] | 1974 | return COMPACT_CONTINUE; |
| 1975 | |
Mel Gorman | a921444 | 2018-12-28 00:35:44 -0800 | [diff] [blame] | 1976 | watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1977 | /* |
| 1978 | * If watermarks for high-order allocation are already met, there |
| 1979 | * should be no need for compaction at all. |
| 1980 | */ |
| 1981 | if (zone_watermark_ok(zone, order, watermark, classzone_idx, |
| 1982 | alloc_flags)) |
Vlastimil Babka | cf37831 | 2016-10-07 16:57:41 -0700 | [diff] [blame] | 1983 | return COMPACT_SUCCESS; |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1984 | |
Michal Hocko | 3957c77 | 2011-06-15 15:08:25 -0700 | [diff] [blame] | 1985 | /* |
Vlastimil Babka | 9861a62 | 2016-10-07 16:57:53 -0700 | [diff] [blame] | 1986 | * Watermarks for order-0 must be met for compaction to be able to |
Vlastimil Babka | 984fdba | 2016-10-07 16:57:57 -0700 | [diff] [blame] | 1987 | * isolate free pages for migration targets. This means that the |
| 1988 | * watermark and alloc_flags have to match, or be more pessimistic than |
| 1989 | * the check in __isolate_free_page(). We don't use the direct |
| 1990 | * compactor's alloc_flags, as they are not relevant for freepage |
| 1991 | * isolation. We however do use the direct compactor's classzone_idx to |
| 1992 | * skip over zones where lowmem reserves would prevent allocation even |
| 1993 | * if compaction succeeds. |
Vlastimil Babka | 8348faf | 2016-10-07 16:58:00 -0700 | [diff] [blame] | 1994 | * For costly orders, we require low watermark instead of min for |
| 1995 | * compaction to proceed to increase its chances. |
Joonsoo Kim | d883c6c | 2018-05-23 10:18:21 +0900 | [diff] [blame] | 1996 | * ALLOC_CMA is used, as pages in CMA pageblocks are considered |
| 1997 | * suitable migration targets |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1998 | */ |
Vlastimil Babka | 8348faf | 2016-10-07 16:58:00 -0700 | [diff] [blame] | 1999 | watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? |
| 2000 | low_wmark_pages(zone) : min_wmark_pages(zone); |
| 2001 | watermark += compact_gap(order); |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 2002 | if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, |
Joonsoo Kim | d883c6c | 2018-05-23 10:18:21 +0900 | [diff] [blame] | 2003 | ALLOC_CMA, wmark_target)) |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 2004 | return COMPACT_SKIPPED; |
| 2005 | |
Vlastimil Babka | cc5c9f0 | 2016-10-07 17:00:43 -0700 | [diff] [blame] | 2006 | return COMPACT_CONTINUE; |
| 2007 | } |
| 2008 | |
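To make the gap above concrete: compact_gap() lives in mm/internal.h rather than in this file. In kernels of this vintage it is essentially the helper sketched below (an illustration, not a copy of the header), reserving room for the isolated migration targets as well as the allocation itself.

static inline unsigned long compact_gap_sketch(unsigned int order)
{
	/* twice the allocation size: isolated migration targets + the allocation */
	return 2UL << order;
}

For an order-9 (THP-sized) request this adds 1024 pages on top of the low or min watermark before __compaction_suitable() reports COMPACT_CONTINUE.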
| 2009 | enum compact_result compaction_suitable(struct zone *zone, int order, |
| 2010 | unsigned int alloc_flags, |
| 2011 | int classzone_idx) |
| 2012 | { |
| 2013 | enum compact_result ret; |
| 2014 | int fragindex; |
| 2015 | |
| 2016 | ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx, |
| 2017 | zone_page_state(zone, NR_FREE_PAGES)); |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 2018 | /* |
| 2019 | * fragmentation index determines if allocation failures are due to |
| 2020 | * low memory or external fragmentation |
| 2021 | * |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 2022 | * index of -1000 would imply allocations might succeed depending on |
| 2023 | * watermarks, but we already failed the high-order watermark check |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 2024 | * index towards 0 implies failure is due to lack of memory |
| 2025 | * index towards 1000 implies failure is due to fragmentation |
| 2026 | * |
Vlastimil Babka | 2031142 | 2016-10-07 17:00:46 -0700 | [diff] [blame] | 2027 | * Only compact if a failure would be due to fragmentation. Also |
| 2028 | * ignore fragindex for non-costly orders where the alternative to |
| 2029 | * a successful reclaim/compaction is OOM. Fragindex and the |
| 2030 | * vm.extfrag_threshold sysctl is meant as a heuristic to prevent |
| 2031 | * excessive compaction for costly orders, but it should not be at the |
| 2032 | * expense of system stability. |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 2033 | */ |
Vlastimil Babka | 2031142 | 2016-10-07 17:00:46 -0700 | [diff] [blame] | 2034 | if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { |
Vlastimil Babka | cc5c9f0 | 2016-10-07 17:00:43 -0700 | [diff] [blame] | 2035 | fragindex = fragmentation_index(zone, order); |
| 2036 | if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) |
| 2037 | ret = COMPACT_NOT_SUITABLE_ZONE; |
| 2038 | } |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 2039 | |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 2040 | trace_mm_compaction_suitable(zone, order, ret); |
| 2041 | if (ret == COMPACT_NOT_SUITABLE_ZONE) |
| 2042 | ret = COMPACT_SKIPPED; |
| 2043 | |
| 2044 | return ret; |
| 2045 | } |
| 2046 | |
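A hedged illustration of the fragindex gate above; the wrapper name is invented for this sketch, and the threshold of 500 is the sysctl_extfrag_threshold default set later in this file. The gate only applies to costly orders that have already passed the order-0 watermark check.

static bool fragindex_allows_compaction(int fragindex, int threshold)
{
	/*
	 * -1000 means watermarks alone decide the outcome; values above the
	 * threshold point at external fragmentation. Either way compaction
	 * may proceed. A value in 0..threshold looks like plain low memory,
	 * so the zone is reported as COMPACT_SKIPPED and reclaim should run
	 * instead.
	 */
	return fragindex < 0 || fragindex > threshold;
}

With the default threshold of 500, a zone whose fragmentation_index() is 350 falls back to reclaim, while an index of 800 lets compaction run.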
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 2047 | bool compaction_zonelist_suitable(struct alloc_context *ac, int order, |
| 2048 | int alloc_flags) |
| 2049 | { |
| 2050 | struct zone *zone; |
| 2051 | struct zoneref *z; |
| 2052 | |
| 2053 | /* |
| 2054 | * Make sure at least one zone would pass __compaction_suitable if we continue |
| 2055 | * retrying the reclaim. |
| 2056 | */ |
| 2057 | for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, |
| 2058 | ac->nodemask) { |
| 2059 | unsigned long available; |
| 2060 | enum compact_result compact_result; |
| 2061 | |
| 2062 | /* |
| 2063 | * Do not consider all the reclaimable memory because we do not |
| 2064 | * want to thrash just for a single high-order allocation which |
| 2065 | * is not even guaranteed to succeed even if __compaction_suitable |
| 2066 | * is happy about the watermark check. |
| 2067 | */ |
Mel Gorman | 5a1c84b | 2016-07-28 15:47:31 -0700 | [diff] [blame] | 2068 | available = zone_reclaimable_pages(zone) / order; |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 2069 | available += zone_page_state_snapshot(zone, NR_FREE_PAGES); |
| 2070 | compact_result = __compaction_suitable(zone, order, alloc_flags, |
| 2071 | ac_classzone_idx(ac), available); |
Vlastimil Babka | cc5c9f0 | 2016-10-07 17:00:43 -0700 | [diff] [blame] | 2072 | if (compact_result != COMPACT_SKIPPED) |
Michal Hocko | 86a294a | 2016-05-20 16:57:12 -0700 | [diff] [blame] | 2073 | return true; |
| 2074 | } |
| 2075 | |
| 2076 | return false; |
| 2077 | } |
| 2078 | |
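A worked example of the estimate above, with made-up numbers: for an order-4 request against a zone holding 200000 reclaimable pages and 3000 free pages, the target passed to __compaction_suitable() is 200000 / 4 + 3000 = 53000 pages, i.e. the question asked is whether compaction could proceed if reclaim recovered that much. Dividing the reclaimable total by the order, rather than using all of it, keeps a single high-order request from justifying unbounded reclaim.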
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 2079 | static enum compact_result |
| 2080 | compact_zone(struct compact_control *cc, struct capture_control *capc) |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2081 | { |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 2082 | enum compact_result ret; |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 2083 | unsigned long start_pfn = cc->zone->zone_start_pfn; |
| 2084 | unsigned long end_pfn = zone_end_pfn(cc->zone); |
Mel Gorman | 566e54e | 2019-03-05 15:44:32 -0800 | [diff] [blame] | 2085 | unsigned long last_migrated_pfn; |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 2086 | const bool sync = cc->mode != MIGRATE_ASYNC; |
Mel Gorman | 8854c55 | 2019-03-05 15:45:18 -0800 | [diff] [blame] | 2087 | bool update_cached; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2088 | |
Yafang Shao | a94b525 | 2019-09-23 15:36:54 -0700 | [diff] [blame] | 2089 | /* |
| 2090 | * These counters track activities during zone compaction. Initialize |
| 2091 | * them before compacting a new zone. |
| 2092 | */ |
| 2093 | cc->total_migrate_scanned = 0; |
| 2094 | cc->total_free_scanned = 0; |
| 2095 | cc->nr_migratepages = 0; |
| 2096 | cc->nr_freepages = 0; |
| 2097 | INIT_LIST_HEAD(&cc->freepages); |
| 2098 | INIT_LIST_HEAD(&cc->migratepages); |
| 2099 | |
Vlastimil Babka | d39773a | 2017-05-08 15:54:46 -0700 | [diff] [blame] | 2100 | cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask); |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 2101 | ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 2102 | cc->classzone_idx); |
Michal Hocko | c46649d | 2016-05-20 16:56:41 -0700 | [diff] [blame] | 2103 | /* Compaction is likely to fail */ |
Vlastimil Babka | cf37831 | 2016-10-07 16:57:41 -0700 | [diff] [blame] | 2104 | if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 2105 | return ret; |
Michal Hocko | c46649d | 2016-05-20 16:56:41 -0700 | [diff] [blame] | 2106 | |
| 2107 | /* huh, compaction_suitable is returning something unexpected */ |
| 2108 | VM_BUG_ON(ret != COMPACT_CONTINUE); |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 2109 | |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 2110 | /* |
Vlastimil Babka | d3132e4 | 2014-01-21 15:51:08 -0800 | [diff] [blame] | 2111 | * Clear pageblock skip if there were failures recently and compaction |
Vlastimil Babka | accf624 | 2016-03-17 14:18:15 -0700 | [diff] [blame] | 2112 | * is about to be retried after being deferred. |
Vlastimil Babka | d3132e4 | 2014-01-21 15:51:08 -0800 | [diff] [blame] | 2113 | */ |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 2114 | if (compaction_restarting(cc->zone, cc->order)) |
| 2115 | __reset_isolation_suitable(cc->zone); |
Vlastimil Babka | d3132e4 | 2014-01-21 15:51:08 -0800 | [diff] [blame] | 2116 | |
| 2117 | /* |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 2118 | * Set up to move all movable pages to the end of the zone. Use cached |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 2119 | * information on where the scanners should start (unless we explicitly |
| 2120 | * want to compact the whole zone), but check that it is initialised |
| 2121 | * by ensuring the values are within zone boundaries. |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 2122 | */ |
Mel Gorman | 70b4459 | 2019-03-05 15:44:54 -0800 | [diff] [blame] | 2123 | cc->fast_start_pfn = 0; |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 2124 | if (cc->whole_zone) { |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 2125 | cc->migrate_pfn = start_pfn; |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 2126 | cc->free_pfn = pageblock_start_pfn(end_pfn - 1); |
| 2127 | } else { |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 2128 | cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; |
| 2129 | cc->free_pfn = cc->zone->compact_cached_free_pfn; |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 2130 | if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { |
| 2131 | cc->free_pfn = pageblock_start_pfn(end_pfn - 1); |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 2132 | cc->zone->compact_cached_free_pfn = cc->free_pfn; |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 2133 | } |
| 2134 | if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { |
| 2135 | cc->migrate_pfn = start_pfn; |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 2136 | cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; |
| 2137 | cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 2138 | } |
Michal Hocko | c8f7de0 | 2016-05-20 16:56:47 -0700 | [diff] [blame] | 2139 | |
Mel Gorman | e332f74 | 2019-03-05 15:45:38 -0800 | [diff] [blame] | 2140 | if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 2141 | cc->whole_zone = true; |
| 2142 | } |
Michal Hocko | c8f7de0 | 2016-05-20 16:56:47 -0700 | [diff] [blame] | 2143 | |
Mel Gorman | 566e54e | 2019-03-05 15:44:32 -0800 | [diff] [blame] | 2144 | last_migrated_pfn = 0; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2145 | |
Mel Gorman | 8854c55 | 2019-03-05 15:45:18 -0800 | [diff] [blame] | 2146 | /* |
| 2147 | * Migrate has separate cached PFNs for ASYNC and SYNC* migration on |
| 2148 | * the basis that some migrations will fail in ASYNC mode. However, |
| 2149 | * if the cached PFNs match and pageblocks are skipped due to having |
| 2150 | * no isolation candidates, then the sync state does not matter. |
| 2151 | * Until a pageblock with isolation candidates is found, keep the |
| 2152 | * cached PFNs in sync to avoid revisiting the same blocks. |
| 2153 | */ |
| 2154 | update_cached = !sync && |
| 2155 | cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; |
| 2156 | |
Joonsoo Kim | 16c4a09 | 2015-02-11 15:27:01 -0800 | [diff] [blame] | 2157 | trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, |
| 2158 | cc->free_pfn, end_pfn, sync); |
Mel Gorman | 0eb927c | 2014-01-21 15:51:05 -0800 | [diff] [blame] | 2159 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2160 | migrate_prep_local(); |
| 2161 | |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 2162 | while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { |
Minchan Kim | 9d502c1 | 2011-03-22 16:30:39 -0700 | [diff] [blame] | 2163 | int err; |
Mel Gorman | 566e54e | 2019-03-05 15:44:32 -0800 | [diff] [blame] | 2164 | unsigned long start_pfn = cc->migrate_pfn; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2165 | |
Mel Gorman | 804d312 | 2019-03-05 15:45:07 -0800 | [diff] [blame] | 2166 | /* |
| 2167 | * Avoid multiple rescans which can happen if a page cannot be |
| 2168 | * isolated (dirty/writeback in async mode) or if the migrated |
| 2169 | * pages are being allocated before the pageblock is cleared. |
| 2170 | * The first rescan will capture the entire pageblock for |
| 2171 | * migration. If it fails, it'll be marked skip and scanning |
| 2172 | * will proceed as normal. |
| 2173 | */ |
| 2174 | cc->rescan = false; |
| 2175 | if (pageblock_start_pfn(last_migrated_pfn) == |
| 2176 | pageblock_start_pfn(start_pfn)) { |
| 2177 | cc->rescan = true; |
| 2178 | } |
| 2179 | |
Pengfei Li | 32aaf05 | 2019-09-23 15:36:58 -0700 | [diff] [blame] | 2180 | switch (isolate_migratepages(cc)) { |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 2181 | case ISOLATE_ABORT: |
Vlastimil Babka | 2d1e104 | 2015-11-05 18:48:02 -0800 | [diff] [blame] | 2182 | ret = COMPACT_CONTENDED; |
Rafael Aquini | 5733c7d | 2012-12-11 16:02:47 -0800 | [diff] [blame] | 2183 | putback_movable_pages(&cc->migratepages); |
Shaohua Li | e64c523 | 2012-10-08 16:32:27 -0700 | [diff] [blame] | 2184 | cc->nr_migratepages = 0; |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 2185 | goto out; |
| 2186 | case ISOLATE_NONE: |
Mel Gorman | 8854c55 | 2019-03-05 15:45:18 -0800 | [diff] [blame] | 2187 | if (update_cached) { |
| 2188 | cc->zone->compact_cached_migrate_pfn[1] = |
| 2189 | cc->zone->compact_cached_migrate_pfn[0]; |
| 2190 | } |
| 2191 | |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 2192 | /* |
| 2193 | * We haven't isolated and migrated anything, but |
| 2194 | * there might still be unflushed migrations from |
| 2195 | * the previous cc->order aligned block. |
| 2196 | */ |
| 2197 | goto check_drain; |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 2198 | case ISOLATE_SUCCESS: |
Mel Gorman | 8854c55 | 2019-03-05 15:45:18 -0800 | [diff] [blame] | 2199 | update_cached = false; |
Mel Gorman | 566e54e | 2019-03-05 15:44:32 -0800 | [diff] [blame] | 2200 | last_migrated_pfn = start_pfn; |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 2201 | ; |
| 2202 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2203 | |
David Rientjes | d53aea3 | 2014-06-04 16:08:26 -0700 | [diff] [blame] | 2204 | err = migrate_pages(&cc->migratepages, compaction_alloc, |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 2205 | compaction_free, (unsigned long)cc, cc->mode, |
Mel Gorman | 7b2a2d4 | 2012-10-19 14:07:31 +0100 | [diff] [blame] | 2206 | MR_COMPACTION); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2207 | |
Vlastimil Babka | f8c9301 | 2014-06-04 16:08:32 -0700 | [diff] [blame] | 2208 | trace_mm_compaction_migratepages(cc->nr_migratepages, err, |
| 2209 | &cc->migratepages); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2210 | |
Vlastimil Babka | f8c9301 | 2014-06-04 16:08:32 -0700 | [diff] [blame] | 2211 | /* All pages were either migrated or will be released */ |
| 2212 | cc->nr_migratepages = 0; |
Minchan Kim | 9d502c1 | 2011-03-22 16:30:39 -0700 | [diff] [blame] | 2213 | if (err) { |
Rafael Aquini | 5733c7d | 2012-12-11 16:02:47 -0800 | [diff] [blame] | 2214 | putback_movable_pages(&cc->migratepages); |
Vlastimil Babka | 7ed695e | 2014-01-21 15:51:09 -0800 | [diff] [blame] | 2215 | /* |
| 2216 | * migrate_pages() may return -ENOMEM when scanners meet |
| 2217 | * and we want compact_finished() to detect it |
| 2218 | */ |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 2219 | if (err == -ENOMEM && !compact_scanners_met(cc)) { |
Vlastimil Babka | 2d1e104 | 2015-11-05 18:48:02 -0800 | [diff] [blame] | 2220 | ret = COMPACT_CONTENDED; |
David Rientjes | 4bf2bba | 2012-07-11 14:02:13 -0700 | [diff] [blame] | 2221 | goto out; |
| 2222 | } |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 2223 | /* |
| 2224 | * We failed to migrate at least one page in the current |
| 2225 | * order-aligned block, so skip the rest of it. |
| 2226 | */ |
| 2227 | if (cc->direct_compaction && |
| 2228 | (cc->mode == MIGRATE_ASYNC)) { |
| 2229 | cc->migrate_pfn = block_end_pfn( |
| 2230 | cc->migrate_pfn - 1, cc->order); |
| 2231 | /* Draining pcplists is useless in this case */ |
Mel Gorman | 566e54e | 2019-03-05 15:44:32 -0800 | [diff] [blame] | 2232 | last_migrated_pfn = 0; |
Vlastimil Babka | fdd048e | 2016-05-19 17:11:55 -0700 | [diff] [blame] | 2233 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2234 | } |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 2235 | |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 2236 | check_drain: |
| 2237 | /* |
| 2238 | * Has the migration scanner moved away from the previous |
| 2239 | * cc->order aligned block where we migrated from? If yes, |
| 2240 | * flush the pages that were freed, so that they can merge and |
| 2241 | * compact_finished() can detect immediately if allocation |
| 2242 | * would succeed. |
| 2243 | */ |
Mel Gorman | 566e54e | 2019-03-05 15:44:32 -0800 | [diff] [blame] | 2244 | if (cc->order > 0 && last_migrated_pfn) { |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 2245 | int cpu; |
| 2246 | unsigned long current_block_start = |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 2247 | block_start_pfn(cc->migrate_pfn, cc->order); |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 2248 | |
Mel Gorman | 566e54e | 2019-03-05 15:44:32 -0800 | [diff] [blame] | 2249 | if (last_migrated_pfn < current_block_start) { |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 2250 | cpu = get_cpu(); |
| 2251 | lru_add_drain_cpu(cpu); |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 2252 | drain_local_pages(cc->zone); |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 2253 | put_cpu(); |
| 2254 | /* No more flushing until we migrate again */ |
Mel Gorman | 566e54e | 2019-03-05 15:44:32 -0800 | [diff] [blame] | 2255 | last_migrated_pfn = 0; |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 2256 | } |
| 2257 | } |
| 2258 | |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 2259 | /* Stop if a page has been captured */ |
| 2260 | if (capc && capc->page) { |
| 2261 | ret = COMPACT_SUCCESS; |
| 2262 | break; |
| 2263 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2264 | } |
| 2265 | |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 2266 | out: |
Vlastimil Babka | 6bace09 | 2014-12-10 15:43:31 -0800 | [diff] [blame] | 2267 | /* |
| 2268 | * Release free pages and update where the free scanner should restart, |
| 2269 | * so we don't leave any returned pages behind in the next attempt. |
| 2270 | */ |
| 2271 | if (cc->nr_freepages > 0) { |
| 2272 | unsigned long free_pfn = release_freepages(&cc->freepages); |
| 2273 | |
| 2274 | cc->nr_freepages = 0; |
| 2275 | VM_BUG_ON(free_pfn == 0); |
| 2276 | /* The cached pfn is always the first in a pageblock */ |
Vlastimil Babka | 06b6640 | 2016-05-19 17:11:48 -0700 | [diff] [blame] | 2277 | free_pfn = pageblock_start_pfn(free_pfn); |
Vlastimil Babka | 6bace09 | 2014-12-10 15:43:31 -0800 | [diff] [blame] | 2278 | /* |
| 2279 | * Only go back, not forward. The cached pfn might already have |
| 2280 | * been reset to the zone end in compact_finished(). |
| 2281 | */ |
Mel Gorman | 40cacbc | 2019-03-05 15:44:36 -0800 | [diff] [blame] | 2282 | if (free_pfn > cc->zone->compact_cached_free_pfn) |
| 2283 | cc->zone->compact_cached_free_pfn = free_pfn; |
Vlastimil Babka | 6bace09 | 2014-12-10 15:43:31 -0800 | [diff] [blame] | 2284 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2285 | |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 2286 | count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); |
| 2287 | count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); |
| 2288 | |
Joonsoo Kim | 16c4a09 | 2015-02-11 15:27:01 -0800 | [diff] [blame] | 2289 | trace_mm_compaction_end(start_pfn, cc->migrate_pfn, |
| 2290 | cc->free_pfn, end_pfn, sync, ret); |
Mel Gorman | 0eb927c | 2014-01-21 15:51:05 -0800 | [diff] [blame] | 2291 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 2292 | return ret; |
| 2293 | } |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 2294 | |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 2295 | static enum compact_result compact_zone_order(struct zone *zone, int order, |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 2296 | gfp_t gfp_mask, enum compact_priority prio, |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 2297 | unsigned int alloc_flags, int classzone_idx, |
| 2298 | struct page **capture) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2299 | { |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 2300 | enum compact_result ret; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2301 | struct compact_control cc = { |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2302 | .order = order, |
Mel Gorman | dbe2d4e | 2019-03-05 15:45:31 -0800 | [diff] [blame] | 2303 | .search_order = order, |
David Rientjes | 6d7ce55 | 2014-10-09 15:27:27 -0700 | [diff] [blame] | 2304 | .gfp_mask = gfp_mask, |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2305 | .zone = zone, |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 2306 | .mode = (prio == COMPACT_PRIO_ASYNC) ? |
| 2307 | MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 2308 | .alloc_flags = alloc_flags, |
| 2309 | .classzone_idx = classzone_idx, |
Vlastimil Babka | accf624 | 2016-03-17 14:18:15 -0700 | [diff] [blame] | 2310 | .direct_compaction = true, |
Vlastimil Babka | a8e025e | 2016-10-07 16:57:47 -0700 | [diff] [blame] | 2311 | .whole_zone = (prio == MIN_COMPACT_PRIORITY), |
Vlastimil Babka | 9f7e338 | 2016-10-07 17:00:37 -0700 | [diff] [blame] | 2312 | .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), |
| 2313 | .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2314 | }; |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 2315 | struct capture_control capc = { |
| 2316 | .cc = &cc, |
| 2317 | .page = NULL, |
| 2318 | }; |
| 2319 | |
Vlastimil Babka | 6467552 | 2020-04-01 21:10:35 -0700 | [diff] [blame] | 2320 | current->capture_control = &capc; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2321 | |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 2322 | ret = compact_zone(&cc, &capc); |
Shaohua Li | e64c523 | 2012-10-08 16:32:27 -0700 | [diff] [blame] | 2323 | |
| 2324 | VM_BUG_ON(!list_empty(&cc.freepages)); |
| 2325 | VM_BUG_ON(!list_empty(&cc.migratepages)); |
| 2326 | |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 2327 | *capture = capc.page; |
| 2328 | current->capture_control = NULL; |
| 2329 | |
Shaohua Li | e64c523 | 2012-10-08 16:32:27 -0700 | [diff] [blame] | 2330 | return ret; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2331 | } |
| 2332 | |
Mel Gorman | 5e77190 | 2010-05-24 14:32:31 -0700 | [diff] [blame] | 2333 | int sysctl_extfrag_threshold = 500; |
| 2334 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2335 | /** |
| 2336 | * try_to_compact_pages - Direct compact to satisfy a high-order allocation |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2337 | * @gfp_mask: The GFP mask of the current allocation |
Vlastimil Babka | 1a6d53a | 2015-02-11 15:25:44 -0800 | [diff] [blame] | 2338 | * @order: The order of the current allocation |
| 2339 | * @alloc_flags: The allocation flags of the current allocation |
| 2340 | * @ac: The context of current allocation |
Yang Shi | 112d2d2 | 2018-01-31 16:20:23 -0800 | [diff] [blame] | 2341 | * @prio: Determines how hard direct compaction should try to succeed |
Vlastimil Babka | 6467552 | 2020-04-01 21:10:35 -0700 | [diff] [blame] | 2342 | * @capture: Pointer to where a free page created by compaction will be stored |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2343 | * |
| 2344 | * This is the main entry point for direct page compaction. |
| 2345 | */ |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 2346 | enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, |
Mel Gorman | c603844 | 2016-05-19 17:13:38 -0700 | [diff] [blame] | 2347 | unsigned int alloc_flags, const struct alloc_context *ac, |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 2348 | enum compact_priority prio, struct page **capture) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2349 | { |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2350 | int may_perform_io = gfp_mask & __GFP_IO; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2351 | struct zoneref *z; |
| 2352 | struct zone *zone; |
Michal Hocko | 1d4746d | 2016-05-20 16:56:44 -0700 | [diff] [blame] | 2353 | enum compact_result rc = COMPACT_SKIPPED; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2354 | |
Michal Hocko | 73e64c5 | 2016-12-14 15:04:07 -0800 | [diff] [blame] | 2355 | /* |
| 2356 | * Check if the GFP flags allow compaction - GFP_NOIO is a really |
| 2357 | * tricky context because migration might require IO. |
| 2358 | */ |
| 2359 | if (!may_perform_io) |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 2360 | return COMPACT_SKIPPED; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2361 | |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 2362 | trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 2363 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2364 | /* Compact each zone in the list */ |
Vlastimil Babka | 1a6d53a | 2015-02-11 15:25:44 -0800 | [diff] [blame] | 2365 | for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, |
| 2366 | ac->nodemask) { |
Michal Hocko | ea7ab98 | 2016-05-20 16:56:38 -0700 | [diff] [blame] | 2367 | enum compact_result status; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2368 | |
Vlastimil Babka | a8e025e | 2016-10-07 16:57:47 -0700 | [diff] [blame] | 2369 | if (prio > MIN_COMPACT_PRIORITY |
| 2370 | && compaction_deferred(zone, order)) { |
Michal Hocko | 1d4746d | 2016-05-20 16:56:44 -0700 | [diff] [blame] | 2371 | rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 2372 | continue; |
Michal Hocko | 1d4746d | 2016-05-20 16:56:44 -0700 | [diff] [blame] | 2373 | } |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 2374 | |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 2375 | status = compact_zone_order(zone, order, gfp_mask, prio, |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 2376 | alloc_flags, ac_classzone_idx(ac), capture); |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2377 | rc = max(status, rc); |
| 2378 | |
Vlastimil Babka | 7ceb009 | 2016-10-07 16:57:44 -0700 | [diff] [blame] | 2379 | /* The allocation should succeed, stop compacting */ |
| 2380 | if (status == COMPACT_SUCCESS) { |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 2381 | /* |
| 2382 | * We think the allocation will succeed in this zone, |
| 2383 | * but it is not certain, hence the false. The caller |
| 2384 | * will repeat this with true if allocation indeed |
| 2385 | * succeeds in this zone. |
| 2386 | */ |
| 2387 | compaction_defer_reset(zone, order, false); |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 2388 | |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 2389 | break; |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 2390 | } |
| 2391 | |
Vlastimil Babka | a5508cd | 2016-07-28 15:49:28 -0700 | [diff] [blame] | 2392 | if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 2393 | status == COMPACT_PARTIAL_SKIPPED)) |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 2394 | /* |
| 2395 | * We think that allocation won't succeed in this zone |
| 2396 | * so we defer compaction there. If it ends up |
| 2397 | * succeeding after all, it will be reset. |
| 2398 | */ |
| 2399 | defer_compaction(zone, order); |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 2400 | |
| 2401 | /* |
| 2402 | * We might have stopped compacting due to need_resched() in |
| 2403 | * async compaction, or because a fatal signal was detected. In that |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 2404 | * case do not try further zones. |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 2405 | */ |
Vlastimil Babka | c3486f5 | 2016-07-28 15:49:30 -0700 | [diff] [blame] | 2406 | if ((prio == COMPACT_PRIO_ASYNC && need_resched()) |
| 2407 | || fatal_signal_pending(current)) |
| 2408 | break; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 2409 | } |
| 2410 | |
| 2411 | return rc; |
| 2412 | } |
| 2413 | |
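For orientation, a simplified sketch of how a direct-compaction caller is expected to drive this entry point. The real caller is the page allocator (__alloc_pages_direct_compact()), which does considerably more bookkeeping; the function below is invented for illustration only.

static struct page *direct_compact_sketch(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio)
{
	struct page *page = NULL;
	enum compact_result result;

	result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
				      prio, &page);
	if (page)
		return page;	/* a free page captured during compaction */

	if (result == COMPACT_SKIPPED || result == COMPACT_DEFERRED)
		return NULL;	/* nothing was attempted; let reclaim run first */

	/* otherwise retry the freelists: compaction may have created the page */
	return NULL;
}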
| 2414 | |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 2415 | /* Compact all zones within a node */ |
Andrew Morton | 7103f16 | 2013-02-22 16:32:33 -0800 | [diff] [blame] | 2416 | static void compact_node(int nid) |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 2417 | { |
Vlastimil Babka | 791cae9 | 2016-10-07 16:57:38 -0700 | [diff] [blame] | 2418 | pg_data_t *pgdat = NODE_DATA(nid); |
| 2419 | int zoneid; |
| 2420 | struct zone *zone; |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 2421 | struct compact_control cc = { |
| 2422 | .order = -1, |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 2423 | .mode = MIGRATE_SYNC, |
David Rientjes | 91ca918 | 2014-04-03 14:47:23 -0700 | [diff] [blame] | 2424 | .ignore_skip_hint = true, |
Vlastimil Babka | 06ed299 | 2016-10-07 16:57:35 -0700 | [diff] [blame] | 2425 | .whole_zone = true, |
Michal Hocko | 73e64c5 | 2016-12-14 15:04:07 -0800 | [diff] [blame] | 2426 | .gfp_mask = GFP_KERNEL, |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 2427 | }; |
| 2428 | |
Vlastimil Babka | 791cae9 | 2016-10-07 16:57:38 -0700 | [diff] [blame] | 2429 | |
| 2430 | for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { |
| 2431 | |
| 2432 | zone = &pgdat->node_zones[zoneid]; |
| 2433 | if (!populated_zone(zone)) |
| 2434 | continue; |
| 2435 | |
Vlastimil Babka | 791cae9 | 2016-10-07 16:57:38 -0700 | [diff] [blame] | 2436 | cc.zone = zone; |
Vlastimil Babka | 791cae9 | 2016-10-07 16:57:38 -0700 | [diff] [blame] | 2437 | |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 2438 | compact_zone(&cc, NULL); |
Vlastimil Babka | 791cae9 | 2016-10-07 16:57:38 -0700 | [diff] [blame] | 2439 | |
| 2440 | VM_BUG_ON(!list_empty(&cc.freepages)); |
| 2441 | VM_BUG_ON(!list_empty(&cc.migratepages)); |
| 2442 | } |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 2443 | } |
| 2444 | |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 2445 | /* Compact all nodes in the system */ |
Jason Liu | 7964c06 | 2013-01-11 14:31:47 -0800 | [diff] [blame] | 2446 | static void compact_nodes(void) |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 2447 | { |
| 2448 | int nid; |
| 2449 | |
Hugh Dickins | 8575ec2 | 2012-03-21 16:33:53 -0700 | [diff] [blame] | 2450 | /* Flush pending updates to the LRU lists */ |
| 2451 | lru_add_drain_all(); |
| 2452 | |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 2453 | for_each_online_node(nid) |
| 2454 | compact_node(nid); |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 2455 | } |
| 2456 | |
| 2457 | /* The written value is actually unused, all memory is compacted */ |
| 2458 | int sysctl_compact_memory; |
| 2459 | |
Yaowei Bai | fec4eb2 | 2016-01-14 15:20:09 -0800 | [diff] [blame] | 2460 | /* |
| 2461 | * This is the entry point for compacting all nodes via |
| 2462 | * /proc/sys/vm/compact_memory |
| 2463 | */ |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 2464 | int sysctl_compaction_handler(struct ctl_table *table, int write, |
| 2465 | void __user *buffer, size_t *length, loff_t *ppos) |
| 2466 | { |
| 2467 | if (write) |
Jason Liu | 7964c06 | 2013-01-11 14:31:47 -0800 | [diff] [blame] | 2468 | compact_nodes(); |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 2469 | |
| 2470 | return 0; |
| 2471 | } |
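Usage note: with procfs mounted in the usual place, system-wide compaction can be requested from a root shell with "echo 1 > /proc/sys/vm/compact_memory"; the written value is ignored and any write simply triggers compact_nodes().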
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 2472 | |
| 2473 | #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) |
Rashika Kheria | 74e77fb | 2014-04-03 14:48:01 -0700 | [diff] [blame] | 2474 | static ssize_t sysfs_compact_node(struct device *dev, |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 2475 | struct device_attribute *attr, |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 2476 | const char *buf, size_t count) |
| 2477 | { |
Hugh Dickins | 8575ec2 | 2012-03-21 16:33:53 -0700 | [diff] [blame] | 2478 | int nid = dev->id; |
| 2479 | |
| 2480 | if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { |
| 2481 | /* Flush pending updates to the LRU lists */ |
| 2482 | lru_add_drain_all(); |
| 2483 | |
| 2484 | compact_node(nid); |
| 2485 | } |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 2486 | |
| 2487 | return count; |
| 2488 | } |
Joe Perches | 0825a6f | 2018-06-14 15:27:58 -0700 | [diff] [blame] | 2489 | static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node); |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 2490 | |
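Usage note: the per-node 'compact' attribute registered here ends up under the node device, so compacting only node 0 from a root shell is "echo 1 > /sys/devices/system/node/node0/compact". The file is write-only (mode 0200) and the written value is ignored.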
| 2491 | int compaction_register_node(struct node *node) |
| 2492 | { |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 2493 | return device_create_file(&node->dev, &dev_attr_compact); |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 2494 | } |
| 2495 | |
| 2496 | void compaction_unregister_node(struct node *node) |
| 2497 | { |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 2498 | return device_remove_file(&node->dev, &dev_attr_compact); |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 2499 | } |
| 2500 | #endif /* CONFIG_SYSFS && CONFIG_NUMA */ |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 2501 | |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2502 | static inline bool kcompactd_work_requested(pg_data_t *pgdat) |
| 2503 | { |
Vlastimil Babka | 172400c | 2016-05-05 16:22:32 -0700 | [diff] [blame] | 2504 | return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2505 | } |
| 2506 | |
| 2507 | static bool kcompactd_node_suitable(pg_data_t *pgdat) |
| 2508 | { |
| 2509 | int zoneid; |
| 2510 | struct zone *zone; |
| 2511 | enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx; |
| 2512 | |
Chen Feng | 6cd9dc3 | 2016-05-20 16:59:02 -0700 | [diff] [blame] | 2513 | for (zoneid = 0; zoneid <= classzone_idx; zoneid++) { |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2514 | zone = &pgdat->node_zones[zoneid]; |
| 2515 | |
| 2516 | if (!populated_zone(zone)) |
| 2517 | continue; |
| 2518 | |
| 2519 | if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, |
| 2520 | classzone_idx) == COMPACT_CONTINUE) |
| 2521 | return true; |
| 2522 | } |
| 2523 | |
| 2524 | return false; |
| 2525 | } |
| 2526 | |
| 2527 | static void kcompactd_do_work(pg_data_t *pgdat) |
| 2528 | { |
| 2529 | /* |
| 2530 | * With no special task, compact all zones so that a page of requested |
| 2531 | * order is allocatable. |
| 2532 | */ |
| 2533 | int zoneid; |
| 2534 | struct zone *zone; |
| 2535 | struct compact_control cc = { |
| 2536 | .order = pgdat->kcompactd_max_order, |
Mel Gorman | dbe2d4e | 2019-03-05 15:45:31 -0800 | [diff] [blame] | 2537 | .search_order = pgdat->kcompactd_max_order, |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2538 | .classzone_idx = pgdat->kcompactd_classzone_idx, |
| 2539 | .mode = MIGRATE_SYNC_LIGHT, |
David Rientjes | a0647dc | 2017-11-17 15:26:27 -0800 | [diff] [blame] | 2540 | .ignore_skip_hint = false, |
Michal Hocko | 73e64c5 | 2016-12-14 15:04:07 -0800 | [diff] [blame] | 2541 | .gfp_mask = GFP_KERNEL, |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2542 | }; |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2543 | trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, |
| 2544 | cc.classzone_idx); |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 2545 | count_compact_event(KCOMPACTD_WAKE); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2546 | |
Chen Feng | 6cd9dc3 | 2016-05-20 16:59:02 -0700 | [diff] [blame] | 2547 | for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) { |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2548 | int status; |
| 2549 | |
| 2550 | zone = &pgdat->node_zones[zoneid]; |
| 2551 | if (!populated_zone(zone)) |
| 2552 | continue; |
| 2553 | |
| 2554 | if (compaction_deferred(zone, cc.order)) |
| 2555 | continue; |
| 2556 | |
| 2557 | if (compaction_suitable(zone, cc.order, 0, zoneid) != |
| 2558 | COMPACT_CONTINUE) |
| 2559 | continue; |
| 2560 | |
Vlastimil Babka | 172400c | 2016-05-05 16:22:32 -0700 | [diff] [blame] | 2561 | if (kthread_should_stop()) |
| 2562 | return; |
Yafang Shao | a94b525 | 2019-09-23 15:36:54 -0700 | [diff] [blame] | 2563 | |
| 2564 | cc.zone = zone; |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 2565 | status = compact_zone(&cc, NULL); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2566 | |
Vlastimil Babka | 7ceb009 | 2016-10-07 16:57:44 -0700 | [diff] [blame] | 2567 | if (status == COMPACT_SUCCESS) { |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2568 | compaction_defer_reset(zone, cc.order, false); |
Michal Hocko | c8f7de0 | 2016-05-20 16:56:47 -0700 | [diff] [blame] | 2569 | } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2570 | /* |
David Rientjes | bc3106b | 2018-04-05 16:24:02 -0700 | [diff] [blame] | 2571 | * Buddy pages may become stranded on pcps that could |
| 2572 | * otherwise coalesce on the zone's free area for |
| 2573 | * order >= cc.order. This is ratelimited by the |
| 2574 | * upcoming deferral. |
| 2575 | */ |
| 2576 | drain_all_pages(zone); |
| 2577 | |
| 2578 | /* |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2579 | * We use sync migration mode here, so we defer like |
| 2580 | * sync direct compaction does. |
| 2581 | */ |
| 2582 | defer_compaction(zone, cc.order); |
| 2583 | } |
| 2584 | |
David Rientjes | 7f354a5 | 2017-02-22 15:44:50 -0800 | [diff] [blame] | 2585 | count_compact_events(KCOMPACTD_MIGRATE_SCANNED, |
| 2586 | cc.total_migrate_scanned); |
| 2587 | count_compact_events(KCOMPACTD_FREE_SCANNED, |
| 2588 | cc.total_free_scanned); |
| 2589 | |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2590 | VM_BUG_ON(!list_empty(&cc.freepages)); |
| 2591 | VM_BUG_ON(!list_empty(&cc.migratepages)); |
| 2592 | } |
| 2593 | |
| 2594 | /* |
| 2595 | * Regardless of success, we are done until woken up next. But remember |
| 2596 | * the requested order/classzone_idx in case it was higher/tighter than |
| 2597 | * our current ones |
| 2598 | */ |
| 2599 | if (pgdat->kcompactd_max_order <= cc.order) |
| 2600 | pgdat->kcompactd_max_order = 0; |
| 2601 | if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx) |
| 2602 | pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; |
| 2603 | } |
| 2604 | |
| 2605 | void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) |
| 2606 | { |
| 2607 | if (!order) |
| 2608 | return; |
| 2609 | |
| 2610 | if (pgdat->kcompactd_max_order < order) |
| 2611 | pgdat->kcompactd_max_order = order; |
| 2612 | |
| 2613 | if (pgdat->kcompactd_classzone_idx > classzone_idx) |
| 2614 | pgdat->kcompactd_classzone_idx = classzone_idx; |
| 2615 | |
Davidlohr Bueso | 6818600 | 2017-10-03 16:15:03 -0700 | [diff] [blame] | 2616 | /* |
| 2617 | * Pairs with implicit barrier in wait_event_freezable() |
| 2618 | * such that wakeups are not missed. |
| 2619 | */ |
| 2620 | if (!wq_has_sleeper(&pgdat->kcompactd_wait)) |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2621 | return; |
| 2622 | |
| 2623 | if (!kcompactd_node_suitable(pgdat)) |
| 2624 | return; |
| 2625 | |
| 2626 | trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, |
| 2627 | classzone_idx); |
| 2628 | wake_up_interruptible(&pgdat->kcompactd_wait); |
| 2629 | } |
| 2630 | |
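A minimal caller sketch (hypothetical; in this kernel the wakeups come mainly from kswapd in mm/vmscan.c): a path that has just failed to find an order-9 page on node 0 could nudge background compaction with

	wakeup_kcompactd(NODE_DATA(0), 9, ZONE_NORMAL);

The call is cheap when there is nothing to do: it returns early for order-0 requests, when nobody is sleeping on kcompactd_wait, and when no zone up to the classzone index passes compaction_suitable().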
| 2631 | /* |
| 2632 | * The background compaction daemon, started as a kernel thread |
| 2633 | * from the init process. |
| 2634 | */ |
| 2635 | static int kcompactd(void *p) |
| 2636 | { |
| 2637 | pg_data_t *pgdat = (pg_data_t*)p; |
| 2638 | struct task_struct *tsk = current; |
| 2639 | |
| 2640 | const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); |
| 2641 | |
| 2642 | if (!cpumask_empty(cpumask)) |
| 2643 | set_cpus_allowed_ptr(tsk, cpumask); |
| 2644 | |
| 2645 | set_freezable(); |
| 2646 | |
| 2647 | pgdat->kcompactd_max_order = 0; |
| 2648 | pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; |
| 2649 | |
| 2650 | while (!kthread_should_stop()) { |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 2651 | unsigned long pflags; |
| 2652 | |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2653 | trace_mm_compaction_kcompactd_sleep(pgdat->node_id); |
| 2654 | wait_event_freezable(pgdat->kcompactd_wait, |
| 2655 | kcompactd_work_requested(pgdat)); |
| 2656 | |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 2657 | psi_memstall_enter(&pflags); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2658 | kcompactd_do_work(pgdat); |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 2659 | psi_memstall_leave(&pflags); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2660 | } |
| 2661 | |
| 2662 | return 0; |
| 2663 | } |
| 2664 | |
| 2665 | /* |
| 2666 | * This kcompactd start function will be called by init and node-hot-add. |
| 2667 | * On node hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added. |
| 2668 | */ |
| 2669 | int kcompactd_run(int nid) |
| 2670 | { |
| 2671 | pg_data_t *pgdat = NODE_DATA(nid); |
| 2672 | int ret = 0; |
| 2673 | |
| 2674 | if (pgdat->kcompactd) |
| 2675 | return 0; |
| 2676 | |
| 2677 | pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); |
| 2678 | if (IS_ERR(pgdat->kcompactd)) { |
| 2679 | pr_err("Failed to start kcompactd on node %d\n", nid); |
| 2680 | ret = PTR_ERR(pgdat->kcompactd); |
| 2681 | pgdat->kcompactd = NULL; |
| 2682 | } |
| 2683 | return ret; |
| 2684 | } |
| 2685 | |
| 2686 | /* |
| 2687 | * Called by memory hotplug when all memory in a node is offlined. Caller must |
| 2688 | * hold mem_hotplug_begin/end(). |
| 2689 | */ |
| 2690 | void kcompactd_stop(int nid) |
| 2691 | { |
| 2692 | struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; |
| 2693 | |
| 2694 | if (kcompactd) { |
| 2695 | kthread_stop(kcompactd); |
| 2696 | NODE_DATA(nid)->kcompactd = NULL; |
| 2697 | } |
| 2698 | } |
| 2699 | |
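A hedged sketch of the expected run/stop pairing; the function names below are invented, and the real call sites live in the memory hotplug code.

static void node_memory_online_sketch(int nid)
{
	kcompactd_run(nid);	/* start the per-node daemon once the node has memory */
}

static void node_memory_offline_sketch(int nid)
{
	kcompactd_stop(nid);	/* caller must hold mem_hotplug_begin/end() */
}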
| 2700 | /* |
| 2701 | * It's optimal to keep kcompactd on the same CPUs as its node's memory, |
| 2702 | * but it is not required for correctness. So if the last cpu in a node |
| 2703 | * goes away, kcompactd is allowed to run anywhere; when the first cpu of |
| 2704 | * that node comes back, its cpu binding is restored. |
| 2705 | */ |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2706 | static int kcompactd_cpu_online(unsigned int cpu) |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2707 | { |
| 2708 | int nid; |
| 2709 | |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2710 | for_each_node_state(nid, N_MEMORY) { |
| 2711 | pg_data_t *pgdat = NODE_DATA(nid); |
| 2712 | const struct cpumask *mask; |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2713 | |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2714 | mask = cpumask_of_node(pgdat->node_id); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2715 | |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2716 | if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) |
| 2717 | /* One of our CPUs online: restore mask */ |
| 2718 | set_cpus_allowed_ptr(pgdat->kcompactd, mask); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2719 | } |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2720 | return 0; |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2721 | } |
| 2722 | |
| 2723 | static int __init kcompactd_init(void) |
| 2724 | { |
| 2725 | int nid; |
Anna-Maria Gleixner | e46b1db | 2016-11-27 00:13:42 +0100 | [diff] [blame] | 2726 | int ret; |
| 2727 | |
| 2728 | ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, |
| 2729 | "mm/compaction:online", |
| 2730 | kcompactd_cpu_online, NULL); |
| 2731 | if (ret < 0) { |
| 2732 | pr_err("kcompactd: failed to register hotplug callbacks.\n"); |
| 2733 | return ret; |
| 2734 | } |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2735 | |
| 2736 | for_each_node_state(nid, N_MEMORY) |
| 2737 | kcompactd_run(nid); |
Vlastimil Babka | 698b1b3 | 2016-03-17 14:18:08 -0700 | [diff] [blame] | 2738 | return 0; |
| 2739 | } |
| 2740 | subsys_initcall(kcompactd_init) |
| 2741 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 2742 | #endif /* CONFIG_COMPACTION */ |