/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#ifdef CONFIG_TRACEPOINTS
static const char *const compaction_status_string[] = {
	"deferred",
	"skipped",
	"continue",
	"partial",
	"complete",
	"no_suitable_page",
	"not_suitable_zone",
};
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
		kasan_alloc_pages(page, 0);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Check that the whole (or a subset) of a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}
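
/*
 * For example, the free and migration scanners below pass a pageblock-
 * aligned interval such as [block_start_pfn, block_end_pfn). Only the
 * first and last pfn are checked; if both are valid and report the same
 * zone id, the whole block is assumed to lie within @zone, per the
 * node-interleaving assumption described above.
 */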

#ifdef CONFIG_COMPACTION

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}
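
/*
 * For example: each failure makes defer_compaction() bump
 * compact_defer_shift, so compaction_deferred() keeps reporting "skip"
 * until compact_considered has counted up to 1 << compact_defer_shift
 * attempts. Once the shift saturates at COMPACT_MAX_DEFER_SHIFT, that is
 * 1 << 6 == 64 attempts, matching the "do not skip compaction more than
 * 64 times" bound above.
 */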

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn = zone_end_pfn(zone);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}
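
/*
 * Typical use by the scanners below (see isolate_freepages_block() and
 * isolate_migratepages_block()): take the lock lazily and give up on the
 * pageblock if an async scan cannot get it immediately, e.g.
 *
 *	locked = compact_trylock_irqsave(&cc->zone->lock, &flags, cc);
 *	if (!locked)
 *		break;
 */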

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 * async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 * scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}
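
/*
 * The isolation loops below call this once every SWAP_CLUSTER_MAX pfns
 * scanned, e.g.
 *
 *	if (!(blockpfn % SWAP_CLUSTER_MAX)
 *	    && compact_unlock_should_abort(&cc->zone->lock, flags,
 *							&locked, cc))
 *		break;
 *
 * so IRQs are re-enabled regularly even in the middle of a pageblock.
 */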

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}
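
/*
 * Used, for example, by the free scanner in isolate_freepages(), which
 * calls it once per SWAP_CLUSTER_MAX * pageblock_nr_pages pfns so that a
 * long walk over unsuitable pageblocks still reschedules or aborts.
 */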

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			cc->nr_freepages += isolated;
			if (!strict &&
				cc->nr_migratepages <= cc->nr_freepages) {
				blockpfn += isolated;
				break;
			}

			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}
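
/*
 * Both callers in this file use this helper: isolate_freepages_range()
 * passes strict == true (every page in the range must be isolated, as
 * required for CMA), while the free scanner in isolate_freepages() passes
 * strict == false and accepts whatever the pageblock yields.
 */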

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, and cause the function
 * to undo its actions and return zero.
 *
 * Otherwise, the function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
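	/*
	 * For example, assuming pageblock_nr_pages == 512: for pfn == 1000,
	 * ALIGN(1001, 512) == 1024, the first pfn of the next pageblock, so
	 * the first iteration covers the (possibly partial) block
	 * [1000, 1024).
	 */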

	for (; pfn < end_pfn; pfn += isolated,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass block_end_pfn if an isolated free page
		 * is larger than pageblock order. In this case, we adjust
		 * the scanning range to the right block.
		 */
		if (pfn >= block_end_pfn) {
			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
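
/*
 * For example, with 200 active + 300 inactive LRU pages in the zone and
 * 260 pages currently isolated (by compaction and/or reclaim),
 * 260 > (200 + 300) / 2, so the migration scanner below waits before
 * isolating more.
 */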

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be less than, equal to or more
 * than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages;
		 * skip any other type of page.
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				low_pfn = ALIGN(low_pfn + 1,
						pageblock_nr_pages) - 1;
			else
				low_pfn += (1 << compound_order(page)) - 1;

			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageTransHuge under lock */
			if (!PageLRU(page))
				continue;
			if (PageTransHuge(page)) {
				low_pfn += (1 << compound_order(page)) - 1;
				continue;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, the function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		/*
		 * In case of fatal failure, release everything that might
		 * have been isolated in the previous iteration, and signal
		 * the failure back to caller.
		 */
		if (!pfn) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			break;
		}

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}
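
/*
 * For example, assuming pageblock_order == 9 (512-page blocks): with
 * cc->migrate_pfn == 0x12300 and cc->free_pfn == 0x12345, both shift
 * down to pageblock 0x91, so the scanners are within the same pageblock
 * and compaction can finish.
 */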

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 941 | isolate_start_pfn = cc->free_pfn; |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 942 | block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1); |
| 943 | block_end_pfn = min(block_start_pfn + pageblock_nr_pages, |
| 944 | zone_end_pfn(zone)); |
Vlastimil Babka | 7ed695e | 2014-01-21 15:51:09 -0800 | [diff] [blame] | 945 | low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 946 | |
| 947 | /* |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 948 | * Isolate free pages until enough are available to migrate the |
| 949 | * pages on cc->migratepages. We stop searching if the migrate |
| 950 | * and free page scanners meet or enough free pages are isolated. |
| 951 | */ |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 952 | for (; block_start_pfn >= low_pfn; |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 953 | block_end_pfn = block_start_pfn, |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 954 | block_start_pfn -= pageblock_nr_pages, |
| 955 | isolate_start_pfn = block_start_pfn) { |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 956 | |
David Rientjes | f6ea3ad | 2013-09-30 13:45:03 -0700 | [diff] [blame] | 957 | /* |
| 958 | * This can iterate a massively long zone without finding any |
| 959 | * suitable migration targets, so periodically check if we need |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 960 | * to schedule, or even abort async compaction. |
David Rientjes | f6ea3ad | 2013-09-30 13:45:03 -0700 | [diff] [blame] | 961 | */ |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 962 | if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) |
| 963 | && compact_should_abort(cc)) |
| 964 | break; |
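| | /* |
| | * Illustrative scale (not in the original source): with |
| | * SWAP_CLUSTER_MAX == 32 and 512-page pageblocks, this recheck |
| | * happens roughly every 16384 pfns, i.e. every 64MiB with 4KiB pages. |
| | */ |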
David Rientjes | f6ea3ad | 2013-09-30 13:45:03 -0700 | [diff] [blame] | 965 | |
Vlastimil Babka | 7d49d88 | 2014-10-09 15:27:11 -0700 | [diff] [blame] | 966 | page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, |
| 967 | zone); |
| 968 | if (!page) |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 969 | continue; |
| 970 | |
| 971 | /* Check the block is suitable for migration */ |
Linus Torvalds | 68e3e92 | 2012-06-03 20:05:57 -0700 | [diff] [blame] | 972 | if (!suitable_migration_target(page)) |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 973 | continue; |
Linus Torvalds | 68e3e92 | 2012-06-03 20:05:57 -0700 | [diff] [blame] | 974 | |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 975 | /* If isolation recently failed, do not retry */ |
| 976 | if (!isolation_suitable(cc, page)) |
| 977 | continue; |
| 978 | |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 979 | /* Found a block suitable for isolating free pages from. */ |
Joonsoo Kim | 932ff6b | 2015-02-12 14:59:53 -0800 | [diff] [blame] | 980 | isolate_freepages_block(cc, &isolate_start_pfn, |
Vlastimil Babka | c96b9e5 | 2014-06-04 16:07:26 -0700 | [diff] [blame] | 981 | block_end_pfn, freelist, false); |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 982 | |
| 983 | /* |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 984 | * If we isolated enough freepages, or aborted due to async |
| 985 | * compaction being contended, terminate the loop. |
Vlastimil Babka | e14c720 | 2014-10-09 15:27:20 -0700 | [diff] [blame] | 986 | * Remember where the free scanner should restart next time, |
| 987 | * which is where isolate_freepages_block() left off. |
| 988 | * But if it scanned the whole pageblock, isolate_start_pfn |
| 989 | * now points at block_end_pfn, which is the start of the next |
| 990 | * pageblock. |
| 991 | * In that case, however, we want to restart at the start |
| 992 | * of the previous pageblock. |
| 993 | */ |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 994 | if ((cc->nr_freepages >= cc->nr_migratepages) |
| 995 | || cc->contended) { |
| 996 | if (isolate_start_pfn >= block_end_pfn) |
| 997 | isolate_start_pfn = |
| 998 | block_start_pfn - pageblock_nr_pages; |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 999 | break; |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1000 | } else { |
| 1001 | /* |
| 1002 | * isolate_freepages_block() should not terminate |
| 1003 | * prematurely unless contended, or isolated enough |
| 1004 | */ |
| 1005 | VM_BUG_ON(isolate_start_pfn < block_end_pfn); |
| 1006 | } |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 1007 | } |
| 1008 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1009 | /* split_free_page does not map the pages */ |
| 1010 | map_pages(freelist); |
Michal Nazarewicz | 2fe86e0 | 2012-01-30 13:16:26 +0100 | [diff] [blame] | 1011 | |
Vlastimil Babka | 7ed695e | 2014-01-21 15:51:09 -0800 | [diff] [blame] | 1012 | /* |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1013 | * Record where the free scanner will restart next time. Either we |
| 1014 | * broke from the loop and set isolate_start_pfn based on the last |
| 1015 | * call to isolate_freepages_block(), or we met the migration scanner |
| 1016 | * and the loop terminated due to isolate_start_pfn < low_pfn |
Vlastimil Babka | 7ed695e | 2014-01-21 15:51:09 -0800 | [diff] [blame] | 1017 | */ |
Vlastimil Babka | f5f61a3 | 2015-09-08 15:02:39 -0700 | [diff] [blame] | 1018 | cc->free_pfn = isolate_start_pfn; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1019 | } |
| 1020 | |
| 1021 | /* |
| 1022 | * This is a migrate-callback that "allocates" freepages by taking pages |
| 1023 | * from the isolated freelists in the block we are migrating to. |
| 1024 | */ |
| 1025 | static struct page *compaction_alloc(struct page *migratepage, |
| 1026 | unsigned long data, |
| 1027 | int **result) |
| 1028 | { |
| 1029 | struct compact_control *cc = (struct compact_control *)data; |
| 1030 | struct page *freepage; |
| 1031 | |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 1032 | /* |
| 1033 | * Isolate free pages if necessary, and if we are not aborting due to |
| 1034 | * contention. |
| 1035 | */ |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1036 | if (list_empty(&cc->freepages)) { |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 1037 | if (!cc->contended) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1038 | isolate_freepages(cc); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1039 | |
| 1040 | if (list_empty(&cc->freepages)) |
| 1041 | return NULL; |
| 1042 | } |
| 1043 | |
| 1044 | freepage = list_entry(cc->freepages.next, struct page, lru); |
| 1045 | list_del(&freepage->lru); |
| 1046 | cc->nr_freepages--; |
| 1047 | |
| 1048 | return freepage; |
| 1049 | } |
| 1050 | |
| 1051 | /* |
David Rientjes | d53aea3 | 2014-06-04 16:08:26 -0700 | [diff] [blame] | 1052 | * This is a migrate-callback that "frees" freepages back to the isolated |
| 1053 | * freelist. All pages on the freelist are from the same zone, so there is no |
| 1054 | * special handling needed for NUMA. |
| 1055 | */ |
| 1056 | static void compaction_free(struct page *page, unsigned long data) |
| 1057 | { |
| 1058 | struct compact_control *cc = (struct compact_control *)data; |
| 1059 | |
| 1060 | list_add(&page->lru, &cc->freepages); |
| 1061 | cc->nr_freepages++; |
| 1062 | } |
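| | /* |
| | * Note: compaction_alloc() and compaction_free() above are the |
| | * get_new_page/put_new_page callbacks handed to migrate_pages() |
| | * by compact_zone() below. |
| | */ |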
| 1063 | |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1064 | /* possible outcome of isolate_migratepages */ |
| 1065 | typedef enum { |
| 1066 | ISOLATE_ABORT, /* Abort compaction now */ |
| 1067 | ISOLATE_NONE, /* No pages isolated, continue scanning */ |
| 1068 | ISOLATE_SUCCESS, /* Pages isolated, migrate */ |
| 1069 | } isolate_migrate_t; |
| 1070 | |
| 1071 | /* |
Eric B Munson | 5bbe354 | 2015-04-15 16:13:20 -0700 | [diff] [blame] | 1072 | * Allow userspace to control policy on scanning the unevictable LRU for |
| 1073 | * compactable pages. |
| 1074 | */ |
| 1075 | int sysctl_compact_unevictable_allowed __read_mostly = 1; |
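| | /* Tunable at runtime, e.g. via /proc/sys/vm/compact_unevictable_allowed (0 skips the unevictable LRU). */ |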
| 1076 | |
| 1077 | /* |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1078 | * Isolate all pages that can be migrated from the first suitable block, |
| 1079 | * starting at the block pointed to by the migrate scanner pfn within |
| 1080 | * compact_control. |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1081 | */ |
| 1082 | static isolate_migrate_t isolate_migratepages(struct zone *zone, |
| 1083 | struct compact_control *cc) |
| 1084 | { |
| 1085 | unsigned long low_pfn, end_pfn; |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1086 | struct page *page; |
| 1087 | const isolate_mode_t isolate_mode = |
Eric B Munson | 5bbe354 | 2015-04-15 16:13:20 -0700 | [diff] [blame] | 1088 | (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1089 | (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0); |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1090 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1091 | /* |
| 1092 | * Start at where we last stopped, or beginning of the zone as |
| 1093 | * initialized by compact_zone() |
| 1094 | */ |
| 1095 | low_pfn = cc->migrate_pfn; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1096 | |
| 1097 | /* Only scan within a pageblock boundary */ |
Mel Gorman | a9aacbc | 2013-02-22 16:32:25 -0800 | [diff] [blame] | 1098 | end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages); |
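| | /* Illustrative (not in the original source): with 512-page pageblocks, low_pfn 0x12345 yields end_pfn 0x12400. */ |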
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1099 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1100 | /* |
| 1101 | * Iterate over whole pageblocks until we find the first suitable. |
| 1102 | * Do not cross the free scanner. |
| 1103 | */ |
| 1104 | for (; end_pfn <= cc->free_pfn; |
| 1105 | low_pfn = end_pfn, end_pfn += pageblock_nr_pages) { |
| 1106 | |
| 1107 | /* |
| 1108 | * This can potentially iterate a massively long zone with |
| 1109 | * many pageblocks unsuitable, so periodically check if we |
| 1110 | * need to schedule, or even abort async compaction. |
| 1111 | */ |
| 1112 | if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) |
| 1113 | && compact_should_abort(cc)) |
| 1114 | break; |
| 1115 | |
Vlastimil Babka | 7d49d88 | 2014-10-09 15:27:11 -0700 | [diff] [blame] | 1116 | page = pageblock_pfn_to_page(low_pfn, end_pfn, zone); |
| 1117 | if (!page) |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1118 | continue; |
| 1119 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1120 | /* If isolation recently failed, do not retry */ |
| 1121 | if (!isolation_suitable(cc, page)) |
| 1122 | continue; |
| 1123 | |
| 1124 | /* |
| 1125 | * For async compaction, also only scan in MOVABLE blocks. |
| 1126 | * Async compaction is optimistic to see if the minimum amount |
| 1127 | * of work satisfies the allocation. |
| 1128 | */ |
| 1129 | if (cc->mode == MIGRATE_ASYNC && |
| 1130 | !migrate_async_suitable(get_pageblock_migratetype(page))) |
| 1131 | continue; |
| 1132 | |
| 1133 | /* Perform the isolation */ |
| 1134 | low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn, |
| 1135 | isolate_mode); |
| 1136 | |
Hugh Dickins | ff59909 | 2015-02-12 15:00:28 -0800 | [diff] [blame] | 1137 | if (!low_pfn || cc->contended) { |
| 1138 | acct_isolated(zone, cc); |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1139 | return ISOLATE_ABORT; |
Hugh Dickins | ff59909 | 2015-02-12 15:00:28 -0800 | [diff] [blame] | 1140 | } |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1141 | |
| 1142 | /* |
| 1143 | * Either we isolated something and can proceed with migration, or |
| 1144 | * we failed and compact_zone() should decide whether to |
| 1145 | * continue or not. |
| 1146 | */ |
| 1147 | break; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1148 | } |
| 1149 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1150 | acct_isolated(zone, cc); |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1151 | /* Record where migration scanner will be restarted. */ |
| 1152 | cc->migrate_pfn = low_pfn; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1153 | |
Vlastimil Babka | edc2ca6 | 2014-10-09 15:27:09 -0700 | [diff] [blame] | 1154 | return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1155 | } |
| 1156 | |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1157 | static int __compact_finished(struct zone *zone, struct compact_control *cc, |
David Rientjes | 6d7ce55 | 2014-10-09 15:27:27 -0700 | [diff] [blame] | 1158 | const int migratetype) |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1159 | { |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1160 | unsigned int order; |
Andrea Arcangeli | 5a03b05 | 2011-01-13 15:47:11 -0800 | [diff] [blame] | 1161 | unsigned long watermark; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1162 | |
Vlastimil Babka | be97657 | 2014-06-04 16:10:41 -0700 | [diff] [blame] | 1163 | if (cc->contended || fatal_signal_pending(current)) |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1164 | return COMPACT_PARTIAL; |
| 1165 | |
Mel Gorman | 753341a | 2012-10-08 16:32:40 -0700 | [diff] [blame] | 1166 | /* Compaction run completes if the migrate and free scanner meet */ |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1167 | if (compact_scanners_met(cc)) { |
Vlastimil Babka | 55b7c4c | 2014-01-21 15:51:11 -0800 | [diff] [blame] | 1168 | /* Let the next compaction start anew. */ |
Vlastimil Babka | 02333641 | 2015-09-08 15:02:42 -0700 | [diff] [blame^] | 1169 | reset_cached_positions(zone); |
Vlastimil Babka | 55b7c4c | 2014-01-21 15:51:11 -0800 | [diff] [blame] | 1170 | |
Mel Gorman | 6299702 | 2012-10-08 16:32:47 -0700 | [diff] [blame] | 1171 | /* |
| 1172 | * Mark that the PG_migrate_skip information should be cleared |
| 1173 | * by kswapd when it goes to sleep. kswapd does not set the |
| 1174 | * flag itself as the decision to be clear should be directly |
| 1175 | * based on an allocation request. |
| 1176 | */ |
| 1177 | if (!current_is_kswapd()) |
| 1178 | zone->compact_blockskip_flush = true; |
| 1179 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1180 | return COMPACT_COMPLETE; |
Mel Gorman | bb13ffe | 2012-10-08 16:32:41 -0700 | [diff] [blame] | 1181 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1182 | |
Johannes Weiner | 82478fb | 2011-01-20 14:44:21 -0800 | [diff] [blame] | 1183 | /* |
| 1184 | * order == -1 is expected when compacting via |
| 1185 | * /proc/sys/vm/compact_memory |
| 1186 | */ |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1187 | if (cc->order == -1) |
| 1188 | return COMPACT_CONTINUE; |
| 1189 | |
Michal Hocko | 3957c77 | 2011-06-15 15:08:25 -0700 | [diff] [blame] | 1190 | /* Compaction run is not finished if the watermark is not met */ |
| 1191 | watermark = low_wmark_pages(zone); |
Michal Hocko | 3957c77 | 2011-06-15 15:08:25 -0700 | [diff] [blame] | 1192 | |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1193 | if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx, |
| 1194 | cc->alloc_flags)) |
Michal Hocko | 3957c77 | 2011-06-15 15:08:25 -0700 | [diff] [blame] | 1195 | return COMPACT_CONTINUE; |
| 1196 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1197 | /* Direct compactor: Is a suitable page free? */ |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1198 | for (order = cc->order; order < MAX_ORDER; order++) { |
| 1199 | struct free_area *area = &zone->free_area[order]; |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 1200 | bool can_steal; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1201 | |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1202 | /* Job done if page is free of the right migratetype */ |
David Rientjes | 6d7ce55 | 2014-10-09 15:27:27 -0700 | [diff] [blame] | 1203 | if (!list_empty(&area->free_list[migratetype])) |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1204 | return COMPACT_PARTIAL; |
| 1205 | |
Joonsoo Kim | 2149cda | 2015-04-14 15:45:21 -0700 | [diff] [blame] | 1206 | #ifdef CONFIG_CMA |
| 1207 | /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ |
| 1208 | if (migratetype == MIGRATE_MOVABLE && |
| 1209 | !list_empty(&area->free_list[MIGRATE_CMA])) |
| 1210 | return COMPACT_PARTIAL; |
| 1211 | #endif |
| 1212 | /* |
| 1213 | * Job done if allocation would steal freepages from |
| 1214 | * other migratetype buddy lists. |
| 1215 | */ |
| 1216 | if (find_suitable_fallback(area, order, migratetype, |
| 1217 | true, &can_steal) != -1) |
Mel Gorman | 8fb74b9 | 2013-01-11 14:32:16 -0800 | [diff] [blame] | 1218 | return COMPACT_PARTIAL; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1219 | } |
| 1220 | |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1221 | return COMPACT_NO_SUITABLE_PAGE; |
| 1222 | } |
| 1223 | |
| 1224 | static int compact_finished(struct zone *zone, struct compact_control *cc, |
| 1225 | const int migratetype) |
| 1226 | { |
| 1227 | int ret; |
| 1228 | |
| 1229 | ret = __compact_finished(zone, cc, migratetype); |
| 1230 | trace_mm_compaction_finished(zone, cc->order, ret); |
| 1231 | if (ret == COMPACT_NO_SUITABLE_PAGE) |
| 1232 | ret = COMPACT_CONTINUE; |
| 1233 | |
| 1234 | return ret; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1235 | } |
| 1236 | |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1237 | /* |
| 1238 | * compaction_suitable: Is this suitable to run compaction on this zone now? |
| 1239 | * Returns |
| 1240 | * COMPACT_SKIPPED - If there are too few free pages for compaction |
| 1241 | * COMPACT_PARTIAL - If the allocation would succeed without compaction |
| 1242 | * COMPACT_CONTINUE - If compaction should run now |
| 1243 | */ |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1244 | static unsigned long __compaction_suitable(struct zone *zone, int order, |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1245 | int alloc_flags, int classzone_idx) |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1246 | { |
| 1247 | int fragindex; |
| 1248 | unsigned long watermark; |
| 1249 | |
| 1250 | /* |
Michal Hocko | 3957c77 | 2011-06-15 15:08:25 -0700 | [diff] [blame] | 1251 | * order == -1 is expected when compacting via |
| 1252 | * /proc/sys/vm/compact_memory |
| 1253 | */ |
| 1254 | if (order == -1) |
| 1255 | return COMPACT_CONTINUE; |
| 1256 | |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1257 | watermark = low_wmark_pages(zone); |
| 1258 | /* |
| 1259 | * If watermarks for high-order allocation are already met, there |
| 1260 | * should be no need for compaction at all. |
| 1261 | */ |
| 1262 | if (zone_watermark_ok(zone, order, watermark, classzone_idx, |
| 1263 | alloc_flags)) |
| 1264 | return COMPACT_PARTIAL; |
| 1265 | |
Michal Hocko | 3957c77 | 2011-06-15 15:08:25 -0700 | [diff] [blame] | 1266 | /* |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1267 | * Watermarks for order-0 must be met for compaction. Note the 2UL: |
| 1268 | * this is because during migration, copies of pages need to be |
| 1269 | * allocated and, for a short time, the footprint is higher. |
| 1270 | */ |
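| | /* |
| | * Illustrative magnitude (not from the original source): for an |
| | * order-9 request with 4KiB pages, 2UL << 9 adds 1024 pages (~4MiB) |
| | * of headroom on top of the low watermark. |
| | */ |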
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1271 | watermark += (2UL << order); |
| 1272 | if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags)) |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1273 | return COMPACT_SKIPPED; |
| 1274 | |
| 1275 | /* |
| 1276 | * The fragmentation index determines if allocation failures are due |
| 1277 | * to low memory or external fragmentation: |
| 1278 | * |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1279 | *  - an index of -1000 would imply allocations might succeed depending |
| 1280 | *    on watermarks, but we already failed the high-order watermark check |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1281 | *  - an index towards 0 implies failure is due to lack of memory |
| 1282 | *  - an index towards 1000 implies failure is due to fragmentation |
| 1283 | * |
| 1284 | * Only compact if a failure would be due to fragmentation. |
| 1285 | */ |
| 1286 | fragindex = fragmentation_index(zone, order); |
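| | /* |
| | * With the default sysctl_extfrag_threshold of 500 (defined below), |
| | * an index at or below the threshold is treated as a low-memory |
| | * problem and compaction is skipped for this zone. |
| | */ |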
| 1287 | if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1288 | return COMPACT_NOT_SUITABLE_ZONE; |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1289 | |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1290 | return COMPACT_CONTINUE; |
| 1291 | } |
| 1292 | |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1293 | unsigned long compaction_suitable(struct zone *zone, int order, |
| 1294 | int alloc_flags, int classzone_idx) |
| 1295 | { |
| 1296 | unsigned long ret; |
| 1297 | |
| 1298 | ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx); |
| 1299 | trace_mm_compaction_suitable(zone, order, ret); |
| 1300 | if (ret == COMPACT_NOT_SUITABLE_ZONE) |
| 1301 | ret = COMPACT_SKIPPED; |
| 1302 | |
| 1303 | return ret; |
| 1304 | } |
| 1305 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1306 | static int compact_zone(struct zone *zone, struct compact_control *cc) |
| 1307 | { |
| 1308 | int ret; |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 1309 | unsigned long start_pfn = zone->zone_start_pfn; |
Cody P Schafer | 108bcc9 | 2013-02-22 16:35:23 -0800 | [diff] [blame] | 1310 | unsigned long end_pfn = zone_end_pfn(zone); |
David Rientjes | 6d7ce55 | 2014-10-09 15:27:27 -0700 | [diff] [blame] | 1311 | const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1312 | const bool sync = cc->mode != MIGRATE_ASYNC; |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1313 | unsigned long last_migrated_pfn = 0; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1314 | |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1315 | ret = compaction_suitable(zone, cc->order, cc->alloc_flags, |
| 1316 | cc->classzone_idx); |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1317 | switch (ret) { |
| 1318 | case COMPACT_PARTIAL: |
| 1319 | case COMPACT_SKIPPED: |
| 1320 | /* Compaction is likely to fail */ |
| 1321 | return ret; |
| 1322 | case COMPACT_CONTINUE: |
| 1323 | /* Fall through to compaction */ |
| 1324 | ; |
| 1325 | } |
| 1326 | |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 1327 | /* |
Vlastimil Babka | d3132e4 | 2014-01-21 15:51:08 -0800 | [diff] [blame] | 1328 | * Clear pageblock skip if there were failures recently and compaction |
| 1329 | * is about to be retried after being deferred. kswapd does not do |
| 1330 | * this reset as it'll reset the cached information when going to sleep. |
| 1331 | */ |
| 1332 | if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) |
| 1333 | __reset_isolation_suitable(zone); |
| 1334 | |
| 1335 | /* |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 1336 | * Setup to move all movable pages to the end of the zone. Used cached |
| 1337 | * information on where the scanners should start but check that it |
| 1338 | * is initialised by ensuring the values are within zone boundaries. |
| 1339 | */ |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1340 | cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 1341 | cc->free_pfn = zone->compact_cached_free_pfn; |
| 1342 | if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { |
| 1343 | cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); |
| 1344 | zone->compact_cached_free_pfn = cc->free_pfn; |
| 1345 | } |
| 1346 | if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { |
| 1347 | cc->migrate_pfn = start_pfn; |
David Rientjes | 35979ef | 2014-06-04 16:08:27 -0700 | [diff] [blame] | 1348 | zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; |
| 1349 | zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; |
Mel Gorman | c89511a | 2012-10-08 16:32:45 -0700 | [diff] [blame] | 1350 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1351 | |
Joonsoo Kim | 16c4a09 | 2015-02-11 15:27:01 -0800 | [diff] [blame] | 1352 | trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, |
| 1353 | cc->free_pfn, end_pfn, sync); |
Mel Gorman | 0eb927c | 2014-01-21 15:51:05 -0800 | [diff] [blame] | 1354 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1355 | migrate_prep_local(); |
| 1356 | |
David Rientjes | 6d7ce55 | 2014-10-09 15:27:27 -0700 | [diff] [blame] | 1357 | while ((ret = compact_finished(zone, cc, migratetype)) == |
| 1358 | COMPACT_CONTINUE) { |
Minchan Kim | 9d502c1 | 2011-03-22 16:30:39 -0700 | [diff] [blame] | 1359 | int err; |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1360 | unsigned long isolate_start_pfn = cc->migrate_pfn; |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1361 | |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 1362 | switch (isolate_migratepages(zone, cc)) { |
| 1363 | case ISOLATE_ABORT: |
| 1364 | ret = COMPACT_PARTIAL; |
Rafael Aquini | 5733c7d | 2012-12-11 16:02:47 -0800 | [diff] [blame] | 1365 | putback_movable_pages(&cc->migratepages); |
Shaohua Li | e64c523 | 2012-10-08 16:32:27 -0700 | [diff] [blame] | 1366 | cc->nr_migratepages = 0; |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 1367 | goto out; |
| 1368 | case ISOLATE_NONE: |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1369 | /* |
| 1370 | * We haven't isolated and migrated anything, but |
| 1371 | * there might still be unflushed migrations from |
| 1372 | * previous cc->order aligned block. |
| 1373 | */ |
| 1374 | goto check_drain; |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 1375 | case ISOLATE_SUCCESS: |
| 1376 | ; |
| 1377 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1378 | |
David Rientjes | d53aea3 | 2014-06-04 16:08:26 -0700 | [diff] [blame] | 1379 | err = migrate_pages(&cc->migratepages, compaction_alloc, |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1380 | compaction_free, (unsigned long)cc, cc->mode, |
Mel Gorman | 7b2a2d4 | 2012-10-19 14:07:31 +0100 | [diff] [blame] | 1381 | MR_COMPACTION); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1382 | |
Vlastimil Babka | f8c9301 | 2014-06-04 16:08:32 -0700 | [diff] [blame] | 1383 | trace_mm_compaction_migratepages(cc->nr_migratepages, err, |
| 1384 | &cc->migratepages); |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1385 | |
Vlastimil Babka | f8c9301 | 2014-06-04 16:08:32 -0700 | [diff] [blame] | 1386 | /* All pages were either migrated or will be released */ |
| 1387 | cc->nr_migratepages = 0; |
Minchan Kim | 9d502c1 | 2011-03-22 16:30:39 -0700 | [diff] [blame] | 1388 | if (err) { |
Rafael Aquini | 5733c7d | 2012-12-11 16:02:47 -0800 | [diff] [blame] | 1389 | putback_movable_pages(&cc->migratepages); |
Vlastimil Babka | 7ed695e | 2014-01-21 15:51:09 -0800 | [diff] [blame] | 1390 | /* |
| 1391 | * migrate_pages() may return -ENOMEM when scanners meet |
| 1392 | * and we want compact_finished() to detect it |
| 1393 | */ |
Vlastimil Babka | f2849aa | 2015-09-08 15:02:36 -0700 | [diff] [blame] | 1394 | if (err == -ENOMEM && !compact_scanners_met(cc)) { |
David Rientjes | 4bf2bba | 2012-07-11 14:02:13 -0700 | [diff] [blame] | 1395 | ret = COMPACT_PARTIAL; |
| 1396 | goto out; |
| 1397 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1398 | } |
Vlastimil Babka | fdaf7f5 | 2014-12-10 15:43:34 -0800 | [diff] [blame] | 1399 | |
| 1400 | /* |
| 1401 | * Record where we could have freed pages by migration and not |
| 1402 | * yet flushed them to buddy allocator. We use the pfn that |
| 1403 | * isolate_migratepages() started from in this loop iteration |
| 1404 | * - this is the lowest page that could have been isolated and |
| 1405 | * then freed by migration. |
| 1406 | */ |
| 1407 | if (!last_migrated_pfn) |
| 1408 | last_migrated_pfn = isolate_start_pfn; |
| 1409 | |
| 1410 | check_drain: |
| 1411 | /* |
| 1412 | * Has the migration scanner moved away from the previous |
| 1413 | * cc->order aligned block where we migrated from? If yes, |
| 1414 | * flush the pages that were freed, so that they can merge and |
| 1415 | * compact_finished() can detect immediately if allocation |
| 1416 | * would succeed. |
| 1417 | */ |
| 1418 | if (cc->order > 0 && last_migrated_pfn) { |
| 1419 | int cpu; |
| 1420 | unsigned long current_block_start = |
| 1421 | cc->migrate_pfn & ~((1UL << cc->order) - 1); |
| 1422 | |
| 1423 | if (last_migrated_pfn < current_block_start) { |
| 1424 | cpu = get_cpu(); |
| 1425 | lru_add_drain_cpu(cpu); |
| 1426 | drain_local_pages(zone); |
| 1427 | put_cpu(); |
| 1428 | /* No more flushing until we migrate again */ |
| 1429 | last_migrated_pfn = 0; |
| 1430 | } |
| 1431 | } |
| 1432 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1433 | } |
| 1434 | |
Mel Gorman | f9e35b3 | 2011-06-15 15:08:52 -0700 | [diff] [blame] | 1435 | out: |
Vlastimil Babka | 6bace09 | 2014-12-10 15:43:31 -0800 | [diff] [blame] | 1436 | /* |
| 1437 | * Release free pages and update where the free scanner should restart, |
| 1438 | * so we don't leave any returned pages behind in the next attempt. |
| 1439 | */ |
| 1440 | if (cc->nr_freepages > 0) { |
| 1441 | unsigned long free_pfn = release_freepages(&cc->freepages); |
| 1442 | |
| 1443 | cc->nr_freepages = 0; |
| 1444 | VM_BUG_ON(free_pfn == 0); |
| 1445 | /* The cached pfn is always the first in a pageblock */ |
| 1446 | free_pfn &= ~(pageblock_nr_pages-1); |
| 1447 | /* |
| 1448 | * Only go back, not forward. The cached pfn might have been |
| 1449 | * already reset to zone end in compact_finished() |
| 1450 | */ |
| 1451 | if (free_pfn > zone->compact_cached_free_pfn) |
| 1452 | zone->compact_cached_free_pfn = free_pfn; |
| 1453 | } |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1454 | |
Joonsoo Kim | 16c4a09 | 2015-02-11 15:27:01 -0800 | [diff] [blame] | 1455 | trace_mm_compaction_end(start_pfn, cc->migrate_pfn, |
| 1456 | cc->free_pfn, end_pfn, sync, ret); |
Mel Gorman | 0eb927c | 2014-01-21 15:51:05 -0800 | [diff] [blame] | 1457 | |
Mel Gorman | 748446b | 2010-05-24 14:32:27 -0700 | [diff] [blame] | 1458 | return ret; |
| 1459 | } |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1460 | |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1461 | static unsigned long compact_zone_order(struct zone *zone, int order, |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1462 | gfp_t gfp_mask, enum migrate_mode mode, int *contended, |
| 1463 | int alloc_flags, int classzone_idx) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1464 | { |
Shaohua Li | e64c523 | 2012-10-08 16:32:27 -0700 | [diff] [blame] | 1465 | unsigned long ret; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1466 | struct compact_control cc = { |
| 1467 | .nr_freepages = 0, |
| 1468 | .nr_migratepages = 0, |
| 1469 | .order = order, |
David Rientjes | 6d7ce55 | 2014-10-09 15:27:27 -0700 | [diff] [blame] | 1470 | .gfp_mask = gfp_mask, |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1471 | .zone = zone, |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1472 | .mode = mode, |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1473 | .alloc_flags = alloc_flags, |
| 1474 | .classzone_idx = classzone_idx, |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1475 | }; |
| 1476 | INIT_LIST_HEAD(&cc.freepages); |
| 1477 | INIT_LIST_HEAD(&cc.migratepages); |
| 1478 | |
Shaohua Li | e64c523 | 2012-10-08 16:32:27 -0700 | [diff] [blame] | 1479 | ret = compact_zone(zone, &cc); |
| 1480 | |
| 1481 | VM_BUG_ON(!list_empty(&cc.freepages)); |
| 1482 | VM_BUG_ON(!list_empty(&cc.migratepages)); |
| 1483 | |
| 1484 | *contended = cc.contended; |
| 1485 | return ret; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1486 | } |
| 1487 | |
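| | /* Fragmentation index threshold (0..1000); tunable via /proc/sys/vm/extfrag_threshold. */ |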
Mel Gorman | 5e77190 | 2010-05-24 14:32:31 -0700 | [diff] [blame] | 1488 | int sysctl_extfrag_threshold = 500; |
| 1489 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1490 | /** |
| 1491 | * try_to_compact_pages - Direct compact to satisfy a high-order allocation |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1492 | * @gfp_mask: The GFP mask of the current allocation |
Vlastimil Babka | 1a6d53a | 2015-02-11 15:25:44 -0800 | [diff] [blame] | 1493 | * @order: The order of the current allocation |
| 1494 | * @alloc_flags: The allocation flags of the current allocation |
| 1495 | * @ac: The context of current allocation |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1496 | * @mode: The migration mode for async, sync light, or sync migration |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1497 | * @contended: Return value that determines if compaction was aborted due to |
| 1498 | * need_resched() or lock contention |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1499 | * |
| 1500 | * This is the main entry point for direct page compaction. |
| 1501 | */ |
Vlastimil Babka | 1a6d53a | 2015-02-11 15:25:44 -0800 | [diff] [blame] | 1502 | unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, |
| 1503 | int alloc_flags, const struct alloc_context *ac, |
| 1504 | enum migrate_mode mode, int *contended) |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1505 | { |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1506 | int may_enter_fs = gfp_mask & __GFP_FS; |
| 1507 | int may_perform_io = gfp_mask & __GFP_IO; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1508 | struct zoneref *z; |
| 1509 | struct zone *zone; |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 1510 | int rc = COMPACT_DEFERRED; |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1511 | int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */ |
| 1512 | |
| 1513 | *contended = COMPACT_CONTENDED_NONE; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1514 | |
Mel Gorman | 4ffb633 | 2012-10-08 16:29:09 -0700 | [diff] [blame] | 1515 | /* Check if the GFP flags allow compaction */ |
Andrea Arcangeli | c5a73c3 | 2011-01-13 15:47:11 -0800 | [diff] [blame] | 1516 | if (!order || !may_enter_fs || !may_perform_io) |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 1517 | return COMPACT_SKIPPED; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1518 | |
Joonsoo Kim | 837d026 | 2015-02-11 15:27:06 -0800 | [diff] [blame] | 1519 | trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode); |
| 1520 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1521 | /* Compact each zone in the list */ |
Vlastimil Babka | 1a6d53a | 2015-02-11 15:25:44 -0800 | [diff] [blame] | 1522 | for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, |
| 1523 | ac->nodemask) { |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1524 | int status; |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1525 | int zone_contended; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1526 | |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 1527 | if (compaction_deferred(zone, order)) |
| 1528 | continue; |
| 1529 | |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1530 | status = compact_zone_order(zone, order, gfp_mask, mode, |
Vlastimil Babka | 1a6d53a | 2015-02-11 15:25:44 -0800 | [diff] [blame] | 1531 | &zone_contended, alloc_flags, |
| 1532 | ac->classzone_idx); |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1533 | rc = max(status, rc); |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1534 | /* |
| 1535 | * It takes at least one zone that wasn't lock contended |
| 1536 | * to clear all_zones_contended. |
| 1537 | */ |
| 1538 | all_zones_contended &= zone_contended; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1539 | |
Mel Gorman | 3e7d344 | 2011-01-13 15:45:56 -0800 | [diff] [blame] | 1540 | /* If a normal allocation would succeed, stop compacting */ |
Vlastimil Babka | ebff398 | 2014-12-10 15:43:22 -0800 | [diff] [blame] | 1541 | if (zone_watermark_ok(zone, order, low_wmark_pages(zone), |
Vlastimil Babka | 1a6d53a | 2015-02-11 15:25:44 -0800 | [diff] [blame] | 1542 | ac->classzone_idx, alloc_flags)) { |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 1543 | /* |
| 1544 | * We think the allocation will succeed in this zone, |
| 1545 | * but it is not certain, hence the false. The caller |
| 1546 | * will repeat this with true if allocation indeed |
| 1547 | * succeeds in this zone. |
| 1548 | */ |
| 1549 | compaction_defer_reset(zone, order, false); |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1550 | /* |
| 1551 | * It is possible that async compaction aborted due to |
| 1552 | * need_resched() and the watermarks were ok thanks to |
| 1553 | * somebody else freeing memory. The allocation can, |
| 1554 | * however, still fail, so we had better signal the |
| 1555 | * need_resched() contention anyway (this will not |
| 1556 | * prevent the allocation attempt). |
| 1557 | */ |
| 1558 | if (zone_contended == COMPACT_CONTENDED_SCHED) |
| 1559 | *contended = COMPACT_CONTENDED_SCHED; |
| 1560 | |
| 1561 | goto break_loop; |
| 1562 | } |
| 1563 | |
Vlastimil Babka | f866979 | 2014-12-10 15:43:28 -0800 | [diff] [blame] | 1564 | if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) { |
Vlastimil Babka | 53853e2 | 2014-10-09 15:27:02 -0700 | [diff] [blame] | 1565 | /* |
| 1566 | * We think that allocation won't succeed in this zone |
| 1567 | * so we defer compaction there. If it ends up |
| 1568 | * succeeding after all, it will be reset. |
| 1569 | */ |
| 1570 | defer_compaction(zone, order); |
| 1571 | } |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1572 | |
| 1573 | /* |
| 1574 | * We might have stopped compacting due to need_resched() in |
| 1575 | * async compaction, or due to a fatal signal detected. In that |
| 1576 | * case do not try further zones and signal need_resched() |
| 1577 | * contention. |
| 1578 | */ |
| 1579 | if ((zone_contended == COMPACT_CONTENDED_SCHED) |
| 1580 | || fatal_signal_pending(current)) { |
| 1581 | *contended = COMPACT_CONTENDED_SCHED; |
| 1582 | goto break_loop; |
| 1583 | } |
| 1584 | |
| 1585 | continue; |
| 1586 | break_loop: |
| 1587 | /* |
| 1588 | * We might not have tried all the zones, so be conservative |
| 1589 | * and assume they are not all lock contended. |
| 1590 | */ |
| 1591 | all_zones_contended = 0; |
| 1592 | break; |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1593 | } |
| 1594 | |
Vlastimil Babka | 1f9efde | 2014-10-09 15:27:14 -0700 | [diff] [blame] | 1595 | /* |
| 1596 | * If at least one zone wasn't deferred or skipped, we report if all |
| 1597 | * zones that were tried were lock contended. |
| 1598 | */ |
| 1599 | if (rc > COMPACT_SKIPPED && all_zones_contended) |
| 1600 | *contended = COMPACT_CONTENDED_LOCK; |
| 1601 | |
Mel Gorman | 56de726 | 2010-05-24 14:32:30 -0700 | [diff] [blame] | 1602 | return rc; |
| 1603 | } |
| 1604 | |
| 1605 | |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1606 | /* Compact all zones within a node */ |
Andrew Morton | 7103f16 | 2013-02-22 16:32:33 -0800 | [diff] [blame] | 1607 | static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1608 | { |
| 1609 | int zoneid; |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1610 | struct zone *zone; |
| 1611 | |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1612 | for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1613 | |
| 1614 | zone = &pgdat->node_zones[zoneid]; |
| 1615 | if (!populated_zone(zone)) |
| 1616 | continue; |
| 1617 | |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1618 | cc->nr_freepages = 0; |
| 1619 | cc->nr_migratepages = 0; |
| 1620 | cc->zone = zone; |
| 1621 | INIT_LIST_HEAD(&cc->freepages); |
| 1622 | INIT_LIST_HEAD(&cc->migratepages); |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1623 | |
Gioh Kim | 195b0c6 | 2015-04-15 16:13:33 -0700 | [diff] [blame] | 1624 | /* |
| 1625 | * When called via /proc/sys/vm/compact_memory |
| 1626 | * this makes sure we compact the whole zone regardless of |
| 1627 | * cached scanner positions. |
| 1628 | */ |
| 1629 | if (cc->order == -1) |
| 1630 | __reset_isolation_suitable(zone); |
| 1631 | |
Dan Carpenter | aad6ec3 | 2012-03-21 16:33:54 -0700 | [diff] [blame] | 1632 | if (cc->order == -1 || !compaction_deferred(zone, cc->order)) |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1633 | compact_zone(zone, cc); |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1634 | |
Rik van Riel | aff6224 | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1635 | if (cc->order > 0) { |
Vlastimil Babka | de6c60a | 2014-01-21 15:51:07 -0800 | [diff] [blame] | 1636 | if (zone_watermark_ok(zone, cc->order, |
| 1637 | low_wmark_pages(zone), 0, 0)) |
| 1638 | compaction_defer_reset(zone, cc->order, false); |
Rik van Riel | aff6224 | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1639 | } |
| 1640 | |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1641 | VM_BUG_ON(!list_empty(&cc->freepages)); |
| 1642 | VM_BUG_ON(!list_empty(&cc->migratepages)); |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1643 | } |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1644 | } |
| 1645 | |
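| | /* Called by kswapd to compact a node's zones after reclaim has made progress. */ |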
Andrew Morton | 7103f16 | 2013-02-22 16:32:33 -0800 | [diff] [blame] | 1646 | void compact_pgdat(pg_data_t *pgdat, int order) |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1647 | { |
| 1648 | struct compact_control cc = { |
| 1649 | .order = order, |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1650 | .mode = MIGRATE_ASYNC, |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1651 | }; |
| 1652 | |
Mel Gorman | 3a7200a | 2013-09-11 14:22:19 -0700 | [diff] [blame] | 1653 | if (!order) |
| 1654 | return; |
| 1655 | |
Andrew Morton | 7103f16 | 2013-02-22 16:32:33 -0800 | [diff] [blame] | 1656 | __compact_pgdat(pgdat, &cc); |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1657 | } |
| 1658 | |
Andrew Morton | 7103f16 | 2013-02-22 16:32:33 -0800 | [diff] [blame] | 1659 | static void compact_node(int nid) |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1660 | { |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1661 | struct compact_control cc = { |
| 1662 | .order = -1, |
David Rientjes | e0b9dae | 2014-06-04 16:08:28 -0700 | [diff] [blame] | 1663 | .mode = MIGRATE_SYNC, |
David Rientjes | 91ca918 | 2014-04-03 14:47:23 -0700 | [diff] [blame] | 1664 | .ignore_skip_hint = true, |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1665 | }; |
| 1666 | |
Andrew Morton | 7103f16 | 2013-02-22 16:32:33 -0800 | [diff] [blame] | 1667 | __compact_pgdat(NODE_DATA(nid), &cc); |
Rik van Riel | 7be62de | 2012-03-21 16:33:52 -0700 | [diff] [blame] | 1668 | } |
| 1669 | |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1670 | /* Compact all nodes in the system */ |
Jason Liu | 7964c06 | 2013-01-11 14:31:47 -0800 | [diff] [blame] | 1671 | static void compact_nodes(void) |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1672 | { |
| 1673 | int nid; |
| 1674 | |
Hugh Dickins | 8575ec2 | 2012-03-21 16:33:53 -0700 | [diff] [blame] | 1675 | /* Flush pending updates to the LRU lists */ |
| 1676 | lru_add_drain_all(); |
| 1677 | |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1678 | for_each_online_node(nid) |
| 1679 | compact_node(nid); |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1680 | } |
| 1681 | |
| 1682 | /* The written value is actually unused; all memory is compacted */ |
| 1683 | int sysctl_compact_memory; |
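| | /* Usage example (illustrative): echo 1 > /proc/sys/vm/compact_memory */ |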
| 1684 | |
| 1685 | /* This is the entry point for compacting all nodes via /proc/sys/vm */ |
| 1686 | int sysctl_compaction_handler(struct ctl_table *table, int write, |
| 1687 | void __user *buffer, size_t *length, loff_t *ppos) |
| 1688 | { |
| 1689 | if (write) |
Jason Liu | 7964c06 | 2013-01-11 14:31:47 -0800 | [diff] [blame] | 1690 | compact_nodes(); |
Mel Gorman | 76ab0f5 | 2010-05-24 14:32:28 -0700 | [diff] [blame] | 1691 | |
| 1692 | return 0; |
| 1693 | } |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1694 | |
Mel Gorman | 5e77190 | 2010-05-24 14:32:31 -0700 | [diff] [blame] | 1695 | int sysctl_extfrag_handler(struct ctl_table *table, int write, |
| 1696 | void __user *buffer, size_t *length, loff_t *ppos) |
| 1697 | { |
| 1698 | proc_dointvec_minmax(table, write, buffer, length, ppos); |
| 1699 | |
| 1700 | return 0; |
| 1701 | } |
| 1702 | |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1703 | #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) |
Rashika Kheria | 74e77fb | 2014-04-03 14:48:01 -0700 | [diff] [blame] | 1704 | static ssize_t sysfs_compact_node(struct device *dev, |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 1705 | struct device_attribute *attr, |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1706 | const char *buf, size_t count) |
| 1707 | { |
Hugh Dickins | 8575ec2 | 2012-03-21 16:33:53 -0700 | [diff] [blame] | 1708 | int nid = dev->id; |
| 1709 | |
| 1710 | if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { |
| 1711 | /* Flush pending updates to the LRU lists */ |
| 1712 | lru_add_drain_all(); |
| 1713 | |
| 1714 | compact_node(nid); |
| 1715 | } |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1716 | |
| 1717 | return count; |
| 1718 | } |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 1719 | static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node); |
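| | /* The attribute appears as /sys/devices/system/node/nodeN/compact; writing any value compacts that node. */ |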
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1720 | |
| 1721 | int compaction_register_node(struct node *node) |
| 1722 | { |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 1723 | return device_create_file(&node->dev, &dev_attr_compact); |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1724 | } |
| 1725 | |
| 1726 | void compaction_unregister_node(struct node *node) |
| 1727 | { |
Kay Sievers | 10fbcf4 | 2011-12-21 14:48:43 -0800 | [diff] [blame] | 1728 | return device_remove_file(&node->dev, &dev_attr_compact); |
Mel Gorman | ed4a6d7 | 2010-05-24 14:32:29 -0700 | [diff] [blame] | 1729 | } |
| 1730 | #endif /* CONFIG_SYSFS && CONFIG_NUMA */ |
Michal Nazarewicz | ff9543f | 2011-12-29 13:09:50 +0100 | [diff] [blame] | 1731 | |
| 1732 | #endif /* CONFIG_COMPACTION */ |