// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.
	 */
	if (is_migrate_isolate_page(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return -EBUSY;
	}
	/*
	 * FIXME: memory hotplug does not yet call shrink_slab() by itself,
	 * so we only check MOVABLE pages here.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		spin_unlock_irqrestore(&zone->lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (isol_flags & REPORT_FAILURE) {
		/*
		 * printk() with zone->lock held will likely trigger a
		 * lockdep splat, so defer it here.
		 */
		dump_page(unmovable, "unmovable page");
	}

	return -EBUSY;
}

static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because free pages of more than pageblock_order on an isolated
	 * pageblock are restricted from merging (due to the freepage
	 * counting problem), an unmerged free buddy page may exist here.
	 * move_freepages_block() does not take care of merging, so we need
	 * another approach: isolating the page and freeing it again makes
	 * these pages merge.
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		if (order >= pageblock_order && order < MAX_ORDER - 1) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (!is_migrate_isolate_page(buddy)) {
				isolated_page = !!__isolate_free_page(page, order);
				/*
				 * Isolating a free page in an isolated pageblock
				 * is expected to always work as watermarks don't
				 * apply here.
				 */
				VM_WARN_ON(!isolated_page);
			}
		}
	}

	/*
	 * If we isolated a free page of more than pageblock_order above,
	 * there should be no free pages left in the range, so we can avoid
	 * the costly pageblock scan for moving free pages.
	 *
	 * We didn't actually touch any of the isolated pages, so place them
	 * at the tail of the freelist. This is an optimization for memory
	 * onlining - just onlined memory won't immediately be considered for
	 * allocation.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - set the page-allocation-type of a range of
 * pages to MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask):
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory,
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *					 isolate the range.
 *
 * Setting the page-allocation-type to MIGRATE_ISOLATE means that free pages
 * in the range will never be allocated. Any free pages and pages freed in
 * the future will not be allocated again. If the specified range includes
 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
 * eventually isolate all pages in the range, the caller has to free all
 * pages in the range; test_pages_isolated() can be used to verify that.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), which then returns an error.
 * We then clean up by restoring the migration type on pageblocks we may
 * have modified and return -EBUSY to the caller. This prevents two threads
 * from simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However,
 * in some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong a guarantee the caller needs, zone_pcp_disable()/enable()
 * can be used to flush and disable the pcplists before isolation and to
 * re-enable them after unisolation.
 *
 * Return: 0 on success and -EBUSY if any part of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && set_migratetype_isolate(page, migratetype, flags)) {
			undo_isolate_page_range(start_pfn, pfn, migratetype);
			return -EBUSY;
		}
	}
	return 0;
}
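
/*
 * A minimal usage sketch for the interfaces in this file, assuming a
 * hypothetical caller (e.g. range-allocation or memory-offlining code);
 * the variables, migratetype, and flags below are illustrative only, not
 * taken from an actual call site. The sequence follows the kerneldoc above:
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
 *				       REPORT_FAILURE);
 *	if (ret)
 *		return ret;
 *	... migrate or free any still-allocated pages in the range ...
 *	if (test_pages_isolated(start_pfn, end_pfn, 0))
 *		ret = -EBUSY;
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */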

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
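
/*
 * A minimal sketch, assuming a caller that needs the stronger guarantee
 * described in the start_isolate_page_range() kerneldoc: flush and disable
 * the pcplists around the isolate/unisolate pair. Hypothetical caller code,
 * not taken from an actual call site:
 *
 *	zone_pcp_disable(zone);
 *	ret = start_isolate_page_range(start_pfn, end_pfn, migratetype, 0);
 *	... work on the isolated range ...
 *	undo_isolate_page_range(start_pfn, end_pfn, migratetype);
 *	zone_pcp_enable(zone);
 */
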
/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that with a VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Therefore, just check the migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page) {
		ret = -EBUSY;
		goto out;
	}

	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	ret = pfn < end_pfn ? -EBUSY : 0;

out:
	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return ret;
}