// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        /*
         * We assume the caller intended to SET migrate type to isolate.
         * If it is already set, then someone else must have raced and
         * set it before us. Return -EBUSY
         */
        if (is_migrate_isolate_page(page))
                goto out;

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or are on the LRU, isolation can continue.
         * Later, for example, when the memory hotplug notifier runs, the
         * pages reported as "can be isolated" should be isolated (freed)
         * by the balloon driver through the memory notifier chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
                                 isol_flags))
                ret = 0;

        /*
         * "Immobile" here means pages that are not on the LRU. If there are
         * more immobile pages than removable-by-driver pages reported by the
         * notifier, we will fail.
         */

out:
        if (!ret) {
                unsigned long nr_pages;
                int mt = get_pageblock_migratetype(page);

                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
                zone->nr_isolate_pageblock++;
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
                                                NULL);

                __mod_zone_freepage_state(zone, -nr_pages, mt);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages(zone);
        return ret;
}

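/*
 * The MEM_ISOLATE_COUNT notification above lets a balloon driver report,
 * via arg->pages_found, how many pages in the pageblock it holds and could
 * hand back. A rough sketch of such a notifier callback, loosely modeled on
 * the pseries CMM balloon driver (balloon_pages_in_range() is an illustrative
 * helper, not something defined here):
 *
 *	static int balloon_isolate_cb(struct notifier_block *self,
 *				      unsigned long action, void *arg)
 *	{
 *		struct memory_isolate_notify *marg = arg;
 *
 *		if (action != MEM_ISOLATE_COUNT)
 *			return NOTIFY_OK;
 *
 *		marg->pages_found += balloon_pages_in_range(marg->start_pfn,
 *							    marg->nr_pages);
 *		return NOTIFY_OK;
 *	}
 *
 * Such a callback is registered with register_memory_isolate_notifier().
 */
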
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;
        bool isolated_page = false;
        unsigned int order;
        unsigned long pfn, buddy_pfn;
        struct page *buddy;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (!is_migrate_isolate_page(page))
                goto out;

        /*
         * Because a free page of order >= pageblock_order on an isolated
         * pageblock is not allowed to merge with its buddy (due to the
         * freepage counting problem), such a free buddy page may exist here.
         * move_freepages_block() does not handle merging, so we need another
         * approach: isolating and freeing the page lets it be merged.
         */
        if (PageBuddy(page)) {
                order = page_order(page);
                if (order >= pageblock_order) {
                        pfn = page_to_pfn(page);
                        buddy_pfn = __find_buddy_pfn(pfn, order);
                        buddy = page + (buddy_pfn - pfn);

                        if (pfn_valid_within(buddy_pfn) &&
                            !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                isolated_page = true;
                        }
                }
        }

        /*
         * If we isolated a free page of order >= pageblock_order, there
         * should be no other free pages in the range, so we can skip the
         * costly pageblock scan that moving the free pages would require.
         */
        if (!isolated_page) {
                nr_pages = move_freepages_block(zone, page, migratetype, NULL);
                __mod_zone_freepage_state(zone, nr_pages, migratetype);
        }
        set_pageblock_migratetype(page, migratetype);
        zone->nr_isolate_pageblock--;
out:
        spin_unlock_irqrestore(&zone->lock, flags);
        if (isolated_page) {
                post_alloc_hook(page, order, __GFP_MOVABLE);
                __free_pages(page, order);
        }
}

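/*
 * For reference, __find_buddy_pfn() used above computes the buddy's pfn by
 * flipping bit 'order' of the page's pfn, i.e. buddy_pfn = pfn ^ (1 << order).
 * For example (assuming pageblock_order == 9), a free page of order 9 at
 * pfn 0x1200 has its buddy at pfn 0x1200 ^ 0x200 = 0x1000, and the buddy
 * page is then reached as page + (buddy_pfn - pfn).
 */
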
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                struct page *page;

                page = pfn_to_online_page(pfn + i);
                if (!page)
                        continue;
                return page;
        }
        return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask)
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *			REPORT_FAILURE - report details about the failure to
 *					 isolate the range
 *
 * Making the page-allocation-type of the range MIGRATE_ISOLATE means free
 * pages in the range will never be allocated. Any free pages and pages freed
 * in the future will not be allocated again. If the specified range includes
 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
 * finally isolate all pages in the range, the caller has to free all pages in
 * the range. test_pages_isolated() can be used to check whether this has
 * happened.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), and set_migratetype_isolate()
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Return: the number of isolated pageblocks on success and -EBUSY if any part
 * of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, int flags)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;
        int nr_isolate_pageblock = 0;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page) {
                        if (set_migratetype_isolate(page, migratetype, flags)) {
                                undo_pfn = pfn;
                                goto undo;
                        }
                        nr_isolate_pageblock++;
                }
        }
        return nr_isolate_pageblock;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages) {
                struct page *page = pfn_to_online_page(pfn);
                if (!page)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }

        return -EBUSY;
}

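/*
 * A simplified sketch of how a caller such as memory offlining drives this
 * interface (retries and error handling trimmed; the real sequences live in
 * __offline_pages() and alloc_contig_range()):
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
 *				       MEMORY_OFFLINE | REPORT_FAILURE);
 *	if (ret < 0)
 *		return ret;
 *
 *	... migrate or free every used page in [start_pfn, end_pfn) ...
 *
 *	if (test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE)) {
 *		... some pages are still in use: retry, or roll back with ...
 *		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 *	}
 */
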
/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;

        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || !is_migrate_isolate_page(page))
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
                                  int flags)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        /*
                         * If the page is on a free list, it has to be on
                         * the correct MIGRATE_ISOLATE freelist. There is no
                         * simple way to verify that with a VM_BUG_ON(), though.
                         */
                        pfn += 1 << page_order(page);
                else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
                        /* An HWPoisoned page cannot also be PageBuddy */
                        pfn++;
                else
                        break;
        }

        return pfn;
}

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
                        int isol_flags)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages are
         * not necessarily aligned to pageblock_nr_pages. Therefore, check the
         * migratetype of each pageblock first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && !is_migrate_isolate_page(page))
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check all pages are free or marked as ISOLATED */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
        spin_unlock_irqrestore(&zone->lock, flags);

        trace_test_pages_isolated(start_pfn, end_pfn, pfn);

        return pfn < end_pfn ? -EBUSY : 0;
}

struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
        return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}
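
/*
 * alloc_migrate_target() is intended to be used as the new-page allocation
 * callback of migrate_pages() when pages are being migrated out of an
 * isolated range. A rough sketch of such a call, loosely following
 * __alloc_contig_migrate_range() (details and error handling omitted;
 * source_pages is an illustrative name for a list of pages already isolated
 * from their LRUs):
 *
 *	migrate_pages(&source_pages, alloc_migrate_target, NULL, 0,
 *		      MIGRATE_SYNC, MR_CONTIG_RANGE);
 */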