/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H

#ifdef CONFIG_MEMORY_ISOLATION
/* Does this zone contain at least one pageblock marked MIGRATE_ISOLATE? */
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return zone->nr_isolate_pageblock;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}
#else
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return false;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
#endif
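
/*
 * Usage sketch (illustrative only, not part of this header): the page
 * allocator consults these helpers so that pages in isolated pageblocks
 * are kept off the normal free lists, along the lines of:
 *
 *	if (unlikely(has_isolate_pageblock(zone)))
 *		migratetype = get_pfnblock_migratetype(page, pfn);
 *	if (is_migrate_isolate(migratetype))
 *		...divert the page to the MIGRATE_ISOLATE free list...
 */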

bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 int migratetype, bool skip_hwpoisoned_pages);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable);

/*
 * Change the migrate type of pageblocks in [start_pfn, end_pfn) to
 * MIGRATE_ISOLATE.  If the range includes pageblocks with migrate types
 * other than MOVABLE or CMA, this fails with -EBUSY.
 *
 * To fully isolate all pages in the range, the caller then has to free
 * every page in it.  test_pages_isolated() can be used to check this.
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned migratetype, bool skip_hwpoisoned_pages);
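
/*
 * Usage sketch (illustrative; the pfn bounds are assumed to be pageblock
 * aligned): a caller such as memory hot-remove isolates the range first
 * and backs off if any pageblock is neither MOVABLE nor CMA:
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn,
 *				       MIGRATE_MOVABLE, true);
 *	if (ret)
 *		return ret;
 */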

/*
 * Change MIGRATE_ISOLATE back to @migratetype for the
 * target range [start_pfn, end_pfn).
 */
int
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			unsigned migratetype);

/*
 * Test whether all pages in [start_pfn, end_pfn) are isolated.
 * Returns 0 if they are, -EBUSY otherwise.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages);
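
/*
 * Sketch of the whole isolation protocol (illustrative; the
 * migrate-or-free step and error handling are elided):
 *
 *	if (start_isolate_page_range(start_pfn, end_pfn,
 *				     MIGRATE_MOVABLE, true))
 *		return -EBUSY;
 *	...migrate or free every page in [start_pfn, end_pfn)...
 *	if (test_pages_isolated(start_pfn, end_pfn, true))
 *		...some page is still in use: retry or bail out...
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */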

struct page *alloc_migrate_target(struct page *page, unsigned long private);
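
/*
 * alloc_migrate_target() has the new_page_t signature, so it can serve
 * as the allocation callback when migrating pages out of an isolated
 * range (illustrative sketch; building @source_list is elided):
 *
 *	ret = migrate_pages(&source_list, alloc_migrate_target, NULL,
 *			    0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */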

#endif /* __LINUX_PAGEISOLATION_H */