blob: 572458016331e7003af6fc7d1d64809102ece5e3 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07002#ifndef __LINUX_PAGEISOLATION_H
3#define __LINUX_PAGEISOLATION_H
4
Minchan Kim194159f2013-02-22 16:33:58 -08005#ifdef CONFIG_MEMORY_ISOLATION
Joonsoo Kimad53f922014-11-13 15:19:11 -08006static inline bool has_isolate_pageblock(struct zone *zone)
7{
8 return zone->nr_isolate_pageblock;
9}
Minchan Kim194159f2013-02-22 16:33:58 -080010static inline bool is_migrate_isolate_page(struct page *page)
11{
12 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
13}
14static inline bool is_migrate_isolate(int migratetype)
15{
16 return migratetype == MIGRATE_ISOLATE;
17}
18#else
/*
 * Stub for !CONFIG_MEMORY_ISOLATION: without isolation support no zone
 * can ever hold an isolated pageblock, so this is unconditionally false.
 */
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return false;
}
/*
 * Stub for !CONFIG_MEMORY_ISOLATION: no page can belong to an isolated
 * pageblock, so always report false without inspecting @page.
 */
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
/*
 * Stub for !CONFIG_MEMORY_ISOLATION: MIGRATE_ISOLATE does not exist in
 * this configuration, so no migratetype value can match it.
 */
static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
31#endif
Minchan Kimee6f5092012-07-31 16:43:50 -070032
David Hildenbrand756d25b2019-11-30 17:54:07 -080033#define MEMORY_OFFLINE 0x1
Michal Hockod381c542018-12-28 00:33:56 -080034#define REPORT_FAILURE 0x2
35
Qian Cai4a55c042020-01-30 22:14:57 -080036struct page *has_unmovable_pages(struct zone *zone, struct page *page,
37 int migratetype, int flags);
Minchan Kimee6f5092012-07-31 16:43:50 -070038void set_pageblock_migratetype(struct page *page, int migratetype);
39int move_freepages_block(struct zone *zone, struct page *page,
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -070040 int migratetype, int *num_movable);
Minchan Kim435b4052012-10-08 16:32:16 -070041
/*
 * Change the migratetype of all pageblocks in [start_pfn, end_pfn) to
 * MIGRATE_ISOLATE.
 */
Minchan Kimee6f5092012-07-31 16:43:50 -070045int
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +020046start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
Michal Hockod381c542018-12-28 00:33:56 -080047 unsigned migratetype, int flags);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070048
/*
 * Change pageblocks in the range [start_pfn, end_pfn) back from
 * MIGRATE_ISOLATE to MIGRATE_MOVABLE.
 */
Pingfan Liu1fcf0a52019-07-11 20:54:49 -070053void
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +020054undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
55 unsigned migratetype);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070056
/*
 * Test whether all pages in [start_pfn, end_pfn) are isolated.
 */
Wen Congyangb023f462012-12-11 16:00:45 -080060int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
David Hildenbrand756d25b2019-11-30 17:54:07 -080061 int isol_flags);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070062
Michal Hocko666feb22018-04-10 16:30:03 -070063struct page *alloc_migrate_target(struct page *page, unsigned long private);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070064
65#endif