blob: ccd3ed46434ff4ca43ae33f096198eaea24572c3 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07002#ifndef __LINUX_PAGEISOLATION_H
3#define __LINUX_PAGEISOLATION_H
4
#ifdef CONFIG_MEMORY_ISOLATION
/* True if this zone currently has at least one isolated pageblock. */
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return zone->nr_isolate_pageblock;
}
/* True if the pageblock containing @page has migratetype MIGRATE_ISOLATE. */
static inline bool is_migrate_isolate_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
/* True if @migratetype is MIGRATE_ISOLATE. */
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}
#else
/*
 * Without CONFIG_MEMORY_ISOLATION no pageblock can ever be isolated,
 * so all predicates compile away to constant false.
 */
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return false;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
#endif
Minchan Kimee6f5092012-07-31 16:43:50 -070032
David Hildenbrand756d25b2019-11-30 17:54:07 -080033#define MEMORY_OFFLINE 0x1
Michal Hockod381c542018-12-28 00:33:56 -080034#define REPORT_FAILURE 0x2
35
Qian Cai4a55c042020-01-30 22:14:57 -080036struct page *has_unmovable_pages(struct zone *zone, struct page *page,
37 int migratetype, int flags);
Minchan Kimee6f5092012-07-31 16:43:50 -070038void set_pageblock_migratetype(struct page *page, int migratetype);
39int move_freepages_block(struct zone *zone, struct page *page,
Vlastimil Babka02aa0cd2017-05-08 15:54:40 -070040 int migratetype, int *num_movable);
Minchan Kim435b4052012-10-08 16:32:16 -070041
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070042/*
43 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070044 */
Minchan Kimee6f5092012-07-31 16:43:50 -070045int
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +020046start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
Minchan Kim0e688e92021-06-29 12:08:44 -070047 unsigned migratetype, int flags,
48 unsigned long *failed_pfn);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070049
50/*
51 * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
52 * target range is [start_pfn, end_pfn)
53 */
Pingfan Liu1fcf0a52019-07-11 20:54:49 -070054void
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +020055undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
56 unsigned migratetype);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070057
58/*
Michal Nazarewicz0815f3d2012-04-03 15:06:15 +020059 * Test all pages in [start_pfn, end_pfn) are isolated or not.
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070060 */
Wen Congyangb023f462012-12-11 16:00:45 -080061int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
Minchan Kim0e688e92021-06-29 12:08:44 -070062 int isol_flags, unsigned long *failed_pfn);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070063
Michal Hocko666feb22018-04-10 16:30:03 -070064struct page *alloc_migrate_target(struct page *page, unsigned long private);
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -070065
66#endif