blob: d4cd2014fa6f5a18f03f1b0babfccb21fa3ac55f [file] [log] [blame]
KAMEZAWA Hiroyukia5d76b542007-10-16 01:26:11 -07001#ifndef __LINUX_PAGEISOLATION_H
2#define __LINUX_PAGEISOLATION_H
3
#ifdef CONFIG_MEMORY_ISOLATION
/* True when the zone contains at least one MIGRATE_ISOLATE pageblock. */
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return zone->nr_isolate_pageblock != 0;
}

/* True when @page lives in a pageblock of migratetype MIGRATE_ISOLATE. */
static inline bool is_migrate_isolate_page(struct page *page)
{
	int mt = get_pageblock_migratetype(page);

	return mt == MIGRATE_ISOLATE;
}

/* True when @migratetype is MIGRATE_ISOLATE. */
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}
#else
/* Without CONFIG_MEMORY_ISOLATION no pageblock is ever isolated. */
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return false;
}

static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}

static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
#endif
Minchan Kimee6f5092012-07-31 16:43:50 -070031
/* Scan [page, page + count) for pages that cannot be migrated/offlined. */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 bool skip_hwpoisoned_pages);
/* Set the migratetype stored for @page's whole pageblock. */
void set_pageblock_migratetype(struct page *page, int migratetype);
/* Move this pageblock's free pages to the @migratetype free list. */
int move_freepages_block(struct zone *zone, struct page *page,
			int migratetype, int *num_movable);

/*
 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
 * If the specified range includes migrate types other than MOVABLE or CMA,
 * this will fail with -EBUSY.
 *
 * For isolating all pages in the range finally, the caller has to
 * free all pages in the range. test_pages_isolated() can be used to
 * test it.
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned migratetype, bool skip_hwpoisoned_pages);

/*
 * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
 * target range is [start_pfn, end_pfn)
 */
int
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			unsigned migratetype);

/*
 * Test all pages in [start_pfn, end_pfn) are isolated or not.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages);

/* Allocation callback used as a migration target during isolation. */
struct page *alloc_migrate_target(struct page *page, unsigned long private,
			int **resultp);

#endif /* __LINUX_PAGEISOLATION_H */