/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H

#ifdef CONFIG_MEMORY_ISOLATION
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return zone->nr_isolate_pageblock;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}
#else
static inline bool has_isolate_pageblock(struct zone *zone)
{
	return false;
}
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
#endif

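/*
 * An illustrative sketch, not part of this header: hot free paths use
 * the helpers above to skip the isolation re-check when nothing in the
 * zone is isolated; free_one_page() in mm/page_alloc.c does roughly:
 *
 *	if (unlikely(has_isolate_pageblock(zone) ||
 *		     is_migrate_isolate(migratetype)))
 *		migratetype = get_pfnblock_migratetype(page, pfn);
 */
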
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 int migratetype, bool skip_hwpoisoned_pages);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable);

/*
 * Changes the migrate type of pageblocks in [start_pfn, end_pfn) to
 * MIGRATE_ISOLATE. If the specified range includes migrate types other
 * than MOVABLE or CMA, this fails with -EBUSY.
 *
 * To fully isolate the range, the caller must also free every page in
 * it; test_pages_isolated() can be used to check that this has
 * happened.
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned migratetype, bool skip_hwpoisoned_pages);

/*
 * Changes MIGRATE_ISOLATE back to the given migratetype for the target
 * range [start_pfn, end_pfn).
 */
int
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			unsigned migratetype);

/*
 * Tests whether all pages in [start_pfn, end_pfn) are isolated.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages);
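
/*
 * A typical caller sequence, sketched here for illustration only; real
 * callers such as alloc_contig_range() in mm/page_alloc.c wrap these
 * steps in page migration and retry logic:
 *
 *	if (start_isolate_page_range(start_pfn, end_pfn,
 *				     MIGRATE_MOVABLE, false))
 *		return -EBUSY;
 *	... migrate or free every page in the range ...
 *	if (test_pages_isolated(start_pfn, end_pfn, false))
 *		... some pages remain, undo the isolation and retry ...
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */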
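
/*
 * A new_page_t-style allocation callback for migrate_pages(): allocates
 * a destination page for one page being migrated away from a range that
 * is being isolated.
 */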
struct page *alloc_migrate_target(struct page *page, unsigned long private);

#endif