/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Macros for manipulating and testing flags related to a
 * pageblock_nr_pages number of pages.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Original author, Mel Gorman
 * Major cleanups and reduction of bit operations, Andy Whitcroft
 */
#ifndef PAGEBLOCK_FLAGS_H
#define PAGEBLOCK_FLAGS_H

#include <linux/types.h>

#define PB_migratetype_bits 3
/* Bit indices that affect a whole block of pages */
enum pageblock_bits {
	PB_migrate,
	PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
			/* 3 bits required for migrate types */
	PB_migrate_skip,	/* If set the block is skipped by compaction */

	/*
	 * Assume the bits will always align on a word. If this assumption
	 * changes then get/set pageblock needs updating.
	 */
	NR_PAGEBLOCK_BITS
};
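
/*
 * Resulting layout, derived from the enum above: bits 0-2 hold the
 * migratetype (PB_migrate..PB_migrate_end), bit 3 is PB_migrate_skip,
 * so NR_PAGEBLOCK_BITS evaluates to 4 flag bits per pageblock.
 */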

#ifdef CONFIG_HUGETLB_PAGE

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Huge page sizes are variable */
extern unsigned int pageblock_order;

#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/* Huge pages are a constant size */
#define pageblock_order		HUGETLB_PAGE_ORDER

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

#else /* CONFIG_HUGETLB_PAGE */

/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
#define pageblock_order		(MAX_ORDER-1)

#endif /* CONFIG_HUGETLB_PAGE */

#define pageblock_nr_pages	(1UL << pageblock_order)
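
/*
 * Worked example (assuming a typical x86_64 configuration): with
 * CONFIG_HUGETLB_PAGE set and HUGETLB_PAGE_ORDER == 9, pageblock_order
 * is 9 and pageblock_nr_pages is 1UL << 9 == 512 pages, i.e. 2 MiB with
 * 4 KiB base pages -- one pageblock covers exactly one huge page.
 */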

/* Forward declaration */
struct page;

unsigned long get_pfnblock_flags_mask(struct page *page,
				unsigned long pfn,
				unsigned long end_bitidx,
				unsigned long mask);

void set_pfnblock_flags_mask(struct page *page,
				unsigned long flags,
				unsigned long pfn,
				unsigned long end_bitidx,
				unsigned long mask);

/* Declarations for getting and setting flags. See mm/page_alloc.c */
#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			end_bitidx,					\
			(1 << (end_bitidx - start_bitidx + 1)) - 1)
#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
	set_pfnblock_flags_mask(page, flags, page_to_pfn(page),		\
			end_bitidx,					\
			(1 << (end_bitidx - start_bitidx + 1)) - 1)
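
/*
 * Expansion sketch: a call such as
 *	get_pageblock_flags_group(page, PB_migrate, PB_migrate_end)
 * passes the mask (1 << (PB_migrate_end - PB_migrate + 1)) - 1, i.e.
 * (1 << 3) - 1 == 0x7, so only the three migratetype bits of the
 * block's flag word are read.
 */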

#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
	get_pageblock_flags_group(page, PB_migrate_skip,	\
			PB_migrate_skip)
#define clear_pageblock_skip(page) \
	set_pageblock_flags_group(page, 0, PB_migrate_skip,	\
			PB_migrate_skip)
#define set_pageblock_skip(page) \
	set_pageblock_flags_group(page, 1, PB_migrate_skip,	\
			PB_migrate_skip)
#else
static inline bool get_pageblock_skip(struct page *page)
{
	return false;
}
static inline void clear_pageblock_skip(struct page *page)
{
}
static inline void set_pageblock_skip(struct page *page)
{
}
#endif /* CONFIG_COMPACTION */
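
/*
 * Usage sketch (illustrative, not taken from this header): a compaction
 * scanner can avoid rescanning a block it already found unprofitable:
 *
 *	if (get_pageblock_skip(page))
 *		return;				// block marked, skip it
 *	if (!isolated_anything)
 *		set_pageblock_skip(page);	// remember for next pass
 *
 * With CONFIG_COMPACTION=n the static inline stubs above let the same
 * call sites compile while get_pageblock_skip() is always false.
 */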

#endif /* PAGEBLOCK_FLAGS_H */