/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)
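
/*
 * Illustrative (hypothetical) use of GFP_RECLAIM_MASK: an allocation made
 * internally on behalf of a caller can honour that caller's reclaim
 * constraints while choosing its own placement, e.g.
 *
 *	gfp_t gfp = caller_gfp & GFP_RECLAIM_MASK;
 *
 * which keeps bits such as __GFP_IO, __GFP_FS and __GFP_NOFAIL from
 * caller_gfp and drops any placement hints it carried. "caller_gfp" is
 * only an example name, not something defined in this header.
 */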

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

void page_writeback_init(void);

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			unsigned long addr, unsigned long end,
			struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
		unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into a refcounted page
 * with a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/memcontrol.c:
 */
extern bool cgroup_memory_nokmem;

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
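
/*
 * Worked example for rule 2) above, purely illustrative: merging the
 * order-1 buddies #8 and #10 gives their order-2 parent at
 * P = 10 & ~(1 << 1) = 8, so the combined page starts at pfn 8.
 */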

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
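
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header),
 * showing the pattern described above: read the order once into a local
 * variable and validate it before use.
 *
 *	if (PageBuddy(page)) {
 *		unsigned int order = buddy_order_unsafe(page);
 *
 *		if (order < MAX_ORDER)
 *			nr_skipped += 1UL << order;
 *	}
 */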

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool write, int *locked);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			      unsigned long len);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in vma?
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page);
	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (PageHead(page) &&
		   pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
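
/*
 * Illustrative example with made-up numbers: a page at pgoff 100 in a vma
 * with vm_pgoff 90 and vm_start 0x7f0000000000 is expected at
 * 0x7f0000000000 + ((100 - 90) << PAGE_SHIFT), i.e. ten pages into the
 * vma; if that address reaches or passes vm_end, -EFAULT is returned
 * instead.
 */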

/*
 * Then at what user virtual address will none of the page be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address_end(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff;
	unsigned long address;

	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	pgoff = page_to_pgoff(page) + compound_nr(page);
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
		unsigned long, unsigned long,
		unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

/*
 * Only MMU archs have async OOM victim reclaim - aka oom_reaper - so we
 * cannot assume that reduced access to memory reserves is sufficient for
 * !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;

/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

void vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

#endif /* __MM_INTERNAL_H */