/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/tracepoint-defs.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC)

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
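/*
 * Illustrative use only (a sketch, not a definition in this header): callers
 * typically check or mask gfp flags against these sets, e.g. warn about bits
 * that must never reach a slab allocator and pass only reclaim-related bits
 * down to an internal allocation:
 *
 *	WARN_ON_ONCE(gfp_mask & GFP_SLAB_BUG_MASK);
 *	page = alloc_pages(gfp_mask & GFP_RECLAIM_MASK, order);
 */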

void page_writeback_init(void);

vm_fault_t do_swap_page(struct vm_fault *vmf);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP));
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void do_page_cache_ra(struct readahead_control *, unsigned long nr_to_read,
		unsigned long lookahead_size);
void force_page_cache_ra(struct readahead_control *, struct file_ra_state *,
		unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, mapping, index);
	force_page_cache_ra(&ractl, &file->f_ra, nr_to_read);
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t index);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t index);

/**
 * page_evictable - test whether a page is evictable
 * @page: the page to test
 *
 * Test whether page is evictable--i.e., should be placed on active/inactive
 * lists vs unevictable list.
 *
 * Reasons page might not be evictable:
 * (1) page's mapping marked unevictable
 * (2) page is part of an mlocked VMA
 *
 */
static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents highest usable zone index of
	 * the allocation request. Due to the nature of the zone,
	 * memory on lower zone than the highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone since higher zone than this index cannot be
	 * usable for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 *    the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 *    satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
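/*
 * Illustrative use only (a sketch of the typical pattern in mm/page_alloc.c,
 * not a definition in this header): with zone->lock held, the buddy's struct
 * page is reached by offsetting from the page already in hand:
 *
 *	buddy_pfn = __find_buddy_pfn(pfn, order);
 *	buddy = page + (buddy_pfn - pfn);
 *
 * For pfn 8 and order 1 this yields buddy_pfn 10, i.e. two struct pages
 * further along the (assumed contiguous) mem_map.
 */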

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone);
extern void zone_pcp_reset(struct zone *zone);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock or sched contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
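/*
 * Illustrative use only (a sketch of how a lockless reader is expected to
 * cope with the race, roughly as the compaction scanner does): re-check the
 * value for sanity before acting on it:
 *
 *	if (PageBuddy(page)) {
 *		unsigned long freepage_order = buddy_order_unsafe(page);
 *
 *		if (freepage_order < MAX_ORDER)
 *			pfn += (1UL << freepage_order) - 1;
 *	}
 */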

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_lock held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + thp_size(page) - PAGE_SIZE;

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);

	return max(start, vma->vm_start);
}
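/*
 * Worked example (illustrative only): for a page at pgoff 5 in a vma with
 * vm_pgoff 3 and vm_start 0x100000, __vma_address() returns
 * 0x100000 + ((5 - 3) << PAGE_SHIFT), i.e. 0x102000 with 4K pages.
 */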

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if only
	 * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
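/*
 * Illustrative use only (a sketch of the mm/filemap.c fault pattern): pin the
 * file before any sleeping I/O, then ask the caller to retry now that the
 * mmap_lock has been dropped:
 *
 *	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 *	... start readahead or wait for the page ...
 *	if (fpin) {
 *		fput(fpin);
 *		return ret | VM_FAULT_RETRY;
 *	}
 */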

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
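/*
 * Illustrative use only (a sketch of the pattern used by the gigantic-page
 * helpers in mm/memory.c): walk every subpage without assuming the mem_map is
 * contiguous across MAX_ORDER_NR_PAGES boundaries:
 *
 *	struct page *p = page;
 *
 *	for (i = 0; i < pages_per_huge_page;
 *	     i++, p = mem_map_next(p, page, i))
 *		clear_user_highpage(p, addr + i * PAGE_SIZE);
 */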

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */
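/*
 * Illustrative use only (a sketch, argument names hypothetical): callers pass
 * a level, a prefix naming the area being checked, and a printk-style format,
 * e.g.
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *		       "Initialising map node %d zone %lu pfns %lu -> %lu\n",
 *		       nid, zone_id, start_pfn, end_pfn);
 */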

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
#endif

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
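/*
 * Illustrative use only (a sketch of how the allocator applies these bits):
 * the low bits of alloc_flags select which watermark an allocation is checked
 * against, e.g.
 *
 *	mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
 *	if (!zone_watermark_ok(zone, order, mark, highest_zoneidx, alloc_flags))
 *		... try another zone or enter the slowpath ...
 */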

/*
 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
 * cannot assume a reduced access to memory reserves is sufficient for
 * !MMU
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;			/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};
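/*
 * Illustrative use only (a sketch of the pattern used by migration callers
 * such as memory hotplug; values and reason code are examples): fill in the
 * control structure and hand it to alloc_migration_target() via
 * migrate_pages():
 *
 *	struct migration_target_control mtc = {
 *		.nid = node,
 *		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
 *	};
 *
 *	migrate_pages(&pagelist, alloc_migration_target, NULL,
 *		      (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */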
623
Michel Lespinassedb971412012-10-08 16:29:34 -0700624#endif /* __MM_INTERNAL_H */