/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
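
/*
 * Illustrative sketch (not part of this header; caller_gfp and inner_gfp
 * are hypothetical names): an allocation made on behalf of a caller
 * typically keeps only the reclaim-related constraints from the caller's
 * gfp mask and supplies its own placement hints, e.g.
 *
 *	gfp_t inner_gfp = (caller_gfp & GFP_RECLAIM_MASK) | __GFP_HIGHMEM;
 */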

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

extern int __do_page_cache_readahead(struct address_space *mapping,
	struct file *filp, pgoff_t offset, unsigned long nr_to_read,
	unsigned long lookahead_size);

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static inline unsigned long ra_submit(struct file_ra_state *ra,
		struct address_space *mapping, struct file *filp)
{
	return __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);
}
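
/*
 * Illustrative sketch (not part of this header; index, nr_to_read and
 * lookahead_size stand for values a readahead policy would compute): a
 * heuristic fills in the window and then hands it to ra_submit(), roughly:
 *
 *	ra->start = index;
 *	ra->size = nr_to_read;
 *	ra->async_size = lookahead_size;
 *	return ra_submit(ra, mapping, filp);
 */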

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
extern bool zone_reclaimable(struct zone *zone);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and high_zoneidx are initialized only once in
 * __alloc_pages_nodemask() and then never change.
 *
 * zonelist, preferred_zone and classzone_idx are set first in
 * __alloc_pages_nodemask() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zone *preferred_zone;
	int classzone_idx;
	int migratetype;
	enum zone_type high_zoneidx;
	bool spread_dirty_pages;
};

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (B1) is #8, its order-1
 * buddy (B2) is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}
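
/*
 * Illustrative sketch (not part of this header): because buddies live in
 * the same MAX_ORDER-aligned block, a caller that already has the struct
 * page can reach the buddy with plain pointer arithmetic rather than
 * pfn_to_page():
 *
 *	buddy_idx = __find_buddy_index(page_idx, order);
 *	buddy = page + (buddy_idx - page_idx);
 */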

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
					unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
extern int user_min_free_kbytes;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned long nr_freepages;	/* Number of isolated free pages */
	unsigned long nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	unsigned long migrate_pfn;	/* isolate_migratepages search base */
	unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	int order;			/* order a direct compactor needs */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	const int alloc_flags;		/* alloc flags of a direct compactor */
	const int classzone_idx;	/* zone index of a direct compactor */
	struct zone *zone;
	int contended;			/* Signal need_resched() or lock
					 * contention detected during
					 * compaction
					 */
};
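
/*
 * Illustrative sketch (simplified, not part of this header): since the
 * free and migrate scanners walk towards each other, a compaction run can
 * detect that it has covered the whole zone with roughly:
 *
 *	if (cc->free_pfn <= cc->migrate_pfn)
 *		return COMPACT_COMPLETE;
 */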

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
unsigned long
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

#endif

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use page_order_unsafe() below.
 */
static inline unsigned int page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like page_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define page_order_unsafe(page)		READ_ONCE(page_private(page))
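
/*
 * Illustrative sketch (not part of this header): a lockless caller is
 * expected to copy the value once into a local and range-check it before
 * use, e.g.
 *
 *	if (PageBuddy(page)) {
 *		unsigned long freepage_order = page_order_unsafe(page);
 *
 *		if (freepage_order < MAX_ORDER)
 *			pfn += (1UL << freepage_order) - 1;
 *	}
 */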

static inline bool is_cow_mapping(vm_flags_t flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE)) == VM_EXEC;
}

static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & (VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN))) != 0;
}

static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & ((VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN)) |
			 VM_WRITE | VM_SHARED)) == VM_WRITE;
}
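
/*
 * Worked example (illustrative): a MAP_PRIVATE, PROT_READ|PROT_WRITE file
 * mapping carries VM_WRITE and VM_MAYWRITE but not VM_SHARED, so both
 * is_cow_mapping() and is_data_mapping() are true for it; mapping the same
 * file MAP_SHARED sets VM_SHARED, which makes both tests false.
 */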

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent);

#ifdef CONFIG_MMU
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern unsigned int munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked(). This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void clear_page_mlock(struct page *page);

/*
 * mlock_migrate_page - called only from migrate_misplaced_transhuge_page()
 * (because that does not go through the full procedure of migration ptes):
 * to migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		int nr_pages = hpage_nr_pages(page);

		/* Holding pmd lock, no change in irq context: __mod is safe */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		SetPageMlocked(newpage);
		__mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
	}
}

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address = __vma_address(page, vma);

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);

	return address;
}
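
/*
 * Worked example (illustrative values): for a file page with index 10
 * mapped by a vma with vm_pgoff == 4 and vm_start == 0x7f0000000000,
 * __vma_address() returns 0x7f0000000000 + ((10 - 4) << PAGE_SHIFT),
 * i.e. the address of the seventh page of the mapping.
 */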

#else /* !CONFIG_MMU */
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'. Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'. Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
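
/*
 * Illustrative sketch (not part of this header; do_something() is a
 * placeholder): callers walking every subpage of a gigantic page advance
 * the cursor with mem_map_next() instead of plain pointer arithmetic:
 *
 *	for (i = 0, p = base; i < nr_pages; i++, p = mem_map_next(p, base, i))
 *		do_something(p);
 */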

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			printk(KERN_WARNING "mminit::" prefix " " fmt, ##arg); \
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
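
/*
 * Illustrative usage (hypothetical message, not part of this header):
 *
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init",
 *			"initialising zone %s\n", zone->name);
 */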

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define ZONE_RECLAIM_NOSCAN	-2
#define ZONE_RECLAIM_FULL	-1
#define ZONE_RECLAIM_SOME	0
#define ZONE_RECLAIM_SUCCESS	1

extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)

#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
#define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
#define ALLOC_FAIR		0x100 /* fair zone allocation */
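
/*
 * Illustrative sketch (not part of this header): the low bits of
 * alloc_flags select which watermark the allocator tests, e.g.
 *
 *	mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
 *
 * while the remaining ALLOC_* bits adjust how strictly that mark is
 * enforced.
 */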

enum ttu_flags;
struct tlbflush_unmap_batch;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}

#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
#endif /* __MM_INTERNAL_H */