/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

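/*
 * in mm/memory.c: free the page tables covering the vma list that starts
 * at start_vma; floor and ceiling bound the address range that may be
 * torn down.
 */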
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

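/*
 * in mm/page_alloc.c: initialise the head and tail page metadata of a
 * compound page of the given order.
 */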
extern void prep_compound_page(struct page *page, unsigned long order);
extern void prep_compound_gigantic_page(struct page *page, unsigned long order);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

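/*
 * Decrement the page's reference count without the release logic of
 * put_page(); this never frees the page, so callers take responsibility
 * for the final reference themselves.
 */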
static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't need
 * atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}

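/*
 * in mm/mlock.c:
 */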
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
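/*
 * Munlock every page in the vma; intended for when an entire vma is
 * unlocked or torn down.
 */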
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * unevictable_migrate_page() is called only from migrate_page_copy() to
 * migrate the Unevictable flag to the new page.
 * Note that the old page has been isolated from the LRU lists at this
 * point, so we don't need to worry about LRU statistics.
 */
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
#else
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
}
#endif

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Called only in the fault path, via page_evictable(), for a new page
 * to determine whether it is being mapped into a VM_LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}

/*
 * Must be called with the vma's mmap_sem held for read and the page locked.
 */
extern void mlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}

/*
 * free_page_mlock() -- clean up attempts to free an mlocked page.
 * The page should not be on the LRU, so there is no need to fix that up;
 * free_pages_check() will verify that.
 */
static inline void free_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page))) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		__count_vm_event(UNEVICTABLE_MLOCKFREED);
		local_irq_restore(flags);
	}
}

#else /* CONFIG_UNEVICTABLE_LRU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
static inline void free_page_mlock(struct page *page) { }

#endif /* CONFIG_UNEVICTABLE_LRU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
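
/*
 * Sketch of the intended walk over a gigantic page's subpages (the real
 * users live in mm/hugetlb.c; nr_subpages and do_something() below are
 * placeholders):
 *
 *	struct page *p = base;
 *	int i;
 *
 *	for (i = 0; i < nr_subpages; i++, p = mem_map_next(p, base, i))
 *		do_something(p);
 */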

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif
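/* e.g. free_area_init_node() in mm/page_alloc.c carries this annotation. */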

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

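/*
 * A message is printed only when its level is below mminit_loglevel;
 * MMINIT_WARNING messages go out at KERN_WARNING, everything else at
 * KERN_DEBUG.
 */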
#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define GUP_FLAGS_WRITE                  0x1
#define GUP_FLAGS_FORCE                  0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4

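/*
 * in mm/memory.c: the core of get_user_pages(); 'flags' is a mask of
 * the GUP_FLAGS_* values above.
 */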
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas);

#endif /* __MM_INTERNAL_H */