/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma or its page tables will exist for
	 * the duration of the operation. A caller that takes the
	 * reference is responsible for freeing the anon_vma if it
	 * is the last user on release.
	 */
	atomic_t refcount;

	/*
	 * Count of child anon_vmas and VMAs which point to this anon_vma.
	 *
	 * This counter is used when deciding whether to reuse an anon_vma
	 * instead of forking a new one. See the comments in anon_vma_clone().
	 */
	unsigned degree;

	struct anon_vma *parent;	/* Parent of this anon_vma */

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */

	/* Interval tree of private "related" vmas */
	struct rb_root_cached rb_root;
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_lock & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};
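
/*
 * Illustrative sketch, not from the original source: after fork(),
 * anon_vma_fork() leaves the child VMA chained to both the parent's
 * anon_vma and a fresh anon_vma of its own, so rmap can reach every
 * process that might map a given anonymous page:
 *
 *	parent vma --same_vma--> avc0 ---rb---> parent anon_vma
 *	child  vma --same_vma--> avc1 ---rb---> parent anon_vma
 *	           \--same_vma-> avc2 ---rb---> child  anon_vma
 */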

enum ttu_flags {
	TTU_MIGRATION		= 0x1,	/* migration mode */
	TTU_MUNLOCK		= 0x2,	/* munlock mode */

	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
	TTU_IGNORE_ACCESS	= 0x10,	/* don't age */
	TTU_IGNORE_HWPOISON	= 0x20,	/* corrupted page is recoverable */
	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
					 * and caller guarantees they will
					 * do a final flush if necessary */
	TTU_RMAP_LOCKED		= 0x80,	/* do not grab rmap lock:
					 * caller holds it */
	TTU_SPLIT_FREEZE	= 0x100, /* freeze pte under splitting thp */
};
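
/*
 * Hedged usage sketch (modelled on callers such as mm/migrate.c, not
 * part of the original header): ttu_flags form a bitmask and are OR-ed
 * together, e.g. to unmap a page for migration while ignoring mlock
 * and reference bits:
 *
 *	try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK |
 *			   TTU_IGNORE_ACCESS);
 */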

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
	if (likely(vma->anon_vma))
		return 0;

	return __anon_vma_prepare(vma);
}
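
/*
 * Hypothetical fault-path sketch (loosely based on do_anonymous_page()
 * in mm/memory.c; the surrounding details are assumptions): an anon_vma
 * must be prepared before the first anonymous page is mapped into a vma:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	...
 *	page_add_new_anon_rmap(page, vma, vmf->address, false);
 */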

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);
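
/*
 * Hedged pinning sketch (not part of the original header):
 * page_get_anon_vma() takes a reference, so the anon_vma stays alive
 * even if all the vmas on its interval tree go away while we walk it:
 *
 *	anon_vma = page_get_anon_vma(page);
 *	if (anon_vma) {
 *		anon_vma_lock_read(anon_vma);
 *		...				// walk the interval tree
 *		anon_vma_unlock_read(anon_vma);
 *		put_anon_vma(anon_vma);		// may free the anon_vma
 *	}
 */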

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

static inline void page_dup_rmap(struct page *page, bool compound)
{
	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
		    struct mem_cgroup *memcg, unsigned long *vm_flags);

bool try_to_unmap(struct page *, enum ttu_flags flags);
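
/*
 * Hedged reclaim sketch (heavily simplified from mm/vmscan.c; the
 * control flow here is an assumption, not a quote): reclaim first asks
 * rmap whether the page was recently referenced, and only then tries
 * to unmap it from every pte that maps it:
 *
 *	if (page_referenced(page, 1, memcg, &vm_flags))
 *		goto keep_locked;	// recently used: keep it
 *	if (!try_to_unmap(page, flags))
 *		goto activate_locked;	// some mapping could not be removed
 */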

/* Avoid racy checks */
#define PVMW_SYNC		(1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION		(1 << 1)

struct page_vma_mapped_walk {
	struct page *page;
	struct vm_area_struct *vma;
	unsigned long address;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	unsigned int flags;
};

static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
	if (pvmw->pte)
		pte_unmap(pvmw->pte);
	if (pvmw->ptl)
		spin_unlock(pvmw->ptl);
}

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
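
/*
 * Typical caller sketch (modelled on the walker's users in mm/rmap.c;
 * a sketch, not a definitive recipe): iterate over every pte or pmd in
 * @vma that maps @page:
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		// here pvmw.pte or pvmw.pmd is valid and pvmw.ptl is held
 *		...
 *	}
 *
 * A caller that breaks out of the loop early must call
 * page_vma_mapped_walk_done() itself to unmap the pte and drop the ptl.
 */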

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
void try_to_munlock(struct page *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: To control rmap traversal for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where page is mapped
 * done: for checking the traversal termination condition
 * anon_lock: for taking the anon_vma lock in an optimized way rather
 *	      than the default way
 * invalid_vma: for skipping uninteresting vmas
 */
struct rmap_walk_control {
	void *arg;
	/*
	 * Return false if page table scanning in rmap_walk should be stopped.
	 * Otherwise, return true.
	 */
	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			 unsigned long addr, void *arg);
	int (*done)(struct page *page);
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
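
/*
 * Hedged setup sketch (modelled on page_referenced() in mm/rmap.c; the
 * field values are illustrative assumptions): a caller fills in only
 * the hooks it needs and hands the control block to rmap_walk():
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = page_referenced_one,
 *		.arg = (void *)&pra,
 *		.anon_lock = page_lock_anon_vma_read,
 *	};
 *
 *	rmap_walk(page, &rwc);
 */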

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) false

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

#endif	/* _LINUX_RMAP_H */