/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vm_write_begin(vma);
	WRITE_ONCE(vma->vm_flags, new_flags);
	vm_write_end(vma);
out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
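
/*
 * Illustrative sketch (not part of the original source): a call that
 * covers only the middle of a mapping triggers both split_vma() calls
 * above. Assuming a 4K page size and one anonymous VMA spanning
 * [addr, addr + 16K):
 *
 *	madvise(addr + 4096, 8192, MADV_RANDOM);
 *
 * leaves three VMAs: the first and last page ranges keep the old flags,
 * while [addr + 4K, addr + 12K) now carries VM_RAND_READ.
 */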

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					   file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
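
/*
 * Hedged usage sketch (userspace, not kernel code): an application that
 * is about to stream through a file mapping can request the readahead
 * above in advance; "fd" and "length" are hypothetical names.
 *
 *	void *buf = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (buf != MAP_FAILED)
 *		madvise(buf, length, MADV_WILLNEED);
 *
 * The call queues the I/O and returns without waiting for completion.
 */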

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte has swp_entry, just clear page table to
		 * prevent swap-in which is more expensive rather than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If page is shared with others, we couldn't clear
			 * PG_dirty of the page.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some of architecture(ex, PPC) don't update TLB
			 * with set_pte_at and tlb_remove_tlb_entry so for
			 * the portability, remap the pte with old|clean
			 * after pte clearing.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			if (PageActive(page))
				deactivate_page(page);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	vm_write_begin(vma);
	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
	vm_write_end(vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	unsigned long start, end;
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	start = max(vma->vm_start, start_addr);
	if (start >= vma->vm_end)
		return -EINVAL;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(mm, start, end);
	madvise_free_page_range(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);

	return 0;
}

static long madvise_free(struct vm_area_struct *vma,
			 struct vm_area_struct **prev,
			 unsigned long start, unsigned long end)
{
	*prev = vma;
	return madvise_free_single_vma(vma, start, end);
}
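
/*
 * Hedged usage sketch (userspace, not kernel code): an allocator can
 * mark freed anonymous chunks lazy-free instead of unmapping them; the
 * kernel may discard the pages under memory pressure, and a later store
 * to a page cancels its lazy-free state. "chunk" and "size" are
 * hypothetical names.
 *
 *	madvise(chunk, size, MADV_FREE);
 *	...
 *	chunk[0] = 1;	(this page is kept; untouched pages may be purged)
 */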

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	zap_page_range(vma, start, end - start, NULL);
	return 0;
}
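
/*
 * Hedged usage sketch (userspace, not kernel code): once a large
 * anonymous arena is logically empty, its pages can be handed back
 * immediately; later reads of the range return zero-filled pages.
 * "arena" and "arena_size" are hypothetical names.
 *
 *	if (madvise(arena, arena_size, MADV_DONTNEED) != 0)
 *		perror("madvise(MADV_DONTNEED)");
 */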

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
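
/*
 * Hedged equivalence sketch (userspace, not kernel code): since
 * madvise_remove() is built on vfs_fallocate(), punching the same hole
 * directly through the file descriptor has the same effect on the
 * backing store; "fd", "off" and "len" are hypothetical names.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *
 * MADV_REMOVE expresses the same request relative to a shared, writable
 * mapping of the file (anything else returns -EACCES above).
 */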

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	struct page *p;
	struct zone *zone;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/* p is assigned by get_user_pages_fast() before the increment runs */
	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(p))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
			page_to_pfn(p), start);
		ret = memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif
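
/*
 * Hedged usage sketch (userspace, not kernel code): on a kernel built
 * with CONFIG_MEMORY_FAILURE, a privileged (CAP_SYS_ADMIN) test can
 * exercise the memory-failure paths above. "page" and "pagesize" are
 * hypothetical; the buffer is page-aligned.
 *
 *	madvise(page, pagesize, MADV_HWPOISON);		(inject hard failure)
 *	madvise(page, pagesize, MADV_SOFT_OFFLINE);	(migrate, then offline)
 */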

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
		/*
		 * XXX: In this implementation, MADV_FREE works like
		 * MADV_DONTNEED on swapless system or full swap.
		 */
		if (get_nr_swap_pages() > 0)
			return madvise_free(vma, prev, start, end);
		/* passthrough */
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end). */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
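
/*
 * Hedged end-to-end sketch (userspace, not kernel code): advice applies
 * to whole pages in [start, start + len); unmapped holes in the range
 * are skipped but reported as -ENOMEM once the loop above finishes.
 * "fd" and the sizes are hypothetical.
 *
 *	size_t len = 64 * 4096;
 *	char *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	madvise(p, len, MADV_SEQUENTIAL);	(aggressive readahead)
 *	... stream through p ...
 *	madvise(p, len, MADV_DONTNEED);		(drop these pages; they are
 *						 re-read from the file if
 *						 touched again)
 */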