// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out_convert_errno;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out_convert_errno;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error)
			goto out_convert_errno;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error)
			goto out_convert_errno;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out_convert_errno:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
out:
	return error;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
							vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry		= swapin_walk_pmd_entry,
};

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!xa_is_value(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
							NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif	/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */
	get_file(file);
	up_read(&current->mm->mmap_sem);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	down_read(&current->mm->mmap_sem);
	return 0;
}

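/*
 * Illustrative userspace sketch (not part of the kernel build): one way a
 * program might request the readahead that madvise_willneed() above
 * services. The file name, and the use of the whole file length, are
 * example-only assumptions.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <sys/stat.h>
 *
 *	int fd = open("data.bin", O_RDONLY);	// hypothetical input file
 *	struct stat st;
 *	fstat(fd, &st);
 *	void *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (p != MAP_FAILED)
 *		madvise(p, st.st_size, MADV_WILLNEED); // schedule I/O, don't wait
 */
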
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)

{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte has a swp_entry, just clear the page table to
		 * prevent swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we can't clear
			 * PG_dirty of the page.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at() and tlb_remove_tlb_entry(), so for
			 * portability, remap the pte as old and clean after
			 * clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry		= madvise_free_pte_range,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, range.start, range.end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, range.start, range.end);

	return 0;
}

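/*
 * Illustrative userspace sketch (not part of the kernel build): a memory
 * allocator might mark a freed anonymous region lazily reclaimable rather
 * than unmapping it, so later reuse avoids fresh faults unless the kernel
 * actually reclaimed the pages. Names and the arena size are example-only.
 *
 *	#include <sys/mman.h>
 *
 *	size_t len = 1 << 20;	// hypothetical 1 MiB arena
 *	void *arena = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// ... use arena; then, when the allocator frees it:
 *	madvise(arena, len, MADV_FREE);	// pages may be reclaimed lazily
 */
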
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do. This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

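/*
 * Illustrative userspace sketch (not part of the kernel build): discarding
 * the contents of a scratch buffer while keeping the mapping itself. For a
 * private anonymous mapping, the next touch of each page yields zero-filled
 * memory. The buffer name and length are example-only assumptions.
 *
 *	madvise(scratch_buf, scratch_len, MADV_DONTNEED);
 */
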
static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_dontneed_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_sem was
			 * released, the concurrent operation does not
			 * leave madvise() with an undefined result:
			 * there may be an adjacent next vma that we'll
			 * walk next. userfaultfd_remove() will generate
			 * an UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

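/*
 * Illustrative userspace sketch (not part of the kernel build): punching a
 * hole through a writable shared mapping, which madvise_remove() above
 * forwards to the filesystem as an fallocate() punch-hole. The mapping must
 * be MAP_SHARED and writable, the filesystem must support hole punching,
 * and off/chunk are assumed page-aligned; all names are example-only.
 *
 *	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	// drop one region of the file's backing store:
 *	madvise(map + off, chunk, MADV_REMOVE);
 */
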
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;
	struct zone *zone;
	unsigned int order;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;


	for (; start < end; start += PAGE_SIZE << order) {
		unsigned long pfn;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page, and order will be 0.
		 */
		order = compound_order(compound_head(page));

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
					pfn, start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}

		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);

		/*
		 * Drop the page reference taken by get_user_pages_fast(). In
		 * the absence of MF_COUNT_INCREASED the memory_failure()
		 * routine is responsible for pinning the page to prevent it
		 * from being released back to the page allocator.
		 */
		put_page(page);
		ret = memory_failure(pfn, 0);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
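/*
 * Illustrative userspace sketch (not part of the kernel build): a typical
 * call pattern with error handling, respecting the page-alignment rule from
 * the -EINVAL case above. Variable names are example-only assumptions.
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *
 *	// addr must be page-aligned; len is rounded up to a page multiple.
 *	if (madvise(addr, len, MADV_SEQUENTIAL) == -1)
 *		fprintf(stderr, "madvise: %s\n", strerror(errno));
 */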
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}