// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

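/*
 * Walk an existing mapping down to the PUD level. Returns NULL if any
 * level (pgd, p4d or pud) is missing or bad, so callers simply skip the
 * range instead of populating it.
 */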
static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

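/*
 * As above, but continue down to the PMD level of the source mapping.
 * A missing entry means there is nothing to move in this range.
 */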
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

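/*
 * Take/drop the rmap locks covering this VMA (the file's i_mmap lock
 * first, then the anon_vma lock, dropped in the reverse order).  Held
 * while moving entries so rmap walkers cannot miss both the old and the
 * new copy; see the comment in move_ptes().
 */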
static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

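/*
 * Move the PTEs covering [old_addr, old_end) from old_pmd over to
 * new_pmd at new_addr.  Both PMDs must already be present; the caller
 * holds mmap_lock exclusively.
 */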
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

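/*
 * Speed up mremap of large, PMD-aligned ranges by moving a whole page
 * table page at once instead of copying individual PTEs.  Only built
 * when the architecture selects HAVE_MOVE_PMD.
 */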
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif

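/*
 * Same idea one level up: move an entire PUD entry when both source and
 * destination are PUD-aligned and the architecture selects HAVE_MOVE_PUD.
 */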
#ifdef CONFIG_HAVE_MOVE_PUD
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

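/*
 * Page table levels (and the huge-PMD case) that move_page_tables() may
 * try to move as a single entry.
 */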
enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

/*
 * Attempts to speedup the move by moving entry at the level corresponding to
 * pgt_entry. Returns true if the move was successful, else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

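/*
 * Copy the page table entries for [old_addr, old_addr + len) over to the
 * range starting at new_addr, preferring PUD/PMD-sized moves when the
 * extent and alignment allow it and falling back to move_ptes()
 * otherwise.  Returns how many bytes were actually moved; a short return
 * means a page table allocation failed part way through.
 */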
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving
		 * at the PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
		if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			pud_t *old_pud, *new_pud;

			old_pud = get_old_pud(vma->vm_mm, old_addr);
			if (!old_pud)
				continue;
			new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
			if (!new_pud)
				break;
			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}

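/*
 * Relocate a VMA: create the new VMA with copy_vma(), move the page
 * tables across, fix up accounting (VM_ACCOUNT, locked_vm, hiwater_vm)
 * and unmap the old range unless MREMAP_DONTUNMAP was requested.
 * Returns the new address on success or a negative error code.
 */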
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped. But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	/*
	 * new_vma is returned protected by copy_vma, to prevent speculative
	 * page faults from being handled in the destination area before we
	 * move the ptes.  Now we must also protect the source VMA, since we
	 * don't want pages to be mapped behind our back while we are copying
	 * the PTEs.
	 */
	if (vma != new_vma)
		vm_write_begin(vma);

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		if (vma != new_vma)
			vm_write_end(vma);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
		if (vma != new_vma)
			vm_write_end(vma);
	}
	vm_write_end(new_vma);

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		if (vm_flags & VM_ACCOUNT) {
			/* Always put back VM_ACCOUNT since we won't unmap */
			vma->vm_flags |= VM_ACCOUNT;

			vm_acct_memory(new_len >> PAGE_SHIFT);
		}

		/*
		 * VMAs can actually be merged back together in copy_vma
		 * calling vma_merge. This can happen with anonymous vmas
		 * which have not yet been faulted, so if we were to consider
		 * this VMA split we'll end up adding VM_ACCOUNT on the
		 * next VMA, which is completely unrelated if this VMA
		 * was re-merged.
		 */
		if (split && new_vma == vma)
			split = 0;

		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/* Because we won't unmap we don't need to touch locked_vm */
		goto out;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}
out:
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}

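/*
 * Look up and validate the VMA at addr for an mremap() from old_len to
 * new_len with the given flags.  On success the number of pages charged
 * against VM_ACCOUNT is stored in *p; on failure an ERR_PTR() is
 * returned.
 */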
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping. This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original. This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original. There are no known use cases for this
	 * behavior. As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

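/*
 * Handle the MREMAP_FIXED / MREMAP_DONTUNMAP paths: validate the target
 * range, shrink or unmap as needed, then move the mapping to new_addr
 * via move_vma().
 */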
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vmas after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute if the whole
	 * operation has high chances to succeed map-wise.
	 * The worst-case scenario is when both vmas (new_addr and old_addr)
	 * get split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) to the ones we already hold.
	 * Check whether the current map count plus 2 still leads us to 4 maps
	 * below the threshold, otherwise return -ENOMEM here to be more safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

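/*
 * Check whether the VMA can grow in place by delta bytes: the new end
 * must not overflow, run into the next VMA, or be rejected by
 * get_unmapped_area().
 */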
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}