// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/userfaultfd.c
 *
 * Copyright (C) 2015 Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check that the vma is registered with uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

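/*
 * For context, the ctx check above is what ties this path back to
 * userspace registration: vm_userfaultfd_ctx.ctx is only set once the
 * range has been registered.  A userspace sketch (uffd, area and size
 * are illustrative names, not kernel symbols):
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long) area, .len = size },
 *		.mode = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 */
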
/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, bool wp_copy)
{
	int ret;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page->mapping;
	spinlock_t *ptl;
	struct inode *inode;
	pgoff_t offset, max_off;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable) {
		if (wp_copy)
			_dst_pte = pte_mkuffd_wp(_dst_pte);
		else
			_dst_pte = pte_mkwrite(_dst_pte);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;

	if (page_in_cache)
		page_add_file_rmap(page, false);
	else
		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	if (newly_allocated)
		lru_cache_add_inactive_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

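/*
 * A sketch of the caller protocol for the helper above, distilled from
 * mcopy_atomic_pte() below (not a separate API): the page is prepared,
 * marked uptodate and charged first, and only then mapped:
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
 *	... fill the page contents ...
 *	__SetPageUptodate(page);
 *	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
 *		goto out_release;
 *	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
 *				       page, true, wp_copy);
 */
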
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true, wp_copy);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	put_page(page);
	goto out;
}

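/*
 * Note on the -ENOENT path above (a restatement of the existing contract,
 * see the retry loop in __mcopy_atomic()): copy_from_user() may fail while
 * mmap_lock is held, e.g. if the source page must first be faulted in.  In
 * that case the freshly allocated page is handed back through *pagep, the
 * caller drops mmap_lock, redoes the copy via kmap()/copy_from_user() in a
 * context that may sleep, and then retries with the now-filled page.
 */
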
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
				pmd_t *dst_pmd,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				bool wp_copy)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct page *page;
	int ret;

	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
	if (ret)
		goto out;
	if (!page) {
		ret = -EFAULT;
		goto out;
	}

	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, false, wp_copy);
	if (ret)
		goto out_release;

	unlock_page(page);
	ret = 0;
out:
	return ret;
out_release:
	unlock_page(page);
	put_page(page);
	goto out;
}

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that this is not necessarily run because the pmd was
	 * missing: the *pmd may already be established, and it may
	 * even be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mode)
{
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  The fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		if (mode != MCOPY_ATOMIC_CONTINUE &&
		    !huge_pte_none(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, mode, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      enum mcopy_atomic_mode mode);
#endif /* CONFIG_HUGETLB_PAGE */

static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						enum mcopy_atomic_mode mode,
						bool wp_copy)
{
	ssize_t err;

	if (mode == MCOPY_ATOMIC_CONTINUE) {
		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
					    wp_copy);
	}

	/*
	 * The normal page fault path for a shmem mapping will invoke
	 * the fault, fill the hole in the file and COW it right away.
	 * The result generates plain anonymous memory.  So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole.  For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (mode == MCOPY_ATOMIC_NORMAL)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		VM_WARN_ON_ONCE(wp_copy);
		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     mode != MCOPY_ATOMIC_NORMAL,
					     page);
	}

	return err;
}

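/*
 * Dispatch summary for mfill_atomic_pte() (derived from the code above,
 * for reference only):
 *
 *	mode			private vma		shared (shmem) vma
 *	MCOPY_ATOMIC_CONTINUE	mcontinue_atomic_pte()	mcontinue_atomic_pte()
 *	MCOPY_ATOMIC_NORMAL	mcopy_atomic_pte()	shmem_mfill_atomic_pte()
 *	MCOPY_ATOMIC_ZEROPAGE	mfill_zeropage_pte()	shmem_mfill_atomic_pte()
 */
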
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mcopy_mode,
					      atomic_t *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * Validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine.
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, mcopy_mode);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, mcopy_mode, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     atomic_t *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}

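/*
 * Userspace reaches mcopy_atomic() through the UFFDIO_COPY ioctl on a
 * userfaultfd.  A minimal sketch, error handling elided (uffd, fault_addr,
 * src_buf and page_size are illustrative names, not kernel symbols):
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(page_size - 1),
 *		.src = (unsigned long) src_buf,
 *		.len = page_size,
 *		.mode = 0,	(or UFFDIO_COPY_MODE_WP)
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * On return copy.copy holds the number of bytes copied; an -EAGAIN result
 * means the mappings changed underneath us and the call should be retried.
 */
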
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
			      mmap_changing, 0);
}

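/*
 * Correspondingly, the UFFDIO_ZEROPAGE ioctl resolves a fault with the
 * zero page and ends up in mfill_zeropage().  A sketch, with the same
 * illustrative names as the UFFDIO_COPY example above:
 *
 *	struct uffdio_zeropage zp = {
 *		.range = { .start = dst, .len = page_size },
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
 */
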
ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
			      mmap_changing, 0);
}

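/*
 * UFFDIO_CONTINUE (minor fault handling, see mcontinue_atomic_pte())
 * resolves a fault by installing a page that already exists in the page
 * cache instead of copying in new contents.  A sketch, same assumptions
 * as above:
 *
 *	struct uffdio_continue cont = {
 *		.range = { .start = dst, .len = page_size },
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_CONTINUE, &cont);
 */
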
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	pgprot_t newprot;
	int err;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);
	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	change_protection(dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);

	err = 0;
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}
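
/*
 * mwriteprotect_range() backs the UFFDIO_WRITEPROTECT ioctl.  A sketch of
 * arming and later resolving write protection on a range registered with
 * UFFDIO_REGISTER_MODE_WP (illustrative names as in the examples above):
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = dst, .len = len },
 *		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
 *	};
 *	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);	(arm write protection)
 *
 *	... later, when a write fault for the range is handled ...
 *
 *	wp.mode = 0;
 *	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);	(resolve / un-protect)
 */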