// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/userfaultfd.c
 *
 * Copyright (C) 2015 Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

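/*
 * Allocate a new anonymous page, copy the source page from user space into
 * it and map it at dst_addr. Returns -ENOENT with *pagep pointing at the
 * allocated page when the user copy faults, so the caller can redo the
 * copy_from_user() without holding mmap_sem and call back in with *pagep set.
 */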
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;
	pgoff_t offset, max_off;
	struct inode *inode;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_release_uncharge_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}

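/*
 * Install a read-only mapping of the zero page at dst_addr, for
 * UFFDIO_ZEROPAGE on a non-shared mapping.
 */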
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

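/*
 * Walk the page tables down to the pmd level, allocating the p4d, pud
 * and pmd on the way if they are missing.
 */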
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that this is not necessarily run because the pmd was
	 * missing: the *pmd may already be established and in turn
	 * it may also be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas. Note that this routine is
 * called with mmap_sem held, it will release mmap_sem before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb. A PMD_SIZE huge page may exist as used
	 * by THP. Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (zeropage) {
		up_read(&dst_mm->mmap_sem);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set. If we had to drop mmap_sem and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_vma(dst_mm, dst_start);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;
		/*
		 * Check the vma is registered in uffd, this is
		 * required to enforce the VM_MAYWRITE check done at
		 * uffd registration time.
		 */
		if (!dst_vma->vm_userfaultfd_ctx.ctx)
			goto out_unlock;

		if (dst_start < dst_vma->vm_start ||
		    dst_start + len > dst_vma->vm_end)
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
		    (len - copied) & (vma_hpagesize - 1)))
		goto out_unlock;

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		pte_t dst_pteval;

		BUG_ON(dst_addr >= dst_start + len);
		VM_BUG_ON(dst_addr & ~huge_page_mask(h));

		/*
		 * Serialize via hugetlb_fault_mutex
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = -EEXIST;
		dst_pteval = huge_ptep_get(dst_pte);
		if (!huge_pte_none(dst_pteval)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
						dst_addr, src_addr, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		vm_alloc_shared = vm_shared;

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						pages_per_huge_page(h), true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			down_read(&dst_mm->mmap_sem);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.
		 *
		 * Reservation handling is very subtle, and is different for
		 * private and shared mappings. See the routine
		 * restore_reserve_on_error for details. Unfortunately, we
		 * can not call restore_reserve_on_error now as it would
		 * require holding mmap_sem.
		 *
		 * If a reservation for the page existed in the reservation
		 * map of a private mapping, the map was modified to indicate
		 * the reservation was consumed when the page was allocated.
		 * We clear the PagePrivate flag now so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation. If no
		 * reservation existed, it is still safe to clear PagePrivate
		 * as no adjustments to reservation counts were made during
		 * allocation.
		 *
		 * The reservation map for shared mappings indicates which
		 * pages have reservations. When a huge page is allocated
		 * for an address with a reservation, no change is made to
		 * the reserve map. In this case PagePrivate will be set
		 * to indicate that the global reservation count should be
		 * incremented when the page is freed. This is the desired
		 * behavior. However, when a huge page is allocated for an
		 * address without a reservation a reservation entry is added
		 * to the reservation map, and PagePrivate will not be set.
		 * When the page is freed, the global reserve count will NOT
		 * be incremented and it will appear as though we have leaked
		 * a reserved page. In this case, set PagePrivate so that the
		 * global reserve count will be incremented to match the
		 * reservation map entry which was created.
		 *
		 * Note that vm_alloc_shared is based on the flags of the vma
		 * for which the page was originally allocated. dst_vma could
		 * be different or NULL on error.
		 */
		if (vm_alloc_shared)
			SetPagePrivate(page);
		else
			ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      bool zeropage);
#endif /* CONFIG_HUGETLB_PAGE */

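/*
 * Fill a single pte: dispatch to the anonymous or shmem implementation,
 * either copying a page from user space (UFFDIO_COPY) or installing the
 * zero page (UFFDIO_ZEROPAGE).
 */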
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						bool zeropage)
{
	ssize_t err;

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}

	return err;
}

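/*
 * Common implementation of UFFDIO_COPY (zeropage == false) and
 * UFFDIO_ZEROPAGE (zeropage == true): validate the destination range,
 * then fill it one page at a time, dropping mmap_sem to redo the user
 * copy whenever copy_from_user() faults with the lock held.
 */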
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage,
					      bool *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		goto out_unlock;
	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		goto out_unlock;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					      src_start, len, zeropage);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, zeropage);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

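/*
 * Entry point for UFFDIO_COPY: atomically fill [dst_start, dst_start + len)
 * with data copied from user space at src_start.
 */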
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false,
			      mmap_changing);
}

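/*
 * Entry point for UFFDIO_ZEROPAGE: atomically fill [start, start + len)
 * with the zero page.
 */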
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing);
}