/*
 * mm/userfaultfd.c
 *
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>
#include "internal.h"
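/*
 * Install a newly allocated anonymous page at @dst_addr, filled with the
 * contents of @src_addr. On the first call *@pagep is NULL and the copy is
 * attempted under mmap_sem with an atomic kmap; if the source page is not
 * resident, the freshly allocated page is handed back in *@pagep and
 * -EFAULT tells the caller to fill it outside mmap_sem and retry. On the
 * retry the prefilled *@pagep is consumed instead.
 */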
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

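		/*
		 * kmap_atomic() disables page faults, so this
		 * copy_from_user() cannot sleep to fault in the source
		 * page; if the source is not resident it fails fast and
		 * we fall back to a sleeping copy in the caller.
		 */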
		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fall back to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -EFAULT;
			*pagep = page;
			/* don't free the page, the caller will retry with it */
			goto out;
		}

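		/*
		 * Flush the kernel-side stores out of the data cache so
		 * the new contents are visible through the user mapping
		 * on architectures with virtually indexed caches.
		 */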
		flush_dcache_page(page);
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

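	/* make the pte writable and dirty only if the vma allows writes */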
	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

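	/*
	 * If a pte already materialized (e.g. a racing UFFDIO_COPY filled
	 * this address first), be strict and return -EEXIST rather than
	 * overwrite it.
	 */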
	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}

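/*
 * Resolve a userfault by mapping the shared zero page read-only at
 * @dst_addr (the UFFDIO_ZEROPAGE case). No page is allocated:
 * pte_mkspecial() installs a special pte that the rmap code won't track.
 */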
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

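/*
 * Walk, and if necessary allocate, the page tables down to the pmd level
 * for @address. Returns NULL only on allocation failure.
 */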
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (pud)
		/*
		 * Note that this doesn't run only when the pmd is
		 * missing: the *pmd may already be established and in
		 * turn it may also be a trans_huge_pmd.
		 */
		pmd = pmd_alloc(mm, pud, address);
	return pmd;
}

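/*
 * Common implementation of UFFDIO_COPY (@zeropage == false) and
 * UFFDIO_ZEROPAGE (@zeropage == true): fill [dst_start, dst_start + len)
 * one page at a time under mmap_sem held for reading. When the atomic
 * copy of a source page fails with -EFAULT, mmap_sem is dropped, the page
 * is filled with a sleeping copy_from_user() and the vma lookup is
 * retried from scratch. Returns the number of bytes filled, or a negative
 * error if nothing was copied.
 */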
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * Make sure the vma is not shared and that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -EINVAL;
	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	/*
	 * Check that the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		goto out_unlock;

	/*
	 * FIXME: for now only allow copying on anonymous vmas; tmpfs
	 * support should be added.
	 */
	if (dst_vma->vm_ops)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma, or this page would get a
	 * NULL anon_vma when installed in the dst_vma.
	 */
	err = -ENOMEM;
	if (unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

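		/*
		 * pmd_read_atomic() returns a consistent snapshot of the
		 * pmd without taking the page table lock; this matters on
		 * 32-bit PAE where the pmd is 64 bits wide and a plain
		 * load could tear.
		 */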
		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP, don't override it
		 * and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, &page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma,
						 dst_addr);

		cond_resched();

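		/*
		 * -EFAULT from mcopy_atomic_pte() means the atomic copy
		 * could not fault the source page in. Drop mmap_sem, do
		 * a sleeping copy into the already-allocated page, then
		 * retry from scratch: the vma must be looked up and
		 * validated again since the lock was released.
		 */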
		if (unlikely(err == -EFAULT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_page(page);
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

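/*
 * mcopy_atomic() backs the UFFDIO_COPY ioctl: it atomically copies @len
 * bytes from @src_start in the caller's address space into @dst_start of
 * @dst_mm, resolving any userfaults pending on that range. A rough sketch
 * of the userspace side (values are illustrative):
 *
 *	struct uffdio_copy copy = {
 *		.dst = dst_start,	// page-aligned fault address
 *		.src = src_start,	// buffer holding the page contents
 *		.len = len,		// page-aligned length
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */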
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
}

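/*
 * mfill_zeropage() backs the UFFDIO_ZEROPAGE ioctl: it maps the zero page
 * over [start, start + len), so there is no source address and 0 is
 * passed down for src_start.
 */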
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true);
}