#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "internal.h"

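/*
 * State shared across a follow_page_mask() walk: @pgmap caches a
 * dev_pagemap reference for ZONE_DEVICE pages, and @page_mask reports
 * the size of the page that was found.
 */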
struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

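/*
 * Handle a pte that maps a raw pfn with no backing struct page (e.g. in
 * a VM_PFNMAP mapping): we cannot take a page reference, but we can
 * still honour FOLL_TOUCH by marking the entry young (and dirty for
 * write accesses).
 */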
static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No struct page to take a reference on */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable PTEs, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

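/*
 * Look up a single page at the pte level.  Returns the struct page on
 * success, NULL when the caller should fault the page in (e.g. to break
 * COW for a write, or to resolve a NUMA hinting fault), or an ERR_PTR:
 * notably -EEXIST from follow_pfn_pte() and -EFAULT for FOLL_DUMP on
 * special pages.
 */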
static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

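/*
 * Walk one pmd entry: handle hugetlb and hugepd mappings, wait on pmd
 * migration entries, and split or follow transparent huge pages as
 * dictated by @flags; otherwise descend to the pte level.
 */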
static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
			  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_sem is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & FOLL_SPLIT) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

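/*
 * Walk one pud entry: handle hugetlb, hugepd and devmap mappings at
 * this level, then descend to the pmd level.
 */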
static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

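/*
 * Walk one p4d entry: handle hugepd mappings, then descend to the pud
 * level.  Huge pages cannot exist at the p4d level, as the
 * BUILD_BUG_ON() below asserts.
 */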
static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

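/*
 * Convenience wrapper around follow_page_mask() for callers that do not
 * care about the page size: it uses a throwaway context and drops the
 * dev_pagemap reference before returning.
 */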
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}

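/*
 * Resolve a page in the gate area (e.g. the x86 vsyscall page), which
 * is not covered by an ordinary vma and so cannot be handled by
 * follow_page_mask(): walk the page tables by hand instead.
 */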
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, address);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);

		/*
		 * This should never happen (a device public page in the gate
		 * area).
		 */
		if (is_device_public_page(*page))
			goto unmap;
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

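/*
 * Check that the vma's access rights permit this gup: validate write
 * and "foreign" (FOLL_REMOTE) access against the vm_flags, honouring
 * the FOLL_FORCE exceptions, with a final architecture-specific veto
 * via arch_vma_access_permitted().
 */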
static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags)) {
				ret = -EFAULT;
				goto out;
			}
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, nonblocking);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page) {
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
				ret = 0;
				/* FALLTHRU */
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			ctx.page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			ctx.page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}

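/*
 * Check that a fixup_user_fault() caller may fault in this vma: basic
 * read/write rights plus the architecture-specific check, mirroring
 * check_vma_flags() on the gup path.
 */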
static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	set to true if the mmap_sem was dropped while retrying;
 *		may be NULL if the caller does not allow retry
 *
 * This is meant to be called in the specific scenario where for locking
 * reasons we try to access user memory in atomic context (within a
 * pagefault_disable() section): such an access returns -EFAULT, and we
 * want to resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
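 * A minimal illustrative caller, loosely modeled on the futex code's
 * fault_in_user_writeable() helper (a sketch of the pattern, not a
 * verbatim copy of any particular caller):
 *
 *	down_read(&mm->mmap_sem);
 *	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 *			       FAULT_FLAG_WRITE, NULL);
 *	up_read(&mm->mmap_sem);
 *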
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem.  So it does not
 * have the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	vm_fault_t ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

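/*
 * Core retry loop shared by the get_user_pages*() variants that can
 * handle VM_FAULT_RETRY: keep calling __get_user_pages(), re-taking
 * mmap_sem and retrying the faulting address with FAULT_FLAG_TRIED
 * whenever the fault handler dropped the lock.
 */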
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long nr_pages,
						    struct page **pages,
						    struct vm_area_struct **vmas,
						    int *locked,
						    unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
Ingo Molnar | c12d2da | 2016-04-04 10:24:58 +0200 | [diff] [blame] | 1007 | long get_user_pages_locked(unsigned long start, unsigned long nr_pages, |
Lorenzo Stoakes | 3b91317 | 2016-10-13 01:20:14 +0100 | [diff] [blame] | 1008 | unsigned int gup_flags, struct page **pages, |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1009 | int *locked) |
| 1010 | { |
Dave Hansen | cde7014 | 2016-02-12 13:01:55 -0800 | [diff] [blame] | 1011 | return __get_user_pages_locked(current, current->mm, start, nr_pages, |
Al Viro | e716712 | 2017-11-19 11:32:05 -0500 | [diff] [blame] | 1012 | pages, NULL, locked, |
Lorenzo Stoakes | 3b91317 | 2016-10-13 01:20:14 +0100 | [diff] [blame] | 1013 | gup_flags | FOLL_TOUCH); |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1014 | } |
Ingo Molnar | c12d2da | 2016-04-04 10:24:58 +0200 | [diff] [blame] | 1015 | EXPORT_SYMBOL(get_user_pages_locked); |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1016 | |
| 1017 | /* |
| 1018 | * get_user_pages_unlocked() is suitable to replace the form: |
| 1019 | * |
| 1020 | * down_read(&mm->mmap_sem); |
| 1021 | * get_user_pages(tsk, mm, ..., pages, NULL); |
| 1022 | * up_read(&mm->mmap_sem); |
| 1023 | * |
| 1024 | * with: |
| 1025 | * |
| 1026 | * get_user_pages_unlocked(tsk, mm, ..., pages); |
| 1027 | * |
| 1028 | * It is functionally equivalent to get_user_pages_fast(), so |
Lorenzo Stoakes | 80a7951 | 2016-12-12 16:42:46 -0800 | [diff] [blame] | 1029 | * get_user_pages_fast() should be used instead if specific gup_flags |
| 1030 | * (e.g. FOLL_FORCE) are not required. |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1031 | */ |
Ingo Molnar | c12d2da | 2016-04-04 10:24:58 +0200 | [diff] [blame] | 1032 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
Lorenzo Stoakes | c164154 | 2016-10-13 01:20:13 +0100 | [diff] [blame] | 1033 | struct page **pages, unsigned int gup_flags) |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1034 | { |
Al Viro | c803c9c | 2017-11-18 14:17:46 -0500 | [diff] [blame] | 1035 | struct mm_struct *mm = current->mm; |
| 1036 | int locked = 1; |
| 1037 | long ret; |
| 1038 | |
| 1039 | down_read(&mm->mmap_sem); |
| 1040 | ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, |
Al Viro | e716712 | 2017-11-19 11:32:05 -0500 | [diff] [blame] | 1041 | &locked, gup_flags | FOLL_TOUCH); |
Al Viro | c803c9c | 2017-11-18 14:17:46 -0500 | [diff] [blame] | 1042 | if (locked) |
| 1043 | up_read(&mm->mmap_sem); |
| 1044 | return ret; |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1045 | } |
Ingo Molnar | c12d2da | 2016-04-04 10:24:58 +0200 | [diff] [blame] | 1046 | EXPORT_SYMBOL(get_user_pages_unlocked); |
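| | |
| | /* |
| | * A minimal usage sketch (illustrative only; "uaddr", "npages" and |
| | * "pages" are placeholder names, not taken from an in-tree caller). |
| | * Every page returned must eventually be released with put_page(): |
| | * |
| | *	nr = get_user_pages_unlocked(uaddr, npages, pages, FOLL_WRITE); |
| | *	if (nr < 0) |
| | *		return nr;	- no pages were pinned |
| | *	... use pages[0..nr-1]; nr may be smaller than npages ... |
| | *	while (nr--) |
| | *		put_page(pages[nr]); |
| | */ |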
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1047 | |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1048 | /** |
Dave Hansen | 1e98779 | 2016-02-12 13:01:54 -0800 | [diff] [blame] | 1049 | * get_user_pages_remote() - pin user pages in memory |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1050 | * @tsk: the task_struct to use for page fault accounting, or |
| 1051 | * NULL if faults are not to be recorded. |
| 1052 | * @mm: mm_struct of target mm |
| 1053 | * @start: starting user address |
| 1054 | * @nr_pages: number of pages from start to pin |
Lorenzo Stoakes | 9beae1e | 2016-10-13 01:20:17 +0100 | [diff] [blame] | 1055 | * @gup_flags: flags modifying lookup behaviour |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1056 | * @pages: array that receives pointers to the pages pinned. |
| 1057 | * Should be at least nr_pages long. Or NULL, if caller |
| 1058 | * only intends to ensure the pages are faulted in. |
| 1059 | * @vmas: array of pointers to vmas corresponding to each page. |
| 1060 | * Or NULL if the caller does not require them. |
Lorenzo Stoakes | 5b56d49 | 2016-12-14 15:06:52 -0800 | [diff] [blame] | 1061 | * @locked: pointer to lock flag indicating whether lock is held and |
| 1062 | * subsequently whether VM_FAULT_RETRY functionality can be |
| 1063 | * utilised. Lock must initially be held. |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1064 | * |
| 1065 | * Returns number of pages pinned. This may be fewer than the number |
| 1066 | * requested. If nr_pages is 0 or negative, returns 0. If no pages |
| 1067 | * were pinned, returns -errno. Each page returned must be released |
| 1068 | * with a put_page() call when it is finished with. vmas will only |
| 1069 | * remain valid while mmap_sem is held. |
| 1070 | * |
| 1071 | * Must be called with mmap_sem held for read or write. |
| 1072 | * |
| 1073 | * get_user_pages walks a process's page tables and takes a reference to |
| 1074 | * each struct page that each user address corresponds to at a given |
| 1075 | * instant. That is, it takes the page that would be accessed if a user |
| 1076 | * thread accesses the given user virtual address at that instant. |
| 1077 | * |
| 1078 | * This does not guarantee that the page exists in the user mappings when |
| 1079 | * get_user_pages returns, and there may even be a completely different |
| 1080 | * page there in some cases (eg. if mmapped pagecache has been invalidated |
| 1081 | * and subsequently refaulted). However, it does guarantee that the page |
| 1082 | * won't be freed completely. And mostly callers simply care that the page |
| 1083 | * contains data that was valid *at some point in time*. Typically, an IO |
| 1084 | * or similar operation cannot guarantee anything stronger anyway because |
| 1085 | * locks can't be held over the syscall boundary. |
| 1086 | * |
Lorenzo Stoakes | 9beae1e | 2016-10-13 01:20:17 +0100 | [diff] [blame] | 1087 | * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page |
| 1088 | * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must |
| 1089 | * be called after the page is finished with, and before put_page is called. |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1090 | * |
| 1091 | * get_user_pages is typically used for fewer-copy IO operations, to get a |
| 1092 | * handle on the memory by some means other than accesses via the user virtual |
| 1093 | * addresses. The pages may be submitted for DMA to devices or accessed via |
| 1094 | * their kernel linear mapping (via the kmap APIs). Care should be taken to |
| 1095 | * use the correct cache flushing APIs. |
| 1096 | * |
| 1097 | * See also get_user_pages_fast, for performance critical applications. |
Andrea Arcangeli | f0818f4 | 2015-02-11 15:27:17 -0800 | [diff] [blame] | 1098 | * |
| 1099 | * get_user_pages should be phased out in favor of |
| 1100 | * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing |
| 1101 | * should use get_user_pages because it cannot pass |
| 1102 | * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1103 | */ |
Dave Hansen | 1e98779 | 2016-02-12 13:01:54 -0800 | [diff] [blame] | 1104 | long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, |
| 1105 | unsigned long start, unsigned long nr_pages, |
Lorenzo Stoakes | 9beae1e | 2016-10-13 01:20:17 +0100 | [diff] [blame] | 1106 | unsigned int gup_flags, struct page **pages, |
Lorenzo Stoakes | 5b56d49 | 2016-12-14 15:06:52 -0800 | [diff] [blame] | 1107 | struct vm_area_struct **vmas, int *locked) |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1108 | { |
Lorenzo Stoakes | 859110d | 2016-10-13 01:20:11 +0100 | [diff] [blame] | 1109 | return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, |
Al Viro | e716712 | 2017-11-19 11:32:05 -0500 | [diff] [blame] | 1110 | locked, |
Lorenzo Stoakes | 9beae1e | 2016-10-13 01:20:17 +0100 | [diff] [blame] | 1111 | gup_flags | FOLL_TOUCH | FOLL_REMOTE); |
Dave Hansen | 1e98779 | 2016-02-12 13:01:54 -0800 | [diff] [blame] | 1112 | } |
| 1113 | EXPORT_SYMBOL(get_user_pages_remote); |
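| | |
| | /* |
| | * A sketch of a typical remote-pin caller (illustrative; "tsk", "mm", |
| | * "addr" and "page" are placeholders). mmap_sem must be held on entry, |
| | * and *locked reports whether it is still held on return: |
| | * |
| | *	int locked = 1; |
| | * |
| | *	down_read(&mm->mmap_sem); |
| | *	ret = get_user_pages_remote(tsk, mm, addr, 1, FOLL_WRITE, &page, |
| | *				    NULL, &locked); |
| | *	if (locked) |
| | *		up_read(&mm->mmap_sem); |
| | */ |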
| 1114 | |
| 1115 | /* |
Dave Hansen | d4edcf0 | 2016-02-12 13:01:56 -0800 | [diff] [blame] | 1116 | * This is the same as get_user_pages_remote(), just with a |
| 1117 | * less-flexible calling convention where we assume that the task |
Lorenzo Stoakes | 5b56d49 | 2016-12-14 15:06:52 -0800 | [diff] [blame] | 1118 | * and mm being operated on are the current task's and don't allow |
| 1119 | * passing of a locked parameter. We also obviously don't pass |
| 1120 | * FOLL_REMOTE in here. |
Dave Hansen | 1e98779 | 2016-02-12 13:01:54 -0800 | [diff] [blame] | 1121 | */ |
Ingo Molnar | c12d2da | 2016-04-04 10:24:58 +0200 | [diff] [blame] | 1122 | long get_user_pages(unsigned long start, unsigned long nr_pages, |
Lorenzo Stoakes | 768ae30 | 2016-10-13 01:20:16 +0100 | [diff] [blame] | 1123 | unsigned int gup_flags, struct page **pages, |
Dave Hansen | 1e98779 | 2016-02-12 13:01:54 -0800 | [diff] [blame] | 1124 | struct vm_area_struct **vmas) |
| 1125 | { |
Dave Hansen | cde7014 | 2016-02-12 13:01:55 -0800 | [diff] [blame] | 1126 | return __get_user_pages_locked(current, current->mm, start, nr_pages, |
Al Viro | e716712 | 2017-11-19 11:32:05 -0500 | [diff] [blame] | 1127 | pages, vmas, NULL, |
Lorenzo Stoakes | 768ae30 | 2016-10-13 01:20:16 +0100 | [diff] [blame] | 1128 | gup_flags | FOLL_TOUCH); |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1129 | } |
Ingo Molnar | c12d2da | 2016-04-04 10:24:58 +0200 | [diff] [blame] | 1130 | EXPORT_SYMBOL(get_user_pages); |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1131 | |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1132 | #if defined(CONFIG_FS_DAX) || defined(CONFIG_CMA) |
| 1133 | |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1134 | #ifdef CONFIG_FS_DAX |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1135 | static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages) |
| 1136 | { |
| 1137 | long i; |
| 1138 | struct vm_area_struct *vma_prev = NULL; |
| 1139 | |
| 1140 | for (i = 0; i < nr_pages; i++) { |
| 1141 | struct vm_area_struct *vma = vmas[i]; |
| 1142 | |
| 1143 | if (vma == vma_prev) |
| 1144 | continue; |
| 1145 | |
| 1146 | vma_prev = vma; |
| 1147 | |
| 1148 | if (vma_is_fsdax(vma)) |
| 1149 | return true; |
| 1150 | } |
| 1151 | return false; |
| 1152 | } |
| 1153 | #else |
| 1154 | static inline bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages) |
| 1155 | { |
| 1156 | return false; |
| 1157 | } |
| 1158 | #endif |
| 1159 | |
| 1160 | #ifdef CONFIG_CMA |
| 1161 | static struct page *new_non_cma_page(struct page *page, unsigned long private) |
| 1162 | { |
| 1163 | /* |
| 1164 | * We want to make sure we allocate the new page from the same node |
| 1165 | * as the source page. |
| 1166 | */ |
| 1167 | int nid = page_to_nid(page); |
| 1168 | /* |
| 1169 | * We are trying to allocate a page for migration, so ignore |
| 1170 | * allocation failure warnings. We don't force __GFP_THISNODE |
| 1171 | * here because this node is the one holding the CMA reservation, |
| 1172 | * and in some cases such nodes will have very little non-movable |
| 1173 | * memory available for allocation. |
| 1174 | */ |
| 1175 | gfp_t gfp_mask = GFP_USER | __GFP_NOWARN; |
| 1176 | |
| 1177 | if (PageHighMem(page)) |
| 1178 | gfp_mask |= __GFP_HIGHMEM; |
| 1179 | |
| 1180 | #ifdef CONFIG_HUGETLB_PAGE |
| 1181 | if (PageHuge(page)) { |
| 1182 | struct hstate *h = page_hstate(page); |
| 1183 | /* |
| 1184 | * We don't want to dequeue from the pool because pool pages will |
| 1185 | * mostly be from the CMA region. |
| 1186 | */ |
| 1187 | return alloc_migrate_huge_page(h, gfp_mask, nid, NULL); |
| 1188 | } |
| 1189 | #endif |
| 1190 | if (PageTransHuge(page)) { |
| 1191 | struct page *thp; |
| 1192 | /* |
| 1193 | * ignore allocation failure warnings |
| 1194 | */ |
| 1195 | gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN; |
| 1196 | |
| 1197 | /* |
| 1198 | * Remove the movable mask so that we don't allocate from |
| 1199 | * CMA area again. |
| 1200 | */ |
| 1201 | thp_gfpmask &= ~__GFP_MOVABLE; |
| 1202 | thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER); |
| 1203 | if (!thp) |
| 1204 | return NULL; |
| 1205 | prep_transhuge_page(thp); |
| 1206 | return thp; |
| 1207 | } |
| 1208 | |
| 1209 | return __alloc_pages_node(nid, gfp_mask, 0); |
| 1210 | } |
| 1211 | |
| 1212 | static long check_and_migrate_cma_pages(unsigned long start, long nr_pages, |
| 1213 | unsigned int gup_flags, |
| 1214 | struct page **pages, |
| 1215 | struct vm_area_struct **vmas) |
| 1216 | { |
| 1217 | long i; |
| 1218 | bool drain_allow = true; |
| 1219 | bool migrate_allow = true; |
| 1220 | LIST_HEAD(cma_page_list); |
| 1221 | |
| 1222 | check_again: |
| 1223 | for (i = 0; i < nr_pages; i++) { |
| 1224 | /* |
| 1225 | * If we get a page from the CMA area, we are going to be |
| 1226 | * pinning these entries, so we might as well move them out |
| 1227 | * of the CMA area if possible. |
| 1228 | */ |
| 1229 | if (is_migrate_cma_page(pages[i])) { |
| 1230 | |
| 1231 | struct page *head = compound_head(pages[i]); |
| 1232 | |
| 1233 | if (PageHuge(head)) { |
| 1234 | isolate_huge_page(head, &cma_page_list); |
| 1235 | } else { |
| 1236 | if (!PageLRU(head) && drain_allow) { |
| 1237 | lru_add_drain_all(); |
| 1238 | drain_allow = false; |
| 1239 | } |
| 1240 | |
| 1241 | if (!isolate_lru_page(head)) { |
| 1242 | list_add_tail(&head->lru, &cma_page_list); |
| 1243 | mod_node_page_state(page_pgdat(head), |
| 1244 | NR_ISOLATED_ANON + |
| 1245 | page_is_file_cache(head), |
| 1246 | hpage_nr_pages(head)); |
| 1247 | } |
| 1248 | } |
| 1249 | } |
| 1250 | } |
| 1251 | |
| 1252 | if (!list_empty(&cma_page_list)) { |
| 1253 | /* |
| 1254 | * Drop the page references taken by get_user_pages() above. |
| 1255 | */ |
| 1256 | for (i = 0; i < nr_pages; i++) |
| 1257 | put_page(pages[i]); |
| 1258 | |
| 1259 | if (migrate_pages(&cma_page_list, new_non_cma_page, |
| 1260 | NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) { |
| 1261 | /* |
| 1262 | * Some of the pages failed migration. Do get_user_pages() |
| 1263 | * again without attempting migration. |
| 1264 | */ |
| 1265 | migrate_allow = false; |
| 1266 | |
| 1267 | if (!list_empty(&cma_page_list)) |
| 1268 | putback_movable_pages(&cma_page_list); |
| 1269 | } |
| 1270 | /* |
| 1271 | * The page references were dropped above, so take them again, |
| 1272 | * migrating any new CMA pages which we failed to isolate earlier. |
| 1273 | */ |
| 1274 | nr_pages = get_user_pages(start, nr_pages, gup_flags, pages, vmas); |
| 1275 | if ((nr_pages > 0) && migrate_allow) { |
| 1276 | drain_allow = true; |
| 1277 | goto check_again; |
| 1278 | } |
| 1279 | } |
| 1280 | |
| 1281 | return nr_pages; |
| 1282 | } |
| 1283 | #else |
| 1284 | static inline long check_and_migrate_cma_pages(unsigned long start, long nr_pages, |
| 1285 | unsigned int gup_flags, |
| 1286 | struct page **pages, |
| 1287 | struct vm_area_struct **vmas) |
| 1288 | { |
| 1289 | return nr_pages; |
| 1290 | } |
| 1291 | #endif |
| 1292 | |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1293 | /* |
| 1294 | * This is the same as get_user_pages() in that it assumes we are |
| 1295 | * operating on the current task's mm, but it goes further to validate |
| 1296 | * that the vmas associated with the address range are suitable for |
| 1297 | * longterm elevated page reference counts. For example, filesystem-dax |
| 1298 | * mappings are subject to the lifetime enforced by the filesystem and |
| 1299 | * we need guarantees that longterm users like RDMA and V4L2 only |
| 1300 | * establish mappings that have a kernel enforced revocation mechanism. |
| 1301 | * |
| 1302 | * "longterm" == userspace controlled elevated page count lifetime. |
| 1303 | * Contrast this to iov_iter_get_pages() usages which are transient. |
| 1304 | */ |
| 1305 | long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1306 | unsigned int gup_flags, struct page **pages, |
| 1307 | struct vm_area_struct **vmas_arg) |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1308 | { |
| 1309 | struct vm_area_struct **vmas = vmas_arg; |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1310 | unsigned long flags; |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1311 | long rc, i; |
| 1312 | |
| 1313 | if (!pages) |
| 1314 | return -EINVAL; |
| 1315 | |
| 1316 | if (!vmas) { |
| 1317 | vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *), |
| 1318 | GFP_KERNEL); |
| 1319 | if (!vmas) |
| 1320 | return -ENOMEM; |
| 1321 | } |
| 1322 | |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1323 | flags = memalloc_nocma_save(); |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1324 | rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas); |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1325 | memalloc_nocma_restore(flags); |
| 1326 | if (rc < 0) |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1327 | goto out; |
| 1328 | |
Aneesh Kumar K.V | 9a4e9f3 | 2019-03-05 15:47:44 -0800 | [diff] [blame] | 1329 | if (check_dax_vmas(vmas, rc)) { |
| 1330 | for (i = 0; i < rc; i++) |
| 1331 | put_page(pages[i]); |
| 1332 | rc = -EOPNOTSUPP; |
| 1333 | goto out; |
| 1334 | } |
| 1335 | |
| 1336 | rc = check_and_migrate_cma_pages(start, rc, gup_flags, pages, vmas); |
Dan Williams | 2bb6d28 | 2017-11-29 16:10:35 -0800 | [diff] [blame] | 1337 | out: |
| 1338 | if (vmas != vmas_arg) |
| 1339 | kfree(vmas); |
| 1340 | return rc; |
| 1341 | } |
| 1342 | EXPORT_SYMBOL(get_user_pages_longterm); |
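| | |
| | /* |
| | * A sketch of a longterm-pin user such as an RDMA-style driver |
| | * (illustrative; "uaddr", "npages" and "pages" are placeholders). On |
| | * filesystem-dax vmas the call fails with -EOPNOTSUPP instead of |
| | * handing out pages whose lifetime the filesystem controls: |
| | * |
| | *	rc = get_user_pages_longterm(uaddr, npages, FOLL_WRITE, pages, NULL); |
| | *	if (rc < 0) |
| | *		return rc; |
| | *	... register pages[0..rc-1] for DMA, put_page() them on teardown ... |
| | */ |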
| 1343 | #endif /* CONFIG_FS_DAX || CONFIG_CMA */ |
| 1344 | |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1345 | /** |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 1346 | * populate_vma_page_range() - populate a range of pages in the vma. |
| 1347 | * @vma: target vma |
| 1348 | * @start: start address |
| 1349 | * @end: end address |
| 1350 | * @nonblocking: if non-NULL, signals that mmap_sem may be released (see below) |
| 1351 | * |
| 1352 | * This takes care of mlocking the pages too if VM_LOCKED is set. |
| 1353 | * |
| 1354 | * return 0 on success, negative error code on error. |
| 1355 | * |
| 1356 | * vma->vm_mm->mmap_sem must be held. |
| 1357 | * |
| 1358 | * If @nonblocking is NULL, it may be held for read or write and will |
| 1359 | * be unperturbed. |
| 1360 | * |
| 1361 | * If @nonblocking is non-NULL, mmap_sem must be held for read and may be |
| 1362 | * released. If it's released, *@nonblocking will be set to 0. |
| 1363 | */ |
| 1364 | long populate_vma_page_range(struct vm_area_struct *vma, |
| 1365 | unsigned long start, unsigned long end, int *nonblocking) |
| 1366 | { |
| 1367 | struct mm_struct *mm = vma->vm_mm; |
| 1368 | unsigned long nr_pages = (end - start) / PAGE_SIZE; |
| 1369 | int gup_flags; |
| 1370 | |
| 1371 | VM_BUG_ON(start & ~PAGE_MASK); |
| 1372 | VM_BUG_ON(end & ~PAGE_MASK); |
| 1373 | VM_BUG_ON_VMA(start < vma->vm_start, vma); |
| 1374 | VM_BUG_ON_VMA(end > vma->vm_end, vma); |
| 1375 | VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); |
| 1376 | |
Eric B Munson | de60f5f | 2015-11-05 18:51:36 -0800 | [diff] [blame] | 1377 | gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK; |
| 1378 | if (vma->vm_flags & VM_LOCKONFAULT) |
| 1379 | gup_flags &= ~FOLL_POPULATE; |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 1380 | /* |
| 1381 | * We want to touch writable mappings with a write fault in order |
| 1382 | * to break COW, except for shared mappings because these don't COW |
| 1383 | * and we would not want to dirty them for nothing. |
| 1384 | */ |
| 1385 | if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) |
| 1386 | gup_flags |= FOLL_WRITE; |
| 1387 | |
| 1388 | /* |
| 1389 | * We want mlock to succeed for regions that have any permissions |
| 1390 | * other than PROT_NONE. |
| 1391 | */ |
| 1392 | if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) |
| 1393 | gup_flags |= FOLL_FORCE; |
| 1394 | |
| 1395 | /* |
| 1396 | * We made sure addr is within a VMA, so the following will |
| 1397 | * not result in a stack expansion that recurses back here. |
| 1398 | */ |
| 1399 | return __get_user_pages(current, mm, start, nr_pages, gup_flags, |
| 1400 | NULL, NULL, nonblocking); |
| 1401 | } |
| 1402 | |
| 1403 | /* |
| 1404 | * __mm_populate - populate and/or mlock pages within a range of address space. |
| 1405 | * |
| 1406 | * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap |
| 1407 | * flags. VMAs must be already marked with the desired vm_flags, and |
| 1408 | * mmap_sem must not be held. |
| 1409 | */ |
| 1410 | int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) |
| 1411 | { |
| 1412 | struct mm_struct *mm = current->mm; |
| 1413 | unsigned long end, nstart, nend; |
| 1414 | struct vm_area_struct *vma = NULL; |
| 1415 | int locked = 0; |
| 1416 | long ret = 0; |
| 1417 | |
Kirill A. Shutemov | acc3c8d | 2015-04-14 15:44:45 -0700 | [diff] [blame] | 1418 | end = start + len; |
| 1419 | |
| 1420 | for (nstart = start; nstart < end; nstart = nend) { |
| 1421 | /* |
| 1422 | * We want to fault in pages for [nstart; end) address range. |
| 1423 | * Find first corresponding VMA. |
| 1424 | */ |
| 1425 | if (!locked) { |
| 1426 | locked = 1; |
| 1427 | down_read(&mm->mmap_sem); |
| 1428 | vma = find_vma(mm, nstart); |
| 1429 | } else if (nstart >= vma->vm_end) |
| 1430 | vma = vma->vm_next; |
| 1431 | if (!vma || vma->vm_start >= end) |
| 1432 | break; |
| 1433 | /* |
| 1434 | * Set [nstart; nend) to intersection of desired address |
| 1435 | * range with the first VMA. Also, skip undesirable VMA types. |
| 1436 | */ |
| 1437 | nend = min(end, vma->vm_end); |
| 1438 | if (vma->vm_flags & (VM_IO | VM_PFNMAP)) |
| 1439 | continue; |
| 1440 | if (nstart < vma->vm_start) |
| 1441 | nstart = vma->vm_start; |
| 1442 | /* |
| 1443 | * Now fault in a range of pages. populate_vma_page_range() |
| 1444 | * double checks the vma flags, so that it won't mlock pages |
| 1445 | * if the vma was already munlocked. |
| 1446 | */ |
| 1447 | ret = populate_vma_page_range(vma, nstart, nend, &locked); |
| 1448 | if (ret < 0) { |
| 1449 | if (ignore_errors) { |
| 1450 | ret = 0; |
| 1451 | continue; /* continue at next VMA */ |
| 1452 | } |
| 1453 | break; |
| 1454 | } |
| 1455 | nend = nstart + ret * PAGE_SIZE; |
| 1456 | ret = 0; |
| 1457 | } |
| 1458 | if (locked) |
| 1459 | up_read(&mm->mmap_sem); |
| 1460 | return ret; /* 0 or negative error code */ |
| 1461 | } |
| 1462 | |
| 1463 | /** |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1464 | * get_dump_page() - pin user page in memory while writing it to core dump |
| 1465 | * @addr: user address |
| 1466 | * |
| 1467 | * Returns struct page pointer of user page pinned for dump, |
Kirill A. Shutemov | ea1754a | 2016-04-01 15:29:48 +0300 | [diff] [blame] | 1468 | * to be freed afterwards by put_page(). |
Kirill A. Shutemov | 4bbd4c7 | 2014-06-04 16:08:10 -0700 | [diff] [blame] | 1469 | * |
| 1470 | * Returns NULL on any kind of failure - a hole must then be inserted into |
| 1471 | * the corefile, to preserve alignment with its headers; and also returns |
| 1472 | * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - |
| 1473 | * allowing a hole to be left in the corefile to save diskspace. |
| 1474 | * |
| 1475 | * Called without mmap_sem, but after all other threads have been killed. |
| 1476 | */ |
| 1477 | #ifdef CONFIG_ELF_CORE |
| 1478 | struct page *get_dump_page(unsigned long addr) |
| 1479 | { |
| 1480 | struct vm_area_struct *vma; |
| 1481 | struct page *page; |
| 1482 | |
| 1483 | if (__get_user_pages(current, current->mm, addr, 1, |
| 1484 | FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, |
| 1485 | NULL) < 1) |
| 1486 | return NULL; |
| 1487 | flush_cache_page(vma, addr, page_to_pfn(page)); |
| 1488 | return page; |
| 1489 | } |
| 1490 | #endif /* CONFIG_ELF_CORE */ |
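| | |
| | /* |
| | * A sketch of how a coredump writer might consume get_dump_page() |
| | * (illustrative; dump_emit_page() and dump_skip() stand in for whatever |
| | * the binfmt handler uses to write data or punch a zero-filled hole): |
| | * |
| | *	page = get_dump_page(addr); |
| | *	if (page) { |
| | *		dump_emit_page(cprm, page); |
| | *		put_page(page); |
| | *	} else { |
| | *		dump_skip(cprm, PAGE_SIZE); |
| | *	} |
| | */ |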
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1491 | |
| 1492 | /* |
Kirill A. Shutemov | e585513 | 2017-06-06 14:31:20 +0300 | [diff] [blame] | 1493 | * Generic Fast GUP |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1494 | * |
| 1495 | * get_user_pages_fast attempts to pin user pages by walking the page |
| 1496 | * tables directly and avoids taking locks. Thus the walker needs to be |
| 1497 | * protected from page table pages being freed from under it, and should |
| 1498 | * block any THP splits. |
| 1499 | * |
| 1500 | * One way to achieve this is to have the walker disable interrupts, and |
| 1501 | * rely on IPIs from the TLB flushing code blocking before the page table |
| 1502 | * pages are freed. This is unsuitable for architectures that do not need |
| 1503 | * to broadcast an IPI when invalidating TLBs. |
| 1504 | * |
| 1505 | * Another way to achieve this is to batch up the pages containing page tables |
| 1506 | * belonging to more than one mm_user, then schedule an rcu_sched callback to |
| 1507 | * free those pages. Disabling interrupts will allow the fast_gup walker to both block |
| 1508 | * the rcu_sched callback, and an IPI that we broadcast for splitting THPs |
| 1509 | * (which is a relatively rare event). The code below adopts this strategy. |
| 1510 | * |
| 1511 | * Before activating this code, please be aware that the following assumptions |
| 1512 | * are currently made: |
| 1513 | * |
Kirill A. Shutemov | e585513 | 2017-06-06 14:31:20 +0300 | [diff] [blame] | 1514 | * *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to |
| 1515 | * free pages containing page tables or TLB flushing requires IPI broadcast. |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1516 | * |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1517 | * *) ptes can be read atomically by the architecture. |
| 1518 | * |
| 1519 | * *) access_ok is sufficient to validate userspace address ranges. |
| 1520 | * |
| 1521 | * The last two assumptions can be relaxed by the addition of helper functions. |
| 1522 | * |
| 1523 | * This code is based heavily on the PowerPC implementation by Nick Piggin. |
| 1524 | */ |
Kirill A. Shutemov | e585513 | 2017-06-06 14:31:20 +0300 | [diff] [blame] | 1525 | #ifdef CONFIG_HAVE_GENERIC_GUP |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1526 | |
Kirill A. Shutemov | 0005d20 | 2017-03-16 18:26:51 +0300 | [diff] [blame] | 1527 | #ifndef gup_get_pte |
| 1528 | /* |
| 1529 | * We assume that the PTE can be read atomically. If this is not the case for |
| 1530 | * your architecture, please provide the helper. |
| 1531 | */ |
| 1532 | static inline pte_t gup_get_pte(pte_t *ptep) |
| 1533 | { |
| 1534 | return READ_ONCE(*ptep); |
| 1535 | } |
| 1536 | #endif |
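| | |
| | /* |
| | * For an architecture where a pte is wider than the machine word (e.g. a |
| | * 32-bit kernel with 64-bit ptes), the plain READ_ONCE() above is not |
| | * atomic. A hedged sketch of such an override, assuming a pte_low/pte_high |
| | * layout (the field names are illustrative): |
| | * |
| | *	static inline pte_t gup_get_pte(pte_t *ptep) |
| | *	{ |
| | *		pte_t pte; |
| | * |
| | *		do { |
| | *			pte.pte_low = ptep->pte_low; |
| | *			smp_rmb(); |
| | *			pte.pte_high = ptep->pte_high; |
| | *			smp_rmb(); |
| | *		} while (unlikely(pte.pte_low != ptep->pte_low)); |
| | * |
| | *		return pte; |
| | *	} |
| | */ |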
| 1537 | |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1538 | static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) |
| 1539 | { |
| 1540 | while ((*nr) - nr_start) { |
| 1541 | struct page *page = pages[--(*nr)]; |
| 1542 | |
| 1543 | ClearPageReferenced(page); |
| 1544 | put_page(page); |
| 1545 | } |
| 1546 | } |
| 1547 | |
Laurent Dufour | 3010a5e | 2018-06-07 17:06:08 -0700 | [diff] [blame] | 1548 | #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1549 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, |
| 1550 | int write, struct page **pages, int *nr) |
| 1551 | { |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1552 | struct dev_pagemap *pgmap = NULL; |
| 1553 | int nr_start = *nr, ret = 0; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1554 | pte_t *ptep, *ptem; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1555 | |
| 1556 | ptem = ptep = pte_offset_map(&pmd, addr); |
| 1557 | do { |
Kirill A. Shutemov | 0005d20 | 2017-03-16 18:26:51 +0300 | [diff] [blame] | 1558 | pte_t pte = gup_get_pte(ptep); |
Kirill A. Shutemov | 7aef417 | 2016-01-15 16:52:32 -0800 | [diff] [blame] | 1559 | struct page *head, *page; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1560 | |
| 1561 | /* |
| 1562 | * Similar to the PMD case below, NUMA hinting must take slow |
Mel Gorman | 8a0516e | 2015-02-12 14:58:22 -0800 | [diff] [blame] | 1563 | * path using the pte_protnone check. |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1564 | */ |
Kirill A. Shutemov | e7884f8 | 2017-03-16 18:26:50 +0300 | [diff] [blame] | 1565 | if (pte_protnone(pte)) |
| 1566 | goto pte_unmap; |
| 1567 | |
| 1568 | if (!pte_access_permitted(pte, write)) |
| 1569 | goto pte_unmap; |
| 1570 | |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1571 | if (pte_devmap(pte)) { |
| 1572 | pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); |
| 1573 | if (unlikely(!pgmap)) { |
| 1574 | undo_dev_pagemap(nr, nr_start, pages); |
| 1575 | goto pte_unmap; |
| 1576 | } |
| 1577 | } else if (pte_special(pte)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1578 | goto pte_unmap; |
| 1579 | |
| 1580 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
| 1581 | page = pte_page(pte); |
Kirill A. Shutemov | 7aef417 | 2016-01-15 16:52:32 -0800 | [diff] [blame] | 1582 | head = compound_head(page); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1583 | |
Kirill A. Shutemov | 7aef417 | 2016-01-15 16:52:32 -0800 | [diff] [blame] | 1584 | if (!page_cache_get_speculative(head)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1585 | goto pte_unmap; |
| 1586 | |
| 1587 | if (unlikely(pte_val(pte) != pte_val(*ptep))) { |
Kirill A. Shutemov | 7aef417 | 2016-01-15 16:52:32 -0800 | [diff] [blame] | 1588 | put_page(head); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1589 | goto pte_unmap; |
| 1590 | } |
| 1591 | |
Kirill A. Shutemov | 7aef417 | 2016-01-15 16:52:32 -0800 | [diff] [blame] | 1592 | VM_BUG_ON_PAGE(compound_head(page) != head, page); |
Kirill A. Shutemov | e934805 | 2017-03-16 18:26:52 +0300 | [diff] [blame] | 1593 | |
| 1594 | SetPageReferenced(page); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1595 | pages[*nr] = page; |
| 1596 | (*nr)++; |
| 1597 | |
| 1598 | } while (ptep++, addr += PAGE_SIZE, addr != end); |
| 1599 | |
| 1600 | ret = 1; |
| 1601 | |
| 1602 | pte_unmap: |
Christoph Hellwig | 832d7aa | 2017-12-29 08:54:01 +0100 | [diff] [blame] | 1603 | if (pgmap) |
| 1604 | put_dev_pagemap(pgmap); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1605 | pte_unmap(ptem); |
| 1606 | return ret; |
| 1607 | } |
| 1608 | #else |
| 1609 | |
| 1610 | /* |
| 1611 | * If we can't determine whether or not a pte is special, then fail immediately |
| 1612 | * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not |
| 1613 | * to be special. |
| 1614 | * |
| 1615 | * For a futex to be placed on a THP tail page, get_futex_key requires a |
| 1616 | * __get_user_pages_fast implementation that can pin pages. Thus it's still |
| 1617 | * useful to have gup_huge_pmd even if we can't operate on ptes. |
| 1618 | */ |
| 1619 | static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, |
| 1620 | int write, struct page **pages, int *nr) |
| 1621 | { |
| 1622 | return 0; |
| 1623 | } |
Laurent Dufour | 3010a5e | 2018-06-07 17:06:08 -0700 | [diff] [blame] | 1624 | #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1625 | |
Oliver O'Halloran | 09180ca | 2017-09-06 16:20:58 -0700 | [diff] [blame] | 1626 | #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1627 | static int __gup_device_huge(unsigned long pfn, unsigned long addr, |
| 1628 | unsigned long end, struct page **pages, int *nr) |
| 1629 | { |
| 1630 | int nr_start = *nr; |
| 1631 | struct dev_pagemap *pgmap = NULL; |
| 1632 | |
| 1633 | do { |
| 1634 | struct page *page = pfn_to_page(pfn); |
| 1635 | |
| 1636 | pgmap = get_dev_pagemap(pfn, pgmap); |
| 1637 | if (unlikely(!pgmap)) { |
| 1638 | undo_dev_pagemap(nr, nr_start, pages); |
| 1639 | return 0; |
| 1640 | } |
| 1641 | SetPageReferenced(page); |
| 1642 | pages[*nr] = page; |
| 1643 | get_page(page); |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1644 | (*nr)++; |
| 1645 | pfn++; |
| 1646 | } while (addr += PAGE_SIZE, addr != end); |
Christoph Hellwig | 832d7aa | 2017-12-29 08:54:01 +0100 | [diff] [blame] | 1647 | |
| 1648 | if (pgmap) |
| 1649 | put_dev_pagemap(pgmap); |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1650 | return 1; |
| 1651 | } |
| 1652 | |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 1653 | static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1654 | unsigned long end, struct page **pages, int *nr) |
| 1655 | { |
| 1656 | unsigned long fault_pfn; |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 1657 | int nr_start = *nr; |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1658 | |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 1659 | fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); |
| 1660 | if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) |
| 1661 | return 0; |
| 1662 | |
| 1663 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { |
| 1664 | undo_dev_pagemap(nr, nr_start, pages); |
| 1665 | return 0; |
| 1666 | } |
| 1667 | return 1; |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1668 | } |
| 1669 | |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 1670 | static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1671 | unsigned long end, struct page **pages, int *nr) |
| 1672 | { |
| 1673 | unsigned long fault_pfn; |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 1674 | int nr_start = *nr; |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1675 | |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 1676 | fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); |
| 1677 | if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) |
| 1678 | return 0; |
| 1679 | |
| 1680 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { |
| 1681 | undo_dev_pagemap(nr, nr_start, pages); |
| 1682 | return 0; |
| 1683 | } |
| 1684 | return 1; |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1685 | } |
| 1686 | #else |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 1687 | static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1688 | unsigned long end, struct page **pages, int *nr) |
| 1689 | { |
| 1690 | BUILD_BUG(); |
| 1691 | return 0; |
| 1692 | } |
| 1693 | |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 1694 | static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1695 | unsigned long end, struct page **pages, int *nr) |
| 1696 | { |
| 1697 | BUILD_BUG(); |
| 1698 | return 0; |
| 1699 | } |
| 1700 | #endif |
| 1701 | |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1702 | static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, |
| 1703 | unsigned long end, int write, struct page **pages, int *nr) |
| 1704 | { |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1705 | struct page *head, *page; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1706 | int refs; |
| 1707 | |
Kirill A. Shutemov | e7884f8 | 2017-03-16 18:26:50 +0300 | [diff] [blame] | 1708 | if (!pmd_access_permitted(orig, write)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1709 | return 0; |
| 1710 | |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1711 | if (pmd_devmap(orig)) |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 1712 | return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1713 | |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1714 | refs = 0; |
Punit Agrawal | d63206e | 2017-07-06 15:39:39 -0700 | [diff] [blame] | 1715 | page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1716 | do { |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1717 | pages[*nr] = page; |
| 1718 | (*nr)++; |
| 1719 | page++; |
| 1720 | refs++; |
| 1721 | } while (addr += PAGE_SIZE, addr != end); |
| 1722 | |
Punit Agrawal | d63206e | 2017-07-06 15:39:39 -0700 | [diff] [blame] | 1723 | head = compound_head(pmd_page(orig)); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1724 | if (!page_cache_add_speculative(head, refs)) { |
| 1725 | *nr -= refs; |
| 1726 | return 0; |
| 1727 | } |
| 1728 | |
| 1729 | if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { |
| 1730 | *nr -= refs; |
| 1731 | while (refs--) |
| 1732 | put_page(head); |
| 1733 | return 0; |
| 1734 | } |
| 1735 | |
Kirill A. Shutemov | e934805 | 2017-03-16 18:26:52 +0300 | [diff] [blame] | 1736 | SetPageReferenced(head); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1737 | return 1; |
| 1738 | } |
| 1739 | |
| 1740 | static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, |
| 1741 | unsigned long end, int write, struct page **pages, int *nr) |
| 1742 | { |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1743 | struct page *head, *page; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1744 | int refs; |
| 1745 | |
Kirill A. Shutemov | e7884f8 | 2017-03-16 18:26:50 +0300 | [diff] [blame] | 1746 | if (!pud_access_permitted(orig, write)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1747 | return 0; |
| 1748 | |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1749 | if (pud_devmap(orig)) |
Dan Williams | a9b6de7 | 2018-04-19 21:32:19 -0700 | [diff] [blame] | 1750 | return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1751 | |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1752 | refs = 0; |
Punit Agrawal | d63206e | 2017-07-06 15:39:39 -0700 | [diff] [blame] | 1753 | page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1754 | do { |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1755 | pages[*nr] = page; |
| 1756 | (*nr)++; |
| 1757 | page++; |
| 1758 | refs++; |
| 1759 | } while (addr += PAGE_SIZE, addr != end); |
| 1760 | |
Punit Agrawal | d63206e | 2017-07-06 15:39:39 -0700 | [diff] [blame] | 1761 | head = compound_head(pud_page(orig)); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1762 | if (!page_cache_add_speculative(head, refs)) { |
| 1763 | *nr -= refs; |
| 1764 | return 0; |
| 1765 | } |
| 1766 | |
| 1767 | if (unlikely(pud_val(orig) != pud_val(*pudp))) { |
| 1768 | *nr -= refs; |
| 1769 | while (refs--) |
| 1770 | put_page(head); |
| 1771 | return 0; |
| 1772 | } |
| 1773 | |
Kirill A. Shutemov | e934805 | 2017-03-16 18:26:52 +0300 | [diff] [blame] | 1774 | SetPageReferenced(head); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1775 | return 1; |
| 1776 | } |
| 1777 | |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1778 | static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, |
| 1779 | unsigned long end, int write, |
| 1780 | struct page **pages, int *nr) |
| 1781 | { |
| 1782 | int refs; |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1783 | struct page *head, *page; |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1784 | |
Kirill A. Shutemov | e7884f8 | 2017-03-16 18:26:50 +0300 | [diff] [blame] | 1785 | if (!pgd_access_permitted(orig, write)) |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1786 | return 0; |
| 1787 | |
Kirill A. Shutemov | b59f65f | 2017-03-16 18:26:53 +0300 | [diff] [blame] | 1788 | BUILD_BUG_ON(pgd_devmap(orig)); |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1789 | refs = 0; |
Punit Agrawal | d63206e | 2017-07-06 15:39:39 -0700 | [diff] [blame] | 1790 | page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT); |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1791 | do { |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1792 | pages[*nr] = page; |
| 1793 | (*nr)++; |
| 1794 | page++; |
| 1795 | refs++; |
| 1796 | } while (addr += PAGE_SIZE, addr != end); |
| 1797 | |
Punit Agrawal | d63206e | 2017-07-06 15:39:39 -0700 | [diff] [blame] | 1798 | head = compound_head(pgd_page(orig)); |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1799 | if (!page_cache_add_speculative(head, refs)) { |
| 1800 | *nr -= refs; |
| 1801 | return 0; |
| 1802 | } |
| 1803 | |
| 1804 | if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { |
| 1805 | *nr -= refs; |
| 1806 | while (refs--) |
| 1807 | put_page(head); |
| 1808 | return 0; |
| 1809 | } |
| 1810 | |
Kirill A. Shutemov | e934805 | 2017-03-16 18:26:52 +0300 | [diff] [blame] | 1811 | SetPageReferenced(head); |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1812 | return 1; |
| 1813 | } |
| 1814 | |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1815 | static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, |
| 1816 | int write, struct page **pages, int *nr) |
| 1817 | { |
| 1818 | unsigned long next; |
| 1819 | pmd_t *pmdp; |
| 1820 | |
| 1821 | pmdp = pmd_offset(&pud, addr); |
| 1822 | do { |
Christian Borntraeger | 38c5ce9 | 2015-01-06 22:54:46 +0100 | [diff] [blame] | 1823 | pmd_t pmd = READ_ONCE(*pmdp); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1824 | |
| 1825 | next = pmd_addr_end(addr, end); |
Zi Yan | 84c3fc4 | 2017-09-08 16:11:01 -0700 | [diff] [blame] | 1826 | if (!pmd_present(pmd)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1827 | return 0; |
| 1828 | |
Yu Zhao | 414fd08 | 2019-02-12 15:35:58 -0800 | [diff] [blame] | 1829 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || |
| 1830 | pmd_devmap(pmd))) { |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1831 | /* |
| 1832 | * NUMA hinting faults need to be handled in the GUP |
| 1833 | * slowpath for accounting purposes and so that they |
| 1834 | * can be serialised against THP migration. |
| 1835 | */ |
Mel Gorman | 8a0516e | 2015-02-12 14:58:22 -0800 | [diff] [blame] | 1836 | if (pmd_protnone(pmd)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1837 | return 0; |
| 1838 | |
| 1839 | if (!gup_huge_pmd(pmd, pmdp, addr, next, write, |
| 1840 | pages, nr)) |
| 1841 | return 0; |
| 1842 | |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1843 | } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { |
| 1844 | /* |
| 1845 | * Architectures can have a different format for the |
| 1846 | * hugetlbfs pmd than for the THP pmd. |
| 1847 | */ |
| 1848 | if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, |
| 1849 | PMD_SHIFT, next, write, pages, nr)) |
| 1850 | return 0; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1851 | } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) |
Mario Leinweber | 2923117 | 2018-04-05 16:24:18 -0700 | [diff] [blame] | 1852 | return 0; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1853 | } while (pmdp++, addr = next, addr != end); |
| 1854 | |
| 1855 | return 1; |
| 1856 | } |
| 1857 | |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 1858 | static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1859 | int write, struct page **pages, int *nr) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1860 | { |
| 1861 | unsigned long next; |
| 1862 | pud_t *pudp; |
| 1863 | |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 1864 | pudp = pud_offset(&p4d, addr); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1865 | do { |
Christian Borntraeger | e37c698 | 2014-12-07 21:41:33 +0100 | [diff] [blame] | 1866 | pud_t pud = READ_ONCE(*pudp); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1867 | |
| 1868 | next = pud_addr_end(addr, end); |
| 1869 | if (pud_none(pud)) |
| 1870 | return 0; |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1871 | if (unlikely(pud_huge(pud))) { |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1872 | if (!gup_huge_pud(pud, pudp, addr, next, write, |
Aneesh Kumar K.V | f30c59e | 2014-11-05 21:57:40 +0530 | [diff] [blame] | 1873 | pages, nr)) |
| 1874 | return 0; |
| 1875 | } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { |
| 1876 | if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, |
| 1877 | PUD_SHIFT, next, write, pages, nr)) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1878 | return 0; |
| 1879 | } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) |
| 1880 | return 0; |
| 1881 | } while (pudp++, addr = next, addr != end); |
| 1882 | |
| 1883 | return 1; |
| 1884 | } |
| 1885 | |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 1886 | static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, |
| 1887 | int write, struct page **pages, int *nr) |
| 1888 | { |
| 1889 | unsigned long next; |
| 1890 | p4d_t *p4dp; |
| 1891 | |
| 1892 | p4dp = p4d_offset(&pgd, addr); |
| 1893 | do { |
| 1894 | p4d_t p4d = READ_ONCE(*p4dp); |
| 1895 | |
| 1896 | next = p4d_addr_end(addr, end); |
| 1897 | if (p4d_none(p4d)) |
| 1898 | return 0; |
| 1899 | BUILD_BUG_ON(p4d_huge(p4d)); |
| 1900 | if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { |
| 1901 | if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, |
| 1902 | P4D_SHIFT, next, write, pages, nr)) |
| 1903 | return 0; |
Kirill A. Shutemov | ce70df0 | 2017-03-13 08:22:13 +0300 | [diff] [blame] | 1904 | } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) |
Kirill A. Shutemov | c2febaf | 2017-03-09 17:24:07 +0300 | [diff] [blame] | 1905 | return 0; |
| 1906 | } while (p4dp++, addr = next, addr != end); |
| 1907 | |
| 1908 | return 1; |
| 1909 | } |
| 1910 | |
Kirill A. Shutemov | 5b65c467 | 2017-09-09 00:56:03 +0300 | [diff] [blame] | 1911 | static void gup_pgd_range(unsigned long addr, unsigned long end, |
| 1912 | int write, struct page **pages, int *nr) |
| 1913 | { |
| 1914 | unsigned long next; |
| 1915 | pgd_t *pgdp; |
| 1916 | |
| 1917 | pgdp = pgd_offset(current->mm, addr); |
| 1918 | do { |
| 1919 | pgd_t pgd = READ_ONCE(*pgdp); |
| 1920 | |
| 1921 | next = pgd_addr_end(addr, end); |
| 1922 | if (pgd_none(pgd)) |
| 1923 | return; |
| 1924 | if (unlikely(pgd_huge(pgd))) { |
| 1925 | if (!gup_huge_pgd(pgd, pgdp, addr, next, write, |
| 1926 | pages, nr)) |
| 1927 | return; |
| 1928 | } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { |
| 1929 | if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, |
| 1930 | PGDIR_SHIFT, next, write, pages, nr)) |
| 1931 | return; |
| 1932 | } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) |
| 1933 | return; |
| 1934 | } while (pgdp++, addr = next, addr != end); |
| 1935 | } |
| 1936 | |
| 1937 | #ifndef gup_fast_permitted |
| 1938 | /* |
| 1939 | * Check whether it's allowed to use __get_user_pages_fast() for the range, |
| 1940 | * or whether we need to fall back to the slow version: |
| 1941 | */ |
Ira Weiny | ad8cfb9 | 2019-02-10 14:34:24 -0800 | [diff] [blame] | 1942 | bool gup_fast_permitted(unsigned long start, int nr_pages) |
Kirill A. Shutemov | 5b65c467 | 2017-09-09 00:56:03 +0300 | [diff] [blame] | 1943 | { |
| 1944 | unsigned long len, end; |
| 1945 | |
| 1946 | len = (unsigned long) nr_pages << PAGE_SHIFT; |
| 1947 | end = start + len; |
| 1948 | return end >= start; |
| 1949 | } |
| 1950 | #endif |
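| | |
| | /* |
| | * An architecture override would typically tighten the check above; a |
| | * sketch assuming the arch exposes a TASK_SIZE_MAX style user-space |
| | * limit (the details are illustrative): |
| | * |
| | *	bool gup_fast_permitted(unsigned long start, int nr_pages) |
| | *	{ |
| | *		unsigned long len = (unsigned long)nr_pages << PAGE_SHIFT; |
| | *		unsigned long end = start + len; |
| | * |
| | *		return end >= start && end <= TASK_SIZE_MAX; |
| | *	} |
| | */ |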
| 1951 | |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1952 | /* |
| 1953 | * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to |
Michael S. Tsirkin | d081107 | 2018-04-13 15:35:23 -0700 | [diff] [blame] | 1954 | * the regular GUP. |
| 1955 | * Note a difference from get_user_pages_fast(): this always returns the |
| 1956 | * number of pages pinned, and 0 if no pages were pinned. |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1957 | */ |
| 1958 | int __get_user_pages_fast(unsigned long start, int nr_pages, int write, |
| 1959 | struct page **pages) |
| 1960 | { |
Wei Yang | d4faa40 | 2018-10-26 15:07:55 -0700 | [diff] [blame] | 1961 | unsigned long len, end; |
Kirill A. Shutemov | 5b65c467 | 2017-09-09 00:56:03 +0300 | [diff] [blame] | 1962 | unsigned long flags; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1963 | int nr = 0; |
| 1964 | |
| 1965 | start &= PAGE_MASK; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1966 | len = (unsigned long) nr_pages << PAGE_SHIFT; |
| 1967 | end = start + len; |
| 1968 | |
Linus Torvalds | 96d4f26 | 2019-01-03 18:57:57 -0800 | [diff] [blame] | 1969 | if (unlikely(!access_ok((void __user *)start, len))) |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1970 | return 0; |
| 1971 | |
| 1972 | /* |
| 1973 | * Disable interrupts. We use the nested form as we can already have |
| 1974 | * interrupts disabled by get_futex_key. |
| 1975 | * |
| 1976 | * With interrupts disabled, we block page table pages from being |
Fengguang Wu | 2ebe822 | 2018-10-30 15:10:51 -0700 | [diff] [blame] | 1977 | * freed from under us. See struct mmu_table_batch comments in |
| 1978 | * include/asm-generic/tlb.h for more details. |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1979 | * |
| 1980 | * We do not adopt an rcu_read_lock(.) here as we also want to |
| 1981 | * block IPIs that come from THPs splitting. |
| 1982 | */ |
| 1983 | |
Ira Weiny | ad8cfb9 | 2019-02-10 14:34:24 -0800 | [diff] [blame] | 1984 | if (gup_fast_permitted(start, nr_pages)) { |
Kirill A. Shutemov | 5b65c467 | 2017-09-09 00:56:03 +0300 | [diff] [blame] | 1985 | local_irq_save(flags); |
Wei Yang | d4faa40 | 2018-10-26 15:07:55 -0700 | [diff] [blame] | 1986 | gup_pgd_range(start, end, write, pages, &nr); |
Kirill A. Shutemov | 5b65c467 | 2017-09-09 00:56:03 +0300 | [diff] [blame] | 1987 | local_irq_restore(flags); |
| 1988 | } |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 1989 | |
| 1990 | return nr; |
| 1991 | } |
| 1992 | |
| 1993 | /** |
| 1994 | * get_user_pages_fast() - pin user pages in memory |
| 1995 | * @start: starting user address |
| 1996 | * @nr_pages: number of pages from start to pin |
| 1997 | * @write: whether pages will be written to |
| 1998 | * @pages: array that receives pointers to the pages pinned. |
| 1999 | * Should be at least nr_pages long. |
| 2000 | * |
| 2001 | * Attempt to pin user pages in memory without taking mm->mmap_sem. |
| 2002 | * If not successful, it will fall back to taking the lock and |
| 2003 | * calling get_user_pages(). |
| 2004 | * |
| 2005 | * Returns number of pages pinned. This may be fewer than the number |
| 2006 | * requested. If nr_pages is 0 or negative, returns 0. If no pages |
| 2007 | * were pinned, returns -errno. |
| 2008 | */ |
| 2009 | int get_user_pages_fast(unsigned long start, int nr_pages, int write, |
| 2010 | struct page **pages) |
| 2011 | { |
Kirill A. Shutemov | 5b65c467 | 2017-09-09 00:56:03 +0300 | [diff] [blame] | 2012 | unsigned long addr, len, end; |
Kirill A. Shutemov | 73e10a6 | 2017-03-16 18:26:54 +0300 | [diff] [blame] | 2013 | int nr = 0, ret = 0; |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2014 | |
| 2015 | start &= PAGE_MASK; |
Kirill A. Shutemov | 5b65c467 | 2017-09-09 00:56:03 +0300 | [diff] [blame] | 2016 | addr = start; |
| 2017 | len = (unsigned long) nr_pages << PAGE_SHIFT; |
| 2018 | end = start + len; |
| 2019 | |
Michael S. Tsirkin | c61611f | 2018-04-13 15:35:20 -0700 | [diff] [blame] | 2020 | if (nr_pages <= 0) |
| 2021 | return 0; |
| 2022 | |
Linus Torvalds | 96d4f26 | 2019-01-03 18:57:57 -0800 | [diff] [blame] | 2023 | if (unlikely(!access_ok((void __user *)start, len))) |
Michael S. Tsirkin | c61611f | 2018-04-13 15:35:20 -0700 | [diff] [blame] | 2024 | return -EFAULT; |
Kirill A. Shutemov | 73e10a6 | 2017-03-16 18:26:54 +0300 | [diff] [blame] | 2025 | |
Ira Weiny | ad8cfb9 | 2019-02-10 14:34:24 -0800 | [diff] [blame] | 2026 | if (gup_fast_permitted(start, nr_pages)) { |
Kirill A. Shutemov | 5b65c467 | 2017-09-09 00:56:03 +0300 | [diff] [blame] | 2027 | local_irq_disable(); |
| 2028 | gup_pgd_range(addr, end, write, pages, &nr); |
| 2029 | local_irq_enable(); |
Kirill A. Shutemov | 73e10a6 | 2017-03-16 18:26:54 +0300 | [diff] [blame] | 2030 | ret = nr; |
| 2031 | } |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2032 | |
| 2033 | if (nr < nr_pages) { |
| 2034 | /* Try to get the remaining pages with get_user_pages */ |
| 2035 | start += nr << PAGE_SHIFT; |
| 2036 | pages += nr; |
| 2037 | |
Lorenzo Stoakes | c164154 | 2016-10-13 01:20:13 +0100 | [diff] [blame] | 2038 | ret = get_user_pages_unlocked(start, nr_pages - nr, pages, |
| 2039 | write ? FOLL_WRITE : 0); |
Steve Capper | 2667f50 | 2014-10-09 15:29:14 -0700 | [diff] [blame] | 2040 | |
| 2041 | /* Have to be a bit careful with return values */ |
| 2042 | if (nr > 0) { |
| 2043 | if (ret < 0) |
| 2044 | ret = nr; |
| 2045 | else |
| 2046 | ret += nr; |
| 2047 | } |
| 2048 | } |
| 2049 | |
| 2050 | return ret; |
| 2051 | } |
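| | |
| | /* |
| | * A sketch of a typical caller (illustrative; "uaddr", "npages" and |
| | * "pages" are placeholders). The return value must be checked for a |
| | * partial pin, and every pinned page released with put_page(); |
| | * set_page_dirty_lock() applies here because write is set: |
| | * |
| | *	nr = get_user_pages_fast(uaddr, npages, 1, pages); |
| | *	if (nr < 0) |
| | *		return nr; |
| | *	... use pages[0..nr-1]; nr may be smaller than npages ... |
| | *	for (i = 0; i < nr; i++) { |
| | *		set_page_dirty_lock(pages[i]); |
| | *		put_page(pages[i]); |
| | *	} |
| | */ |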
| 2052 | |
Kirill A. Shutemov | e585513 | 2017-06-06 14:31:20 +0300 | [diff] [blame] | 2053 | #endif /* CONFIG_HAVE_GENERIC_GUP */ |