// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/userfaultfd.c
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Some part derived from fs/eventfd.c (anon inode setup) and
 * mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>

int sysctl_unprivileged_userfaultfd __read_mostly;

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

enum userfaultfd_state {
	UFFD_STATE_WAIT_API,
	UFFD_STATE_RUNNING,
};

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* state machine */
	enum userfaultfd_state state;
	/* released */
	bool released;
	/* memory mappings are changing because of non-cooperative event */
	atomic_t mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the other
		 * CPUs, the waitqueue may disappear from under us, see the
		 * !list_empty_careful() in handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern function
		 * "wake_up_state" (which in turn calls try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the padding or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;
	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;
	msg.arg.pagefault.address = address;
	/*
	 * These flags indicate why the userfault occurred:
	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
	 * - Neither of these flags being set indicates a MISSING fault.
	 *
	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
	 * fault. Otherwise, it was a read fault.
	 */
	if (flags & FAULT_FLAG_WRITE)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (reason & VM_UFFD_MINOR)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pte_t *ptep, pte;
	bool ret = true;

	mmap_assert_locked(mm);

	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (huge_pte_none(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	bool ret = true;

	mmap_assert_locked(mm);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
	/*
	 * READ_ONCE must function as a barrier with narrower scope
	 * and it must be equivalent to:
	 *	_pmd = *pmd; barrier();
	 *
	 * This is to deal with the instability (as in
	 * pmd_trans_unstable) of the pmd.
	 */
	_pmd = READ_ONCE(*pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd)) {
		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
			ret = true;
		goto out;
	}

	/*
	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
	 * and use the standard pte_offset_map() instead of parsing _pmd.
	 */
	pte = pte_offset_map(pmd, address);
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (pte_none(*pte))
		ret = true;
	if (!pte_write(*pte) && (reason & VM_UFFD_WP))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return TASK_INTERRUPTIBLE;

	if (flags & FAULT_FLAG_KILLABLE)
		return TASK_KILLABLE;

	return TASK_UNINTERRUPTIBLE;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_lock must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	bool must_wait;
	unsigned int blocking_state;

	/*
	 * We don't do userfault handling for the final child pid update.
	 *
	 * We also don't do userfault handling during
	 * coredumping. hugetlbfs has the special
	 * follow_hugetlb_page() to skip missing pages in the
	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
	 * the no_page_table() helper in follow_page_mask(), but the
	 * shmem_vm_ops->fault method is invoked even during
	 * coredumping without mmap_lock and it ends up here.
	 */
	if (current->flags & (PF_EXITING|PF_DUMPCORE))
		goto out;

	/*
	 * Coredumping runs without mmap_lock so we can only check that
	 * the mmap_lock is held, if PF_DUMPCORE was not set.
	 */
	mmap_assert_locked(mm);

	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	/* Any unrecognized flag is a bug. */
	VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
	/* 0 or > 1 flags set is a bug; we expect exactly 1. */
	VM_BUG_ON(!reason || (reason & (reason - 1)));

	if (ctx->features & UFFD_FEATURE_SIGBUS)
		goto out;
	if ((vmf->flags & FAULT_FLAG_USER) == 0 &&
	    ctx->flags & UFFD_USER_MODE_ONLY) {
		printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
			"sysctl knob to 1 if kernel faults must be handled "
			"without obtaining CAP_SYS_PTRACE capability\n");
		goto out;
	}

	/*
	 * If it's already released don't get it. This avoids looping
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_lock.
	 */
	if (unlikely(READ_ONCE(ctx->released))) {
		/*
		 * Don't return VM_FAULT_SIGBUS in this case, so a non
		 * cooperative manager can close the uffd after the
		 * last UFFDIO_COPY, without risking triggering an
		 * involuntary SIGBUS if the process was starting the
		 * userfaultfd while the userfaultfd was still armed
		 * (but after the last UFFDIO_COPY). If the uffd
		 * wasn't already closed when the userfault reached
		 * this point, that would normally be solved by
		 * userfaultfd_must_wait returning 'false'.
		 *
		 * If we were to return VM_FAULT_SIGBUS here, the non
		 * cooperative manager would be instead forced to
		 * always call UFFDIO_UNREGISTER before it can safely
		 * close the uffd.
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_lock */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
			ctx->features);
	uwq.ctx = ctx;
	uwq.waken = false;

	blocking_state = userfaultfd_get_blocking_state(vmf->flags);

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock from happening before the list_add
	 * in __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vmf->vma))
		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
						  reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
						       vmf->address,
						       vmf->flags, reason);
	mmap_read_unlock(mm);

	if (likely(must_wait && !READ_ONCE(ctx->released))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
	}

	__set_current_state(TASK_RUNNING);

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self. list_add also
	 * would never let either of the two pointers point to
	 * self. So list_empty_careful won't risk seeing both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.entry);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}

static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
{
	struct userfaultfd_ctx *release_new_ctx;

	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
	release_new_ctx = NULL;

	spin_lock_irq(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (READ_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			/*
			 * &ewq->wq may be queued in fork_event, but
			 * __remove_wait_queue ignores the head
			 * parameter. It would be a problem if it
			 * didn't.
			 */
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			if (ewq->msg.event == UFFD_EVENT_FORK) {
				struct userfaultfd_ctx *new;

				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
				release_new_ctx = new;
			}
			break;
		}

		spin_unlock_irq(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();

		spin_lock_irq(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->event_wqh.lock);

	if (release_new_ctx) {
		struct vm_area_struct *vma;
		struct mm_struct *mm = release_new_ctx->mm;

		/* the various vma->vm_userfaultfd_ctx still point to it */
		mmap_write_lock(mm);
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
				vma->vm_flags &= ~__VM_UFFD_FLAGS;
			}
		mmap_write_unlock(mm);

		userfaultfd_ctx_put(release_new_ctx);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
out:
	atomic_dec(&ctx->mmap_changing);
	VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
	userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~__VM_UFFD_FLAGS;
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		refcount_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->state = UFFD_STATE_RUNNING;
		ctx->features = octx->features;
		ctx->released = false;
		atomic_set(&ctx->mmap_changing, 0);
		ctx->mm = vma->vm_mm;
		mmgrab(ctx->mm);

		userfaultfd_ctx_get(octx);
		atomic_inc(&octx->mmap_changing);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}

void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return;

	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
		atomic_inc(&ctx->mmap_changing);
	} else {
		/* Drop uffd context if remap feature not enabled */
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~__VM_UFFD_FLAGS;
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

bool userfaultfd_remove(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
		return true;

	userfaultfd_ctx_get(ctx);
	atomic_inc(&ctx->mmap_changing);
	mmap_read_unlock(mm);

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMOVE;
	ewq.msg.arg.remove.start = start;
	ewq.msg.arg.remove.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
			  unsigned long start, unsigned long end)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;

	list_for_each_entry(unmap_ctx, unmaps, list)
		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
		    unmap_ctx->end == end)
			return true;

	return false;
}

int userfaultfd_unmap_prep(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end,
			   struct list_head *unmaps)
{
	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
		struct userfaultfd_unmap_ctx *unmap_ctx;
		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
		    has_unmap_ctx(ctx, unmaps, start, end))
			continue;

		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
		if (!unmap_ctx)
			return -ENOMEM;

		userfaultfd_ctx_get(ctx);
		atomic_inc(&ctx->mmap_changing);
		unmap_ctx->ctx = ctx;
		unmap_ctx->start = start;
		unmap_ctx->end = end;
		list_add_tail(&unmap_ctx->list, unmaps);
	}

	return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
	struct userfaultfd_unmap_ctx *ctx, *n;
	struct userfaultfd_wait_queue ewq;

	list_for_each_entry_safe(ctx, n, uf, list) {
		msg_init(&ewq.msg);

		ewq.msg.event = UFFD_EVENT_UNMAP;
		ewq.msg.arg.remove.start = ctx->start;
		ewq.msg.arg.remove.end = ctx->end;

		userfaultfd_event_wait_completion(ctx->ctx, &ewq);

		list_del(&ctx->list);
		kfree(ctx);
	}
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;

	WRITE_ONCE(ctx->released, true);

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
	 * changes while handle_userfault released the mmap_lock. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_lock for writing.
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
				 new_flags, vma->anon_vma,
				 vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX);
		if (prev)
			vma = prev;
		else
			prev = vma;
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}
	mmap_write_unlock(mm);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	lockdep_assert_held(&wqh->lock);

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}
| 939 | |
Al Viro | 076ccb7 | 2017-07-03 01:02:18 -0400 | [diff] [blame] | 940 | static __poll_t userfaultfd_poll(struct file *file, poll_table *wait) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 941 | { |
| 942 | struct userfaultfd_ctx *ctx = file->private_data; |
Al Viro | 076ccb7 | 2017-07-03 01:02:18 -0400 | [diff] [blame] | 943 | __poll_t ret; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 944 | |
| 945 | poll_wait(file, &ctx->fd_wqh, wait); |
| 946 | |
| 947 | switch (ctx->state) { |
| 948 | case UFFD_STATE_WAIT_API: |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 949 | return EPOLLERR; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 950 | case UFFD_STATE_RUNNING: |
Andrea Arcangeli | ba85c70 | 2015-09-04 15:46:41 -0700 | [diff] [blame] | 951 | /* |
| 952 | * poll() never guarantees that read won't block. |
| 953 | * userfaults can be woken before they're read(). |
| 954 | */ |
| 955 | if (unlikely(!(file->f_flags & O_NONBLOCK))) |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 956 | return EPOLLERR; |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 957 | /* |
| 958 | * Lockless check to see if there are pending faults. |
| 959 | * __pollwait()'s last action is the add_wait_queue(), but |
| 960 | * its spin_unlock would allow the waitqueue_active() |
| 961 | * below to pass above the actual list_add inside the |
| 962 | * add_wait_queue() critical section. So use a full |
| 963 | * memory barrier to serialize the list_add write of |
| 964 | * add_wait_queue() with the waitqueue_active() read |
| 965 | * below. |
| 966 | */ |
| 967 | ret = 0; |
| 968 | smp_mb(); |
| 969 | if (waitqueue_active(&ctx->fault_pending_wqh)) |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 970 | ret = EPOLLIN; |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 971 | else if (waitqueue_active(&ctx->event_wqh)) |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 972 | ret = EPOLLIN; |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 973 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 974 | return ret; |
| 975 | default: |
Andrea Arcangeli | 8474901 | 2017-02-22 15:42:12 -0800 | [diff] [blame] | 976 | WARN_ON_ONCE(1); |
Linus Torvalds | a9a0884 | 2018-02-11 14:34:03 -0800 | [diff] [blame] | 977 | return EPOLLERR; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 978 | } |
| 979 | } |
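/*
 * For illustration (not part of this file): one way a monitor thread
 * might drive this poll implementation from userspace.  The uffd must
 * be opened with O_NONBLOCK (see the EPOLLERR check above); "uffd" and
 * handle_uffd_messages() are hypothetical, error handling is elided.
 *
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *
 *	for (;;) {
 *		if (poll(&pfd, 1, -1) < 0)
 *			break;
 *		if (pfd.revents & POLLIN)
 *			handle_uffd_messages(uffd);
 *	}
 */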
| 980 | |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 981 | static const struct file_operations userfaultfd_fops; |
| 982 | |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 983 | static int resolve_userfault_fork(struct userfaultfd_ctx *new, |
| 984 | struct inode *inode, |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 985 | struct uffd_msg *msg) |
| 986 | { |
| 987 | int fd; |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 988 | |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 989 | fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new, |
| 990 | O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode); |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 991 | if (fd < 0) |
| 992 | return fd; |
| 993 | |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 994 | msg->arg.reserved.reserved1 = 0; |
| 995 | msg->arg.fork.ufd = fd; |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 996 | return 0; |
| 997 | } |
| 998 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 999 | static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 1000 | struct uffd_msg *msg, struct inode *inode) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1001 | { |
| 1002 | ssize_t ret; |
| 1003 | DECLARE_WAITQUEUE(wait, current); |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1004 | struct userfaultfd_wait_queue *uwq; |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1005 | /* |
| 1006 | * Handling a fork event requires sleeping operations, so |
| 1007 | * we drop the event_wqh lock, then do these ops, then |
| 1008 | * lock it back and wake up the waiter. While the lock is |
| 1009 | * dropped the ewq may go away so we keep track of it |
| 1010 | * carefully. |
| 1011 | */ |
| 1012 | LIST_HEAD(fork_event); |
| 1013 | struct userfaultfd_ctx *fork_nctx = NULL; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1014 | |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1015 | /* always take the fd_wqh lock before the fault_pending_wqh lock */ |
Christoph Hellwig | ae62c16 | 2018-10-26 15:02:19 -0700 | [diff] [blame] | 1016 | spin_lock_irq(&ctx->fd_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1017 | __add_wait_queue(&ctx->fd_wqh, &wait); |
| 1018 | for (;;) { |
| 1019 | set_current_state(TASK_INTERRUPTIBLE); |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1020 | spin_lock(&ctx->fault_pending_wqh.lock); |
| 1021 | uwq = find_userfault(ctx); |
| 1022 | if (uwq) { |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1023 | /* |
Andrea Arcangeli | 2c5b7e1 | 2015-09-04 15:47:23 -0700 | [diff] [blame] | 1024 | * Use a seqcount to repeat the lockless check |
| 1025 | * in wake_userfault() to avoid missing |
| 1026 | * wakeups because during the refile both |
| 1027 | * waitqueues could become empty if this is the |
| 1028 | * only userfault. |
| 1029 | */ |
| 1030 | write_seqcount_begin(&ctx->refile_seq); |
| 1031 | |
| 1032 | /* |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1033 | * The fault_pending_wqh.lock prevents the uwq |
| 1034 | * from disappearing from under us. |
| 1035 | * |
| 1036 | * Refile this userfault from |
| 1037 | * fault_pending_wqh to fault_wqh, it's not |
| 1038 | * pending anymore after we read it. |
| 1039 | * |
| 1040 | * Use list_del() by hand (as |
| 1041 | * userfaultfd_wake_function also uses |
| 1042 | * list_del_init() by hand) to be sure nobody |
| 1043 | * changes __remove_wait_queue() to use |
| 1044 | * list_del_init(), in turn breaking the |
| 1045 | * !list_empty_careful() check in |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 1046 | * handle_userfault(). The uwq->wq.head list |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1047 | * must never be empty at any time during the |
| 1048 | * refile, or the waitqueue could disappear |
| 1049 | * from under us. The "wait_queue_head_t" |
| 1050 | * parameter of __remove_wait_queue() is unused |
| 1051 | * anyway. |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1052 | */ |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 1053 | list_del(&uwq->wq.entry); |
Matthew Wilcox | c430d1e | 2018-08-21 21:56:30 -0700 | [diff] [blame] | 1054 | add_wait_queue(&ctx->fault_wqh, &uwq->wq); |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1055 | |
Andrea Arcangeli | 2c5b7e1 | 2015-09-04 15:47:23 -0700 | [diff] [blame] | 1056 | write_seqcount_end(&ctx->refile_seq); |
| 1057 | |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1058 | /* careful to always initialize msg if ret == 0 */ |
| 1059 | *msg = uwq->msg; |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1060 | spin_unlock(&ctx->fault_pending_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1061 | ret = 0; |
| 1062 | break; |
| 1063 | } |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1064 | spin_unlock(&ctx->fault_pending_wqh.lock); |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 1065 | |
| 1066 | spin_lock(&ctx->event_wqh.lock); |
| 1067 | uwq = find_userfault_evt(ctx); |
| 1068 | if (uwq) { |
| 1069 | *msg = uwq->msg; |
| 1070 | |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1071 | if (uwq->msg.event == UFFD_EVENT_FORK) { |
| 1072 | fork_nctx = (struct userfaultfd_ctx *) |
| 1073 | (unsigned long) |
| 1074 | uwq->msg.arg.reserved.reserved1; |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 1075 | list_move(&uwq->wq.entry, &fork_event); |
Andrea Arcangeli | 384632e | 2017-10-03 16:15:38 -0700 | [diff] [blame] | 1076 | /* |
| 1077 | * fork_nctx can be freed as soon as |
| 1078 | * we drop the lock, unless we take a |
| 1079 | * reference on it. |
| 1080 | */ |
| 1081 | userfaultfd_ctx_get(fork_nctx); |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1082 | spin_unlock(&ctx->event_wqh.lock); |
| 1083 | ret = 0; |
| 1084 | break; |
| 1085 | } |
| 1086 | |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 1087 | userfaultfd_event_complete(ctx, uwq); |
| 1088 | spin_unlock(&ctx->event_wqh.lock); |
| 1089 | ret = 0; |
| 1090 | break; |
| 1091 | } |
| 1092 | spin_unlock(&ctx->event_wqh.lock); |
| 1093 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1094 | if (signal_pending(current)) { |
| 1095 | ret = -ERESTARTSYS; |
| 1096 | break; |
| 1097 | } |
| 1098 | if (no_wait) { |
| 1099 | ret = -EAGAIN; |
| 1100 | break; |
| 1101 | } |
Christoph Hellwig | ae62c16 | 2018-10-26 15:02:19 -0700 | [diff] [blame] | 1102 | spin_unlock_irq(&ctx->fd_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1103 | schedule(); |
Christoph Hellwig | ae62c16 | 2018-10-26 15:02:19 -0700 | [diff] [blame] | 1104 | spin_lock_irq(&ctx->fd_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1105 | } |
| 1106 | __remove_wait_queue(&ctx->fd_wqh, &wait); |
| 1107 | __set_current_state(TASK_RUNNING); |
Christoph Hellwig | ae62c16 | 2018-10-26 15:02:19 -0700 | [diff] [blame] | 1108 | spin_unlock_irq(&ctx->fd_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1109 | |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1110 | if (!ret && msg->event == UFFD_EVENT_FORK) { |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 1111 | ret = resolve_userfault_fork(fork_nctx, inode, msg); |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 1112 | spin_lock_irq(&ctx->event_wqh.lock); |
Andrea Arcangeli | 384632e | 2017-10-03 16:15:38 -0700 | [diff] [blame] | 1113 | if (!list_empty(&fork_event)) { |
| 1114 | /* |
| 1115 | * The fork thread didn't abort, so we can |
| 1116 | * drop the temporary refcount. |
| 1117 | */ |
| 1118 | userfaultfd_ctx_put(fork_nctx); |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1119 | |
Andrea Arcangeli | 384632e | 2017-10-03 16:15:38 -0700 | [diff] [blame] | 1120 | uwq = list_first_entry(&fork_event, |
| 1121 | typeof(*uwq), |
| 1122 | wq.entry); |
| 1123 | /* |
| 1124 | * If the fork_event list wasn't empty, the event |
| 1125 | * wasn't already released by the fork thread |
| 1126 | * (the event is allocated on the fork kernel |
| 1127 | * stack), so put the event back in its place in |
| 1128 | * the event_wqh. The fork_event head will be freed |
| 1129 | * as soon as we return, so the event cannot |
| 1130 | * stay queued there no matter the current |
| 1131 | * "ret" value. |
| 1132 | */ |
| 1133 | list_del(&uwq->wq.entry); |
| 1134 | __add_wait_queue(&ctx->event_wqh, &uwq->wq); |
| 1135 | |
| 1136 | /* |
| 1137 | * Leave the event in the waitqueue and report |
| 1138 | * error to userland if we failed to resolve |
| 1139 | * the userfault fork. |
| 1140 | */ |
| 1141 | if (likely(!ret)) |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1142 | userfaultfd_event_complete(ctx, uwq); |
Andrea Arcangeli | 384632e | 2017-10-03 16:15:38 -0700 | [diff] [blame] | 1143 | } else { |
| 1144 | /* |
| 1145 | * Here the fork thread aborted and the |
| 1146 | * refcount from the fork thread on fork_nctx |
| 1147 | * has already been released. We still hold |
| 1148 | * the reference we took before releasing the |
| 1149 | * lock above. If resolve_userfault_fork |
| 1150 | * failed we have to drop it because the |
| 1151 | * fork_nctx has to be freed in that case. If |
| 1152 | * it succeeded we keep holding it because the |
| 1153 | * new uffd references it. |
| 1154 | */ |
| 1155 | if (ret) |
| 1156 | userfaultfd_ctx_put(fork_nctx); |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1157 | } |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 1158 | spin_unlock_irq(&ctx->event_wqh.lock); |
Pavel Emelyanov | 893e26e | 2017-02-22 15:42:27 -0800 | [diff] [blame] | 1159 | } |
| 1160 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1161 | return ret; |
| 1162 | } |
| 1163 | |
| 1164 | static ssize_t userfaultfd_read(struct file *file, char __user *buf, |
| 1165 | size_t count, loff_t *ppos) |
| 1166 | { |
| 1167 | struct userfaultfd_ctx *ctx = file->private_data; |
| 1168 | ssize_t _ret, ret = 0; |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1169 | struct uffd_msg msg; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1170 | int no_wait = file->f_flags & O_NONBLOCK; |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 1171 | struct inode *inode = file_inode(file); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1172 | |
| 1173 | if (ctx->state == UFFD_STATE_WAIT_API) |
| 1174 | return -EINVAL; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1175 | |
| 1176 | for (;;) { |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1177 | if (count < sizeof(msg)) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1178 | return ret ? ret : -EINVAL; |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 1179 | _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1180 | if (_ret < 0) |
| 1181 | return ret ? ret : _ret; |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1182 | if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg))) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1183 | return ret ? ret : -EFAULT; |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1184 | ret += sizeof(msg); |
| 1185 | buf += sizeof(msg); |
| 1186 | count -= sizeof(msg); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1187 | /* |
| 1188 | * Allow reading more than one fault at a time, but only |
| 1189 | * block while waiting for the very first one. |
| 1190 | */ |
| 1191 | no_wait = O_NONBLOCK; |
| 1192 | } |
| 1193 | } |
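/*
 * For illustration (not part of this file): userspace is expected to
 * read whole struct uffd_msg records; a shorter buffer is rejected, as
 * enforced above.  "uffd" and resolve_fault() are hypothetical, error
 * handling is elided.
 *
 *	struct uffd_msg msg;
 *
 *	if (read(uffd, &msg, sizeof(msg)) == sizeof(msg) &&
 *	    msg.event == UFFD_EVENT_PAGEFAULT)
 *		resolve_fault(uffd, msg.arg.pagefault.address);
 */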
| 1194 | |
| 1195 | static void __wake_userfault(struct userfaultfd_ctx *ctx, |
| 1196 | struct userfaultfd_wake_range *range) |
| 1197 | { |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 1198 | spin_lock_irq(&ctx->fault_pending_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1199 | /* wake all in the range and autoremove */ |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1200 | if (waitqueue_active(&ctx->fault_pending_wqh)) |
Andrea Arcangeli | ac5be6b | 2015-09-22 14:58:49 -0700 | [diff] [blame] | 1201 | __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1202 | range); |
| 1203 | if (waitqueue_active(&ctx->fault_wqh)) |
Matthew Wilcox | c430d1e | 2018-08-21 21:56:30 -0700 | [diff] [blame] | 1204 | __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range); |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 1205 | spin_unlock_irq(&ctx->fault_pending_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1206 | } |
| 1207 | |
| 1208 | static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, |
| 1209 | struct userfaultfd_wake_range *range) |
| 1210 | { |
Andrea Arcangeli | 2c5b7e1 | 2015-09-04 15:47:23 -0700 | [diff] [blame] | 1211 | unsigned seq; |
| 1212 | bool need_wakeup; |
| 1213 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1214 | /* |
| 1215 | * To be sure waitqueue_active() is not reordered by the CPU |
| 1216 | * before the pagetable update, use an explicit SMP memory |
Michel Lespinasse | 3e4e28c | 2020-06-08 21:33:51 -0700 | [diff] [blame] | 1217 | * barrier here. PT lock release or mmap_read_unlock(mm) only |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1218 | * have release semantics, which can still allow the |
| 1219 | * waitqueue_active() to be reordered before the pte update. |
| 1220 | */ |
| 1221 | smp_mb(); |
| 1222 | |
| 1223 | /* |
| 1224 | * Use waitqueue_active because it's very frequent to |
| 1225 | * change the address space atomically even if there are no |
| 1226 | * userfaults yet. So we take the spinlock only when we're |
| 1227 | * sure we have userfaults to wake. |
| 1228 | */ |
Andrea Arcangeli | 2c5b7e1 | 2015-09-04 15:47:23 -0700 | [diff] [blame] | 1229 | do { |
| 1230 | seq = read_seqcount_begin(&ctx->refile_seq); |
| 1231 | need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || |
| 1232 | waitqueue_active(&ctx->fault_wqh); |
| 1233 | cond_resched(); |
| 1234 | } while (read_seqcount_retry(&ctx->refile_seq, seq)); |
| 1235 | if (need_wakeup) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1236 | __wake_userfault(ctx, range); |
| 1237 | } |
| 1238 | |
| 1239 | static __always_inline int validate_range(struct mm_struct *mm, |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1240 | __u64 start, __u64 len) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1241 | { |
| 1242 | __u64 task_size = mm->task_size; |
| 1243 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1244 | if (start & ~PAGE_MASK) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1245 | return -EINVAL; |
| 1246 | if (len & ~PAGE_MASK) |
| 1247 | return -EINVAL; |
| 1248 | if (!len) |
| 1249 | return -EINVAL; |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1250 | if (start < mmap_min_addr) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1251 | return -EINVAL; |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1252 | if (start >= task_size) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1253 | return -EINVAL; |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1254 | if (len > task_size - start) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1255 | return -EINVAL; |
| 1256 | return 0; |
| 1257 | } |
| 1258 | |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1259 | static inline bool vma_can_userfault(struct vm_area_struct *vma, |
| 1260 | unsigned long vm_flags) |
Mike Rapoport | ba6907d | 2017-02-22 15:43:22 -0800 | [diff] [blame] | 1261 | { |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1262 | /* FIXME: add WP support to hugetlbfs and shmem */ |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1263 | if (vm_flags & VM_UFFD_WP) { |
| 1264 | if (is_vm_hugetlb_page(vma) || vma_is_shmem(vma)) |
| 1265 | return false; |
| 1266 | } |
| 1267 | |
| 1268 | if (vm_flags & VM_UFFD_MINOR) { |
Axel Rasmussen | c949b09 | 2021-06-30 18:49:20 -0700 | [diff] [blame] | 1269 | if (!(is_vm_hugetlb_page(vma) || vma_is_shmem(vma))) |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1270 | return false; |
| 1271 | } |
| 1272 | |
| 1273 | return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) || |
| 1274 | vma_is_shmem(vma); |
Mike Rapoport | ba6907d | 2017-02-22 15:43:22 -0800 | [diff] [blame] | 1275 | } |
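/*
 * In short, as of this version: MISSING mode can be registered on
 * anonymous, hugetlbfs and shmem vmas; WP mode only on anonymous vmas;
 * MINOR mode only on hugetlbfs and shmem vmas.
 */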
| 1276 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1277 | static int userfaultfd_register(struct userfaultfd_ctx *ctx, |
| 1278 | unsigned long arg) |
| 1279 | { |
| 1280 | struct mm_struct *mm = ctx->mm; |
| 1281 | struct vm_area_struct *vma, *prev, *cur; |
| 1282 | int ret; |
| 1283 | struct uffdio_register uffdio_register; |
| 1284 | struct uffdio_register __user *user_uffdio_register; |
| 1285 | unsigned long vm_flags, new_flags; |
| 1286 | bool found; |
Mike Rapoport | ce53e8e | 2017-09-06 16:23:12 -0700 | [diff] [blame] | 1287 | bool basic_ioctls; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1288 | unsigned long start, end, vma_end; |
| 1289 | |
| 1290 | user_uffdio_register = (struct uffdio_register __user *) arg; |
| 1291 | |
| 1292 | ret = -EFAULT; |
| 1293 | if (copy_from_user(&uffdio_register, user_uffdio_register, |
| 1294 | sizeof(uffdio_register)-sizeof(__u64))) |
| 1295 | goto out; |
| 1296 | |
| 1297 | ret = -EINVAL; |
| 1298 | if (!uffdio_register.mode) |
| 1299 | goto out; |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1300 | if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1301 | goto out; |
| 1302 | vm_flags = 0; |
| 1303 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) |
| 1304 | vm_flags |= VM_UFFD_MISSING; |
Peter Xu | 00b151f | 2021-06-30 18:49:06 -0700 | [diff] [blame] | 1305 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) { |
| 1306 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP |
| 1307 | goto out; |
| 1308 | #endif |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1309 | vm_flags |= VM_UFFD_WP; |
Peter Xu | 00b151f | 2021-06-30 18:49:06 -0700 | [diff] [blame] | 1310 | } |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1311 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) { |
| 1312 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR |
| 1313 | goto out; |
| 1314 | #endif |
| 1315 | vm_flags |= VM_UFFD_MINOR; |
| 1316 | } |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1317 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1318 | ret = validate_range(mm, uffdio_register.range.start, |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1319 | uffdio_register.range.len); |
| 1320 | if (ret) |
| 1321 | goto out; |
| 1322 | |
| 1323 | start = uffdio_register.range.start; |
| 1324 | end = start + uffdio_register.range.len; |
| 1325 | |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1326 | ret = -ENOMEM; |
| 1327 | if (!mmget_not_zero(mm)) |
| 1328 | goto out; |
| 1329 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1330 | mmap_write_lock(mm); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1331 | vma = find_vma_prev(mm, start, &prev); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1332 | if (!vma) |
| 1333 | goto out_unlock; |
| 1334 | |
| 1335 | /* check that there's at least one vma in the range */ |
| 1336 | ret = -EINVAL; |
| 1337 | if (vma->vm_start >= end) |
| 1338 | goto out_unlock; |
| 1339 | |
| 1340 | /* |
Mike Kravetz | cab350a | 2017-02-22 15:43:04 -0800 | [diff] [blame] | 1341 | * If the first vma contains huge pages, make sure start address |
| 1342 | * is aligned to huge page size. |
| 1343 | */ |
| 1344 | if (is_vm_hugetlb_page(vma)) { |
| 1345 | unsigned long vma_hpagesize = vma_kernel_pagesize(vma); |
| 1346 | |
| 1347 | if (start & (vma_hpagesize - 1)) |
| 1348 | goto out_unlock; |
| 1349 | } |
| 1350 | |
| 1351 | /* |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1352 | * Search for incompatible vmas. |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1353 | */ |
| 1354 | found = false; |
Mike Rapoport | ce53e8e | 2017-09-06 16:23:12 -0700 | [diff] [blame] | 1355 | basic_ioctls = false; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1356 | for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { |
| 1357 | cond_resched(); |
| 1358 | |
| 1359 | BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1360 | !!(cur->vm_flags & __VM_UFFD_FLAGS)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1361 | |
| 1362 | /* check for incompatible vmas */ |
| 1363 | ret = -EINVAL; |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1364 | if (!vma_can_userfault(cur, vm_flags)) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1365 | goto out_unlock; |
Andrea Arcangeli | 29ec9066 | 2018-11-30 14:09:32 -0800 | [diff] [blame] | 1366 | |
| 1367 | /* |
| 1368 | * UFFDIO_COPY will fill file holes even without |
| 1369 | * PROT_WRITE. This check enforces that if this is a |
| 1370 | * MAP_SHARED mapping, the process has write permission to the |
| 1371 | * backing file. Requiring VM_MAYWRITE also enforces that on a |
| 1372 | * MAP_SHARED vma no F_SEAL_WRITE is set and no further |
| 1373 | * write seal can be taken until the vma is destroyed. |
| 1374 | */ |
| 1375 | ret = -EPERM; |
| 1376 | if (unlikely(!(cur->vm_flags & VM_MAYWRITE))) |
| 1377 | goto out_unlock; |
| 1378 | |
Mike Kravetz | cab350a | 2017-02-22 15:43:04 -0800 | [diff] [blame] | 1379 | /* |
| 1380 | * If this vma contains ending address, and huge pages |
| 1381 | * check alignment. |
| 1382 | */ |
| 1383 | if (is_vm_hugetlb_page(cur) && end <= cur->vm_end && |
| 1384 | end > cur->vm_start) { |
| 1385 | unsigned long vma_hpagesize = vma_kernel_pagesize(cur); |
| 1386 | |
| 1387 | ret = -EINVAL; |
| 1388 | |
| 1389 | if (end & (vma_hpagesize - 1)) |
| 1390 | goto out_unlock; |
| 1391 | } |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1392 | if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE)) |
| 1393 | goto out_unlock; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1394 | |
| 1395 | /* |
| 1396 | * Check that this vma isn't already owned by a |
| 1397 | * different userfaultfd. We can't allow more than one |
| 1398 | * userfaultfd to own a single vma simultaneously or we |
| 1399 | * wouldn't know which one to deliver the userfaults to. |
| 1400 | */ |
| 1401 | ret = -EBUSY; |
| 1402 | if (cur->vm_userfaultfd_ctx.ctx && |
| 1403 | cur->vm_userfaultfd_ctx.ctx != ctx) |
| 1404 | goto out_unlock; |
| 1405 | |
Mike Kravetz | cab350a | 2017-02-22 15:43:04 -0800 | [diff] [blame] | 1406 | /* |
| 1407 | * Note vmas containing huge pages |
| 1408 | */ |
Mike Rapoport | ce53e8e | 2017-09-06 16:23:12 -0700 | [diff] [blame] | 1409 | if (is_vm_hugetlb_page(cur)) |
| 1410 | basic_ioctls = true; |
Mike Kravetz | cab350a | 2017-02-22 15:43:04 -0800 | [diff] [blame] | 1411 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1412 | found = true; |
| 1413 | } |
| 1414 | BUG_ON(!found); |
| 1415 | |
| 1416 | if (vma->vm_start < start) |
| 1417 | prev = vma; |
| 1418 | |
| 1419 | ret = 0; |
| 1420 | do { |
| 1421 | cond_resched(); |
| 1422 | |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1423 | BUG_ON(!vma_can_userfault(vma, vm_flags)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1424 | BUG_ON(vma->vm_userfaultfd_ctx.ctx && |
| 1425 | vma->vm_userfaultfd_ctx.ctx != ctx); |
Andrea Arcangeli | 29ec9066 | 2018-11-30 14:09:32 -0800 | [diff] [blame] | 1426 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1427 | |
| 1428 | /* |
| 1429 | * Nothing to do: this vma is already registered into this |
| 1430 | * userfaultfd and with the right tracking mode too. |
| 1431 | */ |
| 1432 | if (vma->vm_userfaultfd_ctx.ctx == ctx && |
| 1433 | (vma->vm_flags & vm_flags) == vm_flags) |
| 1434 | goto skip; |
| 1435 | |
| 1436 | if (vma->vm_start > start) |
| 1437 | start = vma->vm_start; |
| 1438 | vma_end = min(end, vma->vm_end); |
| 1439 | |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1440 | new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1441 | prev = vma_merge(mm, prev, start, vma_end, new_flags, |
| 1442 | vma->anon_vma, vma->vm_file, vma->vm_pgoff, |
| 1443 | vma_policy(vma), |
| 1444 | ((struct vm_userfaultfd_ctx){ ctx })); |
| 1445 | if (prev) { |
| 1446 | vma = prev; |
| 1447 | goto next; |
| 1448 | } |
| 1449 | if (vma->vm_start < start) { |
| 1450 | ret = split_vma(mm, vma, start, 1); |
| 1451 | if (ret) |
| 1452 | break; |
| 1453 | } |
| 1454 | if (vma->vm_end > end) { |
| 1455 | ret = split_vma(mm, vma, end, 0); |
| 1456 | if (ret) |
| 1457 | break; |
| 1458 | } |
| 1459 | next: |
| 1460 | /* |
| 1461 | * In the vma_merge() successful mprotect-like case 8: |
| 1462 | * the next vma was merged into the current one and |
| 1463 | * the current one has not been updated yet. |
| 1464 | */ |
| 1465 | vma->vm_flags = new_flags; |
| 1466 | vma->vm_userfaultfd_ctx.ctx = ctx; |
| 1467 | |
Peter Xu | 6dfeaff | 2021-05-04 18:33:13 -0700 | [diff] [blame] | 1468 | if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) |
| 1469 | hugetlb_unshare_all_pmds(vma); |
| 1470 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1471 | skip: |
| 1472 | prev = vma; |
| 1473 | start = vma->vm_end; |
| 1474 | vma = vma->vm_next; |
| 1475 | } while (vma && vma->vm_start < end); |
| 1476 | out_unlock: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1477 | mmap_write_unlock(mm); |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1478 | mmput(mm); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1479 | if (!ret) { |
Peter Xu | 1481930 | 2020-04-06 20:06:29 -0700 | [diff] [blame] | 1480 | __u64 ioctls_out; |
| 1481 | |
| 1482 | ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC : |
| 1483 | UFFD_API_RANGE_IOCTLS; |
| 1484 | |
| 1485 | /* |
| 1486 | * Declare the WP ioctl only if the WP mode is |
| 1487 | * specified and all checks passed with the range |
| 1488 | */ |
| 1489 | if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)) |
| 1490 | ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT); |
| 1491 | |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 1492 | /* CONTINUE ioctl is only supported for MINOR ranges. */ |
| 1493 | if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR)) |
| 1494 | ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE); |
| 1495 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1496 | /* |
| 1497 | * Now that we scanned all vmas we can already tell |
| 1498 | * userland which ioctl methods are guaranteed to |
| 1499 | * succeed on this range. |
| 1500 | */ |
Peter Xu | 1481930 | 2020-04-06 20:06:29 -0700 | [diff] [blame] | 1501 | if (put_user(ioctls_out, &user_uffdio_register->ioctls)) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1502 | ret = -EFAULT; |
| 1503 | } |
| 1504 | out: |
| 1505 | return ret; |
| 1506 | } |
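/*
 * For illustration (not part of this file): a plausible userspace
 * registration of a page-aligned range for missing faults, checking
 * which range ioctls came back as guaranteed.  "uffd", "addr" and
 * "len" are caller-supplied, error handling is elided.
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)addr, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
 *		err(1, "UFFDIO_REGISTER");
 *	assert(reg.ioctls & ((__u64)1 << _UFFDIO_COPY));
 */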
| 1507 | |
| 1508 | static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, |
| 1509 | unsigned long arg) |
| 1510 | { |
| 1511 | struct mm_struct *mm = ctx->mm; |
| 1512 | struct vm_area_struct *vma, *prev, *cur; |
| 1513 | int ret; |
| 1514 | struct uffdio_range uffdio_unregister; |
| 1515 | unsigned long new_flags; |
| 1516 | bool found; |
| 1517 | unsigned long start, end, vma_end; |
| 1518 | const void __user *buf = (void __user *)arg; |
| 1519 | |
| 1520 | ret = -EFAULT; |
| 1521 | if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) |
| 1522 | goto out; |
| 1523 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1524 | ret = validate_range(mm, uffdio_unregister.start, |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1525 | uffdio_unregister.len); |
| 1526 | if (ret) |
| 1527 | goto out; |
| 1528 | |
| 1529 | start = uffdio_unregister.start; |
| 1530 | end = start + uffdio_unregister.len; |
| 1531 | |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1532 | ret = -ENOMEM; |
| 1533 | if (!mmget_not_zero(mm)) |
| 1534 | goto out; |
| 1535 | |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1536 | mmap_write_lock(mm); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1537 | vma = find_vma_prev(mm, start, &prev); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1538 | if (!vma) |
| 1539 | goto out_unlock; |
| 1540 | |
| 1541 | /* check that there's at least one vma in the range */ |
| 1542 | ret = -EINVAL; |
| 1543 | if (vma->vm_start >= end) |
| 1544 | goto out_unlock; |
| 1545 | |
| 1546 | /* |
Mike Kravetz | cab350a | 2017-02-22 15:43:04 -0800 | [diff] [blame] | 1547 | * If the first vma contains huge pages, make sure start address |
| 1548 | * is aligned to huge page size. |
| 1549 | */ |
| 1550 | if (is_vm_hugetlb_page(vma)) { |
| 1551 | unsigned long vma_hpagesize = vma_kernel_pagesize(vma); |
| 1552 | |
| 1553 | if (start & (vma_hpagesize - 1)) |
| 1554 | goto out_unlock; |
| 1555 | } |
| 1556 | |
| 1557 | /* |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1558 | * Search for incompatible vmas. |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1559 | */ |
| 1560 | found = false; |
| 1561 | ret = -EINVAL; |
| 1562 | for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) { |
| 1563 | cond_resched(); |
| 1564 | |
| 1565 | BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1566 | !!(cur->vm_flags & __VM_UFFD_FLAGS)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1567 | |
| 1568 | /* |
| 1569 | * Check for incompatible vmas. Not strictly required |
| 1570 | * here, as incompatible vmas cannot have a |
| 1571 | * userfaultfd_ctx registered on them, but this |
| 1572 | * provides stricter behavior to catch |
| 1573 | * unregistration errors. |
| 1574 | */ |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1575 | if (!vma_can_userfault(cur, cur->vm_flags)) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1576 | goto out_unlock; |
| 1577 | |
| 1578 | found = true; |
| 1579 | } |
| 1580 | BUG_ON(!found); |
| 1581 | |
| 1582 | if (vma->vm_start < start) |
| 1583 | prev = vma; |
| 1584 | |
| 1585 | ret = 0; |
| 1586 | do { |
| 1587 | cond_resched(); |
| 1588 | |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1589 | BUG_ON(!vma_can_userfault(vma, vma->vm_flags)); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1590 | |
| 1591 | /* |
| 1592 | * Nothing to do: this vma is not registered with any |
| 1593 | * userfaultfd, so there is nothing to unregister. |
| 1594 | */ |
| 1595 | if (!vma->vm_userfaultfd_ctx.ctx) |
| 1596 | goto skip; |
| 1597 | |
Andrea Arcangeli | 01e881f | 2018-12-14 14:17:17 -0800 | [diff] [blame] | 1598 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); |
| 1599 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1600 | if (vma->vm_start > start) |
| 1601 | start = vma->vm_start; |
| 1602 | vma_end = min(end, vma->vm_end); |
| 1603 | |
Andrea Arcangeli | 09fa529 | 2017-02-22 15:42:46 -0800 | [diff] [blame] | 1604 | if (userfaultfd_missing(vma)) { |
| 1605 | /* |
| 1606 | * Wake any concurrent pending userfault while |
| 1607 | * we unregister, so they will not hang |
| 1608 | * permanently, and it saves userland from calling |
| 1609 | * UFFDIO_WAKE explicitly. |
| 1610 | */ |
| 1611 | struct userfaultfd_wake_range range; |
| 1612 | range.start = start; |
| 1613 | range.len = vma_end - start; |
| 1614 | wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); |
| 1615 | } |
| 1616 | |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1617 | new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1618 | prev = vma_merge(mm, prev, start, vma_end, new_flags, |
| 1619 | vma->anon_vma, vma->vm_file, vma->vm_pgoff, |
| 1620 | vma_policy(vma), |
| 1621 | NULL_VM_UFFD_CTX); |
| 1622 | if (prev) { |
| 1623 | vma = prev; |
| 1624 | goto next; |
| 1625 | } |
| 1626 | if (vma->vm_start < start) { |
| 1627 | ret = split_vma(mm, vma, start, 1); |
| 1628 | if (ret) |
| 1629 | break; |
| 1630 | } |
| 1631 | if (vma->vm_end > end) { |
| 1632 | ret = split_vma(mm, vma, end, 0); |
| 1633 | if (ret) |
| 1634 | break; |
| 1635 | } |
| 1636 | next: |
| 1637 | /* |
| 1638 | * In the vma_merge() successful mprotect-like case 8: |
| 1639 | * the next vma was merged into the current one and |
| 1640 | * the current one has not been updated yet. |
| 1641 | */ |
| 1642 | vma->vm_flags = new_flags; |
| 1643 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; |
| 1644 | |
| 1645 | skip: |
| 1646 | prev = vma; |
| 1647 | start = vma->vm_end; |
| 1648 | vma = vma->vm_next; |
| 1649 | } while (vma && vma->vm_start < end); |
| 1650 | out_unlock: |
Michel Lespinasse | d8ed45c | 2020-06-08 21:33:25 -0700 | [diff] [blame] | 1651 | mmap_write_unlock(mm); |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1652 | mmput(mm); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1653 | out: |
| 1654 | return ret; |
| 1655 | } |
| 1656 | |
| 1657 | /* |
Andrea Arcangeli | ba85c70 | 2015-09-04 15:46:41 -0700 | [diff] [blame] | 1658 | * userfaultfd_wake may be used in combination with the |
| 1659 | * UFFDIO_*_MODE_DONTWAKE flags to wake up userfaults in batches. |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1660 | */ |
| 1661 | static int userfaultfd_wake(struct userfaultfd_ctx *ctx, |
| 1662 | unsigned long arg) |
| 1663 | { |
| 1664 | int ret; |
| 1665 | struct uffdio_range uffdio_wake; |
| 1666 | struct userfaultfd_wake_range range; |
| 1667 | const void __user *buf = (void __user *)arg; |
| 1668 | |
| 1669 | ret = -EFAULT; |
| 1670 | if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) |
| 1671 | goto out; |
| 1672 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1673 | ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1674 | if (ret) |
| 1675 | goto out; |
| 1676 | |
| 1677 | range.start = uffdio_wake.start; |
| 1678 | range.len = uffdio_wake.len; |
| 1679 | |
| 1680 | /* |
| 1681 | * len == 0 means wake all and we don't want to wake all here, |
| 1682 | * so check it again to be sure. |
| 1683 | */ |
| 1684 | VM_BUG_ON(!range.len); |
| 1685 | |
| 1686 | wake_userfault(ctx, &range); |
| 1687 | ret = 0; |
| 1688 | |
| 1689 | out: |
| 1690 | return ret; |
| 1691 | } |
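/*
 * For illustration (not part of this file): the batching mentioned
 * above might look like several UFFDIO_COPY calls issued with
 * UFFDIO_COPY_MODE_DONTWAKE, followed by one UFFDIO_WAKE over the
 * whole region.  "uffd", "region_start" and "region_len" are
 * caller-supplied, error handling is elided.
 *
 *	struct uffdio_range r = {
 *		.start = region_start,
 *		.len   = region_len,
 *	};
 *
 *	ioctl(uffd, UFFDIO_WAKE, &r);
 */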
| 1692 | |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1693 | static int userfaultfd_copy(struct userfaultfd_ctx *ctx, |
| 1694 | unsigned long arg) |
| 1695 | { |
| 1696 | __s64 ret; |
| 1697 | struct uffdio_copy uffdio_copy; |
| 1698 | struct uffdio_copy __user *user_uffdio_copy; |
| 1699 | struct userfaultfd_wake_range range; |
| 1700 | |
| 1701 | user_uffdio_copy = (struct uffdio_copy __user *) arg; |
| 1702 | |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 1703 | ret = -EAGAIN; |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame^] | 1704 | if (atomic_read(&ctx->mmap_changing)) |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 1705 | goto out; |
| 1706 | |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1707 | ret = -EFAULT; |
| 1708 | if (copy_from_user(&uffdio_copy, user_uffdio_copy, |
| 1709 | /* don't copy "copy" last field */ |
| 1710 | sizeof(uffdio_copy)-sizeof(__s64))) |
| 1711 | goto out; |
| 1712 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1713 | ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1714 | if (ret) |
| 1715 | goto out; |
| 1716 | /* |
| 1717 | * double check for wraparound just in case. copy_from_user() |
| 1718 | * will later check that uffdio_copy.src + uffdio_copy.len fits |
| 1719 | * in the userland range. |
| 1720 | */ |
| 1721 | ret = -EINVAL; |
| 1722 | if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src) |
| 1723 | goto out; |
Andrea Arcangeli | 72981e0 | 2020-04-06 20:05:41 -0700 | [diff] [blame] | 1724 | if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP)) |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1725 | goto out; |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1726 | if (mmget_not_zero(ctx->mm)) { |
| 1727 | ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, |
Andrea Arcangeli | 72981e0 | 2020-04-06 20:05:41 -0700 | [diff] [blame] | 1728 | uffdio_copy.len, &ctx->mmap_changing, |
| 1729 | uffdio_copy.mode); |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1730 | mmput(ctx->mm); |
Mike Rapoport | 9633318 | 2017-02-24 14:58:31 -0800 | [diff] [blame] | 1731 | } else { |
Mike Rapoport | e86b298 | 2017-08-10 15:24:32 -0700 | [diff] [blame] | 1732 | return -ESRCH; |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1733 | } |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1734 | if (unlikely(put_user(ret, &user_uffdio_copy->copy))) |
| 1735 | return -EFAULT; |
| 1736 | if (ret < 0) |
| 1737 | goto out; |
| 1738 | BUG_ON(!ret); |
| 1739 | /* len == 0 would wake all */ |
| 1740 | range.len = ret; |
| 1741 | if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) { |
| 1742 | range.start = uffdio_copy.dst; |
| 1743 | wake_userfault(ctx, &range); |
| 1744 | } |
| 1745 | ret = range.len == uffdio_copy.len ? 0 : -EAGAIN; |
| 1746 | out: |
| 1747 | return ret; |
| 1748 | } |
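/*
 * For illustration (not part of this file): a plausible way to resolve
 * a missing fault from userspace with UFFDIO_COPY; on success the
 * "copy" field reports how many bytes were copied.  "uffd",
 * "fault_addr", "src_buf" and "page_size" are caller-supplied, error
 * handling (including a -EAGAIN retry) is elided.
 *
 *	struct uffdio_copy copy = {
 *		.dst  = fault_addr & ~(page_size - 1),
 *		.src  = (unsigned long)src_buf,
 *		.len  = page_size,
 *		.mode = 0,
 *	};
 *
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */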
| 1749 | |
| 1750 | static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, |
| 1751 | unsigned long arg) |
| 1752 | { |
| 1753 | __s64 ret; |
| 1754 | struct uffdio_zeropage uffdio_zeropage; |
| 1755 | struct uffdio_zeropage __user *user_uffdio_zeropage; |
| 1756 | struct userfaultfd_wake_range range; |
| 1757 | |
| 1758 | user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; |
| 1759 | |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 1760 | ret = -EAGAIN; |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame^] | 1761 | if (atomic_read(&ctx->mmap_changing)) |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 1762 | goto out; |
| 1763 | |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1764 | ret = -EFAULT; |
| 1765 | if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, |
| 1766 | /* don't copy "zeropage" last field */ |
| 1767 | sizeof(uffdio_zeropage)-sizeof(__s64))) |
| 1768 | goto out; |
| 1769 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1770 | ret = validate_range(ctx->mm, uffdio_zeropage.range.start, |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1771 | uffdio_zeropage.range.len); |
| 1772 | if (ret) |
| 1773 | goto out; |
| 1774 | ret = -EINVAL; |
| 1775 | if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) |
| 1776 | goto out; |
| 1777 | |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1778 | if (mmget_not_zero(ctx->mm)) { |
| 1779 | ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, |
Mike Rapoport | df2cc96 | 2018-06-07 17:09:25 -0700 | [diff] [blame] | 1780 | uffdio_zeropage.range.len, |
| 1781 | &ctx->mmap_changing); |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1782 | mmput(ctx->mm); |
Mike Rapoport | 9d95aa4 | 2017-08-02 13:32:15 -0700 | [diff] [blame] | 1783 | } else { |
Mike Rapoport | e86b298 | 2017-08-10 15:24:32 -0700 | [diff] [blame] | 1784 | return -ESRCH; |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 1785 | } |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1786 | if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) |
| 1787 | return -EFAULT; |
| 1788 | if (ret < 0) |
| 1789 | goto out; |
| 1790 | /* len == 0 would wake all */ |
| 1791 | BUG_ON(!ret); |
| 1792 | range.len = ret; |
| 1793 | if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) { |
| 1794 | range.start = uffdio_zeropage.range.start; |
| 1795 | wake_userfault(ctx, &range); |
| 1796 | } |
| 1797 | ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN; |
| 1798 | out: |
| 1799 | return ret; |
| 1800 | } |
| 1801 | |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1802 | static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, |
| 1803 | unsigned long arg) |
| 1804 | { |
| 1805 | int ret; |
| 1806 | struct uffdio_writeprotect uffdio_wp; |
| 1807 | struct uffdio_writeprotect __user *user_uffdio_wp; |
| 1808 | struct userfaultfd_wake_range range; |
Peter Xu | 23080e2 | 2020-04-06 20:06:20 -0700 | [diff] [blame] | 1809 | bool mode_wp, mode_dontwake; |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1810 | |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame^] | 1811 | if (atomic_read(&ctx->mmap_changing)) |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1812 | return -EAGAIN; |
| 1813 | |
| 1814 | user_uffdio_wp = (struct uffdio_writeprotect __user *) arg; |
| 1815 | |
| 1816 | if (copy_from_user(&uffdio_wp, user_uffdio_wp, |
| 1817 | sizeof(struct uffdio_writeprotect))) |
| 1818 | return -EFAULT; |
| 1819 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1820 | ret = validate_range(ctx->mm, uffdio_wp.range.start, |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1821 | uffdio_wp.range.len); |
| 1822 | if (ret) |
| 1823 | return ret; |
| 1824 | |
| 1825 | if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE | |
| 1826 | UFFDIO_WRITEPROTECT_MODE_WP)) |
| 1827 | return -EINVAL; |
Peter Xu | 23080e2 | 2020-04-06 20:06:20 -0700 | [diff] [blame] | 1828 | |
| 1829 | mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP; |
| 1830 | mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE; |
| 1831 | |
| 1832 | if (mode_wp && mode_dontwake) |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1833 | return -EINVAL; |
| 1834 | |
| 1835 | ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start, |
Peter Xu | 23080e2 | 2020-04-06 20:06:20 -0700 | [diff] [blame] | 1836 | uffdio_wp.range.len, mode_wp, |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1837 | &ctx->mmap_changing); |
| 1838 | if (ret) |
| 1839 | return ret; |
| 1840 | |
Peter Xu | 23080e2 | 2020-04-06 20:06:20 -0700 | [diff] [blame] | 1841 | if (!mode_wp && !mode_dontwake) { |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1842 | range.start = uffdio_wp.range.start; |
| 1843 | range.len = uffdio_wp.range.len; |
| 1844 | wake_userfault(ctx, &range); |
| 1845 | } |
| 1846 | return ret; |
| 1847 | } |
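/*
 * For illustration (not part of this file): a plausible userspace
 * sequence that write-protects a registered range and later clears the
 * protection; per the logic above, clearing it without
 * UFFDIO_WRITEPROTECT_MODE_DONTWAKE also wakes the blocked faulters.
 * "uffd", "start" and "len" are caller-supplied, error handling is
 * elided.
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = start, .len = len },
 *		.mode  = UFFDIO_WRITEPROTECT_MODE_WP,
 *	};
 *	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
 *
 *	wp.mode = 0;
 *	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
 */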
| 1848 | |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 1849 | static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) |
| 1850 | { |
| 1851 | __s64 ret; |
| 1852 | struct uffdio_continue uffdio_continue; |
| 1853 | struct uffdio_continue __user *user_uffdio_continue; |
| 1854 | struct userfaultfd_wake_range range; |
| 1855 | |
| 1856 | user_uffdio_continue = (struct uffdio_continue __user *)arg; |
| 1857 | |
| 1858 | ret = -EAGAIN; |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame^] | 1859 | if (atomic_read(&ctx->mmap_changing)) |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 1860 | goto out; |
| 1861 | |
| 1862 | ret = -EFAULT; |
| 1863 | if (copy_from_user(&uffdio_continue, user_uffdio_continue, |
| 1864 | /* don't copy the output fields */ |
| 1865 | sizeof(uffdio_continue) - (sizeof(__s64)))) |
| 1866 | goto out; |
| 1867 | |
Peter Collingbourne | e71e2ac | 2021-07-23 15:50:01 -0700 | [diff] [blame] | 1868 | ret = validate_range(ctx->mm, uffdio_continue.range.start, |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 1869 | uffdio_continue.range.len); |
| 1870 | if (ret) |
| 1871 | goto out; |
| 1872 | |
| 1873 | ret = -EINVAL; |
| 1874 | /* double check for wraparound just in case. */ |
| 1875 | if (uffdio_continue.range.start + uffdio_continue.range.len <= |
| 1876 | uffdio_continue.range.start) { |
| 1877 | goto out; |
| 1878 | } |
| 1879 | if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE) |
| 1880 | goto out; |
| 1881 | |
| 1882 | if (mmget_not_zero(ctx->mm)) { |
| 1883 | ret = mcopy_continue(ctx->mm, uffdio_continue.range.start, |
| 1884 | uffdio_continue.range.len, |
| 1885 | &ctx->mmap_changing); |
| 1886 | mmput(ctx->mm); |
| 1887 | } else { |
| 1888 | return -ESRCH; |
| 1889 | } |
| 1890 | |
| 1891 | if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) |
| 1892 | return -EFAULT; |
| 1893 | if (ret < 0) |
| 1894 | goto out; |
| 1895 | |
| 1896 | /* len == 0 would wake all */ |
| 1897 | BUG_ON(!ret); |
| 1898 | range.len = ret; |
| 1899 | if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) { |
| 1900 | range.start = uffdio_continue.range.start; |
| 1901 | wake_userfault(ctx, &range); |
| 1902 | } |
| 1903 | ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN; |
| 1904 | |
| 1905 | out: |
| 1906 | return ret; |
| 1907 | } |
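/*
 * For illustration (not part of this file): a plausible way to resolve
 * a minor fault from userspace, asking the kernel to map the page that
 * already exists in the page cache for this range.  "uffd",
 * "fault_addr" and "page_size" are caller-supplied, error handling is
 * elided.
 *
 *	struct uffdio_continue cont = {
 *		.range = { .start = fault_addr & ~(page_size - 1),
 *			   .len   = page_size },
 *		.mode  = 0,
 *	};
 *	ioctl(uffd, UFFDIO_CONTINUE, &cont);
 */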
| 1908 | |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 1909 | static inline unsigned int uffd_ctx_features(__u64 user_features) |
| 1910 | { |
| 1911 | /* |
| 1912 | * For the current set of features the bits just coincide |
| 1913 | */ |
| 1914 | return (unsigned int)user_features; |
| 1915 | } |
| 1916 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1917 | /* |
| 1918 | * userland asks for a certain API version and we return which bits |
| 1919 | * and ioctl commands are implemented in this kernel for such API |
| 1920 | * version or -EINVAL if unknown. |
| 1921 | */ |
| 1922 | static int userfaultfd_api(struct userfaultfd_ctx *ctx, |
| 1923 | unsigned long arg) |
| 1924 | { |
| 1925 | struct uffdio_api uffdio_api; |
| 1926 | void __user *buf = (void __user *)arg; |
| 1927 | int ret; |
Andrea Arcangeli | 6560314 | 2017-02-22 15:42:24 -0800 | [diff] [blame] | 1928 | __u64 features; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1929 | |
| 1930 | ret = -EINVAL; |
| 1931 | if (ctx->state != UFFD_STATE_WAIT_API) |
| 1932 | goto out; |
| 1933 | ret = -EFAULT; |
Andrea Arcangeli | a9b85f9 | 2015-09-04 15:46:37 -0700 | [diff] [blame] | 1934 | if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api))) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1935 | goto out; |
Andrea Arcangeli | 6560314 | 2017-02-22 15:42:24 -0800 | [diff] [blame] | 1936 | features = uffdio_api.features; |
Mike Rapoport | 3c1c24d | 2019-11-30 17:58:01 -0800 | [diff] [blame] | 1937 | ret = -EINVAL; |
| 1938 | if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) |
| 1939 | goto err_out; |
| 1940 | ret = -EPERM; |
| 1941 | if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE)) |
| 1942 | goto err_out; |
Andrea Arcangeli | 6560314 | 2017-02-22 15:42:24 -0800 | [diff] [blame] | 1943 | /* report all available features and ioctls to userland */ |
| 1944 | uffdio_api.features = UFFD_API_FEATURES; |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1945 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR |
Axel Rasmussen | 964ab00 | 2021-06-30 18:49:27 -0700 | [diff] [blame] | 1946 | uffdio_api.features &= |
| 1947 | ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM); |
Axel Rasmussen | 7677f7f | 2021-05-04 18:35:36 -0700 | [diff] [blame] | 1948 | #endif |
Peter Xu | 00b151f | 2021-06-30 18:49:06 -0700 | [diff] [blame] | 1949 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP |
| 1950 | uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1951 | #endif |
| 1952 | uffdio_api.ioctls = UFFD_API_IOCTLS; |
| 1953 | ret = -EFAULT; |
| 1954 | if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) |
| 1955 | goto out; |
| 1956 | ctx->state = UFFD_STATE_RUNNING; |
| 1957 | /* only enable the requested features for this uffd context */ |
| 1958 | ctx->features = uffd_ctx_features(features); |
Andrea Arcangeli | e6485a4 | 2015-09-04 15:47:15 -0700 | [diff] [blame] | 1959 | ret = 0; |
| 1960 | out: |
| 1961 | return ret; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1962 | err_out: |
| 1963 | memset(&uffdio_api, 0, sizeof(uffdio_api)); |
| 1964 | if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) |
| 1965 | ret = -EFAULT; |
| 1966 | goto out; |
| 1967 | } |
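/*
 * For illustration (not part of this file): the handshake as it might
 * look from userspace.  A new uffd must negotiate UFFD_API before any
 * other ioctl is accepted, and only the features requested here are
 * enabled on the context.  Error handling is elided.
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *
 *	if (ioctl(uffd, UFFDIO_API, &api))
 *		err(1, "UFFDIO_API");
 */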
| 1968 | |
| 1969 | static long userfaultfd_ioctl(struct file *file, unsigned cmd, |
| 1970 | unsigned long arg) |
| 1971 | { |
| 1972 | int ret = -EINVAL; |
| 1973 | struct userfaultfd_ctx *ctx = file->private_data; |
| 1974 | |
Andrea Arcangeli | ad465cae | 2015-09-04 15:47:11 -0700 | [diff] [blame] | 1975 | if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API) |
| 1976 | return -EINVAL; |
| 1977 | |
| 1978 | switch(cmd) { |
| 1979 | case UFFDIO_API: |
| 1980 | ret = userfaultfd_api(ctx, arg); |
Andrea Arcangeli | 63b2d41 | 2020-04-06 20:06:12 -0700 | [diff] [blame] | 1981 | break; |
| 1982 | case UFFDIO_REGISTER: |
| 1983 | ret = userfaultfd_register(ctx, arg); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1984 | break; |
| 1985 | case UFFDIO_UNREGISTER: |
| 1986 | ret = userfaultfd_unregister(ctx, arg); |
| 1987 | break; |
| 1988 | case UFFDIO_WAKE: |
| 1989 | ret = userfaultfd_wake(ctx, arg); |
| 1990 | break; |
| 1991 | case UFFDIO_COPY: |
Ingo Molnar | ac6424b | 2017-06-20 12:06:13 +0200 | [diff] [blame] | 1992 | ret = userfaultfd_copy(ctx, arg); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1993 | break; |
| 1994 | case UFFDIO_ZEROPAGE: |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 1995 | ret = userfaultfd_zeropage(ctx, arg); |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 1996 | break; |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 1997 | case UFFDIO_WRITEPROTECT: |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 1998 | ret = userfaultfd_writeprotect(ctx, arg); |
| 1999 | break; |
Axel Rasmussen | f619147 | 2021-05-04 18:35:49 -0700 | [diff] [blame] | 2000 | case UFFDIO_CONTINUE: |
| 2001 | ret = userfaultfd_continue(ctx, arg); |
| 2002 | break; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2003 | } |
| 2004 | return ret; |
| 2005 | } |
| 2006 | |
| 2007 | #ifdef CONFIG_PROC_FS |
| 2008 | static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) |
| 2009 | { |
| 2010 | struct userfaultfd_ctx *ctx = f->private_data; |
| 2011 | wait_queue_entry_t *wq; |
| 2012 | unsigned long pending = 0, total = 0; |
| 2013 | |
| 2014 | spin_lock_irq(&ctx->fault_pending_wqh.lock); |
| 2015 | list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { |
| 2016 | pending++; |
| 2017 | total++; |
| 2018 | } |
Ingo Molnar | 2055da9 | 2017-06-20 12:06:46 +0200 | [diff] [blame] | 2019 | list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { |
Andrea Arcangeli | 15b726e | 2015-09-04 15:46:44 -0700 | [diff] [blame] | 2020 | total++; |
| 2021 | } |
Eric Biggers | cbcfa13 | 2019-07-04 15:14:39 -0700 | [diff] [blame] | 2022 | spin_unlock_irq(&ctx->fault_pending_wqh.lock); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2023 | |
| 2024 | /* |
| 2025 | * If more protocols are added, they will all be shown |
| 2026 | * separated by a space. Like this: |
| 2027 | * protocols: aa:... bb:... |
| 2028 | */ |
| 2029 | seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", |
Mike Rapoport | 045098e | 2017-04-07 16:04:42 -0700 | [diff] [blame] | 2030 | pending, total, UFFD_API, ctx->features, |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2031 | UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); |
| 2032 | } |
| 2033 | #endif |
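/*
 * For reference, the /proc/<pid>/fdinfo/<fd> entry produced by the handler
 * above looks roughly like the sketch below (numeric values are illustrative
 * only; the three colon-separated API fields are UFFD_API, the enabled
 * features and the supported ioctl bitmask, and the first three lines come
 * from the generic fdinfo code rather than this file):
 *
 *	pos:	0
 *	flags:	02
 *	mnt_id:	15
 *	pending:	0
 *	total:	0
 *	API:	aa:0:<ioctl bitmask>
 */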
| 2034 | |
| 2035 | static const struct file_operations userfaultfd_fops = { |
| 2036 | #ifdef CONFIG_PROC_FS |
| 2037 | .show_fdinfo = userfaultfd_show_fdinfo, |
| 2038 | #endif |
| 2039 | .release = userfaultfd_release, |
| 2040 | .poll = userfaultfd_poll, |
| 2041 | .read = userfaultfd_read, |
| 2042 | .unlocked_ioctl = userfaultfd_ioctl, |
Arnd Bergmann | 1832f2d | 2018-09-11 21:59:08 +0200 | [diff] [blame] | 2043 | .compat_ioctl = compat_ptr_ioctl, |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2044 | .llseek = noop_llseek, |
| 2045 | }; |
| 2046 | |
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2047 | static void init_once_userfaultfd_ctx(void *mem) |
| 2048 | { |
| 2049 | struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; |
| 2050 | |
| 2051 | init_waitqueue_head(&ctx->fault_pending_wqh); |
| 2052 | init_waitqueue_head(&ctx->fault_wqh); |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 2053 | init_waitqueue_head(&ctx->event_wqh); |
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2054 | init_waitqueue_head(&ctx->fd_wqh); |
Ahmed S. Darwish | 2ca97ac | 2020-07-20 17:55:28 +0200 | [diff] [blame] | 2055 | seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock); |
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2056 | } |
| 2057 | |
Eric Biggers | 284cd24 | 2018-01-31 16:19:48 -0800 | [diff] [blame] | 2058 | SYSCALL_DEFINE1(userfaultfd, int, flags) |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2059 | { |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2060 | struct userfaultfd_ctx *ctx; |
Eric Biggers | 284cd24 | 2018-01-31 16:19:48 -0800 | [diff] [blame] | 2061 | int fd; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2062 | |
Lokesh Gidra | d0d4730 | 2020-12-14 19:13:54 -0800 | [diff] [blame] | 2063 | if (!sysctl_unprivileged_userfaultfd && |
| 2064 | (flags & UFFD_USER_MODE_ONLY) == 0 && |
| 2065 | !capable(CAP_SYS_PTRACE)) { |
| 2066 | printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd " |
| 2067 | "sysctl knob to 1 if kernel faults must be handled " |
| 2068 | "without obtaining CAP_SYS_PTRACE capability\n"); |
Peter Xu | cefdca0 | 2019-05-13 17:16:41 -0700 | [diff] [blame] | 2069 | return -EPERM; |
Lokesh Gidra | d0d4730 | 2020-12-14 19:13:54 -0800 | [diff] [blame] | 2070 | } |
Peter Xu | cefdca0 | 2019-05-13 17:16:41 -0700 | [diff] [blame] | 2071 | |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2072 | BUG_ON(!current->mm); |
| 2073 | |
| 2074 | /* Check the UFFD_* constants for consistency. */ |
Lokesh Gidra | 37cd057 | 2020-12-14 19:13:49 -0800 | [diff] [blame] | 2075 | BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2076 | BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC); |
| 2077 | BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK); |
| 2078 | |
Lokesh Gidra | 37cd057 | 2020-12-14 19:13:49 -0800 | [diff] [blame] | 2079 | if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY)) |
Eric Biggers | 284cd24 | 2018-01-31 16:19:48 -0800 | [diff] [blame] | 2080 | return -EINVAL; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2081 | |
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2082 | ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2083 | if (!ctx) |
Eric Biggers | 284cd24 | 2018-01-31 16:19:48 -0800 | [diff] [blame] | 2084 | return -ENOMEM; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2085 | |
Eric Biggers | ca88042 | 2018-12-28 00:34:43 -0800 | [diff] [blame] | 2086 | refcount_set(&ctx->refcount, 1); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2087 | ctx->flags = flags; |
Pavel Emelyanov | 9cd75c3 | 2017-02-22 15:42:21 -0800 | [diff] [blame] | 2088 | ctx->features = 0; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2089 | ctx->state = UFFD_STATE_WAIT_API; |
| 2090 | ctx->released = false; |
Nadav Amit | a759a90 | 2021-09-02 14:58:56 -0700 | [diff] [blame^] | 2091 | atomic_set(&ctx->mmap_changing, 0); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2092 | ctx->mm = current->mm; |
| 2093 | 	/* prevent the mm struct from being freed */ |
Vegard Nossum | f1f1007 | 2017-02-27 14:30:07 -0800 | [diff] [blame] | 2094 | mmgrab(ctx->mm); |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2095 | |
Daniel Colascione | b537900 | 2021-01-08 14:22:23 -0800 | [diff] [blame] | 2096 | fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx, |
| 2097 | O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL); |
Eric Biggers | 284cd24 | 2018-01-31 16:19:48 -0800 | [diff] [blame] | 2098 | if (fd < 0) { |
Oleg Nesterov | d2005e3 | 2016-05-20 16:58:36 -0700 | [diff] [blame] | 2099 | mmdrop(ctx->mm); |
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2100 | kmem_cache_free(userfaultfd_ctx_cachep, ctx); |
Eric Biggers | c03e946 | 2015-09-17 16:01:54 -0700 | [diff] [blame] | 2101 | } |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2102 | return fd; |
Andrea Arcangeli | 86039bd | 2015-09-04 15:46:31 -0700 | [diff] [blame] | 2103 | } |
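/*
 * Illustrative user-space sketch, again not part of this kernel file: glibc
 * provides no dedicated wrapper for userfaultfd(2), so callers typically go
 * through syscall(2).  With the vm.unprivileged_userfaultfd sysctl left at 0
 * and without CAP_SYS_PTRACE, the permission check above requires the
 * UFFD_USER_MODE_ONLY flag.  The helper name is hypothetical.
 */
#include <linux/userfaultfd.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <err.h>

static int open_userfaultfd(void)
{
	int uffd = syscall(__NR_userfaultfd,
			   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);

	if (uffd == -1)
		err(1, "userfaultfd");

	return uffd;
}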
Andrea Arcangeli | 3004ec9 | 2015-09-04 15:46:48 -0700 | [diff] [blame] | 2104 | |
| 2105 | static int __init userfaultfd_init(void) |
| 2106 | { |
| 2107 | userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache", |
| 2108 | sizeof(struct userfaultfd_ctx), |
| 2109 | 0, |
| 2110 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, |
| 2111 | init_once_userfaultfd_ctx); |
| 2112 | return 0; |
| 2113 | } |
| 2114 | __initcall(userfaultfd_init); |