/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and we can simply return. In order for this
 * optimization to work, ordering guarantees must exist so that the waiter
 * being added to the list is acknowledged when the list is concurrently being
 * checked by the waker, avoiding scenarios like the following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when it is possible that
 * the wait call can return an error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock, and decrement them again after releasing it -
 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
 * will do the additional required waiter count housekeeping. This is done for
 * double_lock_hb() and double_unlock_hb(), respectively.
 */

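/*
 * For readers new to this code, an illustrative user space sketch of the
 * protocol the ordering above protects. The names mtx_lock()/mtx_unlock()
 * and the simplistic 0/1 state encoding are hypothetical, not part of this
 * file; real implementations (e.g. glibc's) use a richer state machine.
 * (Assumes <linux/futex.h>, <sys/syscall.h> and <unistd.h>.)
 *
 *	static int futex(int *uaddr, int op, int val)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
 *	}
 *
 *	static void mtx_lock(int *f)
 *	{
 *		while (__sync_val_compare_and_swap(f, 0, 1) != 0) {
 *			// futex_wait() re-checks *f under hb->lock and
 *			// returns EWOULDBLOCK if it is no longer 1.
 *			futex(f, FUTEX_WAIT, 1);
 *		}
 *	}
 *
 *	static void mtx_unlock(int *f)
 *	{
 *		__sync_lock_release(f);		// *f = 0, release semantics
 *		futex(f, FUTEX_WAKE, 1);	// may return early if no waiters
 *	}
 */
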
#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
static int  __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

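/*
 * These flags are derived from the syscall's op word; a condensed sketch of
 * how do_futex() (later in this file) decodes them:
 *
 *	unsigned int flags = 0;
 *
 *	if (!(op & FUTEX_PRIVATE_FLAG))
 *		flags |= FLAGS_SHARED;
 *	if (op & FUTEX_CLOCK_REALTIME)
 *		flags |= FLAGS_CLOCKRT;
 *
 * Timeout-taking commands additionally set FLAGS_HAS_TIMEOUT in the restart
 * block, so a restarted syscall knows to honour the remaining timeout.
 */
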
/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
} __randomize_layout;

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
} __randomize_layout;

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me() */
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

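/*
 * Rough lifecycle of a futex_q on the wait side (a condensed sketch; see
 * futex_wait() and its helpers later in this file for the real sequence,
 * including error handling):
 *
 *	struct futex_q q = futex_q_init;
 *
 *	get_futex_key(uaddr, ..., &q.key, FUTEX_READ);
 *	hb = queue_lock(&q);	// hash the key, lock bucket, waiters++
 *	// re-read *uaddr under hb->lock; bail out if it changed
 *	queue_me(&q, hb);	// plist_add() + set q.lock_ptr, drop hb->lock
 *	schedule();		// until a waker does plist_del() + wake
 *	unqueue_me(&q);		// no-op if a waker already removed us
 */
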
/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)


/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-private", mode, dir,
				 &fail_futex.ignore_private)) {
		debugfs_remove_recursive(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */

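/*
 * Usage sketch for the fault injection knobs above (the generic fault-attr
 * format is documented under Documentation/fault-injection/; shown here as
 * an illustration and not verified against every kernel version):
 *
 *	# boot with: fail_futex=<interval>,<probability>,<space>,<times>
 *	fail_futex=1,100,0,-1
 *
 *	# or configure at runtime via debugfs:
 *	echo 100 > /sys/kernel/debug/fail_futex/probability
 *	echo -1  > /sys/kernel/debug/fail_futex/times
 *	echo 1   > /sys/kernel/debug/fail_futex/ignore-private
 */
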
static inline void futex_get_mm(union futex_key *key)
{
	mmgrab(key->private.mm);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}

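/*
 * Worked example (illustrative): for a private futex, get_futex_key() below
 * fills key->private = { .mm = current->mm, .address = page-aligned uaddr }
 * and key->both.offset = uaddr % PAGE_SIZE. Two threads of one process
 * waiting on the same address therefore jhash2() the same (word, ptr,
 * offset) triple and land in the same bucket:
 *
 *	struct futex_hash_bucket *hb1 = hash_futex(&k1);
 *	struct futex_hash_bucket *hb2 = hash_futex(&k2);
 *	// match_futex(&k1, &k2) implies hb1 == hb2
 */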

/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU less systems futexes are always "private" as there is no per
	 * process address space. We need the smp wmb nevertheless - yes,
	 * arch/blackfin has MMU less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

enum futex_access {
	FUTEX_READ,
	FUTEX_WRITE
};

/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page). For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, 1, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and the page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * the page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail). And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel which is almost certainly an
		 * application bug. In such a case, just retry.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (!atomic_inc_not_zero(&inode->i_count)) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but let's be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}

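/*
 * Key construction at a glance (illustrative): for the same user address,
 * the resulting key - and therefore which tasks can match each other -
 * differs by mapping type:
 *
 *	u32 __user *uaddr = ...;
 *	union futex_key key = FUTEX_KEY_INIT;
 *
 *	get_futex_key(uaddr, 0, &key, FUTEX_READ);
 *	// private: key = { current->mm, page-aligned address, offset }
 *
 *	get_futex_key(uaddr, 1, &key, FUTEX_READ);
 *	// shared, file-backed: { inode, page index, offset|FUT_OFF_INODE }
 *	// shared, anonymous:   mm-based key with offset|FUT_OFF_MMSHARED
 */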
static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}

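/*
 * The canonical caller pattern (a condensed sketch of what e.g.
 * futex_lock_pi() does further down): attempt the futex word update with
 * pagefaults disabled, and on -EFAULT drop the hb lock, fault the page in
 * writable via this helper, then restart the whole operation:
 *
 *	ret = futex_lock_pi_atomic(uaddr, hb, ...);
 *	if (ret == -EFAULT) {
 *		queue_unlock(hb);		// cannot fault with hb->lock held
 *		if (fault_in_user_writeable(uaddr))
 *			goto out;		// real fault, give up
 *		goto retry;			// page is present now, retry
 *	}
 */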
/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futexes' futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void get_pi_state(struct futex_pi_state *pi_state)
{
	WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		struct task_struct *owner;

		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		owner = pi_state->owner;
		if (owner) {
			raw_spin_lock(&owner->pi_lock);
			list_del_init(&pi_state->list);
			raw_spin_unlock(&owner->pi_lock);
		}
		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	}

	if (current->pi_state_cache) {
		kfree(pi_state);
	} else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

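/*
 * Reference-count choreography in one place (an illustrative summary of the
 * helpers above): each task pre-allocates one cached pi_state so that the
 * lock-acquisition path never has to allocate while atomically owning the
 * futex word:
 *
 *	refill_pi_state_cache();	// may sleep: kzalloc(GFP_KERNEL)
 *	...
 *	pi_state = alloc_pi_state();	// hand over the cached object, ref=1
 *	get_pi_state(pi_state);		// +1 per additional user (waiter etc.)
 *	put_pi_state(pi_state);		// last put frees it - or parks it
 *					// back in current->pi_state_cache
 */
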
#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * it's possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!atomic_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}

#endif

/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *	thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex.
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list().
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match.
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4].
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 *
 *
 * Serialization and lifetime rules:
 *
 * hb->lock:
 *
 *	hb -> futex_q, relation
 *	futex_q -> pi_state, relation
 *
 *	(cannot be raw because hb can contain arbitrary amount
 *	 of futex_q's)
 *
 * pi_mutex->wait_lock:
 *
 *	{uval, pi_state}
 *
 *	(and pi_mutex 'obviously')
 *
 * p->pi_lock:
 *
 *	p->pi_state_list -> pi_state->list, relation
 *
 * pi_state->refcount:
 *
 *	pi_state lifetime
 *
 *
 * Lock order:
 *
 *   hb->lock
 *     pi_mutex->wait_lock
 *       p->pi_lock
 *
 */

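/*
 * The lock order above in practice (a condensed sketch of what
 * exit_pi_state_list() above does when tearing a pi_state down - the
 * nesting is always hb->lock, then wait_lock, then pi_lock, and the
 * unlocks run in reverse):
 *
 *	spin_lock(&hb->lock);
 *	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 *	raw_spin_lock(&curr->pi_lock);
 *	...
 *	raw_spin_unlock(&curr->pi_lock);
 *	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 *	spin_unlock(&hb->lock);
 */
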
/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
			      struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	u32 uval2;
	int ret;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	/*
	 * We get here with hb->lock held, and having found a
	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
	 * which in turn means that futex_lock_pi() still has a reference on
	 * our pi_state.
	 *
	 * The waiter holding a reference on @pi_state also protects against
	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
	 * free pi_state before we can take a reference ourselves.
	 */
	WARN_ON(!atomic_read(&pi_state->refcount));

	/*
	 * Now that we have a pi_state, we can acquire wait_lock
	 * and do the state validation.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Since {uval, pi_state} is serialized by wait_lock, and our current
	 * uval was read without holding it, it can have changed. Verify it
	 * still is what we expect it to be, otherwise retry the entire
	 * operation.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		goto out_efault;

	if (uval != uval2)
		goto out_eagain;

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				goto out_einval;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_attach;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_attach;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			goto out_einval;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		goto out_einval;

out_attach:
	get_pi_state(pi_state);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	*ps = pi_state;
	return 0;

out_einval:
	ret = -EINVAL;
	goto out_error;

out_eagain:
	ret = -EAGAIN;
	goto out_error;

out_efault:
	ret = -EFAULT;
	goto out_error;

out_error:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}

static int handle_exit_race(u32 __user *uaddr, u32 uval,
			    struct task_struct *tsk)
{
	u32 uval2;

	/*
	 * If PF_EXITPIDONE is not yet set, then try again.
	 */
	if (tsk && !(tsk->flags & PF_EXITPIDONE))
		return -EAGAIN;

	/*
	 * Reread the user space value to handle the following situation:
	 *
	 * CPU0				CPU1
	 *
	 * sys_exit()			sys_futex()
	 *  do_exit()			 futex_lock_pi()
	 *                                futex_lock_pi_atomic()
	 *   exit_signals(tsk)		    No waiters:
	 *   tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
	 *  mm_release(tsk)		    Set waiter bit
	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
	 *      Set owner died		    attach_to_pi_owner() {
	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
	 *   }				     if (!tsk->flags & PF_EXITING) {
	 *  ...				       attach();
	 *  tsk->flags |= PF_EXITPIDONE;     } else {
	 *				       if (!(tsk->flags & PF_EXITPIDONE))
	 *				         return -EAGAIN;
	 *				       return -ESRCH; <--- FAIL
	 *				     }
	 *
	 * Returning ESRCH unconditionally is wrong here because the
	 * user space value has been changed by the exiting task.
	 *
	 * The same logic applies to the case where the exiting task is
	 * already gone.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		return -EFAULT;

	/* If the user space value has changed, try again. */
	if (uval2 != uval)
		return -EAGAIN;

	/*
	 * The exiting task did not have a robust list, the robust list was
	 * corrupted or the user space value in *uaddr is simply bogus.
	 * Give up and tell user space.
	 */
	return -ESRCH;
}

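/*
 * A reminder of the PI futex word layout that the two functions above are
 * validating against (as defined in include/uapi/linux/futex.h):
 *
 *	#define FUTEX_WAITERS		0x80000000	// kernel has waiters
 *	#define FUTEX_OWNER_DIED	0x40000000	// owner exited uncleanly
 *	#define FUTEX_TID_MASK		0x3fffffff	// TID of the owner
 *
 * So in the handle_exit_race() diagram, 0x00000PID is an uncontended lock,
 * 0x80000PID is the same lock with waiters, and 0xC0000000 is a dead owner
 * with waiters and the TID cleared by exit_robust_list().
 */
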
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001213/*
1214 * Lookup the task for the TID provided from user space and attach to
1215 * it after doing proper sanity checks.
1216 */
Thomas Gleixnerda791a62018-12-10 14:35:14 +01001217static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001218 struct futex_pi_state **ps)
Ingo Molnarc87e2832006-06-27 02:54:58 -07001219{
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001220 pid_t pid = uval & FUTEX_TID_MASK;
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001221 struct futex_pi_state *pi_state;
1222 struct task_struct *p;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001223
1224 /*
Ingo Molnare3f2dde2006-07-29 05:17:57 +02001225 * We are the first waiter - try to look up the real owner and attach
Thomas Gleixner54a21782014-06-03 12:27:08 +00001226 * the new pi_state to it, but bail out when TID = 0 [1]
Thomas Gleixnerda791a62018-12-10 14:35:14 +01001227 *
1228 * The !pid check is paranoid. None of the call sites should end up
1229 * with pid == 0, but better safe than sorry. Let the caller retry
Ingo Molnarc87e2832006-06-27 02:54:58 -07001230 */
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001231 if (!pid)
Thomas Gleixnerda791a62018-12-10 14:35:14 +01001232 return -EAGAIN;
Mike Rapoport2ee08262018-02-06 15:40:17 -08001233 p = find_get_task_by_vpid(pid);
Michal Hocko7a0ea092010-06-30 09:51:19 +02001234 if (!p)
Thomas Gleixnerda791a62018-12-10 14:35:14 +01001235 return handle_exit_race(uaddr, uval, NULL);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001236
Oleg Nesterova2129462015-02-02 15:05:36 +01001237 if (unlikely(p->flags & PF_KTHREAD)) {
Thomas Gleixnerf0d71b32014-05-12 20:45:35 +00001238 put_task_struct(p);
1239 return -EPERM;
1240 }
1241
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001242 /*
1243 * We need to look at the task state flags to figure out,
1244 * whether the task is exiting. To protect against the do_exit
1245 * change of the task flags, we do this protected by
1246 * p->pi_lock:
1247 */
Thomas Gleixner1d615482009-11-17 14:54:03 +01001248 raw_spin_lock_irq(&p->pi_lock);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001249 if (unlikely(p->flags & PF_EXITING)) {
1250 /*
1251 * The task is on the way out. When PF_EXITPIDONE is
1252 * set, we know that the task has finished the
1253 * cleanup:
1254 */
Thomas Gleixnerda791a62018-12-10 14:35:14 +01001255 int ret = handle_exit_race(uaddr, uval, p);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001256
Thomas Gleixner1d615482009-11-17 14:54:03 +01001257 raw_spin_unlock_irq(&p->pi_lock);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001258 put_task_struct(p);
1259 return ret;
1260 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07001261
Thomas Gleixner54a21782014-06-03 12:27:08 +00001262 /*
1263 * No existing pi state. First waiter. [2]
Peter Zijlstra734009e2017-03-22 11:35:52 +01001264 *
1265 * This creates pi_state, we have hb->lock held, this means nothing can
1266 * observe this state, wait_lock is irrelevant.
Thomas Gleixner54a21782014-06-03 12:27:08 +00001267 */
Ingo Molnarc87e2832006-06-27 02:54:58 -07001268 pi_state = alloc_pi_state();
1269
1270 /*
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001271 * Initialize the pi_mutex in locked state and make @p
Ingo Molnarc87e2832006-06-27 02:54:58 -07001272 * the owner of it:
1273 */
1274 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1275
1276 /* Store the key for possible exit cleanups: */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001277 pi_state->key = *key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001278
Ingo Molnar627371d2006-07-29 05:16:20 +02001279 WARN_ON(!list_empty(&pi_state->list));
Ingo Molnarc87e2832006-06-27 02:54:58 -07001280 list_add(&pi_state->list, &p->pi_state_list);
Peter Zijlstrac74aef22017-09-22 17:48:06 +02001281 /*
1282 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
1283 * because there is no concurrency as the object is not published yet.
1284 */
Ingo Molnarc87e2832006-06-27 02:54:58 -07001285 pi_state->owner = p;
Thomas Gleixner1d615482009-11-17 14:54:03 +01001286 raw_spin_unlock_irq(&p->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001287
1288 put_task_struct(p);
1289
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001290 *ps = pi_state;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001291
1292 return 0;
1293}
1294
Peter Zijlstra734009e2017-03-22 11:35:52 +01001295static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1296 struct futex_hash_bucket *hb,
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001297 union futex_key *key, struct futex_pi_state **ps)
1298{
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01001299 struct futex_q *top_waiter = futex_top_waiter(hb, key);
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001300
1301 /*
1302 * If there is a waiter on that futex, validate it and
1303 * attach to the pi_state when the validation succeeds.
1304 */
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01001305 if (top_waiter)
Peter Zijlstra734009e2017-03-22 11:35:52 +01001306 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001307
1308 /*
1309 * We are the first waiter - try to look up the owner based on
1310 * @uval and attach to it.
1311 */
Thomas Gleixnerda791a62018-12-10 14:35:14 +01001312 return attach_to_pi_owner(uaddr, uval, key, ps);
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001313}
1314
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001315static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1316{
1317 u32 uninitialized_var(curval);
1318
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001319 if (unlikely(should_fail_futex(true)))
1320 return -EFAULT;
1321
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001322 if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
1323 return -EFAULT;
1324
Peter Zijlstra734009e2017-03-22 11:35:52 +01001325 /* If user space value changed, let the caller retry */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001326 return curval != uval ? -EAGAIN : 0;
1327}
1328
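/*
 * Illustrative sketch, not kernel code: the same compare-and-exchange
 * contract as lock_pi_update_atomic() above, expressed with a GCC
 * builtin as user space would do it. On failure *expected is updated
 * to the value actually observed, so the caller can retry. The
 * function name is hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static bool example_futex_cmpxchg(uint32_t *uaddr, uint32_t *expected,
				  uint32_t newval)
{
	return __atomic_compare_exchange_n(uaddr, expected, newval, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}
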
Darren Hart1a520842009-04-03 13:39:52 -07001329/**
Darren Hartd96ee562009-09-21 22:30:22 -07001330 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
Darren Hartbab5bc92009-04-07 23:23:50 -07001331 * @uaddr: the pi futex user address
1332 * @hb: the pi futex hash bucket
1333 * @key: the futex key associated with uaddr and hb
1334 * @ps: the pi_state pointer where we store the result of the
1335 * lookup
1336 * @task: the task to perform the atomic lock work for. This will
1337 * be "current" except in the case of requeue pi.
1338 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
Darren Hart1a520842009-04-03 13:39:52 -07001339 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08001340 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03001341 * - 0 - ready to wait;
1342 * - 1 - acquired the lock;
1343 * - <0 - error
Darren Hart1a520842009-04-03 13:39:52 -07001344 *
1345 * The hb->lock and futex_key refs shall be held by the caller.
1346 */
1347static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1348 union futex_key *key,
1349 struct futex_pi_state **ps,
Darren Hartbab5bc92009-04-07 23:23:50 -07001350 struct task_struct *task, int set_waiters)
Darren Hart1a520842009-04-03 13:39:52 -07001351{
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001352 u32 uval, newval, vpid = task_pid_vnr(task);
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01001353 struct futex_q *top_waiter;
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001354 int ret;
Darren Hart1a520842009-04-03 13:39:52 -07001355
1356 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001357 * Read the user space value first so we can validate a few
1358 * things before proceeding further.
Darren Hart1a520842009-04-03 13:39:52 -07001359 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001360 if (get_futex_value_locked(&uval, uaddr))
Darren Hart1a520842009-04-03 13:39:52 -07001361 return -EFAULT;
1362
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001363 if (unlikely(should_fail_futex(true)))
1364 return -EFAULT;
1365
Darren Hart1a520842009-04-03 13:39:52 -07001366 /*
1367 * Detect deadlocks.
1368 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001369 if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
Darren Hart1a520842009-04-03 13:39:52 -07001370 return -EDEADLK;
1371
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001372 if ((unlikely(should_fail_futex(true))))
1373 return -EDEADLK;
1374
Darren Hart1a520842009-04-03 13:39:52 -07001375 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001376 * Lookup existing state first. If it exists, try to attach to
1377 * its pi_state.
Darren Hart1a520842009-04-03 13:39:52 -07001378 */
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01001379 top_waiter = futex_top_waiter(hb, key);
1380 if (top_waiter)
Peter Zijlstra734009e2017-03-22 11:35:52 +01001381 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001382
1383 /*
1384	 * No waiter and the user space TID is 0. We are here because
1385	 * the waiters bit or the owner died bit is set, we were called
1386	 * from requeue_cmp_pi, or something else entered the syscall.
1388 */
1389 if (!(uval & FUTEX_TID_MASK)) {
Thomas Gleixnerb3eaa9f2014-06-03 12:27:06 +00001390 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001391 * We take over the futex. No other waiters and the user space
1392 * TID is 0. We preserve the owner died bit.
Thomas Gleixnerb3eaa9f2014-06-03 12:27:06 +00001393 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001394 newval = uval & FUTEX_OWNER_DIED;
1395 newval |= vpid;
1396
1397 /* The futex requeue_pi code can enforce the waiters bit */
1398 if (set_waiters)
1399 newval |= FUTEX_WAITERS;
1400
1401 ret = lock_pi_update_atomic(uaddr, uval, newval);
1402 /* If the take over worked, return 1 */
1403 return ret < 0 ? ret : 1;
Thomas Gleixnerb3eaa9f2014-06-03 12:27:06 +00001404 }
Darren Hart1a520842009-04-03 13:39:52 -07001405
Darren Hart1a520842009-04-03 13:39:52 -07001406 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001407	 * First waiter. Set the waiters bit before attaching ourselves to
1408 * the owner. If owner tries to unlock, it will be forced into
1409 * the kernel and blocked on hb->lock.
Darren Hart1a520842009-04-03 13:39:52 -07001410 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001411 newval = uval | FUTEX_WAITERS;
1412 ret = lock_pi_update_atomic(uaddr, uval, newval);
1413 if (ret)
1414 return ret;
Darren Hart1a520842009-04-03 13:39:52 -07001415 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001416 * If the update of the user space value succeeded, we try to
1417	 * attach to the owner. If that fails, no harm done; we only
1418 * set the FUTEX_WAITERS bit in the user space variable.
Darren Hart1a520842009-04-03 13:39:52 -07001419 */
Thomas Gleixnerda791a62018-12-10 14:35:14 +01001420 return attach_to_pi_owner(uaddr, newval, key, ps);
Darren Hart1a520842009-04-03 13:39:52 -07001421}
1422
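/*
 * Illustrative sketch, not kernel code: the user space fast path that
 * pairs with futex_lock_pi_atomic() above. An uncontended PI lock is a
 * single cmpxchg of 0 -> TID; only on contention does user space enter
 * the kernel with FUTEX_LOCK_PI. The example_ name is hypothetical.
 */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_pi_lock(uint32_t *uaddr)
{
	uint32_t zero = 0;
	uint32_t tid = (uint32_t)syscall(SYS_gettid);

	if (__atomic_compare_exchange_n(uaddr, &zero, tid, false,
					__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return 0;	/* fast path: the lock was free */

	/* Slow path: the kernel sets FUTEX_WAITERS and blocks us. */
	return (int)syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}
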
Lai Jiangshan2e129782010-12-22 14:18:50 +08001423/**
1424 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1425 * @q: The futex_q to unqueue
1426 *
1427 * The q->lock_ptr must not be NULL and must be held by the caller.
1428 */
1429static void __unqueue_futex(struct futex_q *q)
1430{
1431 struct futex_hash_bucket *hb;
1432
Lance Roy4de1a292018-10-02 22:38:57 -07001433 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
Lai Jiangshan2e129782010-12-22 14:18:50 +08001434 return;
Lance Roy4de1a292018-10-02 22:38:57 -07001435 lockdep_assert_held(q->lock_ptr);
Lai Jiangshan2e129782010-12-22 14:18:50 +08001436
1437 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1438 plist_del(&q->list, &hb->chain);
Linus Torvalds11d46162014-03-20 22:11:17 -07001439 hb_waiters_dec(hb);
Lai Jiangshan2e129782010-12-22 14:18:50 +08001440}
1441
Ingo Molnarc87e2832006-06-27 02:54:58 -07001442/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 * The hash bucket lock must be held when this is called.
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001444 * Afterwards, the futex_q must not be accessed. Callers
1445	 * must later call wake_up_q() for the actual
1446 * wakeups to occur.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 */
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001448static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449{
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001450 struct task_struct *p = q->task;
1451
Darren Hartaa109902012-11-26 16:29:56 -08001452 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1453 return;
1454
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001455 /*
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001456	 * Queue the task for later wakeup, after we've released
1457	 * the hb->lock. wake_q_add() grabs a reference to p.
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001458 */
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001459 wake_q_add(wake_q, p);
Lai Jiangshan2e129782010-12-22 14:18:50 +08001460 __unqueue_futex(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 /*
Darren Hart (VMware)38fcd062017-04-14 15:31:38 -07001462 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
1463 * is written, without taking any locks. This is possible in the event
1464 * of a spurious wakeup, for example. A memory barrier is required here
1465 * to prevent the following store to lock_ptr from getting ahead of the
1466 * plist_del in __unqueue_futex().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 */
Peter Zijlstra1b367ec2017-03-22 11:35:49 +01001468 smp_store_release(&q->lock_ptr, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469}
1470
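/*
 * Illustrative sketch, not kernel code: the deferral pattern behind
 * mark_wake_futex()/wake_up_q() above, shown with pthreads. Waiters
 * are detached while the lock is held and signalled only after it is
 * dropped, so a woken thread never immediately blocks on the same
 * lock. All example_ names are hypothetical.
 */
#include <pthread.h>
#include <stddef.h>

struct example_waiter {
	pthread_cond_t cond;
	struct example_waiter *next;
};

static void example_wake_all(pthread_mutex_t *lock,
			     struct example_waiter **queue)
{
	struct example_waiter *batch;

	pthread_mutex_lock(lock);
	batch = *queue;		/* detach the whole wait list */
	*queue = NULL;
	pthread_mutex_unlock(lock);

	while (batch) {
		struct example_waiter *next = batch->next;

		/* Wake outside the lock; the waiter may free itself now. */
		pthread_cond_signal(&batch->cond);
		batch = next;
	}
}
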
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001471/*
1472 * Caller must hold a reference on @pi_state.
1473 */
1474static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
Ingo Molnarc87e2832006-06-27 02:54:58 -07001475{
Vitaliy Ivanov7cfdaf32011-07-07 15:10:31 +03001476 u32 uninitialized_var(curval), newval;
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001477 struct task_struct *new_owner;
Peter Zijlstraaa2bfe52017-03-23 15:56:10 +01001478 bool postunlock = false;
Waiman Long194a6b52016-11-17 11:46:38 -05001479 DEFINE_WAKE_Q(wake_q);
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001480 int ret = 0;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001481
Ingo Molnarc87e2832006-06-27 02:54:58 -07001482 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
Peter Zijlstrabebe5b52017-03-22 11:35:59 +01001483 if (WARN_ON_ONCE(!new_owner)) {
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001484 /*
Peter Zijlstrabebe5b52017-03-22 11:35:59 +01001485 * As per the comment in futex_unlock_pi() this should not happen.
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001486 *
1487 * When this happens, give up our locks and try again, giving
1488 * the futex_lock_pi() instance time to complete, either by
1489 * waiting on the rtmutex or removing itself from the futex
1490 * queue.
1491 */
1492 ret = -EAGAIN;
1493 goto out_unlock;
Peter Zijlstra73d786b2017-03-22 11:35:54 +01001494 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07001495
1496 /*
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001497 * We pass it to the next owner. The WAITERS bit is always kept
1498	 * enabled while there is PI state around. We clean up the owner
1499	 * died bit because we are the owner.
Ingo Molnarc87e2832006-06-27 02:54:58 -07001500 */
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001501 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001502
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001503 if (unlikely(should_fail_futex(true)))
1504 ret = -EFAULT;
1505
Sebastian Andrzej Siewior89e9e662016-04-15 14:35:39 +02001506 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001507 ret = -EFAULT;
Peter Zijlstra734009e2017-03-22 11:35:52 +01001508
Sebastian Andrzej Siewior89e9e662016-04-15 14:35:39 +02001509 } else if (curval != uval) {
1510 /*
1511		 * If an unconditional UNLOCK_PI operation (user space did not
1512		 * try the TID->0 transition) raced with a waiter setting the
1513		 * FUTEX_WAITERS flag between get_user() and taking the hash
1514		 * bucket lock, retry the operation.
1515 */
1516 if ((FUTEX_TID_MASK & curval) == uval)
1517 ret = -EAGAIN;
1518 else
1519 ret = -EINVAL;
1520 }
Peter Zijlstra734009e2017-03-22 11:35:52 +01001521
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001522 if (ret)
1523 goto out_unlock;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001524
Peter Zijlstra94ffac52017-04-07 09:04:07 +02001525 /*
1526 * This is a point of no return; once we modify the uval there is no
1527 * going back and subsequent operations must not fail.
1528 */
1529
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001530 raw_spin_lock(&pi_state->owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001531 WARN_ON(list_empty(&pi_state->list));
1532 list_del_init(&pi_state->list);
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001533 raw_spin_unlock(&pi_state->owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001534
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001535 raw_spin_lock(&new_owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001536 WARN_ON(!list_empty(&pi_state->list));
Ingo Molnarc87e2832006-06-27 02:54:58 -07001537 list_add(&pi_state->list, &new_owner->pi_state_list);
1538 pi_state->owner = new_owner;
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001539 raw_spin_unlock(&new_owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001540
Peter Zijlstraaa2bfe52017-03-23 15:56:10 +01001541 postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
Peter Zijlstra5293c2e2017-03-22 11:35:51 +01001542
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001543out_unlock:
Peter Zijlstra5293c2e2017-03-22 11:35:51 +01001544 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
Peter Zijlstra5293c2e2017-03-22 11:35:51 +01001545
Peter Zijlstraaa2bfe52017-03-23 15:56:10 +01001546 if (postunlock)
1547 rt_mutex_postunlock(&wake_q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001548
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001549 return ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001550}
1551
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552/*
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001553 * Express the locking dependencies for lockdep:
1554 */
1555static inline void
1556double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1557{
1558 if (hb1 <= hb2) {
1559 spin_lock(&hb1->lock);
1560 if (hb1 < hb2)
1561 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1562 } else { /* hb1 > hb2 */
1563 spin_lock(&hb2->lock);
1564 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1565 }
1566}
1567
Darren Hart5eb3dc62009-03-12 00:55:52 -07001568static inline void
1569double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1570{
Darren Hartf061d352009-03-12 15:11:18 -07001571 spin_unlock(&hb1->lock);
Ingo Molnar88f502f2009-03-13 10:32:07 +01001572 if (hb1 != hb2)
1573 spin_unlock(&hb2->lock);
Darren Hart5eb3dc62009-03-12 00:55:52 -07001574}
1575
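/*
 * Illustrative sketch, not kernel code: the address-ordering trick of
 * double_lock_hb() above applied to two pthread mutexes. Always taking
 * the lower-addressed lock first makes an ABBA deadlock between two
 * concurrent callers impossible. The function name is hypothetical.
 */
#include <pthread.h>

static void example_double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
		return;
	}
	if (a > b) {		/* order by address */
		pthread_mutex_t *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}
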
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001576/*
Darren Hartb2d09942009-03-12 00:55:37 -07001577 * Wake up waiters matching bitset queued on this futex (uaddr).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 */
Darren Hartb41277d2010-11-08 13:10:09 -08001579static int
1580futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581{
Ingo Molnare2970f22006-06-27 02:54:47 -07001582 struct futex_hash_bucket *hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 struct futex_q *this, *next;
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001584 union futex_key key = FUTEX_KEY_INIT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 int ret;
Waiman Long194a6b52016-11-17 11:46:38 -05001586 DEFINE_WAKE_Q(wake_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587
Thomas Gleixnercd689982008-02-01 17:45:14 +01001588 if (!bitset)
1589 return -EINVAL;
1590
Linus Torvalds96d4f262019-01-03 18:57:57 -08001591 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 if (unlikely(ret != 0))
1593 goto out;
1594
Ingo Molnare2970f22006-06-27 02:54:47 -07001595 hb = hash_futex(&key);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -08001596
1597 /* Make sure we really have tasks to wakeup */
1598 if (!hb_waiters_pending(hb))
1599 goto out_put_key;
1600
Ingo Molnare2970f22006-06-27 02:54:47 -07001601 spin_lock(&hb->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602
Jason Low0d00c7b2014-01-12 15:31:22 -08001603 plist_for_each_entry_safe(this, next, &hb->chain, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 if (match_futex (&this->key, &key)) {
Darren Hart52400ba2009-04-03 13:40:49 -07001605 if (this->pi_state || this->rt_waiter) {
Ingo Molnared6f7b12006-07-01 04:35:46 -07001606 ret = -EINVAL;
1607 break;
1608 }
Thomas Gleixnercd689982008-02-01 17:45:14 +01001609
1610 /* Check if one of the bits is set in both bitsets */
1611 if (!(this->bitset & bitset))
1612 continue;
1613
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001614 mark_wake_futex(&wake_q, this);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615 if (++ret >= nr_wake)
1616 break;
1617 }
1618 }
1619
Ingo Molnare2970f22006-06-27 02:54:47 -07001620 spin_unlock(&hb->lock);
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001621 wake_up_q(&wake_q);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -08001622out_put_key:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001623 put_futex_key(&key);
Darren Hart42d35d42008-12-29 15:49:53 -08001624out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 return ret;
1626}
1627
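/*
 * Illustrative sketch, not kernel code: driving futex_wake() above
 * from user space. FUTEX_WAKE_BITSET wakes up to @nr waiters whose
 * wait bitset intersects @bitset; FUTEX_BITSET_MATCH_ANY selects every
 * waiter. The wrapper name is hypothetical.
 */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static long example_futex_wake(uint32_t *uaddr, int nr, uint32_t bitset)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAKE_BITSET, nr,
		       NULL, NULL, bitset);
}

/* e.g. example_futex_wake(&word, 1, FUTEX_BITSET_MATCH_ANY); */
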
Jiri Slaby30d6e0a2017-08-24 09:31:05 +02001628static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
1629{
1630 unsigned int op = (encoded_op & 0x70000000) >> 28;
1631 unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
Jiri Slabyd70ef222017-11-30 15:35:44 +01001632 int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
1633 int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
Jiri Slaby30d6e0a2017-08-24 09:31:05 +02001634 int oldval, ret;
1635
1636 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
Jiri Slabye78c38f62017-10-23 13:41:51 +02001637 if (oparg < 0 || oparg > 31) {
1638 char comm[sizeof(current->comm)];
1639 /*
1640 * kill this print and return -EINVAL when userspace
1641 * is sane again
1642 */
1643 pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n",
1644 get_task_comm(comm, current), oparg);
1645 oparg &= 31;
1646 }
Jiri Slaby30d6e0a2017-08-24 09:31:05 +02001647 oparg = 1 << oparg;
1648 }
1649
Linus Torvalds96d4f262019-01-03 18:57:57 -08001650 if (!access_ok(uaddr, sizeof(u32)))
Jiri Slaby30d6e0a2017-08-24 09:31:05 +02001651 return -EFAULT;
1652
1653 ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
1654 if (ret)
1655 return ret;
1656
1657 switch (cmp) {
1658 case FUTEX_OP_CMP_EQ:
1659 return oldval == cmparg;
1660 case FUTEX_OP_CMP_NE:
1661 return oldval != cmparg;
1662 case FUTEX_OP_CMP_LT:
1663 return oldval < cmparg;
1664 case FUTEX_OP_CMP_GE:
1665 return oldval >= cmparg;
1666 case FUTEX_OP_CMP_LE:
1667 return oldval <= cmparg;
1668 case FUTEX_OP_CMP_GT:
1669 return oldval > cmparg;
1670 default:
1671 return -ENOSYS;
1672 }
1673}
1674
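/*
 * Illustrative sketch, not kernel code: how user space builds the
 * encoded_op word that futex_atomic_op_inuser() above takes apart.
 * This mirrors the FUTEX_OP() macro from <linux/futex.h>: 4 bits of
 * op, 4 bits of cmp, then two 12-bit arguments (sign-extended on
 * decode). The function name is hypothetical.
 */
#include <linux/futex.h>
#include <stdint.h>

static uint32_t example_encode_op(unsigned int op, unsigned int cmp,
				  int oparg, int cmparg)
{
	return ((op & 0xf) << 28) | ((cmp & 0xf) << 24) |
	       ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
}

/*
 * example_encode_op(FUTEX_OP_ADD, FUTEX_OP_CMP_GT, 1, 0) yields the
 * same word as FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0).
 */
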
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675/*
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001676 * Wake up all waiters hashed on the physical page that is mapped
1677 * to this virtual address:
1678 */
Ingo Molnare2970f22006-06-27 02:54:47 -07001679static int
Darren Hartb41277d2010-11-08 13:10:09 -08001680futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
Ingo Molnare2970f22006-06-27 02:54:47 -07001681 int nr_wake, int nr_wake2, int op)
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001682{
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001683 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
Ingo Molnare2970f22006-06-27 02:54:47 -07001684 struct futex_hash_bucket *hb1, *hb2;
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001685 struct futex_q *this, *next;
Darren Harte4dc5b72009-03-12 00:56:13 -07001686 int ret, op_ret;
Waiman Long194a6b52016-11-17 11:46:38 -05001687 DEFINE_WAKE_Q(wake_q);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001688
Darren Harte4dc5b72009-03-12 00:56:13 -07001689retry:
Linus Torvalds96d4f262019-01-03 18:57:57 -08001690 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001691 if (unlikely(ret != 0))
1692 goto out;
Linus Torvalds96d4f262019-01-03 18:57:57 -08001693 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001694 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08001695 goto out_put_key1;
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001696
Ingo Molnare2970f22006-06-27 02:54:47 -07001697 hb1 = hash_futex(&key1);
1698 hb2 = hash_futex(&key2);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001699
Darren Harte4dc5b72009-03-12 00:56:13 -07001700retry_private:
Thomas Gleixnereaaea802009-10-04 09:34:17 +02001701 double_lock_hb(hb1, hb2);
Ingo Molnare2970f22006-06-27 02:54:47 -07001702 op_ret = futex_atomic_op_inuser(op, uaddr2);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001703 if (unlikely(op_ret < 0)) {
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001704
Darren Hart5eb3dc62009-03-12 00:55:52 -07001705 double_unlock_hb(hb1, hb2);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001706
David Howells7ee1dd32006-01-06 00:11:44 -08001707#ifndef CONFIG_MMU
Ingo Molnare2970f22006-06-27 02:54:47 -07001708 /*
1709 * we don't get EFAULT from MMU faults if we don't have an MMU,
1710 * but we might get them from range checking
1711 */
David Howells7ee1dd32006-01-06 00:11:44 -08001712 ret = op_ret;
Darren Hart42d35d42008-12-29 15:49:53 -08001713 goto out_put_keys;
David Howells7ee1dd32006-01-06 00:11:44 -08001714#endif
1715
David Gibson796f8d92005-11-07 00:59:33 -08001716 if (unlikely(op_ret != -EFAULT)) {
1717 ret = op_ret;
Darren Hart42d35d42008-12-29 15:49:53 -08001718 goto out_put_keys;
David Gibson796f8d92005-11-07 00:59:33 -08001719 }
1720
Thomas Gleixnerd0725992009-06-11 23:15:43 +02001721 ret = fault_in_user_writeable(uaddr2);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001722 if (ret)
Darren Hartde87fcc2009-03-12 00:55:46 -07001723 goto out_put_keys;
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001724
Darren Hartb41277d2010-11-08 13:10:09 -08001725 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07001726 goto retry_private;
1727
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001728 put_futex_key(&key2);
1729 put_futex_key(&key1);
Darren Harte4dc5b72009-03-12 00:56:13 -07001730 goto retry;
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001731 }
1732
Jason Low0d00c7b2014-01-12 15:31:22 -08001733 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001734 if (match_futex (&this->key, &key1)) {
Darren Hartaa109902012-11-26 16:29:56 -08001735 if (this->pi_state || this->rt_waiter) {
1736 ret = -EINVAL;
1737 goto out_unlock;
1738 }
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001739 mark_wake_futex(&wake_q, this);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001740 if (++ret >= nr_wake)
1741 break;
1742 }
1743 }
1744
1745 if (op_ret > 0) {
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001746 op_ret = 0;
Jason Low0d00c7b2014-01-12 15:31:22 -08001747 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001748 if (match_futex (&this->key, &key2)) {
Darren Hartaa109902012-11-26 16:29:56 -08001749 if (this->pi_state || this->rt_waiter) {
1750 ret = -EINVAL;
1751 goto out_unlock;
1752 }
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001753 mark_wake_futex(&wake_q, this);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001754 if (++op_ret >= nr_wake2)
1755 break;
1756 }
1757 }
1758 ret += op_ret;
1759 }
1760
Darren Hartaa109902012-11-26 16:29:56 -08001761out_unlock:
Darren Hart5eb3dc62009-03-12 00:55:52 -07001762 double_unlock_hb(hb1, hb2);
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001763 wake_up_q(&wake_q);
Darren Hart42d35d42008-12-29 15:49:53 -08001764out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001765 put_futex_key(&key2);
Darren Hart42d35d42008-12-29 15:49:53 -08001766out_put_key1:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001767 put_futex_key(&key1);
Darren Hart42d35d42008-12-29 15:49:53 -08001768out:
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001769 return ret;
1770}
1771
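/*
 * Illustrative sketch, not kernel code: a typical futex_wake_op() call
 * from user space. This atomically sets *uaddr2 to 1 and, if its old
 * value was 0, wakes one waiter queued on uaddr2 in addition to one
 * waiter on uaddr1 - all under the hash bucket locks taken above. The
 * wrapper name is hypothetical; nr_wake2 travels in the timeout slot.
 */
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static long example_wake_op(uint32_t *uaddr1, uint32_t *uaddr2)
{
	return syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1 /* nr_wake */,
		       (void *)1UL /* nr_wake2 */, uaddr2,
		       FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0));
}
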
Darren Hart9121e472009-04-03 13:40:31 -07001772/**
1773 * requeue_futex() - Requeue a futex_q from one hb to another
1774 * @q: the futex_q to requeue
1775 * @hb1: the source hash_bucket
1776 * @hb2: the target hash_bucket
1777 * @key2: the new key for the requeued futex_q
1778 */
1779static inline
1780void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1781 struct futex_hash_bucket *hb2, union futex_key *key2)
1782{
1783
1784 /*
1785 * If key1 and key2 hash to the same bucket, no need to
1786 * requeue.
1787 */
1788 if (likely(&hb1->chain != &hb2->chain)) {
1789 plist_del(&q->list, &hb1->chain);
Linus Torvalds11d46162014-03-20 22:11:17 -07001790 hb_waiters_dec(hb1);
Linus Torvalds11d46162014-03-20 22:11:17 -07001791 hb_waiters_inc(hb2);
Davidlohr Buesofe1bce92016-04-20 20:09:24 -07001792 plist_add(&q->list, &hb2->chain);
Darren Hart9121e472009-04-03 13:40:31 -07001793 q->lock_ptr = &hb2->lock;
Darren Hart9121e472009-04-03 13:40:31 -07001794 }
1795 get_futex_key_refs(key2);
1796 q->key = *key2;
1797}
1798
Darren Hart52400ba2009-04-03 13:40:49 -07001799/**
1800 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
Darren Hartd96ee562009-09-21 22:30:22 -07001801 * @q: the futex_q
1802 * @key: the key of the requeue target futex
1803 * @hb: the hash_bucket of the requeue target futex
Darren Hart52400ba2009-04-03 13:40:49 -07001804 *
1805 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1806 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1807 * to the requeue target futex so the waiter can detect the wakeup on the right
1808 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
Darren Hartbeda2c72009-08-09 15:34:39 -07001809 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1810 * to protect access to the pi_state to fixup the owner later. Must be called
1811 * with both q->lock_ptr and hb->lock held.
Darren Hart52400ba2009-04-03 13:40:49 -07001812 */
1813static inline
Darren Hartbeda2c72009-08-09 15:34:39 -07001814void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1815 struct futex_hash_bucket *hb)
Darren Hart52400ba2009-04-03 13:40:49 -07001816{
Darren Hart52400ba2009-04-03 13:40:49 -07001817 get_futex_key_refs(key);
1818 q->key = *key;
1819
Lai Jiangshan2e129782010-12-22 14:18:50 +08001820 __unqueue_futex(q);
Darren Hart52400ba2009-04-03 13:40:49 -07001821
1822 WARN_ON(!q->rt_waiter);
1823 q->rt_waiter = NULL;
1824
Darren Hartbeda2c72009-08-09 15:34:39 -07001825 q->lock_ptr = &hb->lock;
Darren Hartbeda2c72009-08-09 15:34:39 -07001826
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001827 wake_up_state(q->task, TASK_NORMAL);
Darren Hart52400ba2009-04-03 13:40:49 -07001828}
1829
1830/**
1831 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
Darren Hartbab5bc92009-04-07 23:23:50 -07001832 * @pifutex: the user address of the to futex
1833 * @hb1: the from futex hash bucket, must be locked by the caller
1834 * @hb2: the to futex hash bucket, must be locked by the caller
1835 * @key1: the from futex key
1836 * @key2: the to futex key
1837 * @ps: address to store the pi_state pointer
1838 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
Darren Hart52400ba2009-04-03 13:40:49 -07001839 *
1840 * Try and get the lock on behalf of the top waiter if we can do it atomically.
Darren Hartbab5bc92009-04-07 23:23:50 -07001841 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1842 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1843 * hb1 and hb2 must be held by the caller.
Darren Hart52400ba2009-04-03 13:40:49 -07001844 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08001845 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03001846 * - 0 - failed to acquire the lock atomically;
1847 * - >0 - acquired the lock, return value is vpid of the top_waiter
1848 * - <0 - error
Darren Hart52400ba2009-04-03 13:40:49 -07001849 */
1850static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1851 struct futex_hash_bucket *hb1,
1852 struct futex_hash_bucket *hb2,
1853 union futex_key *key1, union futex_key *key2,
Darren Hartbab5bc92009-04-07 23:23:50 -07001854 struct futex_pi_state **ps, int set_waiters)
Darren Hart52400ba2009-04-03 13:40:49 -07001855{
Darren Hartbab5bc92009-04-07 23:23:50 -07001856 struct futex_q *top_waiter = NULL;
Darren Hart52400ba2009-04-03 13:40:49 -07001857 u32 curval;
Thomas Gleixner866293e2014-05-12 20:45:34 +00001858 int ret, vpid;
Darren Hart52400ba2009-04-03 13:40:49 -07001859
1860 if (get_futex_value_locked(&curval, pifutex))
1861 return -EFAULT;
1862
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001863 if (unlikely(should_fail_futex(true)))
1864 return -EFAULT;
1865
Darren Hartbab5bc92009-04-07 23:23:50 -07001866 /*
1867 * Find the top_waiter and determine if there are additional waiters.
1868 * If the caller intends to requeue more than 1 waiter to pifutex,
1869 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1870 * as we have means to handle the possible fault. If not, don't set
1871	 * the bit unnecessarily as it will force the subsequent unlock to enter
1872 * the kernel.
1873 */
Darren Hart52400ba2009-04-03 13:40:49 -07001874 top_waiter = futex_top_waiter(hb1, key1);
1875
1876 /* There are no waiters, nothing for us to do. */
1877 if (!top_waiter)
1878 return 0;
1879
Darren Hart84bc4af2009-08-13 17:36:53 -07001880 /* Ensure we requeue to the expected futex. */
1881 if (!match_futex(top_waiter->requeue_pi_key, key2))
1882 return -EINVAL;
1883
Darren Hart52400ba2009-04-03 13:40:49 -07001884 /*
Darren Hartbab5bc92009-04-07 23:23:50 -07001885 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1886 * the contended case or if set_waiters is 1. The pi_state is returned
1887 * in ps in contended cases.
Darren Hart52400ba2009-04-03 13:40:49 -07001888 */
Thomas Gleixner866293e2014-05-12 20:45:34 +00001889 vpid = task_pid_vnr(top_waiter->task);
Darren Hartbab5bc92009-04-07 23:23:50 -07001890 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1891 set_waiters);
Thomas Gleixner866293e2014-05-12 20:45:34 +00001892 if (ret == 1) {
Darren Hartbeda2c72009-08-09 15:34:39 -07001893 requeue_pi_wake_futex(top_waiter, key2, hb2);
Thomas Gleixner866293e2014-05-12 20:45:34 +00001894 return vpid;
1895 }
Darren Hart52400ba2009-04-03 13:40:49 -07001896 return ret;
1897}
1898
1899/**
1900 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
Randy Dunlapfb62db22010-10-13 11:02:34 -07001901 * @uaddr1: source futex user address
Darren Hartb41277d2010-11-08 13:10:09 -08001902 * @flags: futex flags (FLAGS_SHARED, etc.)
Randy Dunlapfb62db22010-10-13 11:02:34 -07001903 * @uaddr2: target futex user address
1904 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1905 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1906 * @cmpval: @uaddr1 expected value (or %NULL)
1907 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
Darren Hartb41277d2010-11-08 13:10:09 -08001908 * pi futex (pi to pi requeue is not supported)
Darren Hart52400ba2009-04-03 13:40:49 -07001909 *
1910 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1911 * uaddr2 atomically on behalf of the top waiter.
1912 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08001913 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03001914 * - >=0 - on success, the number of tasks requeued or woken;
1915 * - <0 - on error
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 */
Darren Hartb41277d2010-11-08 13:10:09 -08001917static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1918 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1919 u32 *cmpval, int requeue_pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920{
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001921 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
Darren Hart52400ba2009-04-03 13:40:49 -07001922 int drop_count = 0, task_count = 0, ret;
1923 struct futex_pi_state *pi_state = NULL;
Ingo Molnare2970f22006-06-27 02:54:47 -07001924 struct futex_hash_bucket *hb1, *hb2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 struct futex_q *this, *next;
Waiman Long194a6b52016-11-17 11:46:38 -05001926 DEFINE_WAKE_Q(wake_q);
Darren Hart52400ba2009-04-03 13:40:49 -07001927
Li Jinyuefbe0e832017-12-14 17:04:54 +08001928 if (nr_wake < 0 || nr_requeue < 0)
1929 return -EINVAL;
1930
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -04001931 /*
1932 * When PI not supported: return -ENOSYS if requeue_pi is true,
1933 * consequently the compiler knows requeue_pi is always false past
1934 * this point which will optimize away all the conditional code
1935 * further down.
1936 */
1937 if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
1938 return -ENOSYS;
1939
Darren Hart52400ba2009-04-03 13:40:49 -07001940 if (requeue_pi) {
1941 /*
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00001942 * Requeue PI only works on two distinct uaddrs. This
1943 * check is only valid for private futexes. See below.
1944 */
1945 if (uaddr1 == uaddr2)
1946 return -EINVAL;
1947
1948 /*
Darren Hart52400ba2009-04-03 13:40:49 -07001949 * requeue_pi requires a pi_state, try to allocate it now
1950 * without any locks in case it fails.
1951 */
1952 if (refill_pi_state_cache())
1953 return -ENOMEM;
1954 /*
1955 * requeue_pi must wake as many tasks as it can, up to nr_wake
1956 * + nr_requeue, since it acquires the rt_mutex prior to
1957 * returning to userspace, so as to not leave the rt_mutex with
1958 * waiters and no owner. However, second and third wake-ups
1959 * cannot be predicted as they involve race conditions with the
1960 * first wake and a fault while looking up the pi_state. Both
1961 * pthread_cond_signal() and pthread_cond_broadcast() should
1962 * use nr_wake=1.
1963 */
1964 if (nr_wake != 1)
1965 return -EINVAL;
1966 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967
Darren Hart42d35d42008-12-29 15:49:53 -08001968retry:
Linus Torvalds96d4f262019-01-03 18:57:57 -08001969 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 if (unlikely(ret != 0))
1971 goto out;
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001972 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
Linus Torvalds96d4f262019-01-03 18:57:57 -08001973 requeue_pi ? FUTEX_WRITE : FUTEX_READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08001975 goto out_put_key1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00001977 /*
1978 * The check above which compares uaddrs is not sufficient for
1979 * shared futexes. We need to compare the keys:
1980 */
1981 if (requeue_pi && match_futex(&key1, &key2)) {
1982 ret = -EINVAL;
1983 goto out_put_keys;
1984 }
1985
Ingo Molnare2970f22006-06-27 02:54:47 -07001986 hb1 = hash_futex(&key1);
1987 hb2 = hash_futex(&key2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
Darren Harte4dc5b72009-03-12 00:56:13 -07001989retry_private:
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07001990 hb_waiters_inc(hb2);
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001991 double_lock_hb(hb1, hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
Ingo Molnare2970f22006-06-27 02:54:47 -07001993 if (likely(cmpval != NULL)) {
1994 u32 curval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995
Ingo Molnare2970f22006-06-27 02:54:47 -07001996 ret = get_futex_value_locked(&curval, uaddr1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
1998 if (unlikely(ret)) {
Darren Hart5eb3dc62009-03-12 00:55:52 -07001999 double_unlock_hb(hb1, hb2);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07002000 hb_waiters_dec(hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
Darren Harte4dc5b72009-03-12 00:56:13 -07002002 ret = get_user(curval, uaddr1);
2003 if (ret)
2004 goto out_put_keys;
2005
Darren Hartb41277d2010-11-08 13:10:09 -08002006 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07002007 goto retry_private;
2008
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002009 put_futex_key(&key2);
2010 put_futex_key(&key1);
Darren Harte4dc5b72009-03-12 00:56:13 -07002011 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012 }
Ingo Molnare2970f22006-06-27 02:54:47 -07002013 if (curval != *cmpval) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 ret = -EAGAIN;
2015 goto out_unlock;
2016 }
2017 }
2018
Darren Hart52400ba2009-04-03 13:40:49 -07002019 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
Darren Hartbab5bc92009-04-07 23:23:50 -07002020 /*
2021 * Attempt to acquire uaddr2 and wake the top waiter. If we
2022 * intend to requeue waiters, force setting the FUTEX_WAITERS
2023 * bit. We force this here where we are able to easily handle
2024 * faults rather in the requeue loop below.
2025 */
Darren Hart52400ba2009-04-03 13:40:49 -07002026 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
Darren Hartbab5bc92009-04-07 23:23:50 -07002027 &key2, &pi_state, nr_requeue);
Darren Hart52400ba2009-04-03 13:40:49 -07002028
2029 /*
2030 * At this point the top_waiter has either taken uaddr2 or is
2031 * waiting on it. If the former, then the pi_state will not
2032 * exist yet, look it up one more time to ensure we have a
Thomas Gleixner866293e2014-05-12 20:45:34 +00002033 * reference to it. If the lock was taken, ret contains the
2034 * vpid of the top waiter task.
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002035 * If the lock was not taken, we have pi_state and an initial
2036 * refcount on it. In case of an error we have nothing.
Darren Hart52400ba2009-04-03 13:40:49 -07002037 */
Thomas Gleixner866293e2014-05-12 20:45:34 +00002038 if (ret > 0) {
Darren Hart52400ba2009-04-03 13:40:49 -07002039 WARN_ON(pi_state);
Darren Hart89061d32009-10-15 15:30:48 -07002040 drop_count++;
Darren Hart52400ba2009-04-03 13:40:49 -07002041 task_count++;
Thomas Gleixner866293e2014-05-12 20:45:34 +00002042 /*
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002043 * If we acquired the lock, then the user space value
2044 * of uaddr2 should be vpid. It cannot be changed by
2045 * the top waiter as it is blocked on hb2 lock if it
2046 * tries to do so. If something fiddled with it behind
2047 * our back the pi state lookup might unearth it. So
2048	 * we'd rather use the known value than reread and hand
2049	 * potential crap to lookup_pi_state.
2050 *
2051 * If that call succeeds then we have pi_state and an
2052 * initial refcount on it.
Thomas Gleixner866293e2014-05-12 20:45:34 +00002053 */
Peter Zijlstra734009e2017-03-22 11:35:52 +01002054 ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07002055 }
2056
2057 switch (ret) {
2058 case 0:
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002059 /* We hold a reference on the pi state. */
Darren Hart52400ba2009-04-03 13:40:49 -07002060 break;
Thomas Gleixner4959f2d2015-12-19 20:07:40 +00002061
2062 /* If the above failed, then pi_state is NULL */
Darren Hart52400ba2009-04-03 13:40:49 -07002063 case -EFAULT:
2064 double_unlock_hb(hb1, hb2);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07002065 hb_waiters_dec(hb2);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002066 put_futex_key(&key2);
2067 put_futex_key(&key1);
Thomas Gleixnerd0725992009-06-11 23:15:43 +02002068 ret = fault_in_user_writeable(uaddr2);
Darren Hart52400ba2009-04-03 13:40:49 -07002069 if (!ret)
2070 goto retry;
2071 goto out;
2072 case -EAGAIN:
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00002073 /*
2074 * Two reasons for this:
2075 * - Owner is exiting and we just wait for the
2076 * exit to complete.
2077 * - The user space value changed.
2078 */
Darren Hart52400ba2009-04-03 13:40:49 -07002079 double_unlock_hb(hb1, hb2);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07002080 hb_waiters_dec(hb2);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002081 put_futex_key(&key2);
2082 put_futex_key(&key1);
Darren Hart52400ba2009-04-03 13:40:49 -07002083 cond_resched();
2084 goto retry;
2085 default:
2086 goto out_unlock;
2087 }
2088 }
2089
Jason Low0d00c7b2014-01-12 15:31:22 -08002090 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
Darren Hart52400ba2009-04-03 13:40:49 -07002091 if (task_count - nr_wake >= nr_requeue)
2092 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093
Darren Hart52400ba2009-04-03 13:40:49 -07002094 if (!match_futex(&this->key, &key1))
2095 continue;
2096
Darren Hart392741e2009-08-07 15:20:48 -07002097 /*
2098 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
2099 * be paired with each other and no other futex ops.
Darren Hartaa109902012-11-26 16:29:56 -08002100 *
2101 * We should never be requeueing a futex_q with a pi_state,
2102 * which is awaiting a futex_unlock_pi().
Darren Hart392741e2009-08-07 15:20:48 -07002103 */
2104 if ((requeue_pi && !this->rt_waiter) ||
Darren Hartaa109902012-11-26 16:29:56 -08002105 (!requeue_pi && this->rt_waiter) ||
2106 this->pi_state) {
Darren Hart392741e2009-08-07 15:20:48 -07002107 ret = -EINVAL;
2108 break;
2109 }
Darren Hart52400ba2009-04-03 13:40:49 -07002110
2111 /*
2112 * Wake nr_wake waiters. For requeue_pi, if we acquired the
2113 * lock, we already woke the top_waiter. If not, it will be
2114 * woken by futex_unlock_pi().
2115 */
2116 if (++task_count <= nr_wake && !requeue_pi) {
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07002117 mark_wake_futex(&wake_q, this);
Darren Hart52400ba2009-04-03 13:40:49 -07002118 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 }
Darren Hart52400ba2009-04-03 13:40:49 -07002120
Darren Hart84bc4af2009-08-13 17:36:53 -07002121 /* Ensure we requeue to the expected futex for requeue_pi. */
2122 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
2123 ret = -EINVAL;
2124 break;
2125 }
2126
Darren Hart52400ba2009-04-03 13:40:49 -07002127 /*
2128 * Requeue nr_requeue waiters and possibly one more in the case
2129 * of requeue_pi if we couldn't acquire the lock atomically.
2130 */
2131 if (requeue_pi) {
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002132 /*
2133 * Prepare the waiter to take the rt_mutex. Take a
2134 * refcount on the pi_state and store the pointer in
2135 * the futex_q object of the waiter.
2136 */
Peter Zijlstrabf92cf32017-03-22 11:35:53 +01002137 get_pi_state(pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07002138 this->pi_state = pi_state;
2139 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
2140 this->rt_waiter,
Thomas Gleixnerc051b212014-05-22 03:25:50 +00002141 this->task);
Darren Hart52400ba2009-04-03 13:40:49 -07002142 if (ret == 1) {
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002143 /*
2144	 * We got the lock. We neither drop the
2145	 * refcount on pi_state nor clear
2146 * this->pi_state because the waiter needs the
2147 * pi_state for cleaning up the user space
2148 * value. It will drop the refcount after
2149 * doing so.
2150 */
Darren Hartbeda2c72009-08-09 15:34:39 -07002151 requeue_pi_wake_futex(this, &key2, hb2);
Darren Hart89061d32009-10-15 15:30:48 -07002152 drop_count++;
Darren Hart52400ba2009-04-03 13:40:49 -07002153 continue;
2154 } else if (ret) {
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002155 /*
2156 * rt_mutex_start_proxy_lock() detected a
2157 * potential deadlock when we tried to queue
2158 * that waiter. Drop the pi_state reference
2159 * which we took above and remove the pointer
2160 * to the state from the waiters futex_q
2161 * object.
2162 */
Darren Hart52400ba2009-04-03 13:40:49 -07002163 this->pi_state = NULL;
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00002164 put_pi_state(pi_state);
Thomas Gleixner885c2cb2015-12-19 20:07:41 +00002165 /*
2166 * We stop queueing more waiters and let user
2167 * space deal with the mess.
2168 */
2169 break;
Darren Hart52400ba2009-04-03 13:40:49 -07002170 }
2171 }
2172 requeue_futex(this, hb1, hb2, &key2);
2173 drop_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 }
2175
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002176 /*
2177 * We took an extra initial reference to the pi_state either
2178 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
2179 * need to drop it here again.
2180 */
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00002181 put_pi_state(pi_state);
Thomas Gleixner885c2cb2015-12-19 20:07:41 +00002182
2183out_unlock:
Darren Hart5eb3dc62009-03-12 00:55:52 -07002184 double_unlock_hb(hb1, hb2);
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07002185 wake_up_q(&wake_q);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07002186 hb_waiters_dec(hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
Darren Hartcd84a422009-04-02 14:19:38 -07002188 /*
2189 * drop_futex_key_refs() must be called outside the spinlocks. During
2190 * the requeue we moved futex_q's from the hash bucket at key1 to the
2191 * one at key2 and updated their key pointer. We no longer need to
2192 * hold the references to key1.
2193 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 while (--drop_count >= 0)
Rusty Russell9adef582007-05-08 00:26:42 -07002195 drop_futex_key_refs(&key1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196
Darren Hart42d35d42008-12-29 15:49:53 -08002197out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002198 put_futex_key(&key2);
Darren Hart42d35d42008-12-29 15:49:53 -08002199out_put_key1:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002200 put_futex_key(&key1);
Darren Hart42d35d42008-12-29 15:49:53 -08002201out:
Darren Hart52400ba2009-04-03 13:40:49 -07002202 return ret ? ret : task_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203}
2204
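/*
 * Illustrative sketch, not kernel code: the classic consumer of
 * futex_requeue() above. A condvar-style broadcast wakes one waiter
 * and requeues the rest onto the mutex word, avoiding a thundering
 * herd; the kernel re-checks that *cond_word still equals @expected
 * under the hash bucket locks. The wrapper name is hypothetical.
 */
#include <linux/futex.h>
#include <limits.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static long example_requeue_broadcast(uint32_t *cond_word,
				      uint32_t *mutex_word,
				      uint32_t expected)
{
	return syscall(SYS_futex, cond_word, FUTEX_CMP_REQUEUE,
		       1 /* nr_wake */,
		       (void *)(unsigned long)INT_MAX /* nr_requeue */,
		       mutex_word, expected);
}
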
2205/* The key must be already stored in q->key. */
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01002206static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
Namhyung Kim15e408c2010-09-14 21:43:48 +09002207 __acquires(&hb->lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208{
Ingo Molnare2970f22006-06-27 02:54:47 -07002209 struct futex_hash_bucket *hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210
Ingo Molnare2970f22006-06-27 02:54:47 -07002211 hb = hash_futex(&q->key);
Linus Torvalds11d46162014-03-20 22:11:17 -07002212
2213 /*
2214 * Increment the counter before taking the lock so that
2215	 * a potential waker won't miss a task that is about to sleep
2216	 * and is still waiting for the spinlock. This is safe as all queue_lock()
2217 * users end up calling queue_me(). Similarly, for housekeeping,
2218 * decrement the counter at queue_unlock() when some error has
2219 * occurred and we don't end up adding the task to the list.
2220 */
2221 hb_waiters_inc(hb);
2222
Ingo Molnare2970f22006-06-27 02:54:47 -07002223 q->lock_ptr = &hb->lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -08002225 spin_lock(&hb->lock); /* implies smp_mb(); (A) */
Ingo Molnare2970f22006-06-27 02:54:47 -07002226 return hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227}
2228
Darren Hartd40d65c2009-09-21 22:30:15 -07002229static inline void
Jason Low0d00c7b2014-01-12 15:31:22 -08002230queue_unlock(struct futex_hash_bucket *hb)
Namhyung Kim15e408c2010-09-14 21:43:48 +09002231 __releases(&hb->lock)
Darren Hartd40d65c2009-09-21 22:30:15 -07002232{
2233 spin_unlock(&hb->lock);
Linus Torvalds11d46162014-03-20 22:11:17 -07002234 hb_waiters_dec(hb);
Darren Hartd40d65c2009-09-21 22:30:15 -07002235}
2236
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002237static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238{
Pierre Peifferec92d082007-05-09 02:35:00 -07002239 int prio;
2240
2241 /*
2242 * The priority used to register this element is
2243 * - either the real thread-priority for the real-time threads
2244 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2245 * - or MAX_RT_PRIO for non-RT threads.
2246 * Thus, all RT-threads are woken first in priority order, and
2247 * the others are woken last, in FIFO order.
2248 */
2249 prio = min(current->normal_prio, MAX_RT_PRIO);
2250
2251 plist_node_init(&q->list, prio);
Pierre Peifferec92d082007-05-09 02:35:00 -07002252 plist_add(&q->list, &hb->chain);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002253 q->task = current;
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002254}
2255
2256/**
2257 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2258 * @q: The futex_q to enqueue
2259 * @hb: The destination hash bucket
2260 *
2261 * The hb->lock must be held by the caller, and is released here. A call to
2262 * queue_me() is typically paired with exactly one call to unqueue_me(). The
2263 * exceptions involve the PI related operations, which may use unqueue_me_pi()
2264 * or nothing if the unqueue is done as part of the wake process and the unqueue
2265	 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
2266 * an example).
2267 */
2268static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2269 __releases(&hb->lock)
2270{
2271 __queue_me(q, hb);
Ingo Molnare2970f22006-06-27 02:54:47 -07002272 spin_unlock(&hb->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273}
2274
Darren Hartd40d65c2009-09-21 22:30:15 -07002275/**
2276 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2277 * @q: The futex_q to unqueue
2278 *
2279 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2280 * be paired with exactly one earlier call to queue_me().
2281 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002282 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03002283	 * - 1 - if the futex_q was still queued (and we unqueued it);
2284 * - 0 - if the futex_q was already removed by the waking thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286static int unqueue_me(struct futex_q *q)
2287{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 spinlock_t *lock_ptr;
Ingo Molnare2970f22006-06-27 02:54:47 -07002289 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290
2291 /* In the common case we don't take the spinlock, which is nice. */
Darren Hart42d35d42008-12-29 15:49:53 -08002292retry:
Jianyu Zhan29b75eb2016-03-07 09:32:24 +08002293 /*
2294 * q->lock_ptr can change between this read and the following spin_lock.
2295 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2296 * optimizing lock_ptr out of the logic below.
2297 */
2298 lock_ptr = READ_ONCE(q->lock_ptr);
Stephen Hemmingerc80544d2007-10-18 03:07:05 -07002299 if (lock_ptr != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 spin_lock(lock_ptr);
2301 /*
2302 * q->lock_ptr can change between reading it and
2303 * spin_lock(), causing us to take the wrong lock. This
2304 * corrects the race condition.
2305 *
2306 * Reasoning goes like this: if we have the wrong lock,
2307 * q->lock_ptr must have changed (maybe several times)
2308 * between reading it and the spin_lock(). It can
2309 * change again after the spin_lock() but only if it was
2310 * already changed before the spin_lock(). It cannot,
2311 * however, change back to the original value. Therefore
2312 * we can detect whether we acquired the correct lock.
2313 */
2314 if (unlikely(lock_ptr != q->lock_ptr)) {
2315 spin_unlock(lock_ptr);
2316 goto retry;
2317 }
Lai Jiangshan2e129782010-12-22 14:18:50 +08002318 __unqueue_futex(q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002319
2320 BUG_ON(q->pi_state);
2321
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 spin_unlock(lock_ptr);
2323 ret = 1;
2324 }
2325
Rusty Russell9adef582007-05-08 00:26:42 -07002326 drop_futex_key_refs(&q->key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 return ret;
2328}
2329
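/*
 * Illustrative sketch, not kernel code: the "read pointer, lock,
 * revalidate" pattern that unqueue_me() above relies on, in generic
 * form. A waker can switch the object to a different lock (or NULL)
 * concurrently, so the pointer must be re-checked after the lock is
 * acquired. All example_ names are hypothetical.
 */
#include <pthread.h>
#include <stddef.h>

struct example_obj {
	pthread_mutex_t *lock;	/* may be switched by another thread */
};

static pthread_mutex_t *example_lock_obj(struct example_obj *obj)
{
	for (;;) {
		pthread_mutex_t *lock = __atomic_load_n(&obj->lock,
							__ATOMIC_ACQUIRE);
		if (!lock)
			return NULL;	/* already dequeued by a waker */
		pthread_mutex_lock(lock);
		if (lock == __atomic_load_n(&obj->lock, __ATOMIC_RELAXED))
			return lock;	/* still the right lock: done */
		pthread_mutex_unlock(lock);	/* raced with a waker; retry */
	}
}
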
Ingo Molnarc87e2832006-06-27 02:54:58 -07002330/*
2331	 * PI futexes cannot be requeued and must remove themselves from the
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002332 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2333 * and dropped here.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002334 */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002335static void unqueue_me_pi(struct futex_q *q)
Namhyung Kim15e408c2010-09-14 21:43:48 +09002336 __releases(q->lock_ptr)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002337{
Lai Jiangshan2e129782010-12-22 14:18:50 +08002338 __unqueue_futex(q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002339
2340 BUG_ON(!q->pi_state);
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00002341 put_pi_state(q->pi_state);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002342 q->pi_state = NULL;
2343
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002344 spin_unlock(q->lock_ptr);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002345}
2346
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002347static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
Peter Zijlstrac1e2f0e2017-12-08 13:49:39 +01002348 struct task_struct *argowner)
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002349{
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002350 struct futex_pi_state *pi_state = q->pi_state;
Vitaliy Ivanov7cfdaf32011-07-07 15:10:31 +03002351 u32 uval, uninitialized_var(curval), newval;
Peter Zijlstrac1e2f0e2017-12-08 13:49:39 +01002352 struct task_struct *oldowner, *newowner;
2353 u32 newtid;
Darren Harte4dc5b72009-03-12 00:56:13 -07002354 int ret;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002355
Peter Zijlstrac1e2f0e2017-12-08 13:49:39 +01002356 lockdep_assert_held(q->lock_ptr);
2357
Peter Zijlstra734009e2017-03-22 11:35:52 +01002358 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2359
2360 oldowner = pi_state->owner;
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002361
2362 /*
Peter Zijlstrac1e2f0e2017-12-08 13:49:39 +01002363 * We are here because either:
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002364 *
Peter Zijlstrac1e2f0e2017-12-08 13:49:39 +01002365 * - we stole the lock and pi_state->owner needs updating to reflect
2366 * that (@argowner == current),
2367 *
2368 * or:
2369 *
2370 * - someone stole our lock and we need to fix things to point to the
2371 * new owner (@argowner == NULL).
2372 *
2373 * Either way, we have to replace the TID in the user space variable.
Lai Jiangshan81612392011-01-14 17:09:41 +08002374 * This must be atomic as we have to preserve the owner died bit here.
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002375 *
Darren Hartb2d09942009-03-12 00:55:37 -07002376 * Note: We write the user space value _before_ changing the pi_state
2377 * because we can fault here. Imagine swapped out pages or a fork
2378 * that marked all the anonymous memory readonly for cow.
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002379 *
Peter Zijlstra734009e2017-03-22 11:35:52 +01002380 * Modifying pi_state _before_ the user space value would leave the
2381 * pi_state in an inconsistent state when we fault here, because we
2382 * need to drop the locks to handle the fault. This might be observed
2383 * in the PID check in lookup_pi_state.
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002384 */
2385retry:
Peter Zijlstrac1e2f0e2017-12-08 13:49:39 +01002386 if (!argowner) {
2387 if (oldowner != current) {
2388 /*
2389 * We raced against a concurrent self; things are
2390 * already fixed up. Nothing to do.
2391 */
2392 ret = 0;
2393 goto out_unlock;
2394 }
2395
2396 if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
2397 /* We got the lock after all, nothing to fix. */
2398 ret = 0;
2399 goto out_unlock;
2400 }
2401
2402 /*
2403		 * Since we just failed the trylock, there must be an owner.
2404 */
2405 newowner = rt_mutex_owner(&pi_state->pi_mutex);
2406 BUG_ON(!newowner);
2407 } else {
2408 WARN_ON_ONCE(argowner != current);
2409 if (oldowner == current) {
2410 /*
2411 * We raced against a concurrent self; things are
2412 * already fixed up. Nothing to do.
2413 */
2414 ret = 0;
2415 goto out_unlock;
2416 }
2417 newowner = argowner;
2418 }
2419
2420 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
Peter Zijlstraa97cb0e2018-01-22 11:39:47 +01002421 /* Owner died? */
2422 if (!pi_state->owner)
2423 newtid |= FUTEX_OWNER_DIED;
Peter Zijlstrac1e2f0e2017-12-08 13:49:39 +01002424
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002425 if (get_futex_value_locked(&uval, uaddr))
2426 goto handle_fault;
2427
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002428 for (;;) {
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002429 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2430
Michel Lespinasse37a9d912011-03-10 18:48:51 -08002431 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002432 goto handle_fault;
2433 if (curval == uval)
2434 break;
2435 uval = curval;
2436 }
2437
2438 /*
2439 * We fixed up user space. Now we need to fix the pi_state
2440 * itself.
2441 */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002442 if (pi_state->owner != NULL) {
Peter Zijlstra734009e2017-03-22 11:35:52 +01002443 raw_spin_lock(&pi_state->owner->pi_lock);
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002444 WARN_ON(list_empty(&pi_state->list));
2445 list_del_init(&pi_state->list);
Peter Zijlstra734009e2017-03-22 11:35:52 +01002446 raw_spin_unlock(&pi_state->owner->pi_lock);
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002447 }
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002448
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01002449 pi_state->owner = newowner;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002450
Peter Zijlstra734009e2017-03-22 11:35:52 +01002451 raw_spin_lock(&newowner->pi_lock);
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002452 WARN_ON(!list_empty(&pi_state->list));
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01002453 list_add(&pi_state->list, &newowner->pi_state_list);
Peter Zijlstra734009e2017-03-22 11:35:52 +01002454 raw_spin_unlock(&newowner->pi_lock);
2455 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2456
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002457 return 0;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002458
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002459 /*
Peter Zijlstra734009e2017-03-22 11:35:52 +01002460 * To handle the page fault we need to drop the locks here. That gives
2461 * the other task (either the highest priority waiter itself or the
2462 * task which stole the rtmutex) the chance to try the fixup of the
2463 * pi_state. So once we are back from handling the fault we need to
2464 * check the pi_state after reacquiring the locks and before trying to
2465 * do another fixup. When the fixup has been done already we simply
2466 * return.
2467 *
2468 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
2469 * drop hb->lock since the caller owns the hb -> futex_q relation.
	 * Dropping the pi_mutex->wait_lock requires revalidating the state.
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002471 */
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002472handle_fault:
Peter Zijlstra734009e2017-03-22 11:35:52 +01002473 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002474 spin_unlock(q->lock_ptr);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002475
Thomas Gleixnerd0725992009-06-11 23:15:43 +02002476 ret = fault_in_user_writeable(uaddr);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002477
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002478 spin_lock(q->lock_ptr);
Peter Zijlstra734009e2017-03-22 11:35:52 +01002479 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002480
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002481 /*
2482 * Check if someone else fixed it for us:
2483 */
Peter Zijlstra734009e2017-03-22 11:35:52 +01002484 if (pi_state->owner != oldowner) {
2485 ret = 0;
2486 goto out_unlock;
2487 }
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002488
2489 if (ret)
Peter Zijlstra734009e2017-03-22 11:35:52 +01002490 goto out_unlock;
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002491
2492 goto retry;
Peter Zijlstra734009e2017-03-22 11:35:52 +01002493
2494out_unlock:
2495 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2496 return ret;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002497}
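
/*
 * For reference, the bits fixup_pi_state_owner() juggles in the user
 * space futex word (values from the uapi <linux/futex.h>):
 *
 *	FUTEX_WAITERS		0x80000000	waiters are queued in the kernel
 *	FUTEX_OWNER_DIED	0x40000000	previous owner died (robust)
 *	FUTEX_TID_MASK		0x3fffffff	TID of the current owner
 *
 * A minimal sketch of the update performed above, ignoring faults;
 * new_tid stands in for task_pid_vnr(newowner):
 *
 *	newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_tid;
 *	cmpxchg(uaddr, uval, newval);
 */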
2498
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002499static long futex_wait_restart(struct restart_block *restart);
Thomas Gleixner36cf3b52007-07-15 23:41:20 -07002500
Darren Hartca5f9522009-04-03 13:39:33 -07002501/**
Darren Hartdd973992009-04-03 13:40:02 -07002502 * fixup_owner() - Post lock pi_state and corner case management
2503 * @uaddr: user address of the futex
Darren Hartdd973992009-04-03 13:40:02 -07002504 * @q: futex_q (contains pi_state and access to the rt_mutex)
2505 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2506 *
2507 * After attempting to lock an rt_mutex, this function is called to cleanup
2508 * the pi_state owner as well as handle race conditions that may allow us to
2509 * acquire the lock. Must be called with the hb lock held.
2510 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002511 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03002512 * - 1 - success, lock taken;
2513 * - 0 - success, lock not taken;
2514 * - <0 - on error (-EFAULT)
Darren Hartdd973992009-04-03 13:40:02 -07002515 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002516static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
Darren Hartdd973992009-04-03 13:40:02 -07002517{
Darren Hartdd973992009-04-03 13:40:02 -07002518 int ret = 0;
2519
2520 if (locked) {
2521 /*
2522 * Got the lock. We might not be the anticipated owner if we
2523 * did a lock-steal - fix up the PI-state in that case:
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002524 *
Peter Zijlstrac1e2f0e2017-12-08 13:49:39 +01002525 * Speculative pi_state->owner read (we don't hold wait_lock);
		 * since we own the lock, pi_state->owner == current is the
2527 * stable state, anything else needs more attention.
Darren Hartdd973992009-04-03 13:40:02 -07002528 */
2529 if (q->pi_state->owner != current)
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002530 ret = fixup_pi_state_owner(uaddr, q, current);
Darren Hartdd973992009-04-03 13:40:02 -07002531 goto out;
2532 }
2533
2534 /*
	 * If we didn't get the lock, check if anybody stole it from us. In
2536 * that case, we need to fix up the uval to point to them instead of
2537 * us, otherwise bad things happen. [10]
2538 *
2539 * Another speculative read; pi_state->owner == current is unstable
2540 * but needs our attention.
2541 */
2542 if (q->pi_state->owner == current) {
2543 ret = fixup_pi_state_owner(uaddr, q, NULL);
2544 goto out;
2545 }
2546
2547 /*
Darren Hartdd973992009-04-03 13:40:02 -07002548 * Paranoia check. If we did not take the lock, then we should not be
Lai Jiangshan81612392011-01-14 17:09:41 +08002549 * the owner of the rt_mutex.
Darren Hartdd973992009-04-03 13:40:02 -07002550 */
Peter Zijlstra73d786b2017-03-22 11:35:54 +01002551 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
Darren Hartdd973992009-04-03 13:40:02 -07002552 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2553 "pi-state %p\n", ret,
2554 q->pi_state->pi_mutex.owner,
2555 q->pi_state->owner);
Peter Zijlstra73d786b2017-03-22 11:35:54 +01002556 }
Darren Hartdd973992009-04-03 13:40:02 -07002557
2558out:
2559 return ret ? ret : locked;
2560}
2561
2562/**
Darren Hartca5f9522009-04-03 13:39:33 -07002563 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2564 * @hb: the futex hash bucket, must be locked by the caller
2565 * @q: the futex_q to queue up on
2566 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
Darren Hartca5f9522009-04-03 13:39:33 -07002567 */
2568static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002569 struct hrtimer_sleeper *timeout)
Darren Hartca5f9522009-04-03 13:39:33 -07002570{
Darren Hart9beba3c2009-09-24 11:54:47 -07002571 /*
2572 * The task state is guaranteed to be set before another task can
Peter Zijlstrab92b8b32015-05-12 10:51:55 +02002573 * wake it. set_current_state() is implemented using smp_store_mb() and
Darren Hart9beba3c2009-09-24 11:54:47 -07002574 * queue_me() calls spin_unlock() upon completion, both serializing
2575 * access to the hash list and forcing another memory barrier.
2576 */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002577 set_current_state(TASK_INTERRUPTIBLE);
Darren Hart0729e192009-09-21 22:30:38 -07002578 queue_me(q, hb);
Darren Hartca5f9522009-04-03 13:39:33 -07002579
2580 /* Arm the timer */
Thomas Gleixner2e4b0d32015-04-14 21:09:13 +00002581 if (timeout)
Darren Hartca5f9522009-04-03 13:39:33 -07002582 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
Darren Hartca5f9522009-04-03 13:39:33 -07002583
2584 /*
Darren Hart0729e192009-09-21 22:30:38 -07002585 * If we have been removed from the hash list, then another task
2586 * has tried to wake us, and we can skip the call to schedule().
Darren Hartca5f9522009-04-03 13:39:33 -07002587 */
2588 if (likely(!plist_node_empty(&q->list))) {
2589 /*
2590 * If the timer has already expired, current will already be
2591 * flagged for rescheduling. Only call schedule if there
2592 * is no timeout, or if it has yet to expire.
2593 */
2594 if (!timeout || timeout->task)
Colin Cross88c80042013-05-01 18:35:05 -07002595 freezable_schedule();
Darren Hartca5f9522009-04-03 13:39:33 -07002596 }
2597 __set_current_state(TASK_RUNNING);
2598}
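
/*
 * A sketch of the lost wakeup the above ordering prevents. Had the task
 * state been set after the plist check instead of before queue_me()
 * drops the lock, this interleaving would sleep forever:
 *
 *	queue_me(q, hb);
 *	if (!plist_node_empty(&q->list))	<- true, q still queued
 *						waker: unqueues q and calls
 *						wake_up(); we are still
 *						TASK_RUNNING, so it is a no-op
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	schedule();				<- nobody left to wake us
 */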
2599
Darren Hartf8010732009-04-03 13:40:40 -07002600/**
2601 * futex_wait_setup() - Prepare to wait on a futex
2602 * @uaddr: the futex userspace address
2603 * @val: the expected value
Darren Hartb41277d2010-11-08 13:10:09 -08002604 * @flags: futex flags (FLAGS_SHARED, etc.)
Darren Hartf8010732009-04-03 13:40:40 -07002605 * @q: the associated futex_q
2606 * @hb: storage for hash_bucket pointer to be returned to caller
2607 *
 * Set up the futex_q and locate the hash_bucket. Get the futex value and
2609 * compare it with the expected value. Handle atomic faults internally.
2610 * Return with the hb lock held and a q.key reference on success, and unlocked
2611 * with no q.key reference on failure.
2612 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002613 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03002614 * - 0 - uaddr contains val and hb has been locked;
 * - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
Darren Hartf8010732009-04-03 13:40:40 -07002616 */
Darren Hartb41277d2010-11-08 13:10:09 -08002617static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
Darren Hartf8010732009-04-03 13:40:40 -07002618 struct futex_q *q, struct futex_hash_bucket **hb)
2619{
2620 u32 uval;
2621 int ret;
2622
2623 /*
2624 * Access the page AFTER the hash-bucket is locked.
2625 * Order is important:
2626 *
2627 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2628 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2629 *
2630 * The basic logical guarantee of a futex is that it blocks ONLY
2631 * if cond(var) is known to be true at the time of blocking, for
Michel Lespinasse8fe8f542011-03-06 18:07:50 -08002632 * any cond. If we locked the hash-bucket after testing *uaddr, that
2633 * would open a race condition where we could block indefinitely with
Darren Hartf8010732009-04-03 13:40:40 -07002634 * cond(var) false, which would violate the guarantee.
2635 *
Michel Lespinasse8fe8f542011-03-06 18:07:50 -08002636 * On the other hand, we insert q and release the hash-bucket only
2637 * after testing *uaddr. This guarantees that futex_wait() will NOT
2638 * absorb a wakeup if *uaddr does not match the desired values
2639 * while the syscall executes.
Darren Hartf8010732009-04-03 13:40:40 -07002640 */
2641retry:
Linus Torvalds96d4f262019-01-03 18:57:57 -08002642 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ);
Darren Hartf8010732009-04-03 13:40:40 -07002643 if (unlikely(ret != 0))
Darren Harta5a2a0c2009-04-10 09:50:05 -07002644 return ret;
Darren Hartf8010732009-04-03 13:40:40 -07002645
2646retry_private:
2647 *hb = queue_lock(q);
2648
2649 ret = get_futex_value_locked(&uval, uaddr);
2650
2651 if (ret) {
Jason Low0d00c7b2014-01-12 15:31:22 -08002652 queue_unlock(*hb);
Darren Hartf8010732009-04-03 13:40:40 -07002653
2654 ret = get_user(uval, uaddr);
2655 if (ret)
2656 goto out;
2657
Darren Hartb41277d2010-11-08 13:10:09 -08002658 if (!(flags & FLAGS_SHARED))
Darren Hartf8010732009-04-03 13:40:40 -07002659 goto retry_private;
2660
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002661 put_futex_key(&q->key);
Darren Hartf8010732009-04-03 13:40:40 -07002662 goto retry;
2663 }
2664
2665 if (uval != val) {
Jason Low0d00c7b2014-01-12 15:31:22 -08002666 queue_unlock(*hb);
Darren Hartf8010732009-04-03 13:40:40 -07002667 ret = -EWOULDBLOCK;
2668 }
2669
2670out:
2671 if (ret)
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002672 put_futex_key(&q->key);
Darren Hartf8010732009-04-03 13:40:40 -07002673 return ret;
2674}
2675
Darren Hartb41277d2010-11-08 13:10:09 -08002676static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2677 ktime_t *abs_time, u32 bitset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678{
Darren Hartca5f9522009-04-03 13:39:33 -07002679 struct hrtimer_sleeper timeout, *to = NULL;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002680 struct restart_block *restart;
Ingo Molnare2970f22006-06-27 02:54:47 -07002681 struct futex_hash_bucket *hb;
Darren Hart5bdb05f2010-11-08 13:40:28 -08002682 struct futex_q q = futex_q_init;
Ingo Molnare2970f22006-06-27 02:54:47 -07002683 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684
Thomas Gleixnercd689982008-02-01 17:45:14 +01002685 if (!bitset)
2686 return -EINVAL;
Thomas Gleixnercd689982008-02-01 17:45:14 +01002687 q.bitset = bitset;
Darren Hartca5f9522009-04-03 13:39:33 -07002688
2689 if (abs_time) {
2690 to = &timeout;
2691
Darren Hartb41277d2010-11-08 13:10:09 -08002692 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2693 CLOCK_REALTIME : CLOCK_MONOTONIC,
2694 HRTIMER_MODE_ABS);
Darren Hartca5f9522009-04-03 13:39:33 -07002695 hrtimer_init_sleeper(to, current);
2696 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2697 current->timer_slack_ns);
2698 }
2699
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002700retry:
Darren Hart7ada8762010-10-17 08:35:04 -07002701 /*
2702 * Prepare to wait on uaddr. On success, holds hb lock and increments
2703 * q.key refs.
2704 */
Darren Hartb41277d2010-11-08 13:10:09 -08002705 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
Darren Hartf8010732009-04-03 13:40:40 -07002706 if (ret)
Darren Hart42d35d42008-12-29 15:49:53 -08002707 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708
Darren Hartca5f9522009-04-03 13:39:33 -07002709 /* queue_me and wait for wakeup, timeout, or a signal. */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002710 futex_wait_queue_me(hb, &q, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711
2712 /* If we were woken (and unqueued), we succeeded, whatever. */
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002713 ret = 0;
Darren Hart7ada8762010-10-17 08:35:04 -07002714 /* unqueue_me() drops q.key ref */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 if (!unqueue_me(&q))
Darren Hart7ada8762010-10-17 08:35:04 -07002716 goto out;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002717 ret = -ETIMEDOUT;
Darren Hartca5f9522009-04-03 13:39:33 -07002718 if (to && !to->task)
Darren Hart7ada8762010-10-17 08:35:04 -07002719 goto out;
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002720
Ingo Molnare2970f22006-06-27 02:54:47 -07002721 /*
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002722 * We expect signal_pending(current), but we might be the
2723 * victim of a spurious wakeup as well.
Ingo Molnare2970f22006-06-27 02:54:47 -07002724 */
Darren Hart7ada8762010-10-17 08:35:04 -07002725 if (!signal_pending(current))
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002726 goto retry;
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002727
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002728 ret = -ERESTARTSYS;
Pierre Peifferc19384b2007-05-09 02:35:02 -07002729 if (!abs_time)
Darren Hart7ada8762010-10-17 08:35:04 -07002730 goto out;
Steven Rostedtce6bd422007-12-05 15:46:09 +01002731
Andy Lutomirskif56141e2015-02-12 15:01:14 -08002732 restart = &current->restart_block;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002733 restart->fn = futex_wait_restart;
Namhyung Kima3c74c52010-09-14 21:43:47 +09002734 restart->futex.uaddr = uaddr;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002735 restart->futex.val = val;
Thomas Gleixner2456e852016-12-25 11:38:40 +01002736 restart->futex.time = *abs_time;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002737 restart->futex.bitset = bitset;
Darren Hart0cd9c642011-04-14 15:41:57 -07002738 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002739
2740 ret = -ERESTART_RESTARTBLOCK;
2741
Darren Hart42d35d42008-12-29 15:49:53 -08002742out:
Darren Hartca5f9522009-04-03 13:39:33 -07002743 if (to) {
2744 hrtimer_cancel(&to->timer);
2745 destroy_hrtimer_on_stack(&to->timer);
2746 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07002747 return ret;
2748}
2749
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002750
2751static long futex_wait_restart(struct restart_block *restart)
2752{
Namhyung Kima3c74c52010-09-14 21:43:47 +09002753 u32 __user *uaddr = restart->futex.uaddr;
Darren Harta72188d2009-04-03 13:40:22 -07002754 ktime_t t, *tp = NULL;
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002755
Darren Harta72188d2009-04-03 13:40:22 -07002756 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
Thomas Gleixner2456e852016-12-25 11:38:40 +01002757 t = restart->futex.time;
Darren Harta72188d2009-04-03 13:40:22 -07002758 tp = &t;
2759 }
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002760 restart->fn = do_no_restart_syscall;
Darren Hartb41277d2010-11-08 13:10:09 -08002761
2762 return (long)futex_wait(uaddr, restart->futex.flags,
2763 restart->futex.val, tp, restart->futex.bitset);
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002764}
2765
2766
Ingo Molnarc87e2832006-06-27 02:54:58 -07002767/*
2768 * Userspace tried a 0 -> TID atomic transition of the futex value
2769 * and failed. The kernel side here does the whole locking operation:
Davidlohr Bueso767f5092015-06-29 23:26:01 -07002770 * if there are waiters then it will block as a consequence of relying
 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too.)
 *
 * Also serves as the FUTEX_TRYLOCK_PI operation, with the corresponding
 * trylock semantics.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002775 */
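/*
 * The user space fast path leading here is, in sketch form (tid is the
 * caller's thread id and futex_var a suitably aligned u32; C11 atomics
 * are used for illustration only, error handling elided):
 *
 *	u32 zero = 0;
 *	if (!atomic_compare_exchange_strong(&futex_var, &zero, tid))
 *		syscall(SYS_futex, &futex_var, FUTEX_LOCK_PI, 0, NULL);
 *
 * i.e. the kernel is entered only on contention, with the futex word
 * already holding some owner's TID, possibly with FUTEX_WAITERS set.
 */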
Michael Kerrisk996636d2015-01-16 20:28:06 +01002776static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
Darren Hartb41277d2010-11-08 13:10:09 -08002777 ktime_t *time, int trylock)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002778{
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002779 struct hrtimer_sleeper timeout, *to = NULL;
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002780 struct futex_pi_state *pi_state = NULL;
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002781 struct rt_mutex_waiter rt_waiter;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002782 struct futex_hash_bucket *hb;
Darren Hart5bdb05f2010-11-08 13:40:28 -08002783 struct futex_q q = futex_q_init;
Darren Hartdd973992009-04-03 13:40:02 -07002784 int res, ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002785
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -04002786 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2787 return -ENOSYS;
2788
Ingo Molnarc87e2832006-06-27 02:54:58 -07002789 if (refill_pi_state_cache())
2790 return -ENOMEM;
2791
Pierre Peifferc19384b2007-05-09 02:35:02 -07002792 if (time) {
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002793 to = &timeout;
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07002794 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2795 HRTIMER_MODE_ABS);
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002796 hrtimer_init_sleeper(to, current);
Arjan van de Vencc584b22008-09-01 15:02:30 -07002797 hrtimer_set_expires(&to->timer, *time);
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002798 }
2799
Darren Hart42d35d42008-12-29 15:49:53 -08002800retry:
Linus Torvalds96d4f262019-01-03 18:57:57 -08002801 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002802 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08002803 goto out;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002804
Darren Harte4dc5b72009-03-12 00:56:13 -07002805retry_private:
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01002806 hb = queue_lock(&q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002807
Darren Hartbab5bc92009-04-07 23:23:50 -07002808 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002809 if (unlikely(ret)) {
Davidlohr Bueso767f5092015-06-29 23:26:01 -07002810 /*
2811 * Atomic work succeeded and we got the lock,
2812 * or failed. Either way, we do _not_ block.
2813 */
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002814 switch (ret) {
Darren Hart1a520842009-04-03 13:39:52 -07002815 case 1:
2816 /* We got the lock. */
2817 ret = 0;
2818 goto out_unlock_put_key;
2819 case -EFAULT:
2820 goto uaddr_faulted;
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002821 case -EAGAIN:
2822 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00002823 * Two reasons for this:
2824 * - Task is exiting and we just wait for the
2825 * exit to complete.
2826 * - The user space value changed.
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002827 */
Jason Low0d00c7b2014-01-12 15:31:22 -08002828 queue_unlock(hb);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002829 put_futex_key(&q.key);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002830 cond_resched();
2831 goto retry;
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002832 default:
Darren Hart42d35d42008-12-29 15:49:53 -08002833 goto out_unlock_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002834 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07002835 }
2836
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002837 WARN_ON(!q.pi_state);
2838
Ingo Molnarc87e2832006-06-27 02:54:58 -07002839 /*
2840 * Only actually queue now that the atomic ops are done:
2841 */
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002842 __queue_me(&q, hb);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002843
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002844 if (trylock) {
Peter Zijlstra5293c2e2017-03-22 11:35:51 +01002845 ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002846 /* Fixup the trylock return value: */
2847 ret = ret ? 0 : -EWOULDBLOCK;
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002848 goto no_block;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002849 }
2850
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002851 rt_mutex_init_waiter(&rt_waiter);
Peter Zijlstra56222b22017-03-22 11:36:00 +01002852
2853 /*
2854 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
	 * hold it while doing rt_mutex_start_proxy_lock(), because then it will
	 * include hb->lock in the blocking chain, even though we will not in
	 * fact hold it while blocking. This will lead it to report -EDEADLK
2858 * and BUG when futex_unlock_pi() interleaves with this.
2859 *
2860 * Therefore acquire wait_lock while holding hb->lock, but drop the
2861 * latter before calling rt_mutex_start_proxy_lock(). This still fully
2862 * serializes against futex_unlock_pi() as that does the exact same
2863 * lock handoff sequence.
2864 */
2865 raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
2866 spin_unlock(q.lock_ptr);
2867 ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
2868 raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
2869
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002870 if (ret) {
2871 if (ret == 1)
2872 ret = 0;
2873
Peter Zijlstra56222b22017-03-22 11:36:00 +01002874 spin_lock(q.lock_ptr);
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002875 goto no_block;
2876 }
2877
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002878
2879 if (unlikely(to))
2880 hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
2881
2882 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
2883
Vernon Mauerya99e4e42006-07-01 04:35:42 -07002884 spin_lock(q.lock_ptr);
Darren Hartdd973992009-04-03 13:40:02 -07002885 /*
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002886 * If we failed to acquire the lock (signal/timeout), we must
	 * first acquire the hb->lock before removing our waiter from the
2888 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
2889 * wait lists consistent.
Peter Zijlstra56222b22017-03-22 11:36:00 +01002890 *
2891 * In particular; it is important that futex_unlock_pi() can not
2892 * observe this inconsistency.
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002893 */
2894 if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
2895 ret = 0;
2896
2897no_block:
2898 /*
Darren Hartdd973992009-04-03 13:40:02 -07002899 * Fixup the pi_state owner and possibly acquire the lock if we
2900 * haven't already.
2901 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002902 res = fixup_owner(uaddr, &q, !ret);
Darren Hartdd973992009-04-03 13:40:02 -07002903 /*
	 * If fixup_owner() returned an error, propagate that. If it acquired
2905 * the lock, clear our -ETIMEDOUT or -EINTR.
2906 */
2907 if (res)
2908 ret = (res < 0) ? res : 0;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002909
Darren Harte8f63862009-03-12 00:56:06 -07002910 /*
Darren Hartdd973992009-04-03 13:40:02 -07002911 * If fixup_owner() faulted and was unable to handle the fault, unlock
2912 * it and return the fault to userspace.
Darren Harte8f63862009-03-12 00:56:06 -07002913 */
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002914 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
2915 pi_state = q.pi_state;
2916 get_pi_state(pi_state);
2917 }
Darren Harte8f63862009-03-12 00:56:06 -07002918
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002919 /* Unqueue and drop the lock */
2920 unqueue_me_pi(&q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002921
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002922 if (pi_state) {
2923 rt_mutex_futex_unlock(&pi_state->pi_mutex);
2924 put_pi_state(pi_state);
2925 }
2926
Mikael Pettersson5ecb01c2010-01-23 22:36:29 +01002927 goto out_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002928
Darren Hart42d35d42008-12-29 15:49:53 -08002929out_unlock_put_key:
Jason Low0d00c7b2014-01-12 15:31:22 -08002930 queue_unlock(hb);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002931
Darren Hart42d35d42008-12-29 15:49:53 -08002932out_put_key:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002933 put_futex_key(&q.key);
Darren Hart42d35d42008-12-29 15:49:53 -08002934out:
Thomas Gleixner97181f92017-04-10 18:03:36 +02002935 if (to) {
2936 hrtimer_cancel(&to->timer);
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07002937 destroy_hrtimer_on_stack(&to->timer);
Thomas Gleixner97181f92017-04-10 18:03:36 +02002938 }
Darren Hartdd973992009-04-03 13:40:02 -07002939 return ret != -EINTR ? ret : -ERESTARTNOINTR;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002940
Darren Hart42d35d42008-12-29 15:49:53 -08002941uaddr_faulted:
Jason Low0d00c7b2014-01-12 15:31:22 -08002942 queue_unlock(hb);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002943
Thomas Gleixnerd0725992009-06-11 23:15:43 +02002944 ret = fault_in_user_writeable(uaddr);
Darren Harte4dc5b72009-03-12 00:56:13 -07002945 if (ret)
2946 goto out_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002947
Darren Hartb41277d2010-11-08 13:10:09 -08002948 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07002949 goto retry_private;
2950
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002951 put_futex_key(&q.key);
Darren Harte4dc5b72009-03-12 00:56:13 -07002952 goto retry;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002953}
2954
2955/*
Ingo Molnarc87e2832006-06-27 02:54:58 -07002956 * Userspace attempted a TID -> 0 atomic transition, and failed.
2957 * This is the in-kernel slowpath: we look up the PI state (if any),
2958 * and do the rt-mutex unlock.
2959 */
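/*
 * The matching user space fast path, again in sketch form: the owner
 * tries a TID -> 0 transition and enters the kernel only if the word is
 * not exactly its TID, i.e. FUTEX_WAITERS (and/or FUTEX_OWNER_DIED) is
 * set:
 *
 *	u32 expected = tid;
 *	if (!atomic_compare_exchange_strong(&futex_var, &expected, 0))
 *		syscall(SYS_futex, &futex_var, FUTEX_UNLOCK_PI, 0, NULL);
 */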
Darren Hartb41277d2010-11-08 13:10:09 -08002960static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002961{
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002962 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
Peter Zijlstra38d47c12008-09-26 19:32:20 +02002963 union futex_key key = FUTEX_KEY_INIT;
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002964 struct futex_hash_bucket *hb;
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01002965 struct futex_q *top_waiter;
Darren Harte4dc5b72009-03-12 00:56:13 -07002966 int ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002967
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -04002968 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2969 return -ENOSYS;
2970
Ingo Molnarc87e2832006-06-27 02:54:58 -07002971retry:
2972 if (get_user(uval, uaddr))
2973 return -EFAULT;
2974 /*
2975 * We release only a lock we actually own:
2976 */
Thomas Gleixnerc0c9ed12011-03-11 11:51:22 +01002977 if ((uval & FUTEX_TID_MASK) != vpid)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002978 return -EPERM;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002979
Linus Torvalds96d4f262019-01-03 18:57:57 -08002980 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE);
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002981 if (ret)
2982 return ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002983
2984 hb = hash_futex(&key);
2985 spin_lock(&hb->lock);
2986
Ingo Molnarc87e2832006-06-27 02:54:58 -07002987 /*
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002988 * Check waiters first. We do not trust user space values at
2989 * all and we at least want to know if user space fiddled
2990 * with the futex value instead of blindly unlocking.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002991 */
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01002992 top_waiter = futex_top_waiter(hb, &key);
2993 if (top_waiter) {
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002994 struct futex_pi_state *pi_state = top_waiter->pi_state;
2995
2996 ret = -EINVAL;
2997 if (!pi_state)
2998 goto out_unlock;
2999
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02003000 /*
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003001 * If current does not own the pi_state then the futex is
3002 * inconsistent and user space fiddled with the futex value.
3003 */
3004 if (pi_state->owner != current)
3005 goto out_unlock;
3006
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003007 get_pi_state(pi_state);
Peter Zijlstrabebe5b52017-03-22 11:35:59 +01003008 /*
Peter Zijlstrabebe5b52017-03-22 11:35:59 +01003009 * By taking wait_lock while still holding hb->lock, we ensure
3010 * there is no point where we hold neither; and therefore
3011 * wake_futex_pi() must observe a state consistent with what we
3012 * observed.
3013 */
3014 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003015 spin_unlock(&hb->lock);
3016
Peter Zijlstrac74aef22017-09-22 17:48:06 +02003017 /* drops pi_state->pi_mutex.wait_lock */
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003018 ret = wake_futex_pi(uaddr, uval, pi_state);
3019
3020 put_pi_state(pi_state);
3021
3022 /*
3023 * Success, we're done! No tricky corner cases.
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02003024 */
3025 if (!ret)
3026 goto out_putkey;
Ingo Molnarc87e2832006-06-27 02:54:58 -07003027 /*
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00003028 * The atomic access to the futex value generated a
3029 * pagefault, so retry the user-access and the wakeup:
Ingo Molnarc87e2832006-06-27 02:54:58 -07003030 */
3031 if (ret == -EFAULT)
3032 goto pi_faulted;
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02003033 /*
		 * An unconditional UNLOCK_PI op raced against a waiter
3035 * setting the FUTEX_WAITERS bit. Try again.
3036 */
3037 if (ret == -EAGAIN) {
Sebastian Andrzej Siewior89e9e662016-04-15 14:35:39 +02003038 put_futex_key(&key);
3039 goto retry;
3040 }
3041 /*
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02003042 * wake_futex_pi has detected invalid state. Tell user
3043 * space.
3044 */
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003045 goto out_putkey;
Ingo Molnarc87e2832006-06-27 02:54:58 -07003046 }
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00003047
Ingo Molnarc87e2832006-06-27 02:54:58 -07003048 /*
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00003049 * We have no kernel internal state, i.e. no waiters in the
3050 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We preserve neither
	 * the WAITERS bit nor the OWNER_DIED one. We are the
	 * owner.
Ingo Molnarc87e2832006-06-27 02:54:58 -07003054 */
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003055 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
3056 spin_unlock(&hb->lock);
Thomas Gleixner13fbca42014-06-03 12:27:07 +00003057 goto pi_faulted;
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003058 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07003059
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00003060 /*
3061 * If uval has changed, let user space handle it.
3062 */
3063 ret = (curval == uval) ? 0 : -EAGAIN;
3064
Ingo Molnarc87e2832006-06-27 02:54:58 -07003065out_unlock:
3066 spin_unlock(&hb->lock);
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02003067out_putkey:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01003068 put_futex_key(&key);
Ingo Molnarc87e2832006-06-27 02:54:58 -07003069 return ret;
3070
3071pi_faulted:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01003072 put_futex_key(&key);
Ingo Molnarc87e2832006-06-27 02:54:58 -07003073
Thomas Gleixnerd0725992009-06-11 23:15:43 +02003074 ret = fault_in_user_writeable(uaddr);
Darren Hartb5686362008-12-18 15:06:34 -08003075 if (!ret)
Ingo Molnarc87e2832006-06-27 02:54:58 -07003076 goto retry;
3077
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078 return ret;
3079}
3080
Darren Hart52400ba2009-04-03 13:40:49 -07003081/**
3082 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
 * @hb: the hash_bucket futex_q was originally enqueued on
3084 * @q: the futex_q woken while waiting to be requeued
3085 * @key2: the futex_key of the requeue target futex
3086 * @timeout: the timeout associated with the wait (NULL if none)
3087 *
3088 * Detect if the task was woken on the initial futex as opposed to the requeue
3089 * target futex. If so, determine if it was a timeout or a signal that caused
3090 * the wakeup and return the appropriate error code to the caller. Must be
3091 * called with the hb lock held.
3092 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08003093 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03003094 * - 0 = no early wakeup detected;
3095 * - <0 = -ETIMEDOUT or -ERESTARTNOINTR
Darren Hart52400ba2009-04-03 13:40:49 -07003096 */
3097static inline
3098int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
3099 struct futex_q *q, union futex_key *key2,
3100 struct hrtimer_sleeper *timeout)
3101{
3102 int ret = 0;
3103
3104 /*
3105 * With the hb lock held, we avoid races while we process the wakeup.
3106 * We only need to hold hb (and not hb2) to ensure atomicity as the
3107 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
3108 * It can't be requeued from uaddr2 to something else since we don't
3109 * support a PI aware source futex for requeue.
3110 */
3111 if (!match_futex(&q->key, key2)) {
3112 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
3113 /*
3114 * We were woken prior to requeue by a timeout or a signal.
3115 * Unqueue the futex_q and determine which it was.
3116 */
Lai Jiangshan2e129782010-12-22 14:18:50 +08003117 plist_del(&q->list, &hb->chain);
Linus Torvalds11d46162014-03-20 22:11:17 -07003118 hb_waiters_dec(hb);
Darren Hart52400ba2009-04-03 13:40:49 -07003119
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02003120 /* Handle spurious wakeups gracefully */
Thomas Gleixner11df6dd2009-10-28 20:26:48 +01003121 ret = -EWOULDBLOCK;
Darren Hart52400ba2009-04-03 13:40:49 -07003122 if (timeout && !timeout->task)
3123 ret = -ETIMEDOUT;
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02003124 else if (signal_pending(current))
Thomas Gleixner1c840c12009-05-20 09:22:40 +02003125 ret = -ERESTARTNOINTR;
Darren Hart52400ba2009-04-03 13:40:49 -07003126 }
3127 return ret;
3128}
3129
3130/**
3131 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
Darren Hart56ec1602009-09-21 22:29:59 -07003132 * @uaddr: the futex we initially wait on (non-pi)
Darren Hartb41277d2010-11-08 13:10:09 -08003133 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07003134 * the same type, no requeueing from private to shared, etc.
Darren Hart52400ba2009-04-03 13:40:49 -07003135 * @val: the expected value of uaddr
3136 * @abs_time: absolute timeout
Darren Hart56ec1602009-09-21 22:29:59 -07003137 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
Darren Hart52400ba2009-04-03 13:40:49 -07003138 * @uaddr2: the pi futex we will take prior to returning to user-space
3139 *
3140 * The caller will wait on uaddr and will be requeued by futex_requeue() to
 * uaddr2, which must be PI aware and distinct from uaddr. Normal wakeup will wake
3142 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
3143 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
3144 * without one, the pi logic would not know which task to boost/deboost, if
3145 * there was a need to.
Darren Hart52400ba2009-04-03 13:40:49 -07003146 *
3147 * We call schedule in futex_wait_queue_me() when we enqueue and return there
 * via the following:
Darren Hart52400ba2009-04-03 13:40:49 -07003149 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
Darren Hartcc6db4e2009-07-31 16:20:10 -07003150 * 2) wakeup on uaddr2 after a requeue
3151 * 3) signal
3152 * 4) timeout
Darren Hart52400ba2009-04-03 13:40:49 -07003153 *
 * If 3, clean up and return -ERESTARTNOINTR.
Darren Hart52400ba2009-04-03 13:40:49 -07003155 *
3156 * If 2, we may then block on trying to take the rt_mutex and return via:
3157 * 5) successful lock
3158 * 6) signal
3159 * 7) timeout
3160 * 8) other lock acquisition failure
3161 *
Darren Hartcc6db4e2009-07-31 16:20:10 -07003162 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
Darren Hart52400ba2009-04-03 13:40:49 -07003163 *
 * If 4 or 7, we clean up and return with -ETIMEDOUT.
3165 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08003166 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03003167 * - 0 - On success;
3168 * - <0 - On error
Darren Hart52400ba2009-04-03 13:40:49 -07003169 */
Darren Hartb41277d2010-11-08 13:10:09 -08003170static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
Darren Hart52400ba2009-04-03 13:40:49 -07003171 u32 val, ktime_t *abs_time, u32 bitset,
Darren Hartb41277d2010-11-08 13:10:09 -08003172 u32 __user *uaddr2)
Darren Hart52400ba2009-04-03 13:40:49 -07003173{
3174 struct hrtimer_sleeper timeout, *to = NULL;
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003175 struct futex_pi_state *pi_state = NULL;
Darren Hart52400ba2009-04-03 13:40:49 -07003176 struct rt_mutex_waiter rt_waiter;
Darren Hart52400ba2009-04-03 13:40:49 -07003177 struct futex_hash_bucket *hb;
Darren Hart5bdb05f2010-11-08 13:40:28 -08003178 union futex_key key2 = FUTEX_KEY_INIT;
3179 struct futex_q q = futex_q_init;
Darren Hart52400ba2009-04-03 13:40:49 -07003180 int res, ret;
Darren Hart52400ba2009-04-03 13:40:49 -07003181
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -04003182 if (!IS_ENABLED(CONFIG_FUTEX_PI))
3183 return -ENOSYS;
3184
Darren Hart6f7b0a22012-07-20 11:53:31 -07003185 if (uaddr == uaddr2)
3186 return -EINVAL;
3187
Darren Hart52400ba2009-04-03 13:40:49 -07003188 if (!bitset)
3189 return -EINVAL;
3190
3191 if (abs_time) {
3192 to = &timeout;
Darren Hartb41277d2010-11-08 13:10:09 -08003193 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
3194 CLOCK_REALTIME : CLOCK_MONOTONIC,
3195 HRTIMER_MODE_ABS);
Darren Hart52400ba2009-04-03 13:40:49 -07003196 hrtimer_init_sleeper(to, current);
3197 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
3198 current->timer_slack_ns);
3199 }
3200
3201 /*
3202 * The waiter is allocated on our stack, manipulated by the requeue
3203 * code while we sleep on uaddr.
3204 */
Peter Zijlstra50809352017-03-22 11:35:56 +01003205 rt_mutex_init_waiter(&rt_waiter);
Darren Hart52400ba2009-04-03 13:40:49 -07003206
Linus Torvalds96d4f262019-01-03 18:57:57 -08003207 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
Darren Hart52400ba2009-04-03 13:40:49 -07003208 if (unlikely(ret != 0))
3209 goto out;
3210
Darren Hart84bc4af2009-08-13 17:36:53 -07003211 q.bitset = bitset;
3212 q.rt_waiter = &rt_waiter;
3213 q.requeue_pi_key = &key2;
3214
Darren Hart7ada8762010-10-17 08:35:04 -07003215 /*
3216 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
3217 * count.
3218 */
Darren Hartb41277d2010-11-08 13:10:09 -08003219 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
Thomas Gleixnerc8b15a72009-05-20 09:18:50 +02003220 if (ret)
3221 goto out_key2;
Darren Hart52400ba2009-04-03 13:40:49 -07003222
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00003223 /*
3224 * The check above which compares uaddrs is not sufficient for
3225 * shared futexes. We need to compare the keys:
3226 */
3227 if (match_futex(&q.key, &key2)) {
Thomas Gleixner13c42c22014-09-11 23:44:35 +02003228 queue_unlock(hb);
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00003229 ret = -EINVAL;
3230 goto out_put_keys;
3231 }
3232
Darren Hart52400ba2009-04-03 13:40:49 -07003233 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02003234 futex_wait_queue_me(hb, &q, to);
Darren Hart52400ba2009-04-03 13:40:49 -07003235
3236 spin_lock(&hb->lock);
3237 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
3238 spin_unlock(&hb->lock);
3239 if (ret)
3240 goto out_put_keys;
3241
3242 /*
3243 * In order for us to be here, we know our q.key == key2, and since
3244 * we took the hb->lock above, we also know that futex_requeue() has
3245 * completed and we no longer have to concern ourselves with a wakeup
Darren Hart7ada8762010-10-17 08:35:04 -07003246 * race with the atomic proxy lock acquisition by the requeue code. The
	 * futex_requeue() dropped our key1 reference and incremented our key2
3248 * reference count.
Darren Hart52400ba2009-04-03 13:40:49 -07003249 */
3250
3251 /* Check if the requeue code acquired the second futex for us. */
3252 if (!q.rt_waiter) {
3253 /*
3254 * Got the lock. We might not be the anticipated owner if we
3255 * did a lock-steal - fix up the PI-state in that case.
3256 */
3257 if (q.pi_state && (q.pi_state->owner != current)) {
3258 spin_lock(q.lock_ptr);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01003259 ret = fixup_pi_state_owner(uaddr2, &q, current);
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003260 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
3261 pi_state = q.pi_state;
3262 get_pi_state(pi_state);
3263 }
Thomas Gleixnerfb75a422015-12-19 20:07:38 +00003264 /*
3265 * Drop the reference to the pi state which
3266 * the requeue_pi() code acquired for us.
3267 */
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00003268 put_pi_state(q.pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07003269 spin_unlock(q.lock_ptr);
3270 }
3271 } else {
Peter Zijlstrac236c8e2017-03-04 10:27:18 +01003272 struct rt_mutex *pi_mutex;
3273
Darren Hart52400ba2009-04-03 13:40:49 -07003274 /*
3275 * We have been woken up by futex_unlock_pi(), a timeout, or a
3276 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
3277 * the pi_state.
3278 */
Darren Hartf27071c2012-07-20 11:53:30 -07003279 WARN_ON(!q.pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07003280 pi_mutex = &q.pi_state->pi_mutex;
Peter Zijlstra38d589f2017-03-22 11:35:57 +01003281 ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
Darren Hart52400ba2009-04-03 13:40:49 -07003282
3283 spin_lock(q.lock_ptr);
Peter Zijlstra38d589f2017-03-22 11:35:57 +01003284 if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
3285 ret = 0;
3286
3287 debug_rt_mutex_free_waiter(&rt_waiter);
Darren Hart52400ba2009-04-03 13:40:49 -07003288 /*
3289 * Fixup the pi_state owner and possibly acquire the lock if we
3290 * haven't already.
3291 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01003292 res = fixup_owner(uaddr2, &q, !ret);
Darren Hart52400ba2009-04-03 13:40:49 -07003293 /*
3294 * If fixup_owner() returned an error, proprogate that. If it
Darren Hart56ec1602009-09-21 22:29:59 -07003295 * acquired the lock, clear -ETIMEDOUT or -EINTR.
Darren Hart52400ba2009-04-03 13:40:49 -07003296 */
3297 if (res)
3298 ret = (res < 0) ? res : 0;
3299
Peter Zijlstrac236c8e2017-03-04 10:27:18 +01003300 /*
3301 * If fixup_pi_state_owner() faulted and was unable to handle
3302 * the fault, unlock the rt_mutex and return the fault to
3303 * userspace.
3304 */
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003305 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
3306 pi_state = q.pi_state;
3307 get_pi_state(pi_state);
3308 }
Peter Zijlstrac236c8e2017-03-04 10:27:18 +01003309
Darren Hart52400ba2009-04-03 13:40:49 -07003310 /* Unqueue and drop the lock. */
3311 unqueue_me_pi(&q);
3312 }
3313
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003314 if (pi_state) {
3315 rt_mutex_futex_unlock(&pi_state->pi_mutex);
3316 put_pi_state(pi_state);
3317 }
3318
Peter Zijlstrac236c8e2017-03-04 10:27:18 +01003319 if (ret == -EINTR) {
Darren Hart52400ba2009-04-03 13:40:49 -07003320 /*
Darren Hartcc6db4e2009-07-31 16:20:10 -07003321 * We've already been requeued, but cannot restart by calling
3322 * futex_lock_pi() directly. We could restart this syscall, but
3323 * it would detect that the user space "val" changed and return
3324 * -EWOULDBLOCK. Save the overhead of the restart and return
3325 * -EWOULDBLOCK directly.
Darren Hart52400ba2009-04-03 13:40:49 -07003326 */
Thomas Gleixner20708872009-05-19 23:04:59 +02003327 ret = -EWOULDBLOCK;
Darren Hart52400ba2009-04-03 13:40:49 -07003328 }
3329
3330out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01003331 put_futex_key(&q.key);
Thomas Gleixnerc8b15a72009-05-20 09:18:50 +02003332out_key2:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01003333 put_futex_key(&key2);
Darren Hart52400ba2009-04-03 13:40:49 -07003334
3335out:
3336 if (to) {
3337 hrtimer_cancel(&to->timer);
3338 destroy_hrtimer_on_stack(&to->timer);
3339 }
3340 return ret;
3341}
3342
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003343/*
3344 * Support for robust futexes: the kernel cleans up held futexes at
3345 * thread exit time.
3346 *
3347 * Implementation: user-space maintains a per-thread list of locks it
3348 * is holding. Upon do_exit(), the kernel carefully walks this list,
3349 * and marks all locks that are owned by this thread with the
Ingo Molnarc87e2832006-06-27 02:54:58 -07003350 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003351 * always manipulated with the lock held, so the list is private and
3352 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
3353 * field, to allow the kernel to clean up if the thread dies after
3354 * acquiring the lock, but just before it could have added itself to
3355 * the list. There can only be one such pending lock.
3356 */
3357
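/*
 * The user space layout this relies on, from the uapi <linux/futex.h>:
 *
 *	struct robust_list {
 *		struct robust_list __user	*next;
 *	};
 *
 *	struct robust_list_head {
 *		struct robust_list		list;
 *		long				futex_offset;
 *		struct robust_list __user	*list_op_pending;
 *	};
 *
 * Each list entry lives inside a user space lock structure; adding the
 * per-head futex_offset to an entry address yields the address of the
 * futex word itself. A thread registers its head once, typically at
 * start-up:
 *
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 */
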
3358/**
Darren Hartd96ee562009-09-21 22:30:22 -07003359 * sys_set_robust_list() - Set the robust-futex list head of a task
3360 * @head: pointer to the list-head
3361 * @len: length of the list-head, as userspace expects
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003362 */
Heiko Carstens836f92a2009-01-14 14:14:33 +01003363SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
3364 size_t, len)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003365{
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08003366 if (!futex_cmpxchg_enabled)
3367 return -ENOSYS;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003368 /*
3369 * The kernel knows only one size for now:
3370 */
3371 if (unlikely(len != sizeof(*head)))
3372 return -EINVAL;
3373
3374 current->robust_list = head;
3375
3376 return 0;
3377}
3378
3379/**
Darren Hartd96ee562009-09-21 22:30:22 -07003380 * sys_get_robust_list() - Get the robust-futex list head of a task
3381 * @pid: pid of the process [zero for current task]
3382 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
3383 * @len_ptr: pointer to a length field, the kernel fills in the header size
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003384 */
Heiko Carstens836f92a2009-01-14 14:14:33 +01003385SYSCALL_DEFINE3(get_robust_list, int, pid,
3386 struct robust_list_head __user * __user *, head_ptr,
3387 size_t __user *, len_ptr)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003388{
Al Viroba46df92006-10-10 22:46:07 +01003389 struct robust_list_head __user *head;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003390 unsigned long ret;
Kees Cookbdbb7762012-03-19 16:12:53 -07003391 struct task_struct *p;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003392
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08003393 if (!futex_cmpxchg_enabled)
3394 return -ENOSYS;
3395
Kees Cookbdbb7762012-03-19 16:12:53 -07003396 rcu_read_lock();
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003397
Kees Cookbdbb7762012-03-19 16:12:53 -07003398 ret = -ESRCH;
3399 if (!pid)
3400 p = current;
3401 else {
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07003402 p = find_task_by_vpid(pid);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003403 if (!p)
3404 goto err_unlock;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003405 }
3406
Kees Cookbdbb7762012-03-19 16:12:53 -07003407 ret = -EPERM;
Jann Horncaaee622016-01-20 15:00:04 -08003408 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
Kees Cookbdbb7762012-03-19 16:12:53 -07003409 goto err_unlock;
3410
3411 head = p->robust_list;
3412 rcu_read_unlock();
3413
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003414 if (put_user(sizeof(*head), len_ptr))
3415 return -EFAULT;
3416 return put_user(head, head_ptr);
3417
3418err_unlock:
Oleg Nesterovaaa2a972006-09-29 02:00:55 -07003419 rcu_read_unlock();
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003420
3421 return ret;
3422}
3423
3424/*
3425 * Process a futex-list entry, check whether it's owned by the
3426 * dying task, and do notification if so:
3427 */
Arnd Bergmann04e77122018-04-17 16:31:07 +02003428static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003429{
Vitaliy Ivanov7cfdaf32011-07-07 15:10:31 +03003430 u32 uval, uninitialized_var(nval), mval;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003431
Ingo Molnar8f17d3a2006-03-27 01:16:27 -08003432retry:
3433 if (get_user(uval, uaddr))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003434 return -1;
3435
Pavel Emelyanovb4888932007-10-18 23:40:14 -07003436 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003437 /*
3438 * Ok, this dying thread is truly holding a futex
3439 * of interest. Set the OWNER_DIED bit atomically
3440 * via cmpxchg, and if the value had FUTEX_WAITERS
3441 * set, wake up a waiter (if any). (We have to do a
3442 * futex_wake() even if OWNER_DIED is already set -
3443 * to handle the rare but possible case of recursive
3444 * thread-death.) The rest of the cleanup is done in
3445 * userspace.
3446 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003447 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
Thomas Gleixner6e0aa9f2011-03-14 10:34:35 +01003448 /*
3449 * We are not holding a lock here, but we want to have
3450 * the pagefault_disable/enable() protection because
3451 * we want to handle the fault gracefully. If the
3452 * access fails we try to fault in the futex with R/W
3453 * verification via get_user_pages. get_user() above
3454 * does not guarantee R/W access. If that fails we
3455 * give up and leave the futex locked.
3456 */
3457 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
3458 if (fault_in_user_writeable(uaddr))
3459 return -1;
3460 goto retry;
3461 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07003462 if (nval != uval)
Ingo Molnar8f17d3a2006-03-27 01:16:27 -08003463 goto retry;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003464
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003465 /*
3466 * Wake robust non-PI futexes here. The wakeup of
3467 * PI futexes happens in exit_pi_state():
3468 */
Thomas Gleixner36cf3b52007-07-15 23:41:20 -07003469 if (!pi && (uval & FUTEX_WAITERS))
Peter Zijlstrac2f9f202008-09-26 19:32:23 +02003470 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003471 }
3472 return 0;
3473}
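
/*
 * The net effect for a dying owner: for every list entry whose futex
 * word holds our TID, the word becomes
 *
 *	(uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED
 *
 * and one waiter, if any, is woken to observe FUTEX_OWNER_DIED and
 * recover the lock in user space.
 */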
3474
3475/*
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003476 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3477 */
3478static inline int fetch_robust_entry(struct robust_list __user **entry,
Al Viroba46df92006-10-10 22:46:07 +01003479 struct robust_list __user * __user *head,
Namhyung Kim1dcc41b2010-09-14 21:43:46 +09003480 unsigned int *pi)
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003481{
3482 unsigned long uentry;
3483
Al Viroba46df92006-10-10 22:46:07 +01003484 if (get_user(uentry, (unsigned long __user *)head))
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003485 return -EFAULT;
3486
Al Viroba46df92006-10-10 22:46:07 +01003487 *entry = (void __user *)(uentry & ~1UL);
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003488 *pi = uentry & 1;
3489
3490 return 0;
3491}
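
/*
 * A sketch of the matching encode on the user space side: an entry for
 * a PI lock at address p is stored with bit 0 set,
 *
 *	prev->next = (struct robust_list *)((unsigned long)p | 1);
 *
 * which the decode above strips into *pi while restoring the pointer.
 */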
3492
3493/*
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003494 * Walk curr->robust_list (very carefully, it's a userspace list!)
3495 * and mark any locks found there dead, and notify any waiters.
3496 *
3497 * We silently return on any sign of list-walking problem.
3498 */
3499void exit_robust_list(struct task_struct *curr)
3500{
3501 struct robust_list_head __user *head = curr->robust_list;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003502 struct robust_list __user *entry, *next_entry, *pending;
Darren Hart4c115e92010-11-04 15:00:00 -04003503 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3504 unsigned int uninitialized_var(next_pi);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003505 unsigned long futex_offset;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003506 int rc;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003507
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08003508 if (!futex_cmpxchg_enabled)
3509 return;
3510
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003511 /*
3512 * Fetch the list head (which was registered earlier, via
3513 * sys_set_robust_list()):
3514 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003515 if (fetch_robust_entry(&entry, &head->list.next, &pi))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003516 return;
3517 /*
3518 * Fetch the relative futex offset:
3519 */
3520 if (get_user(futex_offset, &head->futex_offset))
3521 return;
3522 /*
3523 * Fetch any possibly pending lock-add first, and handle it
3524 * if it exists:
3525 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003526 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003527 return;
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003528
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003529 next_entry = NULL; /* avoid warning with gcc */
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003530 while (entry != &head->list) {
3531 /*
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003532 * Fetch the next entry in the list before calling
3533 * handle_futex_death:
3534 */
3535 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3536 /*
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003537 * A pending lock might already be on the list, so
Ingo Molnarc87e2832006-06-27 02:54:58 -07003538 * don't process it twice:
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003539 */
3540 if (entry != pending)
Al Viroba46df92006-10-10 22:46:07 +01003541 if (handle_futex_death((void __user *)entry + futex_offset,
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003542 curr, pi))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003543 return;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003544 if (rc)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003545 return;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003546 entry = next_entry;
3547 pi = next_pi;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003548 /*
3549 * Avoid excessively long or circular lists:
3550 */
3551 if (!--limit)
3552 break;
3553
3554 cond_resched();
3555 }
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003556
3557 if (pending)
3558 handle_futex_death((void __user *)pending + futex_offset,
3559 curr, pip);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003560}
3561
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
		u32 __user *uaddr2, u32 val2, u32 val3)
{
	int cmd = op & FUTEX_CMD_MASK;
	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;

	if (op & FUTEX_CLOCK_REALTIME) {
		flags |= FLAGS_CLOCKRT;
		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
		    cmd != FUTEX_WAIT_REQUEUE_PI)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_LOCK_PI:
	case FUTEX_UNLOCK_PI:
	case FUTEX_TRYLOCK_PI:
	case FUTEX_WAIT_REQUEUE_PI:
	case FUTEX_CMP_REQUEUE_PI:
		if (!futex_cmpxchg_enabled)
			return -ENOSYS;
	}

	switch (cmd) {
	case FUTEX_WAIT:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAIT_BITSET:
		return futex_wait(uaddr, flags, val, timeout, val3);
	case FUTEX_WAKE:
		val3 = FUTEX_BITSET_MATCH_ANY;
		/* fall through */
	case FUTEX_WAKE_BITSET:
		return futex_wake(uaddr, flags, val, val3);
	case FUTEX_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
	case FUTEX_CMP_REQUEUE:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
	case FUTEX_WAKE_OP:
		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
	case FUTEX_LOCK_PI:
		return futex_lock_pi(uaddr, flags, timeout, 0);
	case FUTEX_UNLOCK_PI:
		return futex_unlock_pi(uaddr, flags);
	case FUTEX_TRYLOCK_PI:
		return futex_lock_pi(uaddr, flags, NULL, 1);
	case FUTEX_WAIT_REQUEUE_PI:
		val3 = FUTEX_BITSET_MATCH_ANY;
		return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
					     uaddr2);
	case FUTEX_CMP_REQUEUE_PI:
		return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
	}
	return -ENOSYS;
}
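
/*
 * A minimal sketch of how userspace typically drives the two simplest
 * ops multiplexed above (illustrative only; the names and the 0/1/2
 * state encoding are assumptions of this example, following the classic
 * "futexes are tricky" three-state mutex):
 */
#if 0	/* userspace example, never compiled here */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

/* 0 == unlocked, 1 == locked/no waiters, 2 == locked/maybe waiters */
static atomic_uint ex_futex;

static void ex_lock(void)
{
	unsigned int c = 0;

	/* Fast path: 0 -> 1 without entering the kernel. */
	if (atomic_compare_exchange_strong(&ex_futex, &c, 1))
		return;
	/* Slow path: mark contended and sleep until the word drops to 0. */
	if (c != 2)
		c = atomic_exchange(&ex_futex, 2);
	while (c != 0) {
		syscall(SYS_futex, &ex_futex, FUTEX_WAIT_PRIVATE, 2,
			NULL, NULL, 0);
		c = atomic_exchange(&ex_futex, 2);
	}
}

static void ex_unlock(void)
{
	/* Only enter the kernel if someone may be sleeping. */
	if (atomic_fetch_sub(&ex_futex, 1) != 1) {
		atomic_store(&ex_futex, 0);
		syscall(SYS_futex, &ex_futex, FUTEX_WAKE_PRIVATE, 1,
			NULL, NULL, 0);
	}
}
#endif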


SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
		struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	u32 val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
			return -EFAULT;
		if (get_timespec64(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	/*
	 * 'utime' carries the requeue target if cmd == FUTEX_*_REQUEUE_*,
	 * and the number of waiters to wake if cmd == FUTEX_WAKE_OP.
	 */
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (u32) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
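
/*
 * Note the asymmetry made explicit above: for FUTEX_WAIT the timeout is
 * relative (it is added to the current monotonic time), while for
 * FUTEX_WAIT_BITSET and FUTEX_WAIT_REQUEUE_PI it is passed through as
 * an absolute deadline. A userspace sketch of both forms (illustrative;
 * the names are assumptions of the example):
 */
#if 0	/* userspace example, never compiled here */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <time.h>

static unsigned int ex_word;

static void ex_wait_both_ways(void)
{
	struct timespec rel = { .tv_sec = 1 };	/* "one second from now" */
	struct timespec abs;

	/* FUTEX_WAIT: relative timeout on CLOCK_MONOTONIC. */
	syscall(SYS_futex, &ex_word, FUTEX_WAIT_PRIVATE, 0,
		&rel, NULL, 0);

	/* FUTEX_WAIT_BITSET: absolute deadline on the same clock. */
	clock_gettime(CLOCK_MONOTONIC, &abs);
	abs.tv_sec += 1;
	syscall(SYS_futex, &ex_word, FUTEX_WAIT_BITSET_PRIVATE, 0,
		&abs, NULL, FUTEX_BITSET_MATCH_ANY);
}
#endif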
#ifdef CONFIG_COMPAT
/*
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
 */
static inline int
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
		   compat_uptr_t __user *head, unsigned int *pi)
{
	if (get_user(*uentry, head))
		return -EFAULT;

	*entry = compat_ptr((*uentry) & ~1);
	*pi = (unsigned int)(*uentry) & 1;

	return 0;
}

static void __user *futex_uaddr(struct robust_list __user *entry,
				compat_long_t futex_offset)
{
	compat_uptr_t base = ptr_to_compat(entry);
	void __user *uaddr = compat_ptr(base + futex_offset);

	return uaddr;
}

/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of a list-walking problem.
 */
void compat_exit_robust_list(struct task_struct *curr)
{
	struct compat_robust_list_head __user *head = curr->compat_robust_list;
	struct robust_list __user *entry, *next_entry, *pending;
	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
	unsigned int uninitialized_var(next_pi);
	compat_uptr_t uentry, next_uentry, upending;
	compat_long_t futex_offset;
	int rc;

	if (!futex_cmpxchg_enabled)
		return;

	/*
	 * Fetch the list head (which was registered earlier, via
	 * sys_set_robust_list()):
	 */
	if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
		return;
	/*
	 * Fetch the relative futex offset:
	 */
	if (get_user(futex_offset, &head->futex_offset))
		return;
	/*
	 * Fetch any possibly pending lock-add first, and handle it
	 * if it exists:
	 */
	if (compat_fetch_robust_entry(&upending, &pending,
			       &head->list_op_pending, &pip))
		return;

	next_entry = NULL;	/* avoid warning with gcc */
	while (entry != (struct robust_list __user *) &head->list) {
		/*
		 * Fetch the next entry in the list before calling
		 * handle_futex_death:
		 */
		rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
			(compat_uptr_t __user *)&entry->next, &next_pi);
		/*
		 * A pending lock might already be on the list, so
		 * don't process it twice:
		 */
		if (entry != pending) {
			void __user *uaddr = futex_uaddr(entry, futex_offset);

			if (handle_futex_death(uaddr, curr, pi))
				return;
		}
		if (rc)
			return;
		uentry = next_uentry;
		entry = next_entry;
		pi = next_pi;
		/*
		 * Avoid excessively long or circular lists:
		 */
		if (!--limit)
			break;

		cond_resched();
	}
	if (pending) {
		void __user *uaddr = futex_uaddr(pending, futex_offset);

		handle_futex_death(uaddr, curr, pip);
	}
}

COMPAT_SYSCALL_DEFINE2(set_robust_list,
		struct compat_robust_list_head __user *, head,
		compat_size_t, len)
{
	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	if (unlikely(len != sizeof(*head)))
		return -EINVAL;

	current->compat_robust_list = head;

	return 0;
}

COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
			compat_uptr_t __user *, head_ptr,
			compat_size_t __user *, len_ptr)
{
	struct compat_robust_list_head __user *head;
	unsigned long ret;
	struct task_struct *p;

	if (!futex_cmpxchg_enabled)
		return -ENOSYS;

	rcu_read_lock();

	ret = -ESRCH;
	if (!pid)
		p = current;
	else {
		p = find_task_by_vpid(pid);
		if (!p)
			goto err_unlock;
	}

	ret = -EPERM;
	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
		goto err_unlock;

	head = p->compat_robust_list;
	rcu_read_unlock();

	if (put_user(sizeof(*head), len_ptr))
		return -EFAULT;
	return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
	rcu_read_unlock();

	return ret;
}
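
/*
 * get_robust_list() has no glibc wrapper, so its rare callers (mostly
 * debuggers and checkpoint/restore tools) go through syscall(2); the
 * ptrace_may_access() check above gates reading another task's head.
 * A minimal sketch (illustrative; the native ABI is shown for brevity):
 */
#if 0	/* userspace example, never compiled here */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static void ex_dump_robust_head(pid_t pid)
{
	struct robust_list_head *head;
	size_t len;

	if (syscall(SYS_get_robust_list, pid, &head, &len) == 0)
		printf("pid %d: robust list head at %p, len %zu\n",
		       (int)pid, (void *)head, len);
}
#endif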
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val,
		struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
		u32, val3)
{
	struct timespec64 ts;
	ktime_t t, *tp = NULL;
	int val2 = 0;
	int cmd = op & FUTEX_CMD_MASK;

	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
		      cmd == FUTEX_WAIT_BITSET ||
		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
		if (get_old_timespec32(&ts, utime))
			return -EFAULT;
		if (!timespec64_valid(&ts))
			return -EINVAL;

		t = timespec64_to_ktime(ts);
		if (cmd == FUTEX_WAIT)
			t = ktime_add_safe(ktime_get(), t);
		tp = &t;
	}
	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
		val2 = (int) (unsigned long) utime;

	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
#endif /* CONFIG_COMPAT_32BIT_TIME */

static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
	u32 curval;

	/*
	 * This call will fail, and we want it to. Some arch
	 * implementations do runtime detection of the
	 * futex_atomic_cmpxchg_inatomic() functionality. We want to
	 * know that before we call in any of the complex code paths.
	 * Also we want to prevent registration of robust lists in that
	 * case. NULL is guaranteed to fault, and we get -EFAULT on a
	 * functional implementation; the non-functional ones will
	 * return -ENOSYS.
	 */
	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
		futex_cmpxchg_enabled = 1;
#endif
}
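
/*
 * When the probe above leaves futex_cmpxchg_enabled clear, every op
 * that needs the atomic cmpxchg (the PI ops, robust-list registration)
 * fails with -ENOSYS. A userspace sketch of probing for that at
 * startup (illustrative; the helper name is an assumption):
 */
#if 0	/* userspace example, never compiled here */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <errno.h>
#include <stdbool.h>

static bool ex_pi_futex_supported(void)
{
	unsigned int word = 0;		/* unlocked: no owner TID */

	/* Trylock an uncontended PI futex, then drop it again. */
	if (syscall(SYS_futex, &word, FUTEX_TRYLOCK_PI, 0, NULL, NULL, 0))
		return errno != ENOSYS;
	syscall(SYS_futex, &word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
	return true;
}
#endif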

static int __init futex_init(void)
{
	unsigned int futex_shift;
	unsigned long i;

#if CONFIG_BASE_SMALL
	futex_hashsize = 16;
#else
	futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif

	futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
					       futex_hashsize, 0,
					       futex_hashsize < 256 ? HASH_SMALL : 0,
					       &futex_shift, NULL,
					       futex_hashsize, futex_hashsize);
	futex_hashsize = 1UL << futex_shift;

	futex_detect_cmpxchg();

	for (i = 0; i < futex_hashsize; i++) {
		atomic_set(&futex_queues[i].waiters, 0);
		plist_head_init(&futex_queues[i].chain);
		spin_lock_init(&futex_queues[i].lock);
	}

	return 0;
}
core_initcall(futex_init);
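
/*
 * Worked example of the sizing in futex_init() (illustrative): on a
 * kernel without CONFIG_BASE_SMALL and 8 possible CPUs, the requested
 * size is roundup_pow_of_two(256 * 8) = 2048; alloc_large_system_hash()
 * then reports futex_shift == 11, so the final table is 1UL << 11 =
 * 2048 hash buckets, each with its own waiter plist and spinlock.
 */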