Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Fast Userspace Mutexes (which I call "Futexes!").
3 * (C) Rusty Russell, IBM 2002
4 *
5 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7 *
8 * Removed page pinning, fix privately mapped COW pages and other cleanups
9 * (C) Copyright 2003, 2004 Jamie Lokier
10 *
Ingo Molnar0771dfe2006-03-27 01:16:22 -080011 * Robust futex support started by Ingo Molnar
12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14 *
Ingo Molnarc87e2832006-06-27 02:54:58 -070015 * PI-futex support started by Ingo Molnar and Thomas Gleixner
16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18 *
Eric Dumazet34f01cc2007-05-09 02:35:04 -070019 * PRIVATE futexes by Eric Dumazet
20 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21 *
Darren Hart52400ba2009-04-03 13:40:49 -070022 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23 * Copyright (C) IBM Corporation, 2009
24 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
25 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070026 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27 * enough at me, Linus for the original (flawed) idea, Matthew
28 * Kirkwood for proof-of-concept implementation.
29 *
30 * "The futexes are also cursed."
31 * "But they come in a choice of three flavours!"
32 *
33 * This program is free software; you can redistribute it and/or modify
34 * it under the terms of the GNU General Public License as published by
35 * the Free Software Foundation; either version 2 of the License, or
36 * (at your option) any later version.
37 *
38 * This program is distributed in the hope that it will be useful,
39 * but WITHOUT ANY WARRANTY; without even the implied warranty of
40 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 * GNU General Public License for more details.
42 *
43 * You should have received a copy of the GNU General Public License
44 * along with this program; if not, write to the Free Software
45 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 */
47#include <linux/slab.h>
48#include <linux/poll.h>
49#include <linux/fs.h>
50#include <linux/file.h>
51#include <linux/jhash.h>
52#include <linux/init.h>
53#include <linux/futex.h>
54#include <linux/mount.h>
55#include <linux/pagemap.h>
56#include <linux/syscalls.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070057#include <linux/signal.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040058#include <linux/export.h>
Andrey Mirkinfd5eea42007-10-16 23:30:13 -070059#include <linux/magic.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070060#include <linux/pid.h>
61#include <linux/nsproxy.h>
Kees Cookbdbb7762012-03-19 16:12:53 -070062#include <linux/ptrace.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060063#include <linux/sched/rt.h>
Zhang Yi13d60f42013-06-25 21:19:31 +080064#include <linux/hugetlb.h>
Colin Cross88c80042013-05-01 18:35:05 -070065#include <linux/freezer.h>
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -080066#include <linux/bootmem.h>
Davidlohr Buesoab51fba2015-06-29 23:26:02 -070067#include <linux/fault-inject.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070068
Jakub Jelinek4732efb2005-09-06 15:16:25 -070069#include <asm/futex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070070
Peter Zijlstra1696a8b2013-10-31 18:18:19 +010071#include "locking/rtmutex_common.h"
Ingo Molnarc87e2832006-06-27 02:54:58 -070072
Thomas Gleixner99b60ce2014-01-12 15:31:24 -080073/*
Davidlohr Buesod7e8af12014-04-09 11:55:07 -070074 * READ this before attempting to hack on futexes!
75 *
76 * Basic futex operation and ordering guarantees
77 * =============================================
Thomas Gleixner99b60ce2014-01-12 15:31:24 -080078 *
79 * The waiter reads the futex value in user space and calls
80 * futex_wait(). This function computes the hash bucket and acquires
81 * the hash bucket lock. After that it reads the futex user space value
Davidlohr Buesob0c29f72014-01-12 15:31:25 -080082 * again and verifies that the data has not changed. If it has not changed
83 * it enqueues itself into the hash bucket, releases the hash bucket lock
84 * and schedules.
Thomas Gleixner99b60ce2014-01-12 15:31:24 -080085 *
86 * The waker side modifies the user space value of the futex and calls
Davidlohr Buesob0c29f72014-01-12 15:31:25 -080087 * futex_wake(). This function computes the hash bucket and acquires the
88 * hash bucket lock. Then it looks for waiters on that futex in the hash
89 * bucket and wakes them.
Thomas Gleixner99b60ce2014-01-12 15:31:24 -080090 *
Davidlohr Buesob0c29f72014-01-12 15:31:25 -080091 * In futex wakeup scenarios where no tasks are blocked on a futex, taking
 92 * the hb spinlock can be avoided and the waker can simply return. In order for this
93 * optimization to work, ordering guarantees must exist so that the waiter
94 * being added to the list is acknowledged when the list is concurrently being
95 * checked by the waker, avoiding scenarios like the following:
Thomas Gleixner99b60ce2014-01-12 15:31:24 -080096 *
97 * CPU 0 CPU 1
98 * val = *futex;
99 * sys_futex(WAIT, futex, val);
100 * futex_wait(futex, val);
101 * uval = *futex;
102 * *futex = newval;
103 * sys_futex(WAKE, futex);
104 * futex_wake(futex);
105 * if (queue_empty())
106 * return;
107 * if (uval == val)
108 * lock(hash_bucket(futex));
109 * queue();
110 * unlock(hash_bucket(futex));
111 * schedule();
112 *
113 * This would cause the waiter on CPU 0 to wait forever because it
114 * missed the transition of the user space value from val to newval
115 * and the waker did not find the waiter in the hash bucket queue.
Thomas Gleixner99b60ce2014-01-12 15:31:24 -0800116 *
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800117 * The correct serialization ensures that a waiter either observes
118 * the changed user space value before blocking or is woken by a
119 * concurrent waker:
120 *
121 * CPU 0 CPU 1
Thomas Gleixner99b60ce2014-01-12 15:31:24 -0800122 * val = *futex;
123 * sys_futex(WAIT, futex, val);
124 * futex_wait(futex, val);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800125 *
Davidlohr Buesod7e8af12014-04-09 11:55:07 -0700126 * waiters++; (a)
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800127 * smp_mb(); (A) <-- paired with -.
128 * |
129 * lock(hash_bucket(futex)); |
130 * |
131 * uval = *futex; |
132 * | *futex = newval;
133 * | sys_futex(WAKE, futex);
134 * | futex_wake(futex);
135 * |
136 * `--------> smp_mb(); (B)
Thomas Gleixner99b60ce2014-01-12 15:31:24 -0800137 * if (uval == val)
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800138 * queue();
Thomas Gleixner99b60ce2014-01-12 15:31:24 -0800139 * unlock(hash_bucket(futex));
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800140 * schedule(); if (waiters)
141 * lock(hash_bucket(futex));
Davidlohr Buesod7e8af12014-04-09 11:55:07 -0700142 * else wake_waiters(futex);
143 * waiters--; (b) unlock(hash_bucket(futex));
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800144 *
Davidlohr Buesod7e8af12014-04-09 11:55:07 -0700145 * Where (A) orders the waiters increment and the futex value read through
146 * atomic operations (see hb_waiters_inc) and where (B) orders the write
Davidlohr Bueso993b2ff2014-10-23 20:27:00 -0700147 * to futex and the waiters read -- this is done by the barriers for both
148 * shared and private futexes in get_futex_key_refs().
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800149 *
150 * This yields the following case (where X:=waiters, Y:=futex):
151 *
152 * X = Y = 0
153 *
154 * w[X]=1 w[Y]=1
155 * MB MB
156 * r[Y]=y r[X]=x
157 *
158 * Which guarantees that x==0 && y==0 is impossible; which translates back into
159 * the guarantee that we cannot both miss the futex variable change and the
160 * enqueue.
Davidlohr Buesod7e8af12014-04-09 11:55:07 -0700161 *
 162 * Note that a new waiter is accounted for in (a) even though it is possible that
 163 * the wait call will return an error, in which case we backtrack from it in (b).
164 * Refer to the comment in queue_lock().
165 *
 166 * Similarly, in order to account for waiters being requeued to another
 167 * address, we always increment the waiters for the destination bucket before
 168 * acquiring the lock, and then decrement them again after releasing it -
169 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
170 * will do the additional required waiter count housekeeping. This is done for
171 * double_lock_hb() and double_unlock_hb(), respectively.
Thomas Gleixner99b60ce2014-01-12 15:31:24 -0800172 */
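/*
 * Illustrative user-space sketch (not part of this file, and only an
 * assumption-laden example): how a waiter and a waker would drive the
 * protocol described above through the raw futex(2) syscall. The futex
 * word name, the thread setup and the missing error/EAGAIN handling are
 * placeholders chosen for brevity.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdatomic.h>
 *
 *	static atomic_uint futex_word;		// hypothetical shared futex word
 *
 *	static void waiter(void)
 *	{
 *		unsigned int val = atomic_load(&futex_word);
 *		// Sleep only while the word still holds 'val' (val = *futex above).
 *		syscall(SYS_futex, &futex_word, FUTEX_WAIT, val, NULL, NULL, 0);
 *	}
 *
 *	static void waker(void)
 *	{
 *		atomic_store(&futex_word, 1);	// *futex = newval
 *		// Wake at most one task blocked on the futex word.
 *		syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
 *	}
 */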
173
Heiko Carstens03b8c7b2014-03-02 13:09:47 +0100174#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
Thomas Gleixnera0c1e902008-02-23 15:23:57 -0800175int __read_mostly futex_cmpxchg_enabled;
Heiko Carstens03b8c7b2014-03-02 13:09:47 +0100176#endif
Thomas Gleixnera0c1e902008-02-23 15:23:57 -0800177
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178/*
Darren Hartb41277d2010-11-08 13:10:09 -0800179 * Futex flags used to encode options to functions and preserve them across
180 * restarts.
181 */
182#define FLAGS_SHARED 0x01
183#define FLAGS_CLOCKRT 0x02
184#define FLAGS_HAS_TIMEOUT 0x04
185
186/*
Ingo Molnarc87e2832006-06-27 02:54:58 -0700187 * Priority Inheritance state:
188 */
189struct futex_pi_state {
190 /*
191 * list of 'owned' pi_state instances - these have to be
192 * cleaned up in do_exit() if the task exits prematurely:
193 */
194 struct list_head list;
195
196 /*
197 * The PI object:
198 */
199 struct rt_mutex pi_mutex;
200
201 struct task_struct *owner;
202 atomic_t refcount;
203
204 union futex_key key;
205};
206
Darren Hartd8d88fb2009-09-21 22:30:30 -0700207/**
208 * struct futex_q - The hashed futex queue entry, one per waiting task
Randy Dunlapfb62db22010-10-13 11:02:34 -0700209 * @list: priority-sorted list of tasks waiting on this futex
Darren Hartd8d88fb2009-09-21 22:30:30 -0700210 * @task: the task waiting on the futex
211 * @lock_ptr: the hash bucket lock
212 * @key: the key the futex is hashed on
213 * @pi_state: optional priority inheritance state
214 * @rt_waiter: rt_waiter storage for use with requeue_pi
215 * @requeue_pi_key: the requeue_pi target futex key
216 * @bitset: bitset for the optional bitmasked wakeup
217 *
218 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 * we can wake only the relevant ones (hashed queues may be shared).
220 *
221 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
Pierre Peifferec92d082007-05-09 02:35:00 -0700222 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
Randy Dunlapfb62db22010-10-13 11:02:34 -0700223 * The order of wakeup is always to make the first condition true, then
Darren Hartd8d88fb2009-09-21 22:30:30 -0700224 * the second.
225 *
226 * PI futexes are typically woken before they are removed from the hash list via
227 * the rt_mutex code. See unqueue_me_pi().
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228 */
229struct futex_q {
Pierre Peifferec92d082007-05-09 02:35:00 -0700230 struct plist_node list;
Darren Hartd8d88fb2009-09-21 22:30:30 -0700231
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +0200232 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233 spinlock_t *lock_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234 union futex_key key;
Ingo Molnarc87e2832006-06-27 02:54:58 -0700235 struct futex_pi_state *pi_state;
Darren Hart52400ba2009-04-03 13:40:49 -0700236 struct rt_mutex_waiter *rt_waiter;
Darren Hart84bc4af2009-08-13 17:36:53 -0700237 union futex_key *requeue_pi_key;
Thomas Gleixnercd689982008-02-01 17:45:14 +0100238 u32 bitset;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239};
240
Darren Hart5bdb05f2010-11-08 13:40:28 -0800241static const struct futex_q futex_q_init = {
242 /* list gets initialized in queue_me()*/
243 .key = FUTEX_KEY_INIT,
244 .bitset = FUTEX_BITSET_MATCH_ANY
245};
246
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247/*
Darren Hartb2d09942009-03-12 00:55:37 -0700248 * Hash buckets are shared by all the futex_keys that hash to the same
249 * location. Each key may have multiple futex_q structures, one for each task
250 * waiting on a futex.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251 */
252struct futex_hash_bucket {
Linus Torvalds11d46162014-03-20 22:11:17 -0700253 atomic_t waiters;
Pierre Peifferec92d082007-05-09 02:35:00 -0700254 spinlock_t lock;
255 struct plist_head chain;
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -0800256} ____cacheline_aligned_in_smp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257
Rasmus Villemoesac742d32015-09-09 23:36:40 +0200258/*
259 * The base of the bucket array and its size are always used together
260 * (after initialization only in hash_futex()), so ensure that they
261 * reside in the same cacheline.
262 */
263static struct {
264 struct futex_hash_bucket *queues;
265 unsigned long hashsize;
266} __futex_data __read_mostly __aligned(2*sizeof(long));
267#define futex_queues (__futex_data.queues)
268#define futex_hashsize (__futex_data.hashsize)
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -0800269
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700271/*
272 * Fault injections for futexes.
273 */
274#ifdef CONFIG_FAIL_FUTEX
275
276static struct {
277 struct fault_attr attr;
278
Viresh Kumar621a5f72015-09-26 15:04:07 -0700279 bool ignore_private;
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700280} fail_futex = {
281 .attr = FAULT_ATTR_INITIALIZER,
Viresh Kumar621a5f72015-09-26 15:04:07 -0700282 .ignore_private = false,
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700283};
284
285static int __init setup_fail_futex(char *str)
286{
287 return setup_fault_attr(&fail_futex.attr, str);
288}
289__setup("fail_futex=", setup_fail_futex);
290
kbuild test robot5d285a72015-07-21 01:40:45 +0800291static bool should_fail_futex(bool fshared)
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700292{
293 if (fail_futex.ignore_private && !fshared)
294 return false;
295
296 return should_fail(&fail_futex.attr, 1);
297}
298
299#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
300
301static int __init fail_futex_debugfs(void)
302{
303 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
304 struct dentry *dir;
305
306 dir = fault_create_debugfs_attr("fail_futex", NULL,
307 &fail_futex.attr);
308 if (IS_ERR(dir))
309 return PTR_ERR(dir);
310
311 if (!debugfs_create_bool("ignore-private", mode, dir,
312 &fail_futex.ignore_private)) {
313 debugfs_remove_recursive(dir);
314 return -ENOMEM;
315 }
316
317 return 0;
318}
319
320late_initcall(fail_futex_debugfs);
321
322#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
323
324#else
325static inline bool should_fail_futex(bool fshared)
326{
327 return false;
328}
329#endif /* CONFIG_FAIL_FUTEX */
330
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800331static inline void futex_get_mm(union futex_key *key)
332{
333 atomic_inc(&key->private.mm->mm_count);
334 /*
335 * Ensure futex_get_mm() implies a full barrier such that
336 * get_futex_key() implies a full barrier. This is relied upon
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800337 * as smp_mb(); (B), see the ordering comment above.
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800338 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +0100339 smp_mb__after_atomic();
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800340}
341
Linus Torvalds11d46162014-03-20 22:11:17 -0700342/*
343 * Reflects a new waiter being added to the waitqueue.
344 */
345static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800346{
347#ifdef CONFIG_SMP
Linus Torvalds11d46162014-03-20 22:11:17 -0700348 atomic_inc(&hb->waiters);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800349 /*
Linus Torvalds11d46162014-03-20 22:11:17 -0700350 * Full barrier (A), see the ordering comment above.
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800351 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +0100352 smp_mb__after_atomic();
Linus Torvalds11d46162014-03-20 22:11:17 -0700353#endif
354}
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800355
Linus Torvalds11d46162014-03-20 22:11:17 -0700356/*
357 * Reflects a waiter being removed from the waitqueue by wakeup
358 * paths.
359 */
360static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
361{
362#ifdef CONFIG_SMP
363 atomic_dec(&hb->waiters);
364#endif
365}
366
367static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
368{
369#ifdef CONFIG_SMP
370 return atomic_read(&hb->waiters);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800371#else
Linus Torvalds11d46162014-03-20 22:11:17 -0700372 return 1;
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800373#endif
374}
375
Linus Torvalds1da177e2005-04-16 15:20:36 -0700376/*
377 * We hash on the keys returned from get_futex_key (see below).
378 */
379static struct futex_hash_bucket *hash_futex(union futex_key *key)
380{
381 u32 hash = jhash2((u32*)&key->both.word,
382 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
383 key->both.offset);
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -0800384 return &futex_queues[hash & (futex_hashsize - 1)];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385}
386
387/*
388 * Return 1 if two futex_keys are equal, 0 otherwise.
389 */
390static inline int match_futex(union futex_key *key1, union futex_key *key2)
391{
Darren Hart2bc87202009-10-14 10:12:39 -0700392 return (key1 && key2
393 && key1->both.word == key2->both.word
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394 && key1->both.ptr == key2->both.ptr
395 && key1->both.offset == key2->both.offset);
396}
397
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200398/*
399 * Take a reference to the resource addressed by a key.
400 * Can be called while holding spinlocks.
401 *
402 */
403static void get_futex_key_refs(union futex_key *key)
404{
405 if (!key->both.ptr)
406 return;
407
408 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
409 case FUT_OFF_INODE:
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800410 ihold(key->shared.inode); /* implies smp_mb(); (B) */
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200411 break;
412 case FUT_OFF_MMSHARED:
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800413 futex_get_mm(key); /* implies smp_mb(); (B) */
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200414 break;
Catalin Marinas76835b0e2014-10-17 17:38:49 +0100415 default:
Davidlohr Bueso993b2ff2014-10-23 20:27:00 -0700416 /*
 417 * Private futexes do not hold a reference on an inode or
 418 * mm, therefore the only purpose of calling get_futex_key_refs()
 419 * is to provide the barrier for the lockless waiter check.
420 */
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800421 smp_mb(); /* explicit smp_mb(); (B) */
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200422 }
423}
424
425/*
426 * Drop a reference to the resource addressed by a key.
Davidlohr Bueso993b2ff2014-10-23 20:27:00 -0700427 * The hash bucket spinlock must not be held. This is
428 * a no-op for private futexes, see comment in the get
429 * counterpart.
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200430 */
431static void drop_futex_key_refs(union futex_key *key)
432{
Darren Hart90621c42008-12-29 19:43:21 -0800433 if (!key->both.ptr) {
434 /* If we're here then we tried to put a key we failed to get */
435 WARN_ON_ONCE(1);
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200436 return;
Darren Hart90621c42008-12-29 19:43:21 -0800437 }
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200438
439 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
440 case FUT_OFF_INODE:
441 iput(key->shared.inode);
442 break;
443 case FUT_OFF_MMSHARED:
444 mmdrop(key->private.mm);
445 break;
446 }
447}
448
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700449/**
Darren Hartd96ee562009-09-21 22:30:22 -0700450 * get_futex_key() - Get parameters which are the keys for a futex
451 * @uaddr: virtual address of the futex
452 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
453 * @key: address where result is stored.
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500454 * @rw: mapping needs to be read/write (values: VERIFY_READ,
455 * VERIFY_WRITE)
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700456 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -0800457 * Return: a negative error code or 0
458 *
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700459 * The key words are stored in *key on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700460 *
Al Viro6131ffa2013-02-27 16:59:05 -0500461 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462 * offset_within_page). For private mappings, it's (uaddr, current->mm).
463 * We can usually work out the index without swapping in the page.
464 *
Darren Hartb2d09942009-03-12 00:55:37 -0700465 * lock_page() might sleep, so the caller should not hold a spinlock.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466 */
Thomas Gleixner64d13042009-05-18 21:20:10 +0200467static int
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500468get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469{
Ingo Molnare2970f22006-06-27 02:54:47 -0700470 unsigned long address = (unsigned long)uaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471 struct mm_struct *mm = current->mm;
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800472 struct page *page;
473 struct address_space *mapping;
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500474 int err, ro = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475
476 /*
477 * The futex address must be "naturally" aligned.
478 */
Ingo Molnare2970f22006-06-27 02:54:47 -0700479 key->both.offset = address % PAGE_SIZE;
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700480 if (unlikely((address % sizeof(u32)) != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481 return -EINVAL;
Ingo Molnare2970f22006-06-27 02:54:47 -0700482 address -= key->both.offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483
Linus Torvalds5cdec2d2013-12-12 09:53:51 -0800484 if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
485 return -EFAULT;
486
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700487 if (unlikely(should_fail_futex(fshared)))
488 return -EFAULT;
489
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490 /*
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700491 * PROCESS_PRIVATE futexes are fast.
 492 * As the mm cannot disappear under us and the 'key' only needs the
 493 * virtual address, we don't even have to find the underlying vma.
 494 * Note: We do have to check that 'uaddr' is a valid user address,
 495 * but access_ok() should be faster than find_vma().
496 */
497 if (!fshared) {
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700498 key->private.mm = mm;
499 key->private.address = address;
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800500 get_futex_key_refs(key); /* implies smp_mb(); (B) */
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700501 return 0;
502 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200504again:
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700505 /* Ignore any VERIFY_READ mapping (futex common case) */
506 if (unlikely(should_fail_futex(fshared)))
507 return -EFAULT;
508
KOSAKI Motohiro7485d0d2010-01-05 16:32:43 +0900509 err = get_user_pages_fast(address, 1, 1, &page);
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500510 /*
511 * If write access is not required (eg. FUTEX_WAIT), try
512 * and get read-only access.
513 */
514 if (err == -EFAULT && rw == VERIFY_READ) {
515 err = get_user_pages_fast(address, 1, 0, &page);
516 ro = 1;
517 }
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200518 if (err < 0)
519 return err;
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500520 else
521 err = 0;
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200522
Mel Gorman65d8fc72016-02-09 11:15:14 -0800523 /*
524 * The treatment of mapping from this point on is critical. The page
525 * lock protects many things but in this context the page lock
526 * stabilizes mapping, prevents inode freeing in the shared
527 * file-backed region case and guards against movement to swap cache.
528 *
 529 * Strictly speaking the page lock is not needed in all cases being
 530 * considered here and the page lock forces unnecessary serialization.
 531 * From this point on, mapping will be re-verified if necessary and
 532 * the page lock will be acquired only if it is unavoidable.
533 */
534 page = compound_head(page);
535 mapping = READ_ONCE(page->mapping);
536
Hugh Dickinse6780f72011-12-31 11:44:01 -0800537 /*
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800538 * If page->mapping is NULL, then it cannot be a PageAnon
Hugh Dickinse6780f72011-12-31 11:44:01 -0800539 * page; but it might be the ZERO_PAGE or in the gate area or
540 * in a special mapping (all cases which we are happy to fail);
541 * or it may have been a good file page when get_user_pages_fast
542 * found it, but truncated or holepunched or subjected to
543 * invalidate_complete_page2 before we got the page lock (also
544 * cases which we are happy to fail). And we hold a reference,
545 * so refcount care in invalidate_complete_page's remove_mapping
546 * prevents drop_caches from setting mapping to NULL beneath us.
547 *
548 * The case we do have to guard against is when memory pressure made
549 * shmem_writepage move it from filecache to swapcache beneath us:
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800550 * an unlikely race, but we do need to retry for page->mapping.
Hugh Dickinse6780f72011-12-31 11:44:01 -0800551 */
Mel Gorman65d8fc72016-02-09 11:15:14 -0800552 if (unlikely(!mapping)) {
553 int shmem_swizzled;
554
555 /*
556 * Page lock is required to identify which special case above
557 * applies. If this is really a shmem page then the page lock
558 * will prevent unexpected transitions.
559 */
560 lock_page(page);
561 shmem_swizzled = PageSwapCache(page) || page->mapping;
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800562 unlock_page(page);
563 put_page(page);
Mel Gorman65d8fc72016-02-09 11:15:14 -0800564
Hugh Dickinse6780f72011-12-31 11:44:01 -0800565 if (shmem_swizzled)
566 goto again;
Mel Gorman65d8fc72016-02-09 11:15:14 -0800567
Hugh Dickinse6780f72011-12-31 11:44:01 -0800568 return -EFAULT;
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200569 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570
571 /*
572 * Private mappings are handled in a simple way.
573 *
Mel Gorman65d8fc72016-02-09 11:15:14 -0800574 * If the futex key is stored on an anonymous page, then the associated
575 * object is the mm which is implicitly pinned by the calling process.
576 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
578 * it's a read-only handle, it's expected that futexes attach to
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200579 * the object not the particular process.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 */
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800581 if (PageAnon(page)) {
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500582 /*
583 * A RO anonymous page will never change and thus doesn't make
584 * sense for futex operations.
585 */
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700586 if (unlikely(should_fail_futex(fshared)) || ro) {
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500587 err = -EFAULT;
588 goto out;
589 }
590
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200591 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 key->private.mm = mm;
Ingo Molnare2970f22006-06-27 02:54:47 -0700593 key->private.address = address;
Mel Gorman65d8fc72016-02-09 11:15:14 -0800594
595 get_futex_key_refs(key); /* implies smp_mb(); (B) */
596
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200597 } else {
Mel Gorman65d8fc72016-02-09 11:15:14 -0800598 struct inode *inode;
599
600 /*
601 * The associated futex object in this case is the inode and
602 * the page->mapping must be traversed. Ordinarily this should
603 * be stabilised under page lock but it's not strictly
604 * necessary in this case as we just want to pin the inode, not
605 * update the radix tree or anything like that.
606 *
607 * The RCU read lock is taken as the inode is finally freed
608 * under RCU. If the mapping still matches expectations then the
609 * mapping->host can be safely accessed as being a valid inode.
610 */
611 rcu_read_lock();
612
613 if (READ_ONCE(page->mapping) != mapping) {
614 rcu_read_unlock();
615 put_page(page);
616
617 goto again;
618 }
619
620 inode = READ_ONCE(mapping->host);
621 if (!inode) {
622 rcu_read_unlock();
623 put_page(page);
624
625 goto again;
626 }
627
628 /*
629 * Take a reference unless it is about to be freed. Previously
630 * this reference was taken by ihold under the page lock
631 * pinning the inode in place so i_lock was unnecessary. The
632 * only way for this check to fail is if the inode was
633 * truncated in parallel so warn for now if this happens.
634 *
635 * We are not calling into get_futex_key_refs() in file-backed
636 * cases, therefore a successful atomic_inc return below will
637 * guarantee that get_futex_key() will still imply smp_mb(); (B).
638 */
639 if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
640 rcu_read_unlock();
641 put_page(page);
642
643 goto again;
644 }
645
 646 /* Should be impossible but let's be paranoid for now */
647 if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
648 err = -EFAULT;
649 rcu_read_unlock();
650 iput(inode);
651
652 goto out;
653 }
654
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200655 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
Mel Gorman65d8fc72016-02-09 11:15:14 -0800656 key->shared.inode = inode;
Zhang Yi13d60f42013-06-25 21:19:31 +0800657 key->shared.pgoff = basepage_index(page);
Mel Gorman65d8fc72016-02-09 11:15:14 -0800658 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659 }
660
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500661out:
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800662 put_page(page);
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500663 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664}
665
Thomas Gleixnerae791a22010-11-10 13:30:36 +0100666static inline void put_futex_key(union futex_key *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700667{
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200668 drop_futex_key_refs(key);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669}
670
Darren Hartd96ee562009-09-21 22:30:22 -0700671/**
672 * fault_in_user_writeable() - Fault in user address and verify RW access
Thomas Gleixnerd0725992009-06-11 23:15:43 +0200673 * @uaddr: pointer to faulting user space address
674 *
675 * Slow path to fixup the fault we just took in the atomic write
676 * access to @uaddr.
677 *
Randy Dunlapfb62db22010-10-13 11:02:34 -0700678 * We have no generic implementation of a non-destructive write to the
Thomas Gleixnerd0725992009-06-11 23:15:43 +0200679 * user address. We know that we faulted in the atomic pagefault
680 * disabled section so we can as well avoid the #PF overhead by
681 * calling get_user_pages() right away.
682 */
683static int fault_in_user_writeable(u32 __user *uaddr)
684{
Andi Kleen722d0172009-12-08 13:19:42 +0100685 struct mm_struct *mm = current->mm;
686 int ret;
687
688 down_read(&mm->mmap_sem);
Benjamin Herrenschmidt2efaca92011-07-25 17:12:32 -0700689 ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
Dominik Dingel4a9e1cd2016-01-15 16:57:04 -0800690 FAULT_FLAG_WRITE, NULL);
Andi Kleen722d0172009-12-08 13:19:42 +0100691 up_read(&mm->mmap_sem);
692
Thomas Gleixnerd0725992009-06-11 23:15:43 +0200693 return ret < 0 ? ret : 0;
694}
695
Darren Hart4b1c4862009-04-03 13:39:42 -0700696/**
697 * futex_top_waiter() - Return the highest priority waiter on a futex
Darren Hartd96ee562009-09-21 22:30:22 -0700698 * @hb: the hash bucket the futex_q's reside in
699 * @key: the futex key (to distinguish it from other futex futex_q's)
Darren Hart4b1c4862009-04-03 13:39:42 -0700700 *
701 * Must be called with the hb lock held.
702 */
703static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
704 union futex_key *key)
705{
706 struct futex_q *this;
707
708 plist_for_each_entry(this, &hb->chain, list) {
709 if (match_futex(&this->key, key))
710 return this;
711 }
712 return NULL;
713}
714
Michel Lespinasse37a9d912011-03-10 18:48:51 -0800715static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
716 u32 uval, u32 newval)
Thomas Gleixner36cf3b52007-07-15 23:41:20 -0700717{
Michel Lespinasse37a9d912011-03-10 18:48:51 -0800718 int ret;
Thomas Gleixner36cf3b52007-07-15 23:41:20 -0700719
720 pagefault_disable();
Michel Lespinasse37a9d912011-03-10 18:48:51 -0800721 ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
Thomas Gleixner36cf3b52007-07-15 23:41:20 -0700722 pagefault_enable();
723
Michel Lespinasse37a9d912011-03-10 18:48:51 -0800724 return ret;
Thomas Gleixner36cf3b52007-07-15 23:41:20 -0700725}
726
727static int get_futex_value_locked(u32 *dest, u32 __user *from)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728{
729 int ret;
730
Peter Zijlstraa8663742006-12-06 20:32:20 -0800731 pagefault_disable();
Ingo Molnare2970f22006-06-27 02:54:47 -0700732 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
Peter Zijlstraa8663742006-12-06 20:32:20 -0800733 pagefault_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734
735 return ret ? -EFAULT : 0;
736}
737
Ingo Molnarc87e2832006-06-27 02:54:58 -0700738
739/*
740 * PI code:
741 */
742static int refill_pi_state_cache(void)
743{
744 struct futex_pi_state *pi_state;
745
746 if (likely(current->pi_state_cache))
747 return 0;
748
Burman Yan4668edc2006-12-06 20:38:51 -0800749 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700750
751 if (!pi_state)
752 return -ENOMEM;
753
Ingo Molnarc87e2832006-06-27 02:54:58 -0700754 INIT_LIST_HEAD(&pi_state->list);
755 /* pi_mutex gets initialized later */
756 pi_state->owner = NULL;
757 atomic_set(&pi_state->refcount, 1);
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200758 pi_state->key = FUTEX_KEY_INIT;
Ingo Molnarc87e2832006-06-27 02:54:58 -0700759
760 current->pi_state_cache = pi_state;
761
762 return 0;
763}
764
765static struct futex_pi_state * alloc_pi_state(void)
766{
767 struct futex_pi_state *pi_state = current->pi_state_cache;
768
769 WARN_ON(!pi_state);
770 current->pi_state_cache = NULL;
771
772 return pi_state;
773}
774
Brian Silverman30a6b802014-10-25 20:20:37 -0400775/*
Thomas Gleixner29e9ee52015-12-19 20:07:39 +0000776 * Drops a reference to the pi_state object and frees or caches it
777 * when the last reference is gone.
778 *
Brian Silverman30a6b802014-10-25 20:20:37 -0400779 * Must be called with the hb lock held.
780 */
Thomas Gleixner29e9ee52015-12-19 20:07:39 +0000781static void put_pi_state(struct futex_pi_state *pi_state)
Ingo Molnarc87e2832006-06-27 02:54:58 -0700782{
Brian Silverman30a6b802014-10-25 20:20:37 -0400783 if (!pi_state)
784 return;
785
Ingo Molnarc87e2832006-06-27 02:54:58 -0700786 if (!atomic_dec_and_test(&pi_state->refcount))
787 return;
788
789 /*
790 * If pi_state->owner is NULL, the owner is most probably dying
791 * and has cleaned up the pi_state already
792 */
793 if (pi_state->owner) {
Thomas Gleixner1d615482009-11-17 14:54:03 +0100794 raw_spin_lock_irq(&pi_state->owner->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700795 list_del_init(&pi_state->list);
Thomas Gleixner1d615482009-11-17 14:54:03 +0100796 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700797
798 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
799 }
800
801 if (current->pi_state_cache)
802 kfree(pi_state);
803 else {
804 /*
805 * pi_state->list is already empty.
806 * clear pi_state->owner.
807 * refcount is at 0 - put it back to 1.
808 */
809 pi_state->owner = NULL;
810 atomic_set(&pi_state->refcount, 1);
811 current->pi_state_cache = pi_state;
812 }
813}
814
815/*
816 * Look up the task based on what TID userspace gave us.
 817 * We don't trust it.
818 */
819static struct task_struct * futex_find_get_task(pid_t pid)
820{
821 struct task_struct *p;
822
Oleg Nesterovd359b542006-09-29 02:00:55 -0700823 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -0700824 p = find_task_by_vpid(pid);
Michal Hocko7a0ea092010-06-30 09:51:19 +0200825 if (p)
826 get_task_struct(p);
Thomas Gleixnera06381f2007-06-23 11:48:40 +0200827
Oleg Nesterovd359b542006-09-29 02:00:55 -0700828 rcu_read_unlock();
Ingo Molnarc87e2832006-06-27 02:54:58 -0700829
830 return p;
831}
832
833/*
834 * This task is holding PI mutexes at exit time => bad.
835 * Kernel cleans up PI-state, but userspace is likely hosed.
836 * (Robust-futex cleanup is separate and might save the day for userspace.)
837 */
838void exit_pi_state_list(struct task_struct *curr)
839{
Ingo Molnarc87e2832006-06-27 02:54:58 -0700840 struct list_head *next, *head = &curr->pi_state_list;
841 struct futex_pi_state *pi_state;
Ingo Molnar627371d2006-07-29 05:16:20 +0200842 struct futex_hash_bucket *hb;
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200843 union futex_key key = FUTEX_KEY_INIT;
Ingo Molnarc87e2832006-06-27 02:54:58 -0700844
Thomas Gleixnera0c1e902008-02-23 15:23:57 -0800845 if (!futex_cmpxchg_enabled)
846 return;
Ingo Molnarc87e2832006-06-27 02:54:58 -0700847 /*
848 * We are a ZOMBIE and nobody can enqueue itself on
849 * pi_state_list anymore, but we have to be careful
Ingo Molnar627371d2006-07-29 05:16:20 +0200850 * versus waiters unqueueing themselves:
Ingo Molnarc87e2832006-06-27 02:54:58 -0700851 */
Thomas Gleixner1d615482009-11-17 14:54:03 +0100852 raw_spin_lock_irq(&curr->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700853 while (!list_empty(head)) {
854
855 next = head->next;
856 pi_state = list_entry(next, struct futex_pi_state, list);
857 key = pi_state->key;
Ingo Molnar627371d2006-07-29 05:16:20 +0200858 hb = hash_futex(&key);
Thomas Gleixner1d615482009-11-17 14:54:03 +0100859 raw_spin_unlock_irq(&curr->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700860
Ingo Molnarc87e2832006-06-27 02:54:58 -0700861 spin_lock(&hb->lock);
862
Thomas Gleixner1d615482009-11-17 14:54:03 +0100863 raw_spin_lock_irq(&curr->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +0200864 /*
865 * We dropped the pi-lock, so re-check whether this
866 * task still owns the PI-state:
867 */
Ingo Molnarc87e2832006-06-27 02:54:58 -0700868 if (head->next != next) {
869 spin_unlock(&hb->lock);
870 continue;
871 }
872
Ingo Molnarc87e2832006-06-27 02:54:58 -0700873 WARN_ON(pi_state->owner != curr);
Ingo Molnar627371d2006-07-29 05:16:20 +0200874 WARN_ON(list_empty(&pi_state->list));
875 list_del_init(&pi_state->list);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700876 pi_state->owner = NULL;
Thomas Gleixner1d615482009-11-17 14:54:03 +0100877 raw_spin_unlock_irq(&curr->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700878
879 rt_mutex_unlock(&pi_state->pi_mutex);
880
881 spin_unlock(&hb->lock);
882
Thomas Gleixner1d615482009-11-17 14:54:03 +0100883 raw_spin_lock_irq(&curr->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700884 }
Thomas Gleixner1d615482009-11-17 14:54:03 +0100885 raw_spin_unlock_irq(&curr->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700886}
887
Thomas Gleixner54a21782014-06-03 12:27:08 +0000888/*
889 * We need to check the following states:
890 *
891 * Waiter | pi_state | pi->owner | uTID | uODIED | ?
892 *
893 * [1] NULL | --- | --- | 0 | 0/1 | Valid
894 * [2] NULL | --- | --- | >0 | 0/1 | Valid
895 *
896 * [3] Found | NULL | -- | Any | 0/1 | Invalid
897 *
898 * [4] Found | Found | NULL | 0 | 1 | Valid
899 * [5] Found | Found | NULL | >0 | 1 | Invalid
900 *
901 * [6] Found | Found | task | 0 | 1 | Valid
902 *
903 * [7] Found | Found | NULL | Any | 0 | Invalid
904 *
905 * [8] Found | Found | task | ==taskTID | 0/1 | Valid
906 * [9] Found | Found | task | 0 | 0 | Invalid
907 * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
908 *
909 * [1] Indicates that the kernel can acquire the futex atomically. We
 910 * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
911 *
912 * [2] Valid, if TID does not belong to a kernel thread. If no matching
913 * thread is found then it indicates that the owner TID has died.
914 *
915 * [3] Invalid. The waiter is queued on a non PI futex
916 *
917 * [4] Valid state after exit_robust_list(), which sets the user space
918 * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
919 *
920 * [5] The user space value got manipulated between exit_robust_list()
921 * and exit_pi_state_list()
922 *
923 * [6] Valid state after exit_pi_state_list() which sets the new owner in
924 * the pi_state but cannot access the user space value.
925 *
926 * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
927 *
928 * [8] Owner and user space value match
929 *
930 * [9] There is no transient state which sets the user space TID to 0
931 * except exit_robust_list(), but this is indicated by the
932 * FUTEX_OWNER_DIED bit. See [4]
933 *
934 * [10] There is no transient state which leaves owner and user space
935 * TID out of sync.
936 */
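/*
 * For reference, a minimal user-space sketch (assumptions only, not defined
 * by this file) of the PI protocol the table above validates against: the
 * uncontended fast path stores the owner's TID in the futex word with a
 * compare-and-swap, and only the contended path enters the kernel, which is
 * where FUTEX_WAITERS/FUTEX_OWNER_DIED end up in the word.
 *
 *	#include <linux/futex.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdatomic.h>
 *
 *	static atomic_uint pi_futex;		// hypothetical PI futex word
 *
 *	static void pi_lock(void)
 *	{
 *		unsigned int expected = 0;
 *		unsigned int tid = syscall(SYS_gettid);
 *		// Fast path: 0 -> TID means we own the lock, no syscall needed.
 *		if (atomic_compare_exchange_strong(&pi_futex, &expected, tid))
 *			return;
 *		// Slow path: the kernel sets FUTEX_WAITERS, boosts the owner and
 *		// assigns ownership (see attach_to_pi_owner()/pi_state below).
 *		syscall(SYS_futex, &pi_futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *	}
 *
 *	static void pi_unlock(void)
 *	{
 *		unsigned int tid = syscall(SYS_gettid);
 *		// Fast path: TID -> 0 only succeeds while there are no waiters.
 *		if (atomic_compare_exchange_strong(&pi_futex, &tid, 0))
 *			return;
 *		syscall(SYS_futex, &pi_futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *	}
 */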
Thomas Gleixnere60cbc52014-06-11 20:45:39 +0000937
938/*
939 * Validate that the existing waiter has a pi_state and sanity check
940 * the pi_state against the user space value. If correct, attach to
941 * it.
942 */
943static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
944 struct futex_pi_state **ps)
945{
946 pid_t pid = uval & FUTEX_TID_MASK;
947
948 /*
949 * Userspace might have messed up non-PI and PI futexes [3]
950 */
951 if (unlikely(!pi_state))
952 return -EINVAL;
953
954 WARN_ON(!atomic_read(&pi_state->refcount));
955
956 /*
957 * Handle the owner died case:
958 */
959 if (uval & FUTEX_OWNER_DIED) {
960 /*
961 * exit_pi_state_list sets owner to NULL and wakes the
962 * topmost waiter. The task which acquires the
963 * pi_state->rt_mutex will fixup owner.
964 */
965 if (!pi_state->owner) {
966 /*
967 * No pi state owner, but the user space TID
968 * is not 0. Inconsistent state. [5]
969 */
970 if (pid)
971 return -EINVAL;
972 /*
973 * Take a ref on the state and return success. [4]
974 */
975 goto out_state;
976 }
977
978 /*
979 * If TID is 0, then either the dying owner has not
980 * yet executed exit_pi_state_list() or some waiter
981 * acquired the rtmutex in the pi state, but did not
982 * yet fixup the TID in user space.
983 *
984 * Take a ref on the state and return success. [6]
985 */
986 if (!pid)
987 goto out_state;
988 } else {
989 /*
990 * If the owner died bit is not set, then the pi_state
991 * must have an owner. [7]
992 */
993 if (!pi_state->owner)
994 return -EINVAL;
995 }
996
997 /*
998 * Bail out if user space manipulated the futex value. If pi
999 * state exists then the owner TID must be the same as the
1000 * user space TID. [9/10]
1001 */
1002 if (pid != task_pid_vnr(pi_state->owner))
1003 return -EINVAL;
1004out_state:
1005 atomic_inc(&pi_state->refcount);
1006 *ps = pi_state;
1007 return 0;
1008}
1009
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001010/*
1011 * Lookup the task for the TID provided from user space and attach to
1012 * it after doing proper sanity checks.
1013 */
1014static int attach_to_pi_owner(u32 uval, union futex_key *key,
1015 struct futex_pi_state **ps)
Ingo Molnarc87e2832006-06-27 02:54:58 -07001016{
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001017 pid_t pid = uval & FUTEX_TID_MASK;
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001018 struct futex_pi_state *pi_state;
1019 struct task_struct *p;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001020
1021 /*
Ingo Molnare3f2dde2006-07-29 05:17:57 +02001022 * We are the first waiter - try to look up the real owner and attach
Thomas Gleixner54a21782014-06-03 12:27:08 +00001023 * the new pi_state to it, but bail out when TID = 0 [1]
Ingo Molnarc87e2832006-06-27 02:54:58 -07001024 */
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001025 if (!pid)
Ingo Molnare3f2dde2006-07-29 05:17:57 +02001026 return -ESRCH;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001027 p = futex_find_get_task(pid);
Michal Hocko7a0ea092010-06-30 09:51:19 +02001028 if (!p)
1029 return -ESRCH;
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001030
Oleg Nesterova2129462015-02-02 15:05:36 +01001031 if (unlikely(p->flags & PF_KTHREAD)) {
Thomas Gleixnerf0d71b32014-05-12 20:45:35 +00001032 put_task_struct(p);
1033 return -EPERM;
1034 }
1035
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001036 /*
 1037 * We need to look at the task state flags to figure out
1038 * whether the task is exiting. To protect against the do_exit
1039 * change of the task flags, we do this protected by
1040 * p->pi_lock:
1041 */
Thomas Gleixner1d615482009-11-17 14:54:03 +01001042 raw_spin_lock_irq(&p->pi_lock);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001043 if (unlikely(p->flags & PF_EXITING)) {
1044 /*
1045 * The task is on the way out. When PF_EXITPIDONE is
1046 * set, we know that the task has finished the
1047 * cleanup:
1048 */
1049 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
1050
Thomas Gleixner1d615482009-11-17 14:54:03 +01001051 raw_spin_unlock_irq(&p->pi_lock);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001052 put_task_struct(p);
1053 return ret;
1054 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07001055
Thomas Gleixner54a21782014-06-03 12:27:08 +00001056 /*
1057 * No existing pi state. First waiter. [2]
1058 */
Ingo Molnarc87e2832006-06-27 02:54:58 -07001059 pi_state = alloc_pi_state();
1060
1061 /*
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001062 * Initialize the pi_mutex in locked state and make @p
Ingo Molnarc87e2832006-06-27 02:54:58 -07001063 * the owner of it:
1064 */
1065 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1066
1067 /* Store the key for possible exit cleanups: */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001068 pi_state->key = *key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001069
Ingo Molnar627371d2006-07-29 05:16:20 +02001070 WARN_ON(!list_empty(&pi_state->list));
Ingo Molnarc87e2832006-06-27 02:54:58 -07001071 list_add(&pi_state->list, &p->pi_state_list);
1072 pi_state->owner = p;
Thomas Gleixner1d615482009-11-17 14:54:03 +01001073 raw_spin_unlock_irq(&p->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001074
1075 put_task_struct(p);
1076
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001077 *ps = pi_state;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001078
1079 return 0;
1080}
1081
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001082static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
1083 union futex_key *key, struct futex_pi_state **ps)
1084{
1085 struct futex_q *match = futex_top_waiter(hb, key);
1086
1087 /*
1088 * If there is a waiter on that futex, validate it and
1089 * attach to the pi_state when the validation succeeds.
1090 */
1091 if (match)
1092 return attach_to_pi_state(uval, match->pi_state, ps);
1093
1094 /*
1095 * We are the first waiter - try to look up the owner based on
1096 * @uval and attach to it.
1097 */
1098 return attach_to_pi_owner(uval, key, ps);
1099}
1100
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001101static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1102{
1103 u32 uninitialized_var(curval);
1104
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001105 if (unlikely(should_fail_futex(true)))
1106 return -EFAULT;
1107
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001108 if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
1109 return -EFAULT;
1110
 1111 /* If user space value changed, let the caller retry */
1112 return curval != uval ? -EAGAIN : 0;
1113}
1114
Darren Hart1a520842009-04-03 13:39:52 -07001115/**
Darren Hartd96ee562009-09-21 22:30:22 -07001116 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
Darren Hartbab5bc92009-04-07 23:23:50 -07001117 * @uaddr: the pi futex user address
1118 * @hb: the pi futex hash bucket
1119 * @key: the futex key associated with uaddr and hb
1120 * @ps: the pi_state pointer where we store the result of the
1121 * lookup
1122 * @task: the task to perform the atomic lock work for. This will
1123 * be "current" except in the case of requeue pi.
1124 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
Darren Hart1a520842009-04-03 13:39:52 -07001125 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08001126 * Return:
1127 * 0 - ready to wait;
1128 * 1 - acquired the lock;
Darren Hart1a520842009-04-03 13:39:52 -07001129 * <0 - error
1130 *
1131 * The hb->lock and futex_key refs shall be held by the caller.
1132 */
1133static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1134 union futex_key *key,
1135 struct futex_pi_state **ps,
Darren Hartbab5bc92009-04-07 23:23:50 -07001136 struct task_struct *task, int set_waiters)
Darren Hart1a520842009-04-03 13:39:52 -07001137{
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001138 u32 uval, newval, vpid = task_pid_vnr(task);
1139 struct futex_q *match;
1140 int ret;
Darren Hart1a520842009-04-03 13:39:52 -07001141
1142 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001143 * Read the user space value first so we can validate a few
1144 * things before proceeding further.
Darren Hart1a520842009-04-03 13:39:52 -07001145 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001146 if (get_futex_value_locked(&uval, uaddr))
Darren Hart1a520842009-04-03 13:39:52 -07001147 return -EFAULT;
1148
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001149 if (unlikely(should_fail_futex(true)))
1150 return -EFAULT;
1151
Darren Hart1a520842009-04-03 13:39:52 -07001152 /*
1153 * Detect deadlocks.
1154 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001155 if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
Darren Hart1a520842009-04-03 13:39:52 -07001156 return -EDEADLK;
1157
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001158 if ((unlikely(should_fail_futex(true))))
1159 return -EDEADLK;
1160
Darren Hart1a520842009-04-03 13:39:52 -07001161 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001162 * Lookup existing state first. If it exists, try to attach to
1163 * its pi_state.
Darren Hart1a520842009-04-03 13:39:52 -07001164 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001165 match = futex_top_waiter(hb, key);
1166 if (match)
1167 return attach_to_pi_state(uval, match->pi_state, ps);
1168
1169 /*
1170 * No waiter and user TID is 0. We are here because the
1171 * waiters or the owner died bit is set or called from
1172 * requeue_cmp_pi or for whatever reason something took the
1173 * syscall.
1174 */
1175 if (!(uval & FUTEX_TID_MASK)) {
Thomas Gleixnerb3eaa9f2014-06-03 12:27:06 +00001176 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001177 * We take over the futex. No other waiters and the user space
1178 * TID is 0. We preserve the owner died bit.
Thomas Gleixnerb3eaa9f2014-06-03 12:27:06 +00001179 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001180 newval = uval & FUTEX_OWNER_DIED;
1181 newval |= vpid;
1182
1183 /* The futex requeue_pi code can enforce the waiters bit */
1184 if (set_waiters)
1185 newval |= FUTEX_WAITERS;
1186
1187 ret = lock_pi_update_atomic(uaddr, uval, newval);
1188 /* If the take over worked, return 1 */
1189 return ret < 0 ? ret : 1;
Thomas Gleixnerb3eaa9f2014-06-03 12:27:06 +00001190 }
Darren Hart1a520842009-04-03 13:39:52 -07001191
Darren Hart1a520842009-04-03 13:39:52 -07001192 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001193 * First waiter. Set the waiters bit before attaching ourselves to
1194 * the owner. If owner tries to unlock, it will be forced into
1195 * the kernel and blocked on hb->lock.
Darren Hart1a520842009-04-03 13:39:52 -07001196 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001197 newval = uval | FUTEX_WAITERS;
1198 ret = lock_pi_update_atomic(uaddr, uval, newval);
1199 if (ret)
1200 return ret;
Darren Hart1a520842009-04-03 13:39:52 -07001201 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001202 * If the update of the user space value succeeded, we try to
1203 * attach to the owner. If that fails, no harm done, we only
1204 * set the FUTEX_WAITERS bit in the user space variable.
Darren Hart1a520842009-04-03 13:39:52 -07001205 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001206 return attach_to_pi_owner(uval, key, ps);
Darren Hart1a520842009-04-03 13:39:52 -07001207}
1208
Lai Jiangshan2e129782010-12-22 14:18:50 +08001209/**
1210 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1211 * @q: The futex_q to unqueue
1212 *
1213 * The q->lock_ptr must not be NULL and must be held by the caller.
1214 */
1215static void __unqueue_futex(struct futex_q *q)
1216{
1217 struct futex_hash_bucket *hb;
1218
Steven Rostedt29096202011-03-17 15:21:07 -04001219 if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
1220 || WARN_ON(plist_node_empty(&q->list)))
Lai Jiangshan2e129782010-12-22 14:18:50 +08001221 return;
1222
1223 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1224 plist_del(&q->list, &hb->chain);
Linus Torvalds11d46162014-03-20 22:11:17 -07001225 hb_waiters_dec(hb);
Lai Jiangshan2e129782010-12-22 14:18:50 +08001226}
1227
Ingo Molnarc87e2832006-06-27 02:54:58 -07001228/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 * The hash bucket lock must be held when this is called.
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001230 * Afterwards, the futex_q must not be accessed. Callers
1231 * must ensure to later call wake_up_q() for the actual
1232 * wakeups to occur.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 */
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001234static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235{
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001236 struct task_struct *p = q->task;
1237
Darren Hartaa109902012-11-26 16:29:56 -08001238 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1239 return;
1240
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001241 /*
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001242 * Queue the task for later wakeup for after we've released
1243 * the hb->lock. wake_q_add() grabs reference to p.
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001244 */
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001245 wake_q_add(wake_q, p);
Lai Jiangshan2e129782010-12-22 14:18:50 +08001246 __unqueue_futex(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 /*
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001248 * The waiting task can free the futex_q as soon as
1249 * q->lock_ptr = NULL is written, without taking any locks. A
1250 * memory barrier is required here to prevent the following
1251 * store to lock_ptr from getting ahead of the plist_del.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 */
Ralf Baechleccdea2f2006-12-06 20:40:26 -08001253 smp_wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254 q->lock_ptr = NULL;
1255}
1256
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02001257static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
1258 struct futex_hash_bucket *hb)
Ingo Molnarc87e2832006-06-27 02:54:58 -07001259{
1260 struct task_struct *new_owner;
1261 struct futex_pi_state *pi_state = this->pi_state;
Vitaliy Ivanov7cfdaf32011-07-07 15:10:31 +03001262 u32 uninitialized_var(curval), newval;
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02001263 WAKE_Q(wake_q);
1264 bool deboost;
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001265 int ret = 0;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001266
1267 if (!pi_state)
1268 return -EINVAL;
1269
Thomas Gleixner51246bf2010-02-02 11:40:27 +01001270 /*
1271 * If current does not own the pi_state then the futex is
1272 * inconsistent and user space fiddled with the futex value.
1273 */
1274 if (pi_state->owner != current)
1275 return -EINVAL;
1276
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001277 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001278 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
1279
1280 /*
Steven Rostedtf123c982011-01-06 15:08:29 -05001281 * It is possible that the next waiter (the one that brought
1282 * this owner to the kernel) timed out and is no longer
1283 * waiting on the lock.
Ingo Molnarc87e2832006-06-27 02:54:58 -07001284 */
1285 if (!new_owner)
1286 new_owner = this->task;
1287
1288 /*
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001289 * We pass it to the next owner. The WAITERS bit is always
 1290 * kept enabled while there is PI state around. We clean up the
1291 * owner died bit, because we are the owner.
Ingo Molnarc87e2832006-06-27 02:54:58 -07001292 */
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001293 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001294
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001295 if (unlikely(should_fail_futex(true)))
1296 ret = -EFAULT;
1297
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001298 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
1299 ret = -EFAULT;
1300 else if (curval != uval)
1301 ret = -EINVAL;
1302 if (ret) {
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001303 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001304 return ret;
Ingo Molnare3f2dde2006-07-29 05:17:57 +02001305 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07001306
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001307 raw_spin_lock(&pi_state->owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001308 WARN_ON(list_empty(&pi_state->list));
1309 list_del_init(&pi_state->list);
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001310 raw_spin_unlock(&pi_state->owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001311
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001312 raw_spin_lock(&new_owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001313 WARN_ON(!list_empty(&pi_state->list));
Ingo Molnarc87e2832006-06-27 02:54:58 -07001314 list_add(&pi_state->list, &new_owner->pi_state_list);
1315 pi_state->owner = new_owner;
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001316 raw_spin_unlock(&new_owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001317
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001318 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02001319
1320 deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
1321
1322 /*
1323	 * First unlock HB so the waiter does not spin on it once it is woken
1324	 * up. Second, wake up the waiter before the priority is adjusted. If we
1325 * deboost first (and lose our higher priority), then the task might get
1326 * scheduled away before the wake up can take place.
1327 */
1328 spin_unlock(&hb->lock);
1329 wake_up_q(&wake_q);
1330 if (deboost)
1331 rt_mutex_adjust_prio(current);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001332
1333 return 0;
1334}
1335
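/*
 * Illustrative sketch (not part of this file) of the user space side of
 * the PI protocol that wake_futex_pi() completes; cmpxchg() and tid are
 * shorthand here, only the FUTEX_LOCK_PI/FUTEX_UNLOCK_PI calls are ABI:
 *
 *	lock:	if (cmpxchg(futex, 0, tid) != 0)
 *			syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 *	unlock:	if (cmpxchg(futex, tid, 0) != tid)
 *			syscall(SYS_futex, futex, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *
 * The kernel path above only runs when the unlock fast path fails, i.e.
 * when FUTEX_WAITERS is set and ownership must be handed to a waiter.
 */
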
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336/*
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001337 * Express the locking dependencies for lockdep:
1338 */
1339static inline void
1340double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1341{
1342 if (hb1 <= hb2) {
1343 spin_lock(&hb1->lock);
1344 if (hb1 < hb2)
1345 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1346 } else { /* hb1 > hb2 */
1347 spin_lock(&hb2->lock);
1348 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1349 }
1350}
1351
Darren Hart5eb3dc62009-03-12 00:55:52 -07001352static inline void
1353double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1354{
Darren Hartf061d352009-03-12 15:11:18 -07001355 spin_unlock(&hb1->lock);
Ingo Molnar88f502f2009-03-13 10:32:07 +01001356 if (hb1 != hb2)
1357 spin_unlock(&hb2->lock);
Darren Hart5eb3dc62009-03-12 00:55:52 -07001358}
1359
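/*
 * A short sketch of the ABBA deadlock that the address ordering in
 * double_lock_hb() avoids (illustrative only). Without the hb1 <= hb2
 * rule, two concurrent two-bucket operations could interleave as:
 *
 *	CPU 0				CPU 1
 *	spin_lock(&hbA->lock);		spin_lock(&hbB->lock);
 *	spin_lock(&hbB->lock);		spin_lock(&hbA->lock);
 *	   <waits for CPU 1>		   <waits for CPU 0>
 *
 * Ordering the acquisitions by bucket address makes both CPUs agree on
 * which lock to take first, so one of them always makes progress.
 */
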
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001360/*
Darren Hartb2d09942009-03-12 00:55:37 -07001361 * Wake up waiters matching bitset queued on this futex (uaddr).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 */
Darren Hartb41277d2010-11-08 13:10:09 -08001363static int
1364futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365{
Ingo Molnare2970f22006-06-27 02:54:47 -07001366 struct futex_hash_bucket *hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 struct futex_q *this, *next;
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001368 union futex_key key = FUTEX_KEY_INIT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 int ret;
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001370 WAKE_Q(wake_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
Thomas Gleixnercd689982008-02-01 17:45:14 +01001372 if (!bitset)
1373 return -EINVAL;
1374
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001375 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 if (unlikely(ret != 0))
1377 goto out;
1378
Ingo Molnare2970f22006-06-27 02:54:47 -07001379 hb = hash_futex(&key);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -08001380
1381	/* Make sure we really have tasks to wake up */
1382 if (!hb_waiters_pending(hb))
1383 goto out_put_key;
1384
Ingo Molnare2970f22006-06-27 02:54:47 -07001385 spin_lock(&hb->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
Jason Low0d00c7b2014-01-12 15:31:22 -08001387 plist_for_each_entry_safe(this, next, &hb->chain, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 if (match_futex (&this->key, &key)) {
Darren Hart52400ba2009-04-03 13:40:49 -07001389 if (this->pi_state || this->rt_waiter) {
Ingo Molnared6f7b12006-07-01 04:35:46 -07001390 ret = -EINVAL;
1391 break;
1392 }
Thomas Gleixnercd689982008-02-01 17:45:14 +01001393
1394 /* Check if one of the bits is set in both bitsets */
1395 if (!(this->bitset & bitset))
1396 continue;
1397
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001398 mark_wake_futex(&wake_q, this);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 if (++ret >= nr_wake)
1400 break;
1401 }
1402 }
1403
Ingo Molnare2970f22006-06-27 02:54:47 -07001404 spin_unlock(&hb->lock);
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001405 wake_up_q(&wake_q);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -08001406out_put_key:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001407 put_futex_key(&key);
Darren Hart42d35d42008-12-29 15:49:53 -08001408out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 return ret;
1410}
1411
1412/*
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001413 * Wake up all waiters hashed on the physical page that is mapped
1414 * to this virtual address:
1415 */
Ingo Molnare2970f22006-06-27 02:54:47 -07001416static int
Darren Hartb41277d2010-11-08 13:10:09 -08001417futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
Ingo Molnare2970f22006-06-27 02:54:47 -07001418 int nr_wake, int nr_wake2, int op)
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001419{
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001420 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
Ingo Molnare2970f22006-06-27 02:54:47 -07001421 struct futex_hash_bucket *hb1, *hb2;
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001422 struct futex_q *this, *next;
Darren Harte4dc5b72009-03-12 00:56:13 -07001423 int ret, op_ret;
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001424 WAKE_Q(wake_q);
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001425
Darren Harte4dc5b72009-03-12 00:56:13 -07001426retry:
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001427 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001428 if (unlikely(ret != 0))
1429 goto out;
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001430 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001431 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08001432 goto out_put_key1;
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001433
Ingo Molnare2970f22006-06-27 02:54:47 -07001434 hb1 = hash_futex(&key1);
1435 hb2 = hash_futex(&key2);
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001436
Darren Harte4dc5b72009-03-12 00:56:13 -07001437retry_private:
Thomas Gleixnereaaea802009-10-04 09:34:17 +02001438 double_lock_hb(hb1, hb2);
Ingo Molnare2970f22006-06-27 02:54:47 -07001439 op_ret = futex_atomic_op_inuser(op, uaddr2);
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001440 if (unlikely(op_ret < 0)) {
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001441
Darren Hart5eb3dc62009-03-12 00:55:52 -07001442 double_unlock_hb(hb1, hb2);
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001443
David Howells7ee1dd32006-01-06 00:11:44 -08001444#ifndef CONFIG_MMU
Ingo Molnare2970f22006-06-27 02:54:47 -07001445 /*
1446 * we don't get EFAULT from MMU faults if we don't have an MMU,
1447 * but we might get them from range checking
1448 */
David Howells7ee1dd32006-01-06 00:11:44 -08001449 ret = op_ret;
Darren Hart42d35d42008-12-29 15:49:53 -08001450 goto out_put_keys;
David Howells7ee1dd32006-01-06 00:11:44 -08001451#endif
1452
David Gibson796f8d92005-11-07 00:59:33 -08001453 if (unlikely(op_ret != -EFAULT)) {
1454 ret = op_ret;
Darren Hart42d35d42008-12-29 15:49:53 -08001455 goto out_put_keys;
David Gibson796f8d92005-11-07 00:59:33 -08001456 }
1457
Thomas Gleixnerd0725992009-06-11 23:15:43 +02001458 ret = fault_in_user_writeable(uaddr2);
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001459 if (ret)
Darren Hartde87fcc2009-03-12 00:55:46 -07001460 goto out_put_keys;
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001461
Darren Hartb41277d2010-11-08 13:10:09 -08001462 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07001463 goto retry_private;
1464
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001465 put_futex_key(&key2);
1466 put_futex_key(&key1);
Darren Harte4dc5b72009-03-12 00:56:13 -07001467 goto retry;
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001468 }
1469
Jason Low0d00c7b2014-01-12 15:31:22 -08001470 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001471 if (match_futex (&this->key, &key1)) {
Darren Hartaa109902012-11-26 16:29:56 -08001472 if (this->pi_state || this->rt_waiter) {
1473 ret = -EINVAL;
1474 goto out_unlock;
1475 }
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001476 mark_wake_futex(&wake_q, this);
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001477 if (++ret >= nr_wake)
1478 break;
1479 }
1480 }
1481
1482 if (op_ret > 0) {
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001483 op_ret = 0;
Jason Low0d00c7b2014-01-12 15:31:22 -08001484 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001485 if (match_futex (&this->key, &key2)) {
Darren Hartaa109902012-11-26 16:29:56 -08001486 if (this->pi_state || this->rt_waiter) {
1487 ret = -EINVAL;
1488 goto out_unlock;
1489 }
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001490 mark_wake_futex(&wake_q, this);
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001491 if (++op_ret >= nr_wake2)
1492 break;
1493 }
1494 }
1495 ret += op_ret;
1496 }
1497
Darren Hartaa109902012-11-26 16:29:56 -08001498out_unlock:
Darren Hart5eb3dc62009-03-12 00:55:52 -07001499 double_unlock_hb(hb1, hb2);
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001500 wake_up_q(&wake_q);
Darren Hart42d35d42008-12-29 15:49:53 -08001501out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001502 put_futex_key(&key2);
Darren Hart42d35d42008-12-29 15:49:53 -08001503out_put_key1:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001504 put_futex_key(&key1);
Darren Hart42d35d42008-12-29 15:49:53 -08001505out:
Jakub Jelinek4732efb2005-09-06 15:16:25 -07001506 return ret;
1507}
1508
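/*
 * For reference, the FUTEX_WAKE_OP semantics implemented above, written
 * as a sketch; decoding @op into <op>, oparg, <cmp> and cmparg is done
 * by futex_atomic_op_inuser():
 *
 *	oldval = *uaddr2;
 *	*uaddr2 = oldval <op> oparg;		(SET, ADD, OR, ANDN, XOR)
 *	wake up to nr_wake waiters on uaddr1;
 *	if (oldval <cmp> cmparg)		(EQ, NE, LT, LE, GT, GE)
 *		wake up to nr_wake2 waiters on uaddr2;
 *	return the total number of woken waiters, or a negative error;
 */
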
Darren Hart9121e472009-04-03 13:40:31 -07001509/**
1510 * requeue_futex() - Requeue a futex_q from one hb to another
1511 * @q: the futex_q to requeue
1512 * @hb1: the source hash_bucket
1513 * @hb2: the target hash_bucket
1514 * @key2: the new key for the requeued futex_q
1515 */
1516static inline
1517void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1518 struct futex_hash_bucket *hb2, union futex_key *key2)
1519{
1520
1521 /*
1522 * If key1 and key2 hash to the same bucket, no need to
1523 * requeue.
1524 */
1525 if (likely(&hb1->chain != &hb2->chain)) {
1526 plist_del(&q->list, &hb1->chain);
Linus Torvalds11d46162014-03-20 22:11:17 -07001527 hb_waiters_dec(hb1);
Darren Hart9121e472009-04-03 13:40:31 -07001528 plist_add(&q->list, &hb2->chain);
Linus Torvalds11d46162014-03-20 22:11:17 -07001529 hb_waiters_inc(hb2);
Darren Hart9121e472009-04-03 13:40:31 -07001530 q->lock_ptr = &hb2->lock;
Darren Hart9121e472009-04-03 13:40:31 -07001531 }
1532 get_futex_key_refs(key2);
1533 q->key = *key2;
1534}
1535
Darren Hart52400ba2009-04-03 13:40:49 -07001536/**
1537 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
Darren Hartd96ee562009-09-21 22:30:22 -07001538 * @q: the futex_q
1539 * @key: the key of the requeue target futex
1540 * @hb: the hash_bucket of the requeue target futex
Darren Hart52400ba2009-04-03 13:40:49 -07001541 *
1542 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1543 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1544 * to the requeue target futex so the waiter can detect the wakeup on the right
1545 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
Darren Hartbeda2c72009-08-09 15:34:39 -07001546 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1547 * to protect access to the pi_state to fixup the owner later. Must be called
1548 * with both q->lock_ptr and hb->lock held.
Darren Hart52400ba2009-04-03 13:40:49 -07001549 */
1550static inline
Darren Hartbeda2c72009-08-09 15:34:39 -07001551void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1552 struct futex_hash_bucket *hb)
Darren Hart52400ba2009-04-03 13:40:49 -07001553{
Darren Hart52400ba2009-04-03 13:40:49 -07001554 get_futex_key_refs(key);
1555 q->key = *key;
1556
Lai Jiangshan2e129782010-12-22 14:18:50 +08001557 __unqueue_futex(q);
Darren Hart52400ba2009-04-03 13:40:49 -07001558
1559 WARN_ON(!q->rt_waiter);
1560 q->rt_waiter = NULL;
1561
Darren Hartbeda2c72009-08-09 15:34:39 -07001562 q->lock_ptr = &hb->lock;
Darren Hartbeda2c72009-08-09 15:34:39 -07001563
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001564 wake_up_state(q->task, TASK_NORMAL);
Darren Hart52400ba2009-04-03 13:40:49 -07001565}
1566
1567/**
1568 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
Darren Hartbab5bc92009-04-07 23:23:50 -07001569 * @pifutex: the user address of the to futex
1570 * @hb1: the from futex hash bucket, must be locked by the caller
1571 * @hb2: the to futex hash bucket, must be locked by the caller
1572 * @key1: the from futex key
1573 * @key2: the to futex key
1574 * @ps: address to store the pi_state pointer
1575 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
Darren Hart52400ba2009-04-03 13:40:49 -07001576 *
1577 * Try and get the lock on behalf of the top waiter if we can do it atomically.
Darren Hartbab5bc92009-04-07 23:23:50 -07001578 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1579 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1580 * hb1 and hb2 must be held by the caller.
Darren Hart52400ba2009-04-03 13:40:49 -07001581 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08001582 * Return:
1583 * 0 - failed to acquire the lock atomically;
Thomas Gleixner866293e2014-05-12 20:45:34 +00001584 * >0 - acquired the lock, return value is vpid of the top_waiter
Darren Hart52400ba2009-04-03 13:40:49 -07001585 * <0 - error
1586 */
1587static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1588 struct futex_hash_bucket *hb1,
1589 struct futex_hash_bucket *hb2,
1590 union futex_key *key1, union futex_key *key2,
Darren Hartbab5bc92009-04-07 23:23:50 -07001591 struct futex_pi_state **ps, int set_waiters)
Darren Hart52400ba2009-04-03 13:40:49 -07001592{
Darren Hartbab5bc92009-04-07 23:23:50 -07001593 struct futex_q *top_waiter = NULL;
Darren Hart52400ba2009-04-03 13:40:49 -07001594 u32 curval;
Thomas Gleixner866293e2014-05-12 20:45:34 +00001595 int ret, vpid;
Darren Hart52400ba2009-04-03 13:40:49 -07001596
1597 if (get_futex_value_locked(&curval, pifutex))
1598 return -EFAULT;
1599
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001600 if (unlikely(should_fail_futex(true)))
1601 return -EFAULT;
1602
Darren Hartbab5bc92009-04-07 23:23:50 -07001603 /*
1604 * Find the top_waiter and determine if there are additional waiters.
1605 * If the caller intends to requeue more than 1 waiter to pifutex,
1606 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1607 * as we have means to handle the possible fault. If not, don't set
1608	 * the bit unnecessarily as it will force the subsequent unlock to enter
1609 * the kernel.
1610 */
Darren Hart52400ba2009-04-03 13:40:49 -07001611 top_waiter = futex_top_waiter(hb1, key1);
1612
1613 /* There are no waiters, nothing for us to do. */
1614 if (!top_waiter)
1615 return 0;
1616
Darren Hart84bc4af2009-08-13 17:36:53 -07001617 /* Ensure we requeue to the expected futex. */
1618 if (!match_futex(top_waiter->requeue_pi_key, key2))
1619 return -EINVAL;
1620
Darren Hart52400ba2009-04-03 13:40:49 -07001621 /*
Darren Hartbab5bc92009-04-07 23:23:50 -07001622 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1623 * the contended case or if set_waiters is 1. The pi_state is returned
1624 * in ps in contended cases.
Darren Hart52400ba2009-04-03 13:40:49 -07001625 */
Thomas Gleixner866293e2014-05-12 20:45:34 +00001626 vpid = task_pid_vnr(top_waiter->task);
Darren Hartbab5bc92009-04-07 23:23:50 -07001627 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1628 set_waiters);
Thomas Gleixner866293e2014-05-12 20:45:34 +00001629 if (ret == 1) {
Darren Hartbeda2c72009-08-09 15:34:39 -07001630 requeue_pi_wake_futex(top_waiter, key2, hb2);
Thomas Gleixner866293e2014-05-12 20:45:34 +00001631 return vpid;
1632 }
Darren Hart52400ba2009-04-03 13:40:49 -07001633 return ret;
1634}
1635
1636/**
1637 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
Randy Dunlapfb62db22010-10-13 11:02:34 -07001638 * @uaddr1: source futex user address
Darren Hartb41277d2010-11-08 13:10:09 -08001639 * @flags: futex flags (FLAGS_SHARED, etc.)
Randy Dunlapfb62db22010-10-13 11:02:34 -07001640 * @uaddr2: target futex user address
1641 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1642 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1643 * @cmpval: @uaddr1 expected value (or %NULL)
1644 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
Darren Hartb41277d2010-11-08 13:10:09 -08001645 * pi futex (pi to pi requeue is not supported)
Darren Hart52400ba2009-04-03 13:40:49 -07001646 *
1647 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1648 * uaddr2 atomically on behalf of the top waiter.
1649 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08001650 * Return:
1651 * >=0 - on success, the number of tasks requeued or woken;
Darren Hart52400ba2009-04-03 13:40:49 -07001652 * <0 - on error
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 */
Darren Hartb41277d2010-11-08 13:10:09 -08001654static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1655 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1656 u32 *cmpval, int requeue_pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657{
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001658 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
Darren Hart52400ba2009-04-03 13:40:49 -07001659 int drop_count = 0, task_count = 0, ret;
1660 struct futex_pi_state *pi_state = NULL;
Ingo Molnare2970f22006-06-27 02:54:47 -07001661 struct futex_hash_bucket *hb1, *hb2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 struct futex_q *this, *next;
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001663 WAKE_Q(wake_q);
Darren Hart52400ba2009-04-03 13:40:49 -07001664
1665 if (requeue_pi) {
1666 /*
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00001667 * Requeue PI only works on two distinct uaddrs. This
1668 * check is only valid for private futexes. See below.
1669 */
1670 if (uaddr1 == uaddr2)
1671 return -EINVAL;
1672
1673 /*
Darren Hart52400ba2009-04-03 13:40:49 -07001674 * requeue_pi requires a pi_state, try to allocate it now
1675 * without any locks in case it fails.
1676 */
1677 if (refill_pi_state_cache())
1678 return -ENOMEM;
1679 /*
1680 * requeue_pi must wake as many tasks as it can, up to nr_wake
1681 * + nr_requeue, since it acquires the rt_mutex prior to
1682 * returning to userspace, so as to not leave the rt_mutex with
1683 * waiters and no owner. However, second and third wake-ups
1684 * cannot be predicted as they involve race conditions with the
1685 * first wake and a fault while looking up the pi_state. Both
1686 * pthread_cond_signal() and pthread_cond_broadcast() should
1687 * use nr_wake=1.
1688 */
1689 if (nr_wake != 1)
1690 return -EINVAL;
1691 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692
Darren Hart42d35d42008-12-29 15:49:53 -08001693retry:
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001694 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 if (unlikely(ret != 0))
1696 goto out;
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001697 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1698 requeue_pi ? VERIFY_WRITE : VERIFY_READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08001700 goto out_put_key1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00001702 /*
1703 * The check above which compares uaddrs is not sufficient for
1704 * shared futexes. We need to compare the keys:
1705 */
1706 if (requeue_pi && match_futex(&key1, &key2)) {
1707 ret = -EINVAL;
1708 goto out_put_keys;
1709 }
1710
Ingo Molnare2970f22006-06-27 02:54:47 -07001711 hb1 = hash_futex(&key1);
1712 hb2 = hash_futex(&key2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713
Darren Harte4dc5b72009-03-12 00:56:13 -07001714retry_private:
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07001715 hb_waiters_inc(hb2);
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001716 double_lock_hb(hb1, hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
Ingo Molnare2970f22006-06-27 02:54:47 -07001718 if (likely(cmpval != NULL)) {
1719 u32 curval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720
Ingo Molnare2970f22006-06-27 02:54:47 -07001721 ret = get_futex_value_locked(&curval, uaddr1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722
1723 if (unlikely(ret)) {
Darren Hart5eb3dc62009-03-12 00:55:52 -07001724 double_unlock_hb(hb1, hb2);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07001725 hb_waiters_dec(hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726
Darren Harte4dc5b72009-03-12 00:56:13 -07001727 ret = get_user(curval, uaddr1);
1728 if (ret)
1729 goto out_put_keys;
1730
Darren Hartb41277d2010-11-08 13:10:09 -08001731 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07001732 goto retry_private;
1733
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001734 put_futex_key(&key2);
1735 put_futex_key(&key1);
Darren Harte4dc5b72009-03-12 00:56:13 -07001736 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 }
Ingo Molnare2970f22006-06-27 02:54:47 -07001738 if (curval != *cmpval) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 ret = -EAGAIN;
1740 goto out_unlock;
1741 }
1742 }
1743
Darren Hart52400ba2009-04-03 13:40:49 -07001744 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
Darren Hartbab5bc92009-04-07 23:23:50 -07001745 /*
1746 * Attempt to acquire uaddr2 and wake the top waiter. If we
1747 * intend to requeue waiters, force setting the FUTEX_WAITERS
1748 * bit. We force this here where we are able to easily handle
1749	 * faults rather than in the requeue loop below.
1750 */
Darren Hart52400ba2009-04-03 13:40:49 -07001751 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
Darren Hartbab5bc92009-04-07 23:23:50 -07001752 &key2, &pi_state, nr_requeue);
Darren Hart52400ba2009-04-03 13:40:49 -07001753
1754 /*
1755 * At this point the top_waiter has either taken uaddr2 or is
1756 * waiting on it. If the former, then the pi_state will not
1757 * exist yet, look it up one more time to ensure we have a
Thomas Gleixner866293e2014-05-12 20:45:34 +00001758 * reference to it. If the lock was taken, ret contains the
1759 * vpid of the top waiter task.
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00001760 * If the lock was not taken, we have pi_state and an initial
1761 * refcount on it. In case of an error we have nothing.
Darren Hart52400ba2009-04-03 13:40:49 -07001762 */
Thomas Gleixner866293e2014-05-12 20:45:34 +00001763 if (ret > 0) {
Darren Hart52400ba2009-04-03 13:40:49 -07001764 WARN_ON(pi_state);
Darren Hart89061d32009-10-15 15:30:48 -07001765 drop_count++;
Darren Hart52400ba2009-04-03 13:40:49 -07001766 task_count++;
Thomas Gleixner866293e2014-05-12 20:45:34 +00001767 /*
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00001768 * If we acquired the lock, then the user space value
1769 * of uaddr2 should be vpid. It cannot be changed by
1770 * the top waiter as it is blocked on hb2 lock if it
1771 * tries to do so. If something fiddled with it behind
1772	 * our back, the pi state lookup might unearth it. So
1773	 * we use the known value rather than rereading it and
1774	 * handing potential crap to lookup_pi_state.
1775 *
1776 * If that call succeeds then we have pi_state and an
1777 * initial refcount on it.
Thomas Gleixner866293e2014-05-12 20:45:34 +00001778 */
Thomas Gleixner54a21782014-06-03 12:27:08 +00001779 ret = lookup_pi_state(ret, hb2, &key2, &pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07001780 }
1781
1782 switch (ret) {
1783 case 0:
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00001784 /* We hold a reference on the pi state. */
Darren Hart52400ba2009-04-03 13:40:49 -07001785 break;
Thomas Gleixner4959f2d2015-12-19 20:07:40 +00001786
1787 /* If the above failed, then pi_state is NULL */
Darren Hart52400ba2009-04-03 13:40:49 -07001788 case -EFAULT:
1789 double_unlock_hb(hb1, hb2);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07001790 hb_waiters_dec(hb2);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001791 put_futex_key(&key2);
1792 put_futex_key(&key1);
Thomas Gleixnerd0725992009-06-11 23:15:43 +02001793 ret = fault_in_user_writeable(uaddr2);
Darren Hart52400ba2009-04-03 13:40:49 -07001794 if (!ret)
1795 goto retry;
1796 goto out;
1797 case -EAGAIN:
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001798 /*
1799 * Two reasons for this:
1800 * - Owner is exiting and we just wait for the
1801 * exit to complete.
1802 * - The user space value changed.
1803 */
Darren Hart52400ba2009-04-03 13:40:49 -07001804 double_unlock_hb(hb1, hb2);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07001805 hb_waiters_dec(hb2);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001806 put_futex_key(&key2);
1807 put_futex_key(&key1);
Darren Hart52400ba2009-04-03 13:40:49 -07001808 cond_resched();
1809 goto retry;
1810 default:
1811 goto out_unlock;
1812 }
1813 }
1814
Jason Low0d00c7b2014-01-12 15:31:22 -08001815 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
Darren Hart52400ba2009-04-03 13:40:49 -07001816 if (task_count - nr_wake >= nr_requeue)
1817 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818
Darren Hart52400ba2009-04-03 13:40:49 -07001819 if (!match_futex(&this->key, &key1))
1820 continue;
1821
Darren Hart392741e2009-08-07 15:20:48 -07001822 /*
1823 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1824 * be paired with each other and no other futex ops.
Darren Hartaa109902012-11-26 16:29:56 -08001825 *
1826 * We should never be requeueing a futex_q with a pi_state,
1827 * which is awaiting a futex_unlock_pi().
Darren Hart392741e2009-08-07 15:20:48 -07001828 */
1829 if ((requeue_pi && !this->rt_waiter) ||
Darren Hartaa109902012-11-26 16:29:56 -08001830 (!requeue_pi && this->rt_waiter) ||
1831 this->pi_state) {
Darren Hart392741e2009-08-07 15:20:48 -07001832 ret = -EINVAL;
1833 break;
1834 }
Darren Hart52400ba2009-04-03 13:40:49 -07001835
1836 /*
1837 * Wake nr_wake waiters. For requeue_pi, if we acquired the
1838 * lock, we already woke the top_waiter. If not, it will be
1839 * woken by futex_unlock_pi().
1840 */
1841 if (++task_count <= nr_wake && !requeue_pi) {
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001842 mark_wake_futex(&wake_q, this);
Darren Hart52400ba2009-04-03 13:40:49 -07001843 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 }
Darren Hart52400ba2009-04-03 13:40:49 -07001845
Darren Hart84bc4af2009-08-13 17:36:53 -07001846 /* Ensure we requeue to the expected futex for requeue_pi. */
1847 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1848 ret = -EINVAL;
1849 break;
1850 }
1851
Darren Hart52400ba2009-04-03 13:40:49 -07001852 /*
1853 * Requeue nr_requeue waiters and possibly one more in the case
1854 * of requeue_pi if we couldn't acquire the lock atomically.
1855 */
1856 if (requeue_pi) {
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00001857 /*
1858 * Prepare the waiter to take the rt_mutex. Take a
1859 * refcount on the pi_state and store the pointer in
1860 * the futex_q object of the waiter.
1861 */
Darren Hart52400ba2009-04-03 13:40:49 -07001862 atomic_inc(&pi_state->refcount);
1863 this->pi_state = pi_state;
1864 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1865 this->rt_waiter,
Thomas Gleixnerc051b212014-05-22 03:25:50 +00001866 this->task);
Darren Hart52400ba2009-04-03 13:40:49 -07001867 if (ret == 1) {
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00001868 /*
1869 * We got the lock. We do neither drop the
1870 * refcount on pi_state nor clear
1871 * this->pi_state because the waiter needs the
1872 * pi_state for cleaning up the user space
1873 * value. It will drop the refcount after
1874 * doing so.
1875 */
Darren Hartbeda2c72009-08-09 15:34:39 -07001876 requeue_pi_wake_futex(this, &key2, hb2);
Darren Hart89061d32009-10-15 15:30:48 -07001877 drop_count++;
Darren Hart52400ba2009-04-03 13:40:49 -07001878 continue;
1879 } else if (ret) {
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00001880 /*
1881 * rt_mutex_start_proxy_lock() detected a
1882 * potential deadlock when we tried to queue
1883 * that waiter. Drop the pi_state reference
1884 * which we took above and remove the pointer
1885	 * to the state from the waiter's futex_q
1886 * object.
1887 */
Darren Hart52400ba2009-04-03 13:40:49 -07001888 this->pi_state = NULL;
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00001889 put_pi_state(pi_state);
Thomas Gleixner885c2cb2015-12-19 20:07:41 +00001890 /*
1891 * We stop queueing more waiters and let user
1892 * space deal with the mess.
1893 */
1894 break;
Darren Hart52400ba2009-04-03 13:40:49 -07001895 }
1896 }
1897 requeue_futex(this, hb1, hb2, &key2);
1898 drop_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 }
1900
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00001901 /*
1902 * We took an extra initial reference to the pi_state either
1903 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
1904 * need to drop it here again.
1905 */
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00001906 put_pi_state(pi_state);
Thomas Gleixner885c2cb2015-12-19 20:07:41 +00001907
1908out_unlock:
Darren Hart5eb3dc62009-03-12 00:55:52 -07001909 double_unlock_hb(hb1, hb2);
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001910 wake_up_q(&wake_q);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07001911 hb_waiters_dec(hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
Darren Hartcd84a422009-04-02 14:19:38 -07001913 /*
1914 * drop_futex_key_refs() must be called outside the spinlocks. During
1915 * the requeue we moved futex_q's from the hash bucket at key1 to the
1916 * one at key2 and updated their key pointer. We no longer need to
1917 * hold the references to key1.
1918 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 while (--drop_count >= 0)
Rusty Russell9adef582007-05-08 00:26:42 -07001920 drop_futex_key_refs(&key1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921
Darren Hart42d35d42008-12-29 15:49:53 -08001922out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001923 put_futex_key(&key2);
Darren Hart42d35d42008-12-29 15:49:53 -08001924out_put_key1:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001925 put_futex_key(&key1);
Darren Hart42d35d42008-12-29 15:49:53 -08001926out:
Darren Hart52400ba2009-04-03 13:40:49 -07001927 return ret ? ret : task_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928}
1929
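/*
 * Illustrative user space sketch (not part of this file) of the classic
 * futex_requeue() use case: a condition variable broadcast that wakes one
 * waiter and requeues the rest onto the mutex futex, so they are woken
 * one at a time as the mutex is released instead of stampeding.
 * cond_futex, mutex_futex and cond_val are hypothetical names:
 *
 *	syscall(SYS_futex, &cond_futex, FUTEX_CMP_REQUEUE,
 *		1,                         <- nr_wake: wake a single waiter
 *		(void *)(long)INT_MAX,     <- nr_requeue, in the timeout slot
 *		&mutex_futex, cond_val);   <- target futex, expected *uaddr1
 *
 * The requeue_pi variant (FUTEX_CMP_REQUEUE_PI) additionally requires
 * nr_wake == 1, as explained at the top of futex_requeue().
 */
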
1930/* The key must be already stored in q->key. */
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01001931static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
Namhyung Kim15e408c2010-09-14 21:43:48 +09001932 __acquires(&hb->lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933{
Ingo Molnare2970f22006-06-27 02:54:47 -07001934 struct futex_hash_bucket *hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
Ingo Molnare2970f22006-06-27 02:54:47 -07001936 hb = hash_futex(&q->key);
Linus Torvalds11d46162014-03-20 22:11:17 -07001937
1938 /*
1939 * Increment the counter before taking the lock so that
1940 * a potential waker won't miss a to-be-slept task that is
1941 * waiting for the spinlock. This is safe as all queue_lock()
1942 * users end up calling queue_me(). Similarly, for housekeeping,
1943 * decrement the counter at queue_unlock() when some error has
1944 * occurred and we don't end up adding the task to the list.
1945 */
1946 hb_waiters_inc(hb);
1947
Ingo Molnare2970f22006-06-27 02:54:47 -07001948 q->lock_ptr = &hb->lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -08001950 spin_lock(&hb->lock); /* implies smp_mb(); (A) */
Ingo Molnare2970f22006-06-27 02:54:47 -07001951 return hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952}
1953
Darren Hartd40d65c2009-09-21 22:30:15 -07001954static inline void
Jason Low0d00c7b2014-01-12 15:31:22 -08001955queue_unlock(struct futex_hash_bucket *hb)
Namhyung Kim15e408c2010-09-14 21:43:48 +09001956 __releases(&hb->lock)
Darren Hartd40d65c2009-09-21 22:30:15 -07001957{
1958 spin_unlock(&hb->lock);
Linus Torvalds11d46162014-03-20 22:11:17 -07001959 hb_waiters_dec(hb);
Darren Hartd40d65c2009-09-21 22:30:15 -07001960}
1961
1962/**
1963 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1964 * @q: The futex_q to enqueue
1965 * @hb: The destination hash bucket
1966 *
1967 * The hb->lock must be held by the caller, and is released here. A call to
1968 * queue_me() is typically paired with exactly one call to unqueue_me(). The
1969 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1970 * or nothing if the unqueue is done as part of the wake process and the unqueue
1971 * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
1972 * an example).
1973 */
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01001974static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
Namhyung Kim15e408c2010-09-14 21:43:48 +09001975 __releases(&hb->lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976{
Pierre Peifferec92d082007-05-09 02:35:00 -07001977 int prio;
1978
1979 /*
1980 * The priority used to register this element is
1981 * - either the real thread-priority for the real-time threads
1982 * (i.e. threads with a priority lower than MAX_RT_PRIO)
1983 * - or MAX_RT_PRIO for non-RT threads.
1984 * Thus, all RT-threads are woken first in priority order, and
1985 * the others are woken last, in FIFO order.
1986 */
1987 prio = min(current->normal_prio, MAX_RT_PRIO);
1988
1989 plist_node_init(&q->list, prio);
Pierre Peifferec92d082007-05-09 02:35:00 -07001990 plist_add(&q->list, &hb->chain);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001991 q->task = current;
Ingo Molnare2970f22006-06-27 02:54:47 -07001992 spin_unlock(&hb->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993}
1994
Darren Hartd40d65c2009-09-21 22:30:15 -07001995/**
1996 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1997 * @q: The futex_q to unqueue
1998 *
1999 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2000 * be paired with exactly one earlier call to queue_me().
2001 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002002 * Return:
2003 * 1 - if the futex_q was still queued (and we unqueued it);
Darren Hartd40d65c2009-09-21 22:30:15 -07002004 * 0 - if the futex_q was already removed by the waking thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006static int unqueue_me(struct futex_q *q)
2007{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 spinlock_t *lock_ptr;
Ingo Molnare2970f22006-06-27 02:54:47 -07002009 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
2011 /* In the common case we don't take the spinlock, which is nice. */
Darren Hart42d35d42008-12-29 15:49:53 -08002012retry:
Jianyu Zhan29b75eb2016-03-07 09:32:24 +08002013 /*
2014 * q->lock_ptr can change between this read and the following spin_lock.
2015 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2016 * optimizing lock_ptr out of the logic below.
2017 */
2018 lock_ptr = READ_ONCE(q->lock_ptr);
Stephen Hemmingerc80544d2007-10-18 03:07:05 -07002019 if (lock_ptr != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 spin_lock(lock_ptr);
2021 /*
2022 * q->lock_ptr can change between reading it and
2023 * spin_lock(), causing us to take the wrong lock. This
2024 * corrects the race condition.
2025 *
2026 * Reasoning goes like this: if we have the wrong lock,
2027 * q->lock_ptr must have changed (maybe several times)
2028 * between reading it and the spin_lock(). It can
2029 * change again after the spin_lock() but only if it was
2030 * already changed before the spin_lock(). It cannot,
2031 * however, change back to the original value. Therefore
2032 * we can detect whether we acquired the correct lock.
2033 */
2034 if (unlikely(lock_ptr != q->lock_ptr)) {
2035 spin_unlock(lock_ptr);
2036 goto retry;
2037 }
Lai Jiangshan2e129782010-12-22 14:18:50 +08002038 __unqueue_futex(q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002039
2040 BUG_ON(q->pi_state);
2041
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 spin_unlock(lock_ptr);
2043 ret = 1;
2044 }
2045
Rusty Russell9adef582007-05-08 00:26:42 -07002046 drop_futex_key_refs(&q->key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 return ret;
2048}
2049
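/*
 * A sketch of the race the retry loop in unqueue_me() guards against
 * (waiter T, waker/requeuer W; illustrative only):
 *
 *	T: lock_ptr = READ_ONCE(q->lock_ptr);	reads &hb1->lock
 *	W: wakes or requeues q, q->lock_ptr becomes NULL or &hb2->lock
 *	T: spin_lock(lock_ptr);			acquired the stale lock
 *	T: lock_ptr != q->lock_ptr		detected, unlock and retry
 *
 * As noted above, q->lock_ptr can never change back to the stale value,
 * which is what makes the recheck after spin_lock() sufficient.
 */
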
Ingo Molnarc87e2832006-06-27 02:54:58 -07002050/*
2051 * PI futexes cannot be requeued and must remove themselves from the
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002052 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2053 * and dropped here.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002054 */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002055static void unqueue_me_pi(struct futex_q *q)
Namhyung Kim15e408c2010-09-14 21:43:48 +09002056 __releases(q->lock_ptr)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002057{
Lai Jiangshan2e129782010-12-22 14:18:50 +08002058 __unqueue_futex(q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002059
2060 BUG_ON(!q->pi_state);
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00002061 put_pi_state(q->pi_state);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002062 q->pi_state = NULL;
2063
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002064 spin_unlock(q->lock_ptr);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002065}
2066
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002067/*
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01002068 * Fixup the pi_state owner with the new owner.
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002069 *
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002070 * Must be called with hash bucket lock held and mm->sem held for non
2071 * private futexes.
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002072 */
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002073static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002074 struct task_struct *newowner)
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002075{
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01002076 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002077 struct futex_pi_state *pi_state = q->pi_state;
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002078 struct task_struct *oldowner = pi_state->owner;
Vitaliy Ivanov7cfdaf32011-07-07 15:10:31 +03002079 u32 uval, uninitialized_var(curval), newval;
Darren Harte4dc5b72009-03-12 00:56:13 -07002080 int ret;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002081
2082 /* Owner died? */
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002083 if (!pi_state->owner)
2084 newtid |= FUTEX_OWNER_DIED;
2085
2086 /*
2087 * We are here either because we stole the rtmutex from the
Lai Jiangshan81612392011-01-14 17:09:41 +08002088 * previous highest priority waiter or we are the highest priority
2089 * waiter but failed to get the rtmutex the first time.
2090 * We have to replace the newowner TID in the user space variable.
2091 * This must be atomic as we have to preserve the owner died bit here.
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002092 *
Darren Hartb2d09942009-03-12 00:55:37 -07002093 * Note: We write the user space value _before_ changing the pi_state
2094 * because we can fault here. Imagine swapped out pages or a fork
2095 * that marked all the anonymous memory readonly for cow.
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002096 *
2097 * Modifying pi_state _before_ the user space value would
2098 * leave the pi_state in an inconsistent state when we fault
2099 * here, because we need to drop the hash bucket lock to
2100 * handle the fault. This might be observed in the PID check
2101 * in lookup_pi_state.
2102 */
2103retry:
2104 if (get_futex_value_locked(&uval, uaddr))
2105 goto handle_fault;
2106
2107 while (1) {
2108 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2109
Michel Lespinasse37a9d912011-03-10 18:48:51 -08002110 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002111 goto handle_fault;
2112 if (curval == uval)
2113 break;
2114 uval = curval;
2115 }
2116
2117 /*
2118 * We fixed up user space. Now we need to fix the pi_state
2119 * itself.
2120 */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002121 if (pi_state->owner != NULL) {
Thomas Gleixner1d615482009-11-17 14:54:03 +01002122 raw_spin_lock_irq(&pi_state->owner->pi_lock);
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002123 WARN_ON(list_empty(&pi_state->list));
2124 list_del_init(&pi_state->list);
Thomas Gleixner1d615482009-11-17 14:54:03 +01002125 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002126 }
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002127
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01002128 pi_state->owner = newowner;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002129
Thomas Gleixner1d615482009-11-17 14:54:03 +01002130 raw_spin_lock_irq(&newowner->pi_lock);
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002131 WARN_ON(!list_empty(&pi_state->list));
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01002132 list_add(&pi_state->list, &newowner->pi_state_list);
Thomas Gleixner1d615482009-11-17 14:54:03 +01002133 raw_spin_unlock_irq(&newowner->pi_lock);
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002134 return 0;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002135
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002136 /*
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002137 * To handle the page fault we need to drop the hash bucket
Lai Jiangshan81612392011-01-14 17:09:41 +08002138 * lock here. That gives the other task (either the highest priority
2139 * waiter itself or the task which stole the rtmutex) the
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002140 * chance to try the fixup of the pi_state. So once we are
2141 * back from handling the fault we need to check the pi_state
2142 * after reacquiring the hash bucket lock and before trying to
2143 * do another fixup. When the fixup has been done already we
2144 * simply return.
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002145 */
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002146handle_fault:
2147 spin_unlock(q->lock_ptr);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002148
Thomas Gleixnerd0725992009-06-11 23:15:43 +02002149 ret = fault_in_user_writeable(uaddr);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002150
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002151 spin_lock(q->lock_ptr);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002152
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002153 /*
2154 * Check if someone else fixed it for us:
2155 */
2156 if (pi_state->owner != oldowner)
2157 return 0;
2158
2159 if (ret)
2160 return ret;
2161
2162 goto retry;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002163}
2164
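/*
 * For reference, the user space value layout that the newtid/newval
 * computations in fixup_pi_state_owner() rely on (see <linux/futex.h>):
 *
 *	bit  31      FUTEX_WAITERS	- kernel has waiters queued
 *	bit  30      FUTEX_OWNER_DIED	- a previous owner exited uncleanly
 *	bits 29..0   FUTEX_TID_MASK	- TID of the current owner
 *
 * so newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | task_pid_vnr(owner)
 * preserves the died bit while handing the futex to the new owner.
 */
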
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002165static long futex_wait_restart(struct restart_block *restart);
Thomas Gleixner36cf3b52007-07-15 23:41:20 -07002166
Darren Hartca5f9522009-04-03 13:39:33 -07002167/**
Darren Hartdd973992009-04-03 13:40:02 -07002168 * fixup_owner() - Post lock pi_state and corner case management
2169 * @uaddr: user address of the futex
Darren Hartdd973992009-04-03 13:40:02 -07002170 * @q: futex_q (contains pi_state and access to the rt_mutex)
2171 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2172 *
2173 * After attempting to lock an rt_mutex, this function is called to cleanup
2174 * the pi_state owner as well as handle race conditions that may allow us to
2175 * acquire the lock. Must be called with the hb lock held.
2176 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002177 * Return:
2178 * 1 - success, lock taken;
2179 * 0 - success, lock not taken;
Darren Hartdd973992009-04-03 13:40:02 -07002180 * <0 - on error (-EFAULT)
2181 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002182static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
Darren Hartdd973992009-04-03 13:40:02 -07002183{
2184 struct task_struct *owner;
2185 int ret = 0;
2186
2187 if (locked) {
2188 /*
2189 * Got the lock. We might not be the anticipated owner if we
2190 * did a lock-steal - fix up the PI-state in that case:
2191 */
2192 if (q->pi_state->owner != current)
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002193 ret = fixup_pi_state_owner(uaddr, q, current);
Darren Hartdd973992009-04-03 13:40:02 -07002194 goto out;
2195 }
2196
2197 /*
2198	 * Catch the rare case where the lock was released while we were on the
2199	 * way back, before we locked the hash bucket.
2200 */
2201 if (q->pi_state->owner == current) {
2202 /*
2203 * Try to get the rt_mutex now. This might fail as some other
2204		 * task acquired the rt_mutex after we removed ourselves from the
2205 * rt_mutex waiters list.
2206 */
2207 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
2208 locked = 1;
2209 goto out;
2210 }
2211
2212 /*
2213 * pi_state is incorrect, some other task did a lock steal and
2214 * we returned due to timeout or signal without taking the
Lai Jiangshan81612392011-01-14 17:09:41 +08002215 * rt_mutex. Too late.
Darren Hartdd973992009-04-03 13:40:02 -07002216 */
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01002217 raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
Darren Hartdd973992009-04-03 13:40:02 -07002218 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
Lai Jiangshan81612392011-01-14 17:09:41 +08002219 if (!owner)
2220 owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01002221 raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002222 ret = fixup_pi_state_owner(uaddr, q, owner);
Darren Hartdd973992009-04-03 13:40:02 -07002223 goto out;
2224 }
2225
2226 /*
2227 * Paranoia check. If we did not take the lock, then we should not be
Lai Jiangshan81612392011-01-14 17:09:41 +08002228 * the owner of the rt_mutex.
Darren Hartdd973992009-04-03 13:40:02 -07002229 */
2230 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
2231 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2232 "pi-state %p\n", ret,
2233 q->pi_state->pi_mutex.owner,
2234 q->pi_state->owner);
2235
2236out:
2237 return ret ? ret : locked;
2238}
2239
2240/**
Darren Hartca5f9522009-04-03 13:39:33 -07002241 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2242 * @hb: the futex hash bucket, must be locked by the caller
2243 * @q: the futex_q to queue up on
2244 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
Darren Hartca5f9522009-04-03 13:39:33 -07002245 */
2246static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002247 struct hrtimer_sleeper *timeout)
Darren Hartca5f9522009-04-03 13:39:33 -07002248{
Darren Hart9beba3c2009-09-24 11:54:47 -07002249 /*
2250 * The task state is guaranteed to be set before another task can
Peter Zijlstrab92b8b32015-05-12 10:51:55 +02002251 * wake it. set_current_state() is implemented using smp_store_mb() and
Darren Hart9beba3c2009-09-24 11:54:47 -07002252 * queue_me() calls spin_unlock() upon completion, both serializing
2253 * access to the hash list and forcing another memory barrier.
2254 */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002255 set_current_state(TASK_INTERRUPTIBLE);
Darren Hart0729e192009-09-21 22:30:38 -07002256 queue_me(q, hb);
Darren Hartca5f9522009-04-03 13:39:33 -07002257
2258 /* Arm the timer */
Thomas Gleixner2e4b0d32015-04-14 21:09:13 +00002259 if (timeout)
Darren Hartca5f9522009-04-03 13:39:33 -07002260 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
Darren Hartca5f9522009-04-03 13:39:33 -07002261
2262 /*
Darren Hart0729e192009-09-21 22:30:38 -07002263 * If we have been removed from the hash list, then another task
2264 * has tried to wake us, and we can skip the call to schedule().
Darren Hartca5f9522009-04-03 13:39:33 -07002265 */
2266 if (likely(!plist_node_empty(&q->list))) {
2267 /*
2268 * If the timer has already expired, current will already be
2269 * flagged for rescheduling. Only call schedule if there
2270 * is no timeout, or if it has yet to expire.
2271 */
2272 if (!timeout || timeout->task)
Colin Cross88c80042013-05-01 18:35:05 -07002273 freezable_schedule();
Darren Hartca5f9522009-04-03 13:39:33 -07002274 }
2275 __set_current_state(TASK_RUNNING);
2276}
2277
Darren Hartf8010732009-04-03 13:40:40 -07002278/**
2279 * futex_wait_setup() - Prepare to wait on a futex
2280 * @uaddr: the futex userspace address
2281 * @val: the expected value
Darren Hartb41277d2010-11-08 13:10:09 -08002282 * @flags: futex flags (FLAGS_SHARED, etc.)
Darren Hartf8010732009-04-03 13:40:40 -07002283 * @q: the associated futex_q
2284 * @hb: storage for hash_bucket pointer to be returned to caller
2285 *
2286 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2287 * compare it with the expected value. Handle atomic faults internally.
2288 * Return with the hb lock held and a q.key reference on success, and unlocked
2289 * with no q.key reference on failure.
2290 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002291 * Return:
2292 * 0 - uaddr contains val and hb has been locked;
Bart Van Asscheca4a04c2011-07-17 09:01:00 +02002293 * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
Darren Hartf8010732009-04-03 13:40:40 -07002294 */
Darren Hartb41277d2010-11-08 13:10:09 -08002295static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
Darren Hartf8010732009-04-03 13:40:40 -07002296 struct futex_q *q, struct futex_hash_bucket **hb)
2297{
2298 u32 uval;
2299 int ret;
2300
2301 /*
2302 * Access the page AFTER the hash-bucket is locked.
2303 * Order is important:
2304 *
2305 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2306 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2307 *
2308 * The basic logical guarantee of a futex is that it blocks ONLY
2309 * if cond(var) is known to be true at the time of blocking, for
Michel Lespinasse8fe8f542011-03-06 18:07:50 -08002310 * any cond. If we locked the hash-bucket after testing *uaddr, that
2311 * would open a race condition where we could block indefinitely with
Darren Hartf8010732009-04-03 13:40:40 -07002312 * cond(var) false, which would violate the guarantee.
2313 *
Michel Lespinasse8fe8f542011-03-06 18:07:50 -08002314 * On the other hand, we insert q and release the hash-bucket only
2315 * after testing *uaddr. This guarantees that futex_wait() will NOT
2316 * absorb a wakeup if *uaddr does not match the desired values
2317 * while the syscall executes.
Darren Hartf8010732009-04-03 13:40:40 -07002318 */
2319retry:
Shawn Bohrer9ea71502011-06-30 11:21:32 -05002320 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
Darren Hartf8010732009-04-03 13:40:40 -07002321 if (unlikely(ret != 0))
Darren Harta5a2a0c2009-04-10 09:50:05 -07002322 return ret;
Darren Hartf8010732009-04-03 13:40:40 -07002323
2324retry_private:
2325 *hb = queue_lock(q);
2326
2327 ret = get_futex_value_locked(&uval, uaddr);
2328
2329 if (ret) {
Jason Low0d00c7b2014-01-12 15:31:22 -08002330 queue_unlock(*hb);
Darren Hartf8010732009-04-03 13:40:40 -07002331
2332 ret = get_user(uval, uaddr);
2333 if (ret)
2334 goto out;
2335
Darren Hartb41277d2010-11-08 13:10:09 -08002336 if (!(flags & FLAGS_SHARED))
Darren Hartf8010732009-04-03 13:40:40 -07002337 goto retry_private;
2338
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002339 put_futex_key(&q->key);
Darren Hartf8010732009-04-03 13:40:40 -07002340 goto retry;
2341 }
2342
2343 if (uval != val) {
Jason Low0d00c7b2014-01-12 15:31:22 -08002344 queue_unlock(*hb);
Darren Hartf8010732009-04-03 13:40:40 -07002345 ret = -EWOULDBLOCK;
2346 }
2347
2348out:
2349 if (ret)
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002350 put_futex_key(&q->key);
Darren Hartf8010732009-04-03 13:40:40 -07002351 return ret;
2352}
2353
Darren Hartb41277d2010-11-08 13:10:09 -08002354static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2355 ktime_t *abs_time, u32 bitset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356{
Darren Hartca5f9522009-04-03 13:39:33 -07002357 struct hrtimer_sleeper timeout, *to = NULL;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002358 struct restart_block *restart;
Ingo Molnare2970f22006-06-27 02:54:47 -07002359 struct futex_hash_bucket *hb;
Darren Hart5bdb05f2010-11-08 13:40:28 -08002360 struct futex_q q = futex_q_init;
Ingo Molnare2970f22006-06-27 02:54:47 -07002361 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362
Thomas Gleixnercd689982008-02-01 17:45:14 +01002363 if (!bitset)
2364 return -EINVAL;
Thomas Gleixnercd689982008-02-01 17:45:14 +01002365 q.bitset = bitset;
Darren Hartca5f9522009-04-03 13:39:33 -07002366
2367 if (abs_time) {
2368 to = &timeout;
2369
Darren Hartb41277d2010-11-08 13:10:09 -08002370 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2371 CLOCK_REALTIME : CLOCK_MONOTONIC,
2372 HRTIMER_MODE_ABS);
Darren Hartca5f9522009-04-03 13:39:33 -07002373 hrtimer_init_sleeper(to, current);
2374 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2375 current->timer_slack_ns);
2376 }
2377
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002378retry:
Darren Hart7ada8762010-10-17 08:35:04 -07002379 /*
2380 * Prepare to wait on uaddr. On success, holds hb lock and increments
2381 * q.key refs.
2382 */
Darren Hartb41277d2010-11-08 13:10:09 -08002383 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
Darren Hartf8010732009-04-03 13:40:40 -07002384 if (ret)
Darren Hart42d35d42008-12-29 15:49:53 -08002385 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386
Darren Hartca5f9522009-04-03 13:39:33 -07002387 /* queue_me and wait for wakeup, timeout, or a signal. */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002388 futex_wait_queue_me(hb, &q, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389
2390 /* If we were woken (and unqueued), we succeeded, whatever. */
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002391 ret = 0;
Darren Hart7ada8762010-10-17 08:35:04 -07002392 /* unqueue_me() drops q.key ref */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 if (!unqueue_me(&q))
Darren Hart7ada8762010-10-17 08:35:04 -07002394 goto out;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002395 ret = -ETIMEDOUT;
Darren Hartca5f9522009-04-03 13:39:33 -07002396 if (to && !to->task)
Darren Hart7ada8762010-10-17 08:35:04 -07002397 goto out;
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002398
Ingo Molnare2970f22006-06-27 02:54:47 -07002399 /*
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002400 * We expect signal_pending(current), but we might be the
2401 * victim of a spurious wakeup as well.
Ingo Molnare2970f22006-06-27 02:54:47 -07002402 */
Darren Hart7ada8762010-10-17 08:35:04 -07002403 if (!signal_pending(current))
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002404 goto retry;
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002405
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002406 ret = -ERESTARTSYS;
Pierre Peifferc19384b2007-05-09 02:35:02 -07002407 if (!abs_time)
Darren Hart7ada8762010-10-17 08:35:04 -07002408 goto out;
Steven Rostedtce6bd422007-12-05 15:46:09 +01002409
Andy Lutomirskif56141e2015-02-12 15:01:14 -08002410 restart = &current->restart_block;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002411 restart->fn = futex_wait_restart;
Namhyung Kima3c74c52010-09-14 21:43:47 +09002412 restart->futex.uaddr = uaddr;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002413 restart->futex.val = val;
2414 restart->futex.time = abs_time->tv64;
2415 restart->futex.bitset = bitset;
Darren Hart0cd9c642011-04-14 15:41:57 -07002416 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002417
2418 ret = -ERESTART_RESTARTBLOCK;
2419
Darren Hart42d35d42008-12-29 15:49:53 -08002420out:
Darren Hartca5f9522009-04-03 13:39:33 -07002421 if (to) {
2422 hrtimer_cancel(&to->timer);
2423 destroy_hrtimer_on_stack(&to->timer);
2424 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07002425 return ret;
2426}
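
/*
 * Illustrative user-space sketch (not part of this file): the typical
 * FUTEX_WAIT/FUTEX_WAKE pairing served by the wait path above. The futex()
 * wrapper is an assumption for the example (glibc exports none), and the
 * snippet presumes <linux/futex.h>, <sys/syscall.h>, <unistd.h>, <time.h>,
 * <errno.h> and <stdint.h>:
 *
 *	static long futex(uint32_t *uaddr, int op, uint32_t val,
 *			  const struct timespec *timeout)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val, timeout, NULL, 0);
 *	}
 *
 *	static uint32_t futex_word;
 *
 *	static void wait_for_event(void)
 *	{
 *		while (__atomic_load_n(&futex_word, __ATOMIC_ACQUIRE) == 0) {
 *			if (futex(&futex_word, FUTEX_WAIT, 0, NULL) == -1 &&
 *			    errno != EAGAIN && errno != EINTR)
 *				break;
 *		}
 *	}
 *
 *	static void post_event(void)
 *	{
 *		__atomic_store_n(&futex_word, 1, __ATOMIC_RELEASE);
 *		futex(&futex_word, FUTEX_WAKE, 1, NULL);
 *	}
 *
 * FUTEX_WAIT only sleeps while the word still holds the expected value (0),
 * so a wakeup that races with the store is never lost.
 */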
2427
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002428
2429static long futex_wait_restart(struct restart_block *restart)
2430{
Namhyung Kima3c74c52010-09-14 21:43:47 +09002431 u32 __user *uaddr = restart->futex.uaddr;
Darren Harta72188d2009-04-03 13:40:22 -07002432 ktime_t t, *tp = NULL;
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002433
Darren Harta72188d2009-04-03 13:40:22 -07002434 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
2435 t.tv64 = restart->futex.time;
2436 tp = &t;
2437 }
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002438 restart->fn = do_no_restart_syscall;
Darren Hartb41277d2010-11-08 13:10:09 -08002439
2440 return (long)futex_wait(uaddr, restart->futex.flags,
2441 restart->futex.val, tp, restart->futex.bitset);
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002442}
2443
2444
Ingo Molnarc87e2832006-06-27 02:54:58 -07002445/*
2446 * Userspace tried a 0 -> TID atomic transition of the futex value
2447 * and failed. The kernel side here does the whole locking operation:
Davidlohr Bueso767f5092015-06-29 23:26:01 -07002448 * if there are waiters then it will block on the underlying rt-mutex,
2449 * handle priority inheritance, etc. (Due to races the kernel might see
2450 * a 0 value of the futex too.)
2451 *
2452 * Also serves as the FUTEX_TRYLOCK_PI operation, with the expected trylock semantics.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002453 */
Michael Kerrisk996636d2015-01-16 20:28:06 +01002454static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
Darren Hartb41277d2010-11-08 13:10:09 -08002455 ktime_t *time, int trylock)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002456{
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002457 struct hrtimer_sleeper timeout, *to = NULL;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002458 struct futex_hash_bucket *hb;
Darren Hart5bdb05f2010-11-08 13:40:28 -08002459 struct futex_q q = futex_q_init;
Darren Hartdd973992009-04-03 13:40:02 -07002460 int res, ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002461
2462 if (refill_pi_state_cache())
2463 return -ENOMEM;
2464
Pierre Peifferc19384b2007-05-09 02:35:02 -07002465 if (time) {
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002466 to = &timeout;
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07002467 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2468 HRTIMER_MODE_ABS);
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002469 hrtimer_init_sleeper(to, current);
Arjan van de Vencc584b22008-09-01 15:02:30 -07002470 hrtimer_set_expires(&to->timer, *time);
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002471 }
2472
Darren Hart42d35d42008-12-29 15:49:53 -08002473retry:
Shawn Bohrer9ea71502011-06-30 11:21:32 -05002474 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002475 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08002476 goto out;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002477
Darren Harte4dc5b72009-03-12 00:56:13 -07002478retry_private:
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01002479 hb = queue_lock(&q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002480
Darren Hartbab5bc92009-04-07 23:23:50 -07002481 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002482 if (unlikely(ret)) {
Davidlohr Bueso767f5092015-06-29 23:26:01 -07002483 /*
2484 * Atomic work succeeded and we got the lock,
2485 * or failed. Either way, we do _not_ block.
2486 */
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002487 switch (ret) {
Darren Hart1a520842009-04-03 13:39:52 -07002488 case 1:
2489 /* We got the lock. */
2490 ret = 0;
2491 goto out_unlock_put_key;
2492 case -EFAULT:
2493 goto uaddr_faulted;
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002494 case -EAGAIN:
2495 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00002496 * Two reasons for this:
2497 * - Task is exiting and we just wait for the
2498 * exit to complete.
2499 * - The user space value changed.
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002500 */
Jason Low0d00c7b2014-01-12 15:31:22 -08002501 queue_unlock(hb);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002502 put_futex_key(&q.key);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002503 cond_resched();
2504 goto retry;
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002505 default:
Darren Hart42d35d42008-12-29 15:49:53 -08002506 goto out_unlock_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002507 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07002508 }
2509
2510 /*
2511 * Only actually queue now that the atomic ops are done:
2512 */
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01002513 queue_me(&q, hb);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002514
Ingo Molnarc87e2832006-06-27 02:54:58 -07002515 WARN_ON(!q.pi_state);
2516 /*
2517 * Block on the PI mutex:
2518 */
Thomas Gleixnerc051b212014-05-22 03:25:50 +00002519 if (!trylock) {
2520 ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
2521 } else {
Ingo Molnarc87e2832006-06-27 02:54:58 -07002522 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
2523 /* Fixup the trylock return value: */
2524 ret = ret ? 0 : -EWOULDBLOCK;
2525 }
2526
Vernon Mauerya99e4e42006-07-01 04:35:42 -07002527 spin_lock(q.lock_ptr);
Darren Hartdd973992009-04-03 13:40:02 -07002528 /*
2529 * Fixup the pi_state owner and possibly acquire the lock if we
2530 * haven't already.
2531 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002532 res = fixup_owner(uaddr, &q, !ret);
Darren Hartdd973992009-04-03 13:40:02 -07002533 /*
2534 * If fixup_owner() returned an error, propagate that. If it acquired
2535 * the lock, clear our -ETIMEDOUT or -EINTR.
2536 */
2537 if (res)
2538 ret = (res < 0) ? res : 0;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002539
Darren Harte8f63862009-03-12 00:56:06 -07002540 /*
Darren Hartdd973992009-04-03 13:40:02 -07002541 * If fixup_owner() faulted and was unable to handle the fault, unlock
2542 * it and return the fault to userspace.
Darren Harte8f63862009-03-12 00:56:06 -07002543 */
2544 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
2545 rt_mutex_unlock(&q.pi_state->pi_mutex);
2546
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002547 /* Unqueue and drop the lock */
2548 unqueue_me_pi(&q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002549
Mikael Pettersson5ecb01c2010-01-23 22:36:29 +01002550 goto out_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002551
Darren Hart42d35d42008-12-29 15:49:53 -08002552out_unlock_put_key:
Jason Low0d00c7b2014-01-12 15:31:22 -08002553 queue_unlock(hb);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002554
Darren Hart42d35d42008-12-29 15:49:53 -08002555out_put_key:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002556 put_futex_key(&q.key);
Darren Hart42d35d42008-12-29 15:49:53 -08002557out:
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07002558 if (to)
2559 destroy_hrtimer_on_stack(&to->timer);
Darren Hartdd973992009-04-03 13:40:02 -07002560 return ret != -EINTR ? ret : -ERESTARTNOINTR;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002561
Darren Hart42d35d42008-12-29 15:49:53 -08002562uaddr_faulted:
Jason Low0d00c7b2014-01-12 15:31:22 -08002563 queue_unlock(hb);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002564
Thomas Gleixnerd0725992009-06-11 23:15:43 +02002565 ret = fault_in_user_writeable(uaddr);
Darren Harte4dc5b72009-03-12 00:56:13 -07002566 if (ret)
2567 goto out_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002568
Darren Hartb41277d2010-11-08 13:10:09 -08002569 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07002570 goto retry_private;
2571
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002572 put_futex_key(&q.key);
Darren Harte4dc5b72009-03-12 00:56:13 -07002573 goto retry;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002574}
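
/*
 * Illustrative user-space sketch (not part of this file): the 0 -> TID
 * fast path whose failure is handled by futex_lock_pi() above. gettid()
 * and the raw syscall(2) invocation are assumptions for the example;
 * error handling is elided:
 *
 *	static void pi_lock(uint32_t *futex_word)
 *	{
 *		uint32_t expected = 0;
 *		uint32_t tid = gettid();
 *
 *		if (__atomic_compare_exchange_n(futex_word, &expected, tid,
 *						0, __ATOMIC_ACQUIRE,
 *						__ATOMIC_RELAXED))
 *			return;
 *		syscall(SYS_futex, futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *	}
 *
 * On the uncontended path the cmpxchg succeeds and no syscall is made; on
 * contention the kernel sets FUTEX_WAITERS in the word, boosts the owner
 * through the rt-mutex and writes the new owner TID on handover.
 */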
2575
2576/*
Ingo Molnarc87e2832006-06-27 02:54:58 -07002577 * Userspace attempted a TID -> 0 atomic transition, and failed.
2578 * This is the in-kernel slowpath: we look up the PI state (if any),
2579 * and do the rt-mutex unlock.
2580 */
Darren Hartb41277d2010-11-08 13:10:09 -08002581static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002582{
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002583 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
Peter Zijlstra38d47c12008-09-26 19:32:20 +02002584 union futex_key key = FUTEX_KEY_INIT;
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002585 struct futex_hash_bucket *hb;
2586 struct futex_q *match;
Darren Harte4dc5b72009-03-12 00:56:13 -07002587 int ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002588
2589retry:
2590 if (get_user(uval, uaddr))
2591 return -EFAULT;
2592 /*
2593 * We release only a lock we actually own:
2594 */
Thomas Gleixnerc0c9ed12011-03-11 11:51:22 +01002595 if ((uval & FUTEX_TID_MASK) != vpid)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002596 return -EPERM;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002597
Shawn Bohrer9ea71502011-06-30 11:21:32 -05002598 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002599 if (ret)
2600 return ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002601
2602 hb = hash_futex(&key);
2603 spin_lock(&hb->lock);
2604
Ingo Molnarc87e2832006-06-27 02:54:58 -07002605 /*
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002606 * Check waiters first. We do not trust user space values at
2607 * all and we at least want to know if user space fiddled
2608 * with the futex value instead of blindly unlocking.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002609 */
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002610 match = futex_top_waiter(hb, &key);
2611 if (match) {
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02002612 ret = wake_futex_pi(uaddr, uval, match, hb);
2613 /*
2614 * In case of success, wake_futex_pi() dropped the hash
2615 * bucket lock.
2616 */
2617 if (!ret)
2618 goto out_putkey;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002619 /*
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002620 * The atomic access to the futex value generated a
2621 * pagefault, so retry the user-access and the wakeup:
Ingo Molnarc87e2832006-06-27 02:54:58 -07002622 */
2623 if (ret == -EFAULT)
2624 goto pi_faulted;
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02002625 /*
2626 * wake_futex_pi has detected invalid state. Tell user
2627 * space.
2628 */
Ingo Molnarc87e2832006-06-27 02:54:58 -07002629 goto out_unlock;
2630 }
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002631
Ingo Molnarc87e2832006-06-27 02:54:58 -07002632 /*
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002633 * We have no kernel internal state, i.e. no waiters in the
2634 * kernel. Waiters which are about to queue themselves are stuck
2635 * on hb->lock. So we can safely ignore them. We preserve
2636 * neither the WAITERS bit nor the OWNER_DIED one. We are the
2637 * owner.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002638 */
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002639 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
Thomas Gleixner13fbca42014-06-03 12:27:07 +00002640 goto pi_faulted;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002641
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002642 /*
2643 * If uval has changed, let user space handle it.
2644 */
2645 ret = (curval == uval) ? 0 : -EAGAIN;
2646
Ingo Molnarc87e2832006-06-27 02:54:58 -07002647out_unlock:
2648 spin_unlock(&hb->lock);
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02002649out_putkey:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002650 put_futex_key(&key);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002651 return ret;
2652
2653pi_faulted:
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002654 spin_unlock(&hb->lock);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002655 put_futex_key(&key);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002656
Thomas Gleixnerd0725992009-06-11 23:15:43 +02002657 ret = fault_in_user_writeable(uaddr);
Darren Hartb5686362008-12-18 15:06:34 -08002658 if (!ret)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002659 goto retry;
2660
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661 return ret;
2662}
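
/*
 * Illustrative user-space sketch (not part of this file): the TID -> 0
 * fast path whose failure is handled by futex_unlock_pi() above. If the
 * word still holds exactly our TID there are no kernel waiters and no
 * syscall is needed; if FUTEX_WAITERS (or FUTEX_OWNER_DIED) is set the
 * kernel must hand the lock to the top waiter. gettid() is an assumption
 * here, as above:
 *
 *	static void pi_unlock(uint32_t *futex_word)
 *	{
 *		uint32_t expected = gettid();
 *
 *		if (__atomic_compare_exchange_n(futex_word, &expected, 0,
 *						0, __ATOMIC_RELEASE,
 *						__ATOMIC_RELAXED))
 *			return;
 *		syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *	}
 */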
2663
Darren Hart52400ba2009-04-03 13:40:49 -07002664/**
2665 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2666 * @hb:		the hash_bucket futex_q was originally enqueued on
2667 * @q: the futex_q woken while waiting to be requeued
2668 * @key2: the futex_key of the requeue target futex
2669 * @timeout: the timeout associated with the wait (NULL if none)
2670 *
2671 * Detect if the task was woken on the initial futex as opposed to the requeue
2672 * target futex. If so, determine if it was a timeout or a signal that caused
2673 * the wakeup and return the appropriate error code to the caller. Must be
2674 * called with the hb lock held.
2675 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002676 * Return:
2677 * 0 = no early wakeup detected;
2678 * <0 = -ETIMEDOUT or -ERESTARTNOINTR
Darren Hart52400ba2009-04-03 13:40:49 -07002679 */
2680static inline
2681int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2682 struct futex_q *q, union futex_key *key2,
2683 struct hrtimer_sleeper *timeout)
2684{
2685 int ret = 0;
2686
2687 /*
2688 * With the hb lock held, we avoid races while we process the wakeup.
2689 * We only need to hold hb (and not hb2) to ensure atomicity as the
2690 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2691 * It can't be requeued from uaddr2 to something else since we don't
2692 * support a PI aware source futex for requeue.
2693 */
2694 if (!match_futex(&q->key, key2)) {
2695 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2696 /*
2697 * We were woken prior to requeue by a timeout or a signal.
2698 * Unqueue the futex_q and determine which it was.
2699 */
Lai Jiangshan2e129782010-12-22 14:18:50 +08002700 plist_del(&q->list, &hb->chain);
Linus Torvalds11d46162014-03-20 22:11:17 -07002701 hb_waiters_dec(hb);
Darren Hart52400ba2009-04-03 13:40:49 -07002702
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002703 /* Handle spurious wakeups gracefully */
Thomas Gleixner11df6dd2009-10-28 20:26:48 +01002704 ret = -EWOULDBLOCK;
Darren Hart52400ba2009-04-03 13:40:49 -07002705 if (timeout && !timeout->task)
2706 ret = -ETIMEDOUT;
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002707 else if (signal_pending(current))
Thomas Gleixner1c840c12009-05-20 09:22:40 +02002708 ret = -ERESTARTNOINTR;
Darren Hart52400ba2009-04-03 13:40:49 -07002709 }
2710 return ret;
2711}
2712
2713/**
2714 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
Darren Hart56ec1602009-09-21 22:29:59 -07002715 * @uaddr: the futex we initially wait on (non-pi)
Darren Hartb41277d2010-11-08 13:10:09 -08002716 * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.); both futexes must be
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07002717 *		of the same type (no requeueing from private to shared, etc.)
Darren Hart52400ba2009-04-03 13:40:49 -07002718 * @val: the expected value of uaddr
2719 * @abs_time: absolute timeout
Darren Hart56ec1602009-09-21 22:29:59 -07002720 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
Darren Hart52400ba2009-04-03 13:40:49 -07002721 * @uaddr2: the pi futex we will take prior to returning to user-space
2722 *
2723 * The caller will wait on uaddr and will be requeued by futex_requeue() to
Darren Hart6f7b0a22012-07-20 11:53:31 -07002724 * uaddr2, which must be PI aware and distinct from uaddr. Normal wakeup will wake
2725 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2726 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
2727 * without one, the pi logic would not know which task to boost/deboost, if
2728 * there was a need to.
Darren Hart52400ba2009-04-03 13:40:49 -07002729 *
2730 * We call schedule in futex_wait_queue_me() when we enqueue and return there
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002731 * via the following--
Darren Hart52400ba2009-04-03 13:40:49 -07002732 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
Darren Hartcc6db4e2009-07-31 16:20:10 -07002733 * 2) wakeup on uaddr2 after a requeue
2734 * 3) signal
2735 * 4) timeout
Darren Hart52400ba2009-04-03 13:40:49 -07002736 *
Darren Hartcc6db4e2009-07-31 16:20:10 -07002737 * If 3, cleanup and return -ERESTARTNOINTR.
Darren Hart52400ba2009-04-03 13:40:49 -07002738 *
2739 * If 2, we may then block on trying to take the rt_mutex and return via:
2740 * 5) successful lock
2741 * 6) signal
2742 * 7) timeout
2743 * 8) other lock acquisition failure
2744 *
Darren Hartcc6db4e2009-07-31 16:20:10 -07002745 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
Darren Hart52400ba2009-04-03 13:40:49 -07002746 *
2747 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2748 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002749 * Return:
2750 * 0 - On success;
Darren Hart52400ba2009-04-03 13:40:49 -07002751 * <0 - On error
2752 */
Darren Hartb41277d2010-11-08 13:10:09 -08002753static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
Darren Hart52400ba2009-04-03 13:40:49 -07002754 u32 val, ktime_t *abs_time, u32 bitset,
Darren Hartb41277d2010-11-08 13:10:09 -08002755 u32 __user *uaddr2)
Darren Hart52400ba2009-04-03 13:40:49 -07002756{
2757 struct hrtimer_sleeper timeout, *to = NULL;
2758 struct rt_mutex_waiter rt_waiter;
2759 struct rt_mutex *pi_mutex = NULL;
Darren Hart52400ba2009-04-03 13:40:49 -07002760 struct futex_hash_bucket *hb;
Darren Hart5bdb05f2010-11-08 13:40:28 -08002761 union futex_key key2 = FUTEX_KEY_INIT;
2762 struct futex_q q = futex_q_init;
Darren Hart52400ba2009-04-03 13:40:49 -07002763 int res, ret;
Darren Hart52400ba2009-04-03 13:40:49 -07002764
Darren Hart6f7b0a22012-07-20 11:53:31 -07002765 if (uaddr == uaddr2)
2766 return -EINVAL;
2767
Darren Hart52400ba2009-04-03 13:40:49 -07002768 if (!bitset)
2769 return -EINVAL;
2770
2771 if (abs_time) {
2772 to = &timeout;
Darren Hartb41277d2010-11-08 13:10:09 -08002773 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2774 CLOCK_REALTIME : CLOCK_MONOTONIC,
2775 HRTIMER_MODE_ABS);
Darren Hart52400ba2009-04-03 13:40:49 -07002776 hrtimer_init_sleeper(to, current);
2777 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2778 current->timer_slack_ns);
2779 }
2780
2781 /*
2782 * The waiter is allocated on our stack, manipulated by the requeue
2783 * code while we sleep on uaddr.
2784 */
2785 debug_rt_mutex_init_waiter(&rt_waiter);
Peter Zijlstrafb00aca2013-11-07 14:43:43 +01002786 RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
2787 RB_CLEAR_NODE(&rt_waiter.tree_entry);
Darren Hart52400ba2009-04-03 13:40:49 -07002788 rt_waiter.task = NULL;
2789
Shawn Bohrer9ea71502011-06-30 11:21:32 -05002790 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
Darren Hart52400ba2009-04-03 13:40:49 -07002791 if (unlikely(ret != 0))
2792 goto out;
2793
Darren Hart84bc4af2009-08-13 17:36:53 -07002794 q.bitset = bitset;
2795 q.rt_waiter = &rt_waiter;
2796 q.requeue_pi_key = &key2;
2797
Darren Hart7ada8762010-10-17 08:35:04 -07002798 /*
2799 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
2800 * count.
2801 */
Darren Hartb41277d2010-11-08 13:10:09 -08002802 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
Thomas Gleixnerc8b15a72009-05-20 09:18:50 +02002803 if (ret)
2804 goto out_key2;
Darren Hart52400ba2009-04-03 13:40:49 -07002805
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00002806 /*
2807 * The check above which compares uaddrs is not sufficient for
2808 * shared futexes. We need to compare the keys:
2809 */
2810 if (match_futex(&q.key, &key2)) {
Thomas Gleixner13c42c22014-09-11 23:44:35 +02002811 queue_unlock(hb);
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00002812 ret = -EINVAL;
2813 goto out_put_keys;
2814 }
2815
Darren Hart52400ba2009-04-03 13:40:49 -07002816 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002817 futex_wait_queue_me(hb, &q, to);
Darren Hart52400ba2009-04-03 13:40:49 -07002818
2819 spin_lock(&hb->lock);
2820 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2821 spin_unlock(&hb->lock);
2822 if (ret)
2823 goto out_put_keys;
2824
2825 /*
2826 * In order for us to be here, we know our q.key == key2, and since
2827 * we took the hb->lock above, we also know that futex_requeue() has
2828 * completed and we no longer have to concern ourselves with a wakeup
Darren Hart7ada8762010-10-17 08:35:04 -07002829 * race with the atomic proxy lock acquisition by the requeue code. The
2830 * futex_requeue dropped our key1 reference and incremented our key2
2831 * reference count.
Darren Hart52400ba2009-04-03 13:40:49 -07002832 */
2833
2834 /* Check if the requeue code acquired the second futex for us. */
2835 if (!q.rt_waiter) {
2836 /*
2837 * Got the lock. We might not be the anticipated owner if we
2838 * did a lock-steal - fix up the PI-state in that case.
2839 */
2840 if (q.pi_state && (q.pi_state->owner != current)) {
2841 spin_lock(q.lock_ptr);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002842 ret = fixup_pi_state_owner(uaddr2, &q, current);
Thomas Gleixnerfb75a422015-12-19 20:07:38 +00002843 /*
2844 * Drop the reference to the pi state which
2845 * the requeue_pi() code acquired for us.
2846 */
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00002847 put_pi_state(q.pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07002848 spin_unlock(q.lock_ptr);
2849 }
2850 } else {
2851 /*
2852 * We have been woken up by futex_unlock_pi(), a timeout, or a
2853 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
2854 * the pi_state.
2855 */
Darren Hartf27071c2012-07-20 11:53:30 -07002856 WARN_ON(!q.pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07002857 pi_mutex = &q.pi_state->pi_mutex;
Thomas Gleixnerc051b212014-05-22 03:25:50 +00002858 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
Darren Hart52400ba2009-04-03 13:40:49 -07002859 debug_rt_mutex_free_waiter(&rt_waiter);
2860
2861 spin_lock(q.lock_ptr);
2862 /*
2863 * Fixup the pi_state owner and possibly acquire the lock if we
2864 * haven't already.
2865 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002866 res = fixup_owner(uaddr2, &q, !ret);
Darren Hart52400ba2009-04-03 13:40:49 -07002867 /*
2868 * If fixup_owner() returned an error, propagate that. If it
Darren Hart56ec1602009-09-21 22:29:59 -07002869 * acquired the lock, clear -ETIMEDOUT or -EINTR.
Darren Hart52400ba2009-04-03 13:40:49 -07002870 */
2871 if (res)
2872 ret = (res < 0) ? res : 0;
2873
2874 /* Unqueue and drop the lock. */
2875 unqueue_me_pi(&q);
2876 }
2877
2878 /*
2879 * If fixup_pi_state_owner() faulted and was unable to handle the
2880 * fault, unlock the rt_mutex and return the fault to userspace.
2881 */
2882 if (ret == -EFAULT) {
Darren Hartb6070a82012-07-20 11:53:29 -07002883 if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
Darren Hart52400ba2009-04-03 13:40:49 -07002884 rt_mutex_unlock(pi_mutex);
2885 } else if (ret == -EINTR) {
Darren Hart52400ba2009-04-03 13:40:49 -07002886 /*
Darren Hartcc6db4e2009-07-31 16:20:10 -07002887 * We've already been requeued, but cannot restart by calling
2888 * futex_lock_pi() directly. We could restart this syscall, but
2889 * it would detect that the user space "val" changed and return
2890 * -EWOULDBLOCK. Save the overhead of the restart and return
2891 * -EWOULDBLOCK directly.
Darren Hart52400ba2009-04-03 13:40:49 -07002892 */
Thomas Gleixner20708872009-05-19 23:04:59 +02002893 ret = -EWOULDBLOCK;
Darren Hart52400ba2009-04-03 13:40:49 -07002894 }
2895
2896out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002897 put_futex_key(&q.key);
Thomas Gleixnerc8b15a72009-05-20 09:18:50 +02002898out_key2:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002899 put_futex_key(&key2);
Darren Hart52400ba2009-04-03 13:40:49 -07002900
2901out:
2902 if (to) {
2903 hrtimer_cancel(&to->timer);
2904 destroy_hrtimer_on_stack(&to->timer);
2905 }
2906 return ret;
2907}
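
/*
 * Illustrative user-space sketch (not part of this file): the condition-
 * variable style protocol that futex_wait_requeue_pi() above implements
 * together with futex_requeue(). The cond/mutex layout, the pi_lock()/
 * pi_unlock() helpers from the earlier sketches and the (simplified)
 * sequence-counter handling are all assumptions for the example:
 *
 *	Waiter (cond_wait, entered with the PI mutex held):
 *		uint32_t val = cond->seq;
 *		pi_unlock(&mutex->futex_word);
 *		syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI, val,
 *			NULL, &mutex->futex_word, 0);
 *		A zero return means the kernel has already made us the owner
 *		of the PI mutex (either atomically or after blocking as a
 *		requeued rt-mutex waiter), so the caller returns holding it.
 *
 *	Signaler (cond_signal):
 *		cond->seq++;
 *		syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI,
 *			1, 0, &mutex->futex_word, cond->seq);
 *		nr_wake must be 1; nr_requeue (0 here, INT_MAX for a
 *		broadcast) waiters beyond the first are requeued onto the
 *		PI futex instead of being woken.
 */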
2908
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002909/*
2910 * Support for robust futexes: the kernel cleans up held futexes at
2911 * thread exit time.
2912 *
2913 * Implementation: user-space maintains a per-thread list of locks it
2914 * is holding. Upon do_exit(), the kernel carefully walks this list,
2915 * and marks all locks that are owned by this thread with the
Ingo Molnarc87e2832006-06-27 02:54:58 -07002916 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002917 * always manipulated with the lock held, so the list is private and
2918 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2919 * field, to allow the kernel to clean up if the thread dies after
2920 * acquiring the lock, but just before it could have added itself to
2921 * the list. There can only be one such pending lock.
2922 */
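
/*
 * Illustrative user-space sketch (not part of this file) of the contract
 * described above, roughly as a robust-mutex implementation would use it.
 * The lock_entry layout is an assumption for the example; only the
 * registered head and the futex_offset relationship matter to the kernel
 * (<linux/futex.h>, <stddef.h> and <sys/syscall.h> assumed):
 *
 *	struct lock_entry {
 *		struct robust_list list;
 *		uint32_t futex_word;
 *	};
 *
 *	static struct robust_list_head head = {
 *		.list		 = { &head.list },
 *		.futex_offset	 = offsetof(struct lock_entry, futex_word) -
 *				   offsetof(struct lock_entry, list),
 *		.list_op_pending = NULL,
 *	};
 *
 *	Once per thread:
 *		syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 *	Around each acquisition: point head.list_op_pending at the entry,
 *	cmpxchg the TID into futex_word, link the entry into head.list and
 *	only then clear list_op_pending. If the thread dies anywhere in that
 *	window, exit_robust_list() below walks the list (and list_op_pending),
 *	sets FUTEX_OWNER_DIED in each held word and wakes a waiter; robust
 *	pthread mutexes surface this as EOWNERDEAD.
 */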
2923
2924/**
Darren Hartd96ee562009-09-21 22:30:22 -07002925 * sys_set_robust_list() - Set the robust-futex list head of a task
2926 * @head: pointer to the list-head
2927 * @len: length of the list-head, as userspace expects
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002928 */
Heiko Carstens836f92a2009-01-14 14:14:33 +01002929SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2930 size_t, len)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002931{
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08002932 if (!futex_cmpxchg_enabled)
2933 return -ENOSYS;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002934 /*
2935 * The kernel knows only one size for now:
2936 */
2937 if (unlikely(len != sizeof(*head)))
2938 return -EINVAL;
2939
2940 current->robust_list = head;
2941
2942 return 0;
2943}
2944
2945/**
Darren Hartd96ee562009-09-21 22:30:22 -07002946 * sys_get_robust_list() - Get the robust-futex list head of a task
2947 * @pid: pid of the process [zero for current task]
2948 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2949 * @len_ptr: pointer to a length field, the kernel fills in the header size
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002950 */
Heiko Carstens836f92a2009-01-14 14:14:33 +01002951SYSCALL_DEFINE3(get_robust_list, int, pid,
2952 struct robust_list_head __user * __user *, head_ptr,
2953 size_t __user *, len_ptr)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002954{
Al Viroba46df92006-10-10 22:46:07 +01002955 struct robust_list_head __user *head;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002956 unsigned long ret;
Kees Cookbdbb7762012-03-19 16:12:53 -07002957 struct task_struct *p;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002958
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08002959 if (!futex_cmpxchg_enabled)
2960 return -ENOSYS;
2961
Kees Cookbdbb7762012-03-19 16:12:53 -07002962 rcu_read_lock();
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002963
Kees Cookbdbb7762012-03-19 16:12:53 -07002964 ret = -ESRCH;
2965 if (!pid)
2966 p = current;
2967 else {
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07002968 p = find_task_by_vpid(pid);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002969 if (!p)
2970 goto err_unlock;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002971 }
2972
Kees Cookbdbb7762012-03-19 16:12:53 -07002973 ret = -EPERM;
Jann Horncaaee622016-01-20 15:00:04 -08002974 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
Kees Cookbdbb7762012-03-19 16:12:53 -07002975 goto err_unlock;
2976
2977 head = p->robust_list;
2978 rcu_read_unlock();
2979
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002980 if (put_user(sizeof(*head), len_ptr))
2981 return -EFAULT;
2982 return put_user(head, head_ptr);
2983
2984err_unlock:
Oleg Nesterovaaa2a972006-09-29 02:00:55 -07002985 rcu_read_unlock();
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002986
2987 return ret;
2988}
2989
2990/*
2991 * Process a futex-list entry, check whether it's owned by the
2992 * dying task, and do notification if so:
2993 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02002994int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002995{
Vitaliy Ivanov7cfdaf32011-07-07 15:10:31 +03002996 u32 uval, uninitialized_var(nval), mval;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08002997
Ingo Molnar8f17d3a2006-03-27 01:16:27 -08002998retry:
2999 if (get_user(uval, uaddr))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003000 return -1;
3001
Pavel Emelyanovb4888932007-10-18 23:40:14 -07003002 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003003 /*
3004 * Ok, this dying thread is truly holding a futex
3005 * of interest. Set the OWNER_DIED bit atomically
3006 * via cmpxchg, and if the value had FUTEX_WAITERS
3007 * set, wake up a waiter (if any). (We have to do a
3008 * futex_wake() even if OWNER_DIED is already set -
3009 * to handle the rare but possible case of recursive
3010 * thread-death.) The rest of the cleanup is done in
3011 * userspace.
3012 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003013 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
Thomas Gleixner6e0aa9f2011-03-14 10:34:35 +01003014 /*
3015 * We are not holding a lock here, but we want to have
3016 * the pagefault_disable/enable() protection because
3017 * we want to handle the fault gracefully. If the
3018 * access fails we try to fault in the futex with R/W
3019 * verification via get_user_pages. get_user() above
3020 * does not guarantee R/W access. If that fails we
3021 * give up and leave the futex locked.
3022 */
3023 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
3024 if (fault_in_user_writeable(uaddr))
3025 return -1;
3026 goto retry;
3027 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07003028 if (nval != uval)
Ingo Molnar8f17d3a2006-03-27 01:16:27 -08003029 goto retry;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003030
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003031 /*
3032 * Wake robust non-PI futexes here. The wakeup of
3033 * PI futexes happens in exit_pi_state():
3034 */
Thomas Gleixner36cf3b52007-07-15 23:41:20 -07003035 if (!pi && (uval & FUTEX_WAITERS))
Peter Zijlstrac2f9f202008-09-26 19:32:23 +02003036 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003037 }
3038 return 0;
3039}
3040
3041/*
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003042 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3043 */
3044static inline int fetch_robust_entry(struct robust_list __user **entry,
Al Viroba46df92006-10-10 22:46:07 +01003045 struct robust_list __user * __user *head,
Namhyung Kim1dcc41b2010-09-14 21:43:46 +09003046 unsigned int *pi)
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003047{
3048 unsigned long uentry;
3049
Al Viroba46df92006-10-10 22:46:07 +01003050 if (get_user(uentry, (unsigned long __user *)head))
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003051 return -EFAULT;
3052
Al Viroba46df92006-10-10 22:46:07 +01003053 *entry = (void __user *)(uentry & ~1UL);
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003054 *pi = uentry & 1;
3055
3056 return 0;
3057}
3058
3059/*
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003060 * Walk curr->robust_list (very carefully, it's a userspace list!)
3061 * and mark any locks found there dead, and notify any waiters.
3062 *
3063 * We silently return on any sign of a list-walking problem.
3064 */
3065void exit_robust_list(struct task_struct *curr)
3066{
3067 struct robust_list_head __user *head = curr->robust_list;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003068 struct robust_list __user *entry, *next_entry, *pending;
Darren Hart4c115e92010-11-04 15:00:00 -04003069 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3070 unsigned int uninitialized_var(next_pi);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003071 unsigned long futex_offset;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003072 int rc;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003073
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08003074 if (!futex_cmpxchg_enabled)
3075 return;
3076
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003077 /*
3078 * Fetch the list head (which was registered earlier, via
3079 * sys_set_robust_list()):
3080 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003081 if (fetch_robust_entry(&entry, &head->list.next, &pi))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003082 return;
3083 /*
3084 * Fetch the relative futex offset:
3085 */
3086 if (get_user(futex_offset, &head->futex_offset))
3087 return;
3088 /*
3089 * Fetch any possibly pending lock-add first, and handle it
3090 * if it exists:
3091 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003092 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003093 return;
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003094
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003095 next_entry = NULL; /* avoid warning with gcc */
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003096 while (entry != &head->list) {
3097 /*
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003098 * Fetch the next entry in the list before calling
3099 * handle_futex_death:
3100 */
3101 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3102 /*
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003103 * A pending lock might already be on the list, so
Ingo Molnarc87e2832006-06-27 02:54:58 -07003104 * don't process it twice:
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003105 */
3106 if (entry != pending)
Al Viroba46df92006-10-10 22:46:07 +01003107 if (handle_futex_death((void __user *)entry + futex_offset,
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003108 curr, pi))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003109 return;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003110 if (rc)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003111 return;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003112 entry = next_entry;
3113 pi = next_pi;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003114 /*
3115 * Avoid excessively long or circular lists:
3116 */
3117 if (!--limit)
3118 break;
3119
3120 cond_resched();
3121 }
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003122
3123 if (pending)
3124 handle_futex_death((void __user *)pending + futex_offset,
3125 curr, pip);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003126}
3127
Pierre Peifferc19384b2007-05-09 02:35:02 -07003128long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
Ingo Molnare2970f22006-06-27 02:54:47 -07003129 u32 __user *uaddr2, u32 val2, u32 val3)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130{
Thomas Gleixner81b40532012-02-15 12:17:09 +01003131 int cmd = op & FUTEX_CMD_MASK;
Darren Hartb41277d2010-11-08 13:10:09 -08003132 unsigned int flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133
Eric Dumazet34f01cc2007-05-09 02:35:04 -07003134 if (!(op & FUTEX_PRIVATE_FLAG))
Darren Hartb41277d2010-11-08 13:10:09 -08003135 flags |= FLAGS_SHARED;
Eric Dumazet34f01cc2007-05-09 02:35:04 -07003136
Darren Hartb41277d2010-11-08 13:10:09 -08003137 if (op & FUTEX_CLOCK_REALTIME) {
3138 flags |= FLAGS_CLOCKRT;
Darren Hart337f1302015-12-18 13:36:37 -08003139		if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET &&
3140 cmd != FUTEX_WAIT_REQUEUE_PI)
Darren Hartb41277d2010-11-08 13:10:09 -08003141 return -ENOSYS;
3142 }
Eric Dumazet34f01cc2007-05-09 02:35:04 -07003143
3144 switch (cmd) {
Thomas Gleixner59263b52012-02-15 12:08:34 +01003145 case FUTEX_LOCK_PI:
3146 case FUTEX_UNLOCK_PI:
3147 case FUTEX_TRYLOCK_PI:
3148 case FUTEX_WAIT_REQUEUE_PI:
3149 case FUTEX_CMP_REQUEUE_PI:
3150 if (!futex_cmpxchg_enabled)
3151 return -ENOSYS;
3152 }
3153
3154 switch (cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 case FUTEX_WAIT:
Thomas Gleixnercd689982008-02-01 17:45:14 +01003156 val3 = FUTEX_BITSET_MATCH_ANY;
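		/* fall through: FUTEX_WAIT is FUTEX_WAIT_BITSET with a match-any bitset */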
3157 case FUTEX_WAIT_BITSET:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003158 return futex_wait(uaddr, flags, val, timeout, val3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 case FUTEX_WAKE:
Thomas Gleixnercd689982008-02-01 17:45:14 +01003160 val3 = FUTEX_BITSET_MATCH_ANY;
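		/* fall through: FUTEX_WAKE is FUTEX_WAKE_BITSET with a match-any bitset */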
3161 case FUTEX_WAKE_BITSET:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003162 return futex_wake(uaddr, flags, val, val3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 case FUTEX_REQUEUE:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003164 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 case FUTEX_CMP_REQUEUE:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003166 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
Jakub Jelinek4732efb2005-09-06 15:16:25 -07003167 case FUTEX_WAKE_OP:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003168 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
Ingo Molnarc87e2832006-06-27 02:54:58 -07003169 case FUTEX_LOCK_PI:
Michael Kerrisk996636d2015-01-16 20:28:06 +01003170 return futex_lock_pi(uaddr, flags, timeout, 0);
Ingo Molnarc87e2832006-06-27 02:54:58 -07003171 case FUTEX_UNLOCK_PI:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003172 return futex_unlock_pi(uaddr, flags);
Ingo Molnarc87e2832006-06-27 02:54:58 -07003173 case FUTEX_TRYLOCK_PI:
Michael Kerrisk996636d2015-01-16 20:28:06 +01003174 return futex_lock_pi(uaddr, flags, NULL, 1);
Darren Hart52400ba2009-04-03 13:40:49 -07003175 case FUTEX_WAIT_REQUEUE_PI:
3176 val3 = FUTEX_BITSET_MATCH_ANY;
Thomas Gleixner81b40532012-02-15 12:17:09 +01003177 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3178 uaddr2);
Darren Hart52400ba2009-04-03 13:40:49 -07003179 case FUTEX_CMP_REQUEUE_PI:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003180 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181 }
Thomas Gleixner81b40532012-02-15 12:17:09 +01003182 return -ENOSYS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183}
3184
3185
Heiko Carstens17da2bd2009-01-14 14:14:10 +01003186SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3187 struct timespec __user *, utime, u32 __user *, uaddr2,
3188 u32, val3)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189{
Pierre Peifferc19384b2007-05-09 02:35:02 -07003190 struct timespec ts;
3191 ktime_t t, *tp = NULL;
Ingo Molnare2970f22006-06-27 02:54:47 -07003192 u32 val2 = 0;
Eric Dumazet34f01cc2007-05-09 02:35:04 -07003193 int cmd = op & FUTEX_CMD_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003194
Thomas Gleixnercd689982008-02-01 17:45:14 +01003195 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
Darren Hart52400ba2009-04-03 13:40:49 -07003196 cmd == FUTEX_WAIT_BITSET ||
3197 cmd == FUTEX_WAIT_REQUEUE_PI)) {
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07003198 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3199 return -EFAULT;
Pierre Peifferc19384b2007-05-09 02:35:02 -07003200 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201 return -EFAULT;
Pierre Peifferc19384b2007-05-09 02:35:02 -07003202 if (!timespec_valid(&ts))
Thomas Gleixner9741ef962006-03-31 02:31:32 -08003203 return -EINVAL;
Pierre Peifferc19384b2007-05-09 02:35:02 -07003204
3205 t = timespec_to_ktime(ts);
Eric Dumazet34f01cc2007-05-09 02:35:04 -07003206 if (cmd == FUTEX_WAIT)
Thomas Gleixner5a7780e2008-02-13 09:20:43 +01003207 t = ktime_add_safe(ktime_get(), t);
Pierre Peifferc19384b2007-05-09 02:35:02 -07003208 tp = &t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 }
3210 /*
Darren Hart52400ba2009-04-03 13:40:49 -07003211 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
Andreas Schwabf54f0982007-07-31 00:38:51 -07003212 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213 */
Andreas Schwabf54f0982007-07-31 00:38:51 -07003214 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
Darren Hartba9c22f2009-04-20 22:22:22 -07003215 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
Ingo Molnare2970f22006-06-27 02:54:47 -07003216 val2 = (u32) (unsigned long) utime;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217
Pierre Peifferc19384b2007-05-09 02:35:02 -07003218 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219}
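
/*
 * Illustrative user-space sketch (not part of this file) of the two
 * timeout conventions dispatched above (word is a uint32_t futex as in the
 * earlier sketches): FUTEX_WAIT takes a relative timeout, converted to an
 * absolute expiry with ktime_add_safe() before do_futex() runs, while
 * FUTEX_WAIT_BITSET takes an absolute timeout, against CLOCK_MONOTONIC
 * unless FUTEX_CLOCK_REALTIME is set:
 *
 *	struct timespec rel = { .tv_sec = 1 };
 *	syscall(SYS_futex, &word, FUTEX_WAIT, 0, &rel, NULL, 0);
 *
 *	struct timespec abs;
 *	clock_gettime(CLOCK_MONOTONIC, &abs);
 *	abs.tv_sec += 1;
 *	syscall(SYS_futex, &word, FUTEX_WAIT_BITSET, 0, &abs, NULL,
 *		FUTEX_BITSET_MATCH_ANY);
 */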
3220
Heiko Carstens03b8c7b2014-03-02 13:09:47 +01003221static void __init futex_detect_cmpxchg(void)
3222{
3223#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3224 u32 curval;
3225
3226 /*
3227 * This will fail and we want it. Some arch implementations do
3228 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3229 * functionality. We want to know that before we call in any
3230 * of the complex code paths. Also we want to prevent
3231 * registration of robust lists in that case. NULL is
3232 * guaranteed to fault and we get -EFAULT on functional
3233 * implementation, the non-functional ones will return
3234 * -ENOSYS.
3235 */
3236 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3237 futex_cmpxchg_enabled = 1;
3238#endif
3239}
3240
Benjamin Herrenschmidtf6d107f2008-03-27 14:52:15 +11003241static int __init futex_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242{
Heiko Carstens63b1a812014-01-16 14:54:50 +01003243 unsigned int futex_shift;
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -08003244 unsigned long i;
3245
3246#if CONFIG_BASE_SMALL
3247 futex_hashsize = 16;
3248#else
3249 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3250#endif
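
	/*
	 * Example sizing: with 16 possible CPUs the computation above asks
	 * for 256 * 16 = 4096 hash buckets (already a power of two here);
	 * alloc_large_system_hash() below reports the shift it actually
	 * used and futex_hashsize is recomputed from that.
	 */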
3251
3252 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3253 futex_hashsize, 0,
3254 futex_hashsize < 256 ? HASH_SMALL : 0,
Heiko Carstens63b1a812014-01-16 14:54:50 +01003255 &futex_shift, NULL,
3256 futex_hashsize, futex_hashsize);
3257 futex_hashsize = 1UL << futex_shift;
Heiko Carstens03b8c7b2014-03-02 13:09:47 +01003258
3259 futex_detect_cmpxchg();
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08003260
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -08003261 for (i = 0; i < futex_hashsize; i++) {
Linus Torvalds11d46162014-03-20 22:11:17 -07003262 atomic_set(&futex_queues[i].waiters, 0);
Dima Zavin732375c2011-07-07 17:27:59 -07003263 plist_head_init(&futex_queues[i].chain);
Thomas Gleixner3e4ab742008-02-23 15:23:55 -08003264 spin_lock_init(&futex_queues[i].lock);
3265 }
3266
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267 return 0;
3268}
Benjamin Herrenschmidtf6d107f2008-03-27 14:52:15 +11003269__initcall(futex_init);