// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
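
/*
 * Illustrative usage sketch (editor's example, not part of the locking core;
 * my_lock, my_data and my_update are hypothetical names). A mutex is either
 * defined statically with DEFINE_MUTEX() or set up at runtime with
 * mutex_init(), and must be released by the same task that acquired it:
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static int my_data;
 *
 *	static int my_update(int val)
 *	{
 *		if (mutex_lock_interruptible(&my_lock))
 *			return -EINTR;
 *		my_data = val;
 *		mutex_unlock(&my_lock);
 *		return 0;
 *	}
 */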

/*
 * @owner: contains a 'struct task_struct *' pointer to the current lock
 * owner; NULL means not owned. Since task_struct pointers are aligned to
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
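
/*
 * Illustrative sketch of the owner-word encoding (editor's example; it mirrors
 * __mutex_owner()/__owner_task()/__owner_flags() below). The low three bits of
 * lock->owner carry the flags, the remaining bits are the owning task_struct
 * pointer:
 *
 *	unsigned long val = atomic_long_read(&lock->owner);
 *	struct task_struct *owner = (struct task_struct *)(val & ~MUTEX_FLAGS);
 *	bool has_waiters = val & MUTEX_FLAG_WAITERS;
 */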

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);
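
/*
 * Illustrative use (editor's example; my_lock is hypothetical): because
 * mutex_is_locked() only reports that *some* task owns the lock, it is a
 * weaker assertion than lockdep_assert_held(), but it is handy in paths that
 * must also work without lockdep:
 *
 *	WARN_ON_ONCE(!mutex_is_locked(&my_lock));
 */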

__must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
	if (unlikely(__mutex_owner(lock) == current))
		return MUTEX_TRYLOCK_RECURSIVE;

	return mutex_trylock(lock);
}
EXPORT_SYMBOL(mutex_trylock_recursive);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void __sched
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock, and
 * __mutex_trylock() provides the matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

/*
 * Wait-Die:
 *   A newer transaction is killed when:
 *     It (the newer transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   A newer transaction is wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */

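/*
 * Illustrative acquire/backoff sketch (editor's example; obj_a, obj_b, ctx and
 * my_ww_class are hypothetical names). Whichever algorithm the ww_class uses,
 * callers follow the pattern from the ww-mutex design document: on -EDEADLK,
 * drop all held locks, take the contended one with ww_mutex_lock_slow(), then
 * re-acquire the rest (a full implementation loops on any further -EDEADLK,
 * and calls ww_acquire_fini() once everything is unlocked again):
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *
 *	ww_mutex_lock(&obj_a->lock, &ctx);
 *	ret = ww_mutex_lock(&obj_b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(&obj_a->lock);
 *		ww_mutex_lock_slow(&obj_b->lock, &ctx);
 *		ret = ww_mutex_lock(&obj_a->lock, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 */
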
/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{

	return (signed long)(a->stamp - b->stamp) > 0;
}
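
/*
 * Worked example (editor's note): stamps are allocated from an increasing
 * per-class counter and may eventually wrap, so the comparison above uses
 * modular subtraction interpreted as signed. E.g. with a->stamp == 2 and
 * b->stamp == ULONG_MAX, a->stamp - b->stamp == 3 > 0, so @a is still
 * correctly seen as the younger (later) context despite the counter having
 * wrapped in between.
 */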

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder. Even though multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx. If there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list,
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx		[W] MUTEX_FLAG_WAITERS
	 *     MB			    MB
	 * [R] MUTEX_FLAG_WAITERS	[R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined, only
	 * by acquiring wait_lock there is a guarantee that
	 * they are not invalid when reading.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To handle lock holder preemption, we skip spinning if the owner task
	 * is not running on a CPU or its CPU has been preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);


static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}


/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourself.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourself.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
 * smallest first, such that older contexts are preferred to acquire the lock
 * over younger contexts.
 *
 * Waiters without a context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die, kill ourself immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting; for
 * Wound-Wait, ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound that such that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}

Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 922 | /* |
| 923 | * Lock a mutex (possibly interruptible), slowpath: |
| 924 | */ |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 925 | static __always_inline int __sched |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 926 | __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 927 | struct lockdep_map *nest_lock, unsigned long ip, |
Tetsuo Handa | b026750 | 2013-10-17 19:45:29 +0900 | [diff] [blame] | 928 | struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 929 | { |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 930 | struct mutex_waiter waiter; |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 931 | bool first = false; |
Waiman Long | a40ca56 | 2016-08-26 19:35:08 -0400 | [diff] [blame] | 932 | struct ww_mutex *ww; |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 933 | int ret; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 934 | |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 935 | might_sleep(); |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 936 | |
Sebastian Andrzej Siewior | 6c11c6e | 2019-07-03 11:21:26 +0200 | [diff] [blame] | 937 | #ifdef CONFIG_DEBUG_MUTEXES |
| 938 | DEBUG_LOCKS_WARN_ON(lock->magic != lock); |
| 939 | #endif |
| 940 | |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 941 | ww = container_of(lock, struct ww_mutex, base); |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 942 | if (use_ww_ctx && ww_ctx) { |
Chris Wilson | 0422e83 | 2016-05-26 21:08:17 +0100 | [diff] [blame] | 943 | if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) |
| 944 | return -EALREADY; |
Thomas Hellstrom | 08295b3 | 2018-06-15 10:17:38 +0200 | [diff] [blame] | 945 | |
| 946 | /* |
| 947 | * Reset the wounded flag after a kill. No other process can |
| 948 | * race and wound us here since they can't have a valid owner |
| 949 | * pointer if we don't have any locks held. |
| 950 | */ |
| 951 | if (ww_ctx->acquired == 0) |
| 952 | ww_ctx->wounded = 0; |
Chris Wilson | 0422e83 | 2016-05-26 21:08:17 +0100 | [diff] [blame] | 953 | } |
| 954 | |
Peter Zijlstra | 41719b0 | 2009-01-14 15:36:26 +0100 | [diff] [blame] | 955 | preempt_disable(); |
Peter Zijlstra | e4c70a6 | 2011-05-24 17:12:03 -0700 | [diff] [blame] | 956 | mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); |
Frederic Weisbecker | c022602 | 2009-12-02 20:49:16 +0100 | [diff] [blame] | 957 | |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 958 | if (__mutex_trylock(lock) || |
Nicolai Hähnle | c516df9 | 2016-12-21 19:46:38 +0100 | [diff] [blame] | 959 | mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) { |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 960 | /* got the lock, yay! */ |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 961 | lock_acquired(&lock->dep_map, ip); |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 962 | if (use_ww_ctx && ww_ctx) |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 963 | ww_mutex_set_context_fastpath(ww, ww_ctx); |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 964 | preempt_enable(); |
| 965 | return 0; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 966 | } |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 967 | |
Peter Zijlstra | b9c16a0 | 2017-01-17 16:06:09 +0100 | [diff] [blame] | 968 | spin_lock(&lock->wait_lock); |
Jason Low | 1e820c9 | 2014-06-11 11:37:21 -0700 | [diff] [blame] | 969 | /* |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 970 | * After waiting to acquire the wait_lock, try again. |
Jason Low | 1e820c9 | 2014-06-11 11:37:21 -0700 | [diff] [blame] | 971 | */ |
Nicolai Hähnle | 659cf9f | 2016-12-21 19:46:36 +0100 | [diff] [blame] | 972 | if (__mutex_trylock(lock)) { |
| 973 | if (use_ww_ctx && ww_ctx) |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 974 | __ww_mutex_check_waiters(lock, ww_ctx); |
Nicolai Hähnle | 659cf9f | 2016-12-21 19:46:36 +0100 | [diff] [blame] | 975 | |
Davidlohr Bueso | ec83f42 | 2013-06-28 13:13:18 -0700 | [diff] [blame] | 976 | goto skip_wait; |
Nicolai Hähnle | 659cf9f | 2016-12-21 19:46:36 +0100 | [diff] [blame] | 977 | } |
Davidlohr Bueso | ec83f42 | 2013-06-28 13:13:18 -0700 | [diff] [blame] | 978 | |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 979 | debug_mutex_lock_common(lock, &waiter); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 980 | |
Nicolai Hähnle | 6baa5c6 | 2016-12-21 19:46:34 +0100 | [diff] [blame] | 981 | lock_contended(&lock->dep_map, ip); |
| 982 | |
| 983 | if (!use_ww_ctx) { |
| 984 | /* add waiting tasks to the end of the waitqueue (FIFO): */ |
Thomas Hellstrom | 08295b3 | 2018-06-15 10:17:38 +0200 | [diff] [blame] | 985 | __mutex_add_waiter(lock, &waiter, &lock->wait_list); |
| 986 | |
| 988 | #ifdef CONFIG_DEBUG_MUTEXES |
| 989 | waiter.ww_ctx = MUTEX_POISON_WW_CTX; |
| 990 | #endif |
Nicolai Hähnle | 6baa5c6 | 2016-12-21 19:46:34 +0100 | [diff] [blame] | 991 | } else { |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 992 | /* |
| 993 | * Add in stamp order, waking up waiters that must kill |
| 994 | * themselves. |
| 995 | */ |
Nicolai Hähnle | 6baa5c6 | 2016-12-21 19:46:34 +0100 | [diff] [blame] | 996 | ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx); |
| 997 | if (ret) |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 998 | goto err_early_kill; |
Nicolai Hähnle | 6baa5c6 | 2016-12-21 19:46:34 +0100 | [diff] [blame] | 999 | |
| 1000 | waiter.ww_ctx = ww_ctx; |
| 1001 | } |
| 1002 | |
Davidlohr Bueso | d269a8b | 2017-01-03 13:43:13 -0800 | [diff] [blame] | 1003 | waiter.task = current; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1004 | |
Davidlohr Bueso | 642fa44 | 2017-01-03 13:43:14 -0800 | [diff] [blame] | 1005 | set_current_state(state); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1006 | for (;;) { |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 1007 | /* |
| 1008 | * Once we hold wait_lock, we're serialized against |
| 1009 | * mutex_unlock() handing the lock off to us; do a trylock |
| 1010 | * before testing the error conditions to make sure we pick up |
| 1011 | * the handoff. |
| 1012 | */ |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 1013 | if (__mutex_trylock(lock)) |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 1014 | goto acquired; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1015 | |
| 1016 | /* |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 1017 | * Check for signals and kill conditions while holding |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 1018 | * wait_lock. This ensures the lock cancellation is ordered |
| 1019 | * against mutex_unlock() and wake-ups do not go missing. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1020 | */ |
Davidlohr Bueso | 3bb5f4a | 2019-01-03 15:28:44 -0800 | [diff] [blame] | 1021 | if (signal_pending_state(state, current)) { |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1022 | ret = -EINTR; |
| 1023 | goto err; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1024 | } |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1025 | |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 1026 | if (use_ww_ctx && ww_ctx) { |
| 1027 | ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1028 | if (ret) |
| 1029 | goto err; |
| 1030 | } |
| 1031 | |
Peter Zijlstra | b9c16a0 | 2017-01-17 16:06:09 +0100 | [diff] [blame] | 1032 | spin_unlock(&lock->wait_lock); |
Thomas Gleixner | bd2f553 | 2011-03-21 12:33:18 +0100 | [diff] [blame] | 1033 | schedule_preempt_disabled(); |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1034 | |
Nicolai Hähnle | 6baa5c6 | 2016-12-21 19:46:34 +0100 | [diff] [blame] | 1035 | /* |
| 1036 | * ww_mutex needs to always recheck its position since its waiter |
| 1037 | * list is not FIFO ordered. |
| 1038 | */ |
| 1039 | if ((use_ww_ctx && ww_ctx) || !first) { |
| 1040 | first = __mutex_waiter_is_first(lock, &waiter); |
| 1041 | if (first) |
| 1042 | __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF); |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1043 | } |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 1044 | |
Davidlohr Bueso | 642fa44 | 2017-01-03 13:43:14 -0800 | [diff] [blame] | 1045 | set_current_state(state); |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 1046 | /* |
| 1047 | * Here we order against unlock; we must either see it change |
| 1048 | * state back to RUNNING and fall through the next schedule(), |
| 1049 | * or we must see its unlock and acquire. |
| 1050 | */ |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 1051 | if (__mutex_trylock(lock) || |
Nicolai Hähnle | c516df9 | 2016-12-21 19:46:38 +0100 | [diff] [blame] | 1052 | (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter))) |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 1053 | break; |
| 1054 | |
Peter Zijlstra | b9c16a0 | 2017-01-17 16:06:09 +0100 | [diff] [blame] | 1055 | spin_lock(&lock->wait_lock); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1056 | } |
Peter Zijlstra | b9c16a0 | 2017-01-17 16:06:09 +0100 | [diff] [blame] | 1057 | spin_lock(&lock->wait_lock); |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 1058 | acquired: |
Davidlohr Bueso | 642fa44 | 2017-01-03 13:43:14 -0800 | [diff] [blame] | 1059 | __set_current_state(TASK_RUNNING); |
Davidlohr Bueso | 51587bc | 2015-01-19 17:39:21 -0800 | [diff] [blame] | 1060 | |
Thomas Hellstrom | 08295b3 | 2018-06-15 10:17:38 +0200 | [diff] [blame] | 1061 | if (use_ww_ctx && ww_ctx) { |
| 1062 | /* |
| 1063 | * Wound-Wait: we stole the lock (!first_waiter); check the |
| 1064 | * waiters, as anyone might want to wound us. |
| 1065 | */ |
| 1066 | if (!ww_ctx->is_wait_die && |
| 1067 | !__mutex_waiter_is_first(lock, &waiter)) |
| 1068 | __ww_mutex_check_waiters(lock, ww_ctx); |
| 1069 | } |
| 1070 | |
Davidlohr Bueso | d269a8b | 2017-01-03 13:43:13 -0800 | [diff] [blame] | 1071 | mutex_remove_waiter(lock, &waiter, current); |
Davidlohr Bueso | ec83f42 | 2013-06-28 13:13:18 -0700 | [diff] [blame] | 1072 | if (likely(list_empty(&lock->wait_list))) |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1073 | __mutex_clear_flag(lock, MUTEX_FLAGS); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1074 | |
Davidlohr Bueso | ec83f42 | 2013-06-28 13:13:18 -0700 | [diff] [blame] | 1075 | debug_mutex_free_waiter(&waiter); |
| 1076 | |
| 1077 | skip_wait: |
| 1078 | /* got the lock - cleanup and rejoice! */ |
| 1079 | lock_acquired(&lock->dep_map, ip); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1080 | |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 1081 | if (use_ww_ctx && ww_ctx) |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 1082 | ww_mutex_lock_acquired(ww, ww_ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1083 | |
Peter Zijlstra | b9c16a0 | 2017-01-17 16:06:09 +0100 | [diff] [blame] | 1084 | spin_unlock(&lock->wait_lock); |
Peter Zijlstra | 41719b0 | 2009-01-14 15:36:26 +0100 | [diff] [blame] | 1085 | preempt_enable(); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1086 | return 0; |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1087 | |
| 1088 | err: |
Davidlohr Bueso | 642fa44 | 2017-01-03 13:43:14 -0800 | [diff] [blame] | 1089 | __set_current_state(TASK_RUNNING); |
Davidlohr Bueso | d269a8b | 2017-01-03 13:43:13 -0800 | [diff] [blame] | 1090 | mutex_remove_waiter(lock, &waiter, current); |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 1091 | err_early_kill: |
Peter Zijlstra | b9c16a0 | 2017-01-17 16:06:09 +0100 | [diff] [blame] | 1092 | spin_unlock(&lock->wait_lock); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1093 | debug_mutex_free_waiter(&waiter); |
Qian Cai | 5facae4 | 2019-09-19 12:09:40 -0400 | [diff] [blame] | 1094 | mutex_release(&lock->dep_map, ip); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1095 | preempt_enable(); |
| 1096 | return ret; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1097 | } |
| 1098 | |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1099 | static int __sched |
| 1100 | __mutex_lock(struct mutex *lock, long state, unsigned int subclass, |
| 1101 | struct lockdep_map *nest_lock, unsigned long ip) |
| 1102 | { |
| 1103 | return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false); |
| 1104 | } |
| 1105 | |
| 1106 | static int __sched |
| 1107 | __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass, |
| 1108 | struct lockdep_map *nest_lock, unsigned long ip, |
| 1109 | struct ww_acquire_ctx *ww_ctx) |
| 1110 | { |
| 1111 | return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true); |
| 1112 | } |
| 1113 | |
Ingo Molnar | ef5d470 | 2006-07-03 00:24:55 -0700 | [diff] [blame] | 1114 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 1115 | void __sched |
| 1116 | mutex_lock_nested(struct mutex *lock, unsigned int subclass) |
| 1117 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1118 | __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_); |
Ingo Molnar | ef5d470 | 2006-07-03 00:24:55 -0700 | [diff] [blame] | 1119 | } |
| 1120 | |
| 1121 | EXPORT_SYMBOL_GPL(mutex_lock_nested); |
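
/*
 * Usage sketch: mutex_lock_nested() annotates a legitimate acquisition of a
 * second mutex of the same lock class (here a parent/child pair) so lockdep
 * does not report it as recursive locking.  SINGLE_DEPTH_NESTING comes from
 * <linux/lockdep.h>; "struct foo" and its fields are made up for illustration.
 */
static void foo_reparent(struct foo *parent, struct foo *child)
{
	mutex_lock(&parent->lock);
	/* same class as parent->lock, different lockdep subclass */
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

	/* ... move child under parent ... */

	mutex_unlock(&child->lock);
	mutex_unlock(&parent->lock);
}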
NeilBrown | d63a5a7 | 2006-12-08 02:36:17 -0800 | [diff] [blame] | 1122 | |
Peter Zijlstra | e4c70a6 | 2011-05-24 17:12:03 -0700 | [diff] [blame] | 1123 | void __sched |
| 1124 | _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) |
| 1125 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1126 | __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_); |
Peter Zijlstra | e4c70a6 | 2011-05-24 17:12:03 -0700 | [diff] [blame] | 1127 | } |
Peter Zijlstra | e4c70a6 | 2011-05-24 17:12:03 -0700 | [diff] [blame] | 1128 | EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); |
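
/*
 * Usage sketch: when a whole group of same-class mutexes is only ever taken
 * while an outer lock is held, that outer lock can be passed as the nest
 * lock, and lockdep will then accept many same-class acquisitions instead of
 * flagging them as recursive.  "struct foo_pool"/"struct foo" and their
 * fields are made-up names; this is an assumed pattern, not code from this
 * file.
 */
static void foo_quiesce_all(struct foo_pool *pool)
{
	struct foo *f;

	mutex_lock(&pool->pool_lock);

	list_for_each_entry(f, &pool->members, node)
		mutex_lock_nest_lock(&f->lock, &pool->pool_lock);

	/* ... every member is now quiesced ... */

	list_for_each_entry(f, &pool->members, node)
		mutex_unlock(&f->lock);

	mutex_unlock(&pool->pool_lock);
}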
| 1129 | |
NeilBrown | d63a5a7 | 2006-12-08 02:36:17 -0800 | [diff] [blame] | 1130 | int __sched |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 1131 | mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) |
| 1132 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1133 | return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_); |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 1134 | } |
| 1135 | EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); |
| 1136 | |
| 1137 | int __sched |
NeilBrown | d63a5a7 | 2006-12-08 02:36:17 -0800 | [diff] [blame] | 1138 | mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) |
| 1139 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1140 | return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_); |
NeilBrown | d63a5a7 | 2006-12-08 02:36:17 -0800 | [diff] [blame] | 1141 | } |
NeilBrown | d63a5a7 | 2006-12-08 02:36:17 -0800 | [diff] [blame] | 1142 | EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1143 | |
Tejun Heo | 1460cb6 | 2016-10-28 12:58:11 -0400 | [diff] [blame] | 1144 | void __sched |
| 1145 | mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) |
| 1146 | { |
| 1147 | int token; |
| 1148 | |
| 1149 | might_sleep(); |
| 1150 | |
| 1151 | token = io_schedule_prepare(); |
| 1152 | __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, |
| 1153 | subclass, NULL, _RET_IP_, NULL, 0); |
| 1154 | io_schedule_finish(token); |
| 1155 | } |
| 1156 | EXPORT_SYMBOL_GPL(mutex_lock_io_nested); |
| 1157 | |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 1158 | static inline int |
| 1159 | ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
| 1160 | { |
| 1161 | #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH |
| 1162 | unsigned tmp; |
| 1163 | |
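	/*
	 * Fault injection: once deadlock_inject_countdown acquisitions have
	 * gone by, pretend we lost a stamp race: drop the lock and return
	 * -EDEADLK so the caller's backoff/retry path gets exercised.  The
	 * interval then grows roughly 3.5x (capped at UINT_MAX), making the
	 * injection progressively rarer.
	 */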
| 1164 | if (ctx->deadlock_inject_countdown-- == 0) { |
| 1165 | tmp = ctx->deadlock_inject_interval; |
| 1166 | if (tmp > UINT_MAX/4) |
| 1167 | tmp = UINT_MAX; |
| 1168 | else |
| 1169 | tmp = tmp*2 + tmp + tmp/2; |
| 1170 | |
| 1171 | ctx->deadlock_inject_interval = tmp; |
| 1172 | ctx->deadlock_inject_countdown = tmp; |
| 1173 | ctx->contending_lock = lock; |
| 1174 | |
| 1175 | ww_mutex_unlock(lock); |
| 1176 | |
| 1177 | return -EDEADLK; |
| 1178 | } |
| 1179 | #endif |
| 1180 | |
| 1181 | return 0; |
| 1182 | } |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1183 | |
| 1184 | int __sched |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1185 | ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1186 | { |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 1187 | int ret; |
| 1188 | |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1189 | might_sleep(); |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1190 | ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, |
| 1191 | 0, ctx ? &ctx->dep_map : NULL, _RET_IP_, |
| 1192 | ctx); |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 1193 | if (!ret && ctx && ctx->acquired > 1) |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 1194 | return ww_mutex_deadlock_injection(lock, ctx); |
| 1195 | |
| 1196 | return ret; |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1197 | } |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1198 | EXPORT_SYMBOL_GPL(ww_mutex_lock); |
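
/*
 * Usage sketch: the wait/wound backoff loop for taking two ww_mutexes in an
 * arbitrary caller-chosen order, loosely following
 * Documentation/locking/ww-mutex-design.rst.  Assumptions: the caller has
 * done ww_acquire_init(ctx, &its_ww_class) beforehand, and calls
 * ww_acquire_fini(ctx) after both locks have been unlocked again;
 * "demo_lock_both", "a", "b" and "held" are made-up names.
 */
static int demo_lock_both(struct ww_mutex *a, struct ww_mutex *b,
			  struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *held = NULL;	/* lock already taken via lock_slow */
	struct ww_mutex *contended;
	int ret;

retry:
	if (held != a) {
		ret = ww_mutex_lock(a, ctx);
		if (ret) {
			contended = a;
			goto err;
		}
	}
	if (held != b) {
		ret = ww_mutex_lock(b, ctx);
		if (ret) {
			contended = b;
			goto err;
		}
	}

	ww_acquire_done(ctx);	/* no further locks will be added to ctx */
	return 0;

err:
	/* Drop whatever we still hold before backing off. */
	if (contended == b)
		ww_mutex_unlock(a);	/* a was taken above or via lock_slow */
	else if (held == b)
		ww_mutex_unlock(b);	/* failed on a while still holding b */

	if (ret == -EDEADLK) {
		/* We lost the stamp race: sleep on the contended lock, retry. */
		ww_mutex_lock_slow(contended, ctx);
		held = contended;
		goto retry;
	}
	return ret;
}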
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1199 | |
| 1200 | int __sched |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1201 | ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1202 | { |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 1203 | int ret; |
| 1204 | |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1205 | might_sleep(); |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1206 | ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, |
| 1207 | 0, ctx ? &ctx->dep_map : NULL, _RET_IP_, |
| 1208 | ctx); |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 1209 | |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 1210 | if (!ret && ctx && ctx->acquired > 1) |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 1211 | return ww_mutex_deadlock_injection(lock, ctx); |
| 1212 | |
| 1213 | return ret; |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1214 | } |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1215 | EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1216 | |
Ingo Molnar | ef5d470 | 2006-07-03 00:24:55 -0700 | [diff] [blame] | 1217 | #endif |
| 1218 | |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1219 | /* |
| 1220 | * Release the lock, slowpath: |
| 1221 | */ |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1222 | static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1223 | { |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1224 | struct task_struct *next = NULL; |
Waiman Long | 194a6b5 | 2016-11-17 11:46:38 -0500 | [diff] [blame] | 1225 | DEFINE_WAKE_Q(wake_q); |
Peter Zijlstra | b9c16a0 | 2017-01-17 16:06:09 +0100 | [diff] [blame] | 1226 | unsigned long owner; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1227 | |
Qian Cai | 5facae4 | 2019-09-19 12:09:40 -0400 | [diff] [blame] | 1228 | mutex_release(&lock->dep_map, ip); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1229 | |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1230 | /* |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1231 | * Release the lock before (potentially) taking the spinlock such that |
| 1232 | * other contenders can get on with things ASAP. |
| 1233 | * |
| 1234 | * Except when HANDOFF; in that case we must not clear the owner field, |
| 1235 | * but instead set it to the top waiter. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1236 | */ |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1237 | owner = atomic_long_read(&lock->owner); |
| 1238 | for (;;) { |
| 1239 | unsigned long old; |
| 1240 | |
| 1241 | #ifdef CONFIG_DEBUG_MUTEXES |
| 1242 | DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current); |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 1243 | DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP); |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1244 | #endif |
| 1245 | |
| 1246 | if (owner & MUTEX_FLAG_HANDOFF) |
| 1247 | break; |
| 1248 | |
| 1249 | old = atomic_long_cmpxchg_release(&lock->owner, owner, |
| 1250 | __owner_flags(owner)); |
| 1251 | if (old == owner) { |
| 1252 | if (owner & MUTEX_FLAG_WAITERS) |
| 1253 | break; |
| 1254 | |
| 1255 | return; |
| 1256 | } |
| 1257 | |
| 1258 | owner = old; |
| 1259 | } |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1260 | |
Peter Zijlstra | b9c16a0 | 2017-01-17 16:06:09 +0100 | [diff] [blame] | 1261 | spin_lock(&lock->wait_lock); |
Jason Low | 1d8fe7d | 2014-01-28 11:13:14 -0800 | [diff] [blame] | 1262 | debug_mutex_unlock(lock); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1263 | if (!list_empty(&lock->wait_list)) { |
| 1264 | /* get the first entry from the wait-list: */ |
| 1265 | struct mutex_waiter *waiter = |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1266 | list_first_entry(&lock->wait_list, |
| 1267 | struct mutex_waiter, list); |
| 1268 | |
| 1269 | next = waiter->task; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1270 | |
| 1271 | debug_mutex_wake_waiter(lock, waiter); |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1272 | wake_q_add(&wake_q, next); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1273 | } |
| 1274 | |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1275 | if (owner & MUTEX_FLAG_HANDOFF) |
| 1276 | __mutex_handoff(lock, next); |
| 1277 | |
Peter Zijlstra | b9c16a0 | 2017-01-17 16:06:09 +0100 | [diff] [blame] | 1278 | spin_unlock(&lock->wait_lock); |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 1279 | |
Davidlohr Bueso | 1329ce6 | 2016-01-24 18:23:43 -0800 | [diff] [blame] | 1280 | wake_up_q(&wake_q); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1281 | } |
| 1282 | |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 1283 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 1284 | /* |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1285 | * Here come the less common (and hence less performance-critical) APIs: |
| 1286 | * mutex_lock_interruptible() and mutex_trylock(). |
| 1287 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1288 | static noinline int __sched |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 1289 | __mutex_lock_killable_slowpath(struct mutex *lock); |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 1290 | |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1291 | static noinline int __sched |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 1292 | __mutex_lock_interruptible_slowpath(struct mutex *lock); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1293 | |
Randy Dunlap | ef5dc12 | 2010-09-02 15:48:16 -0700 | [diff] [blame] | 1294 | /** |
Matthew Wilcox | 45dbac0 | 2018-03-15 04:58:12 -0700 | [diff] [blame] | 1295 | * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals. |
| 1296 | * @lock: The mutex to be acquired. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1297 | * |
Matthew Wilcox | 45dbac0 | 2018-03-15 04:58:12 -0700 | [diff] [blame] | 1298 | * Lock the mutex like mutex_lock(). If a signal is delivered while the |
| 1299 | * process is sleeping, this function will return without acquiring the |
| 1300 | * mutex. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1301 | * |
Matthew Wilcox | 45dbac0 | 2018-03-15 04:58:12 -0700 | [diff] [blame] | 1302 | * Context: Process context. |
| 1303 | * Return: 0 if the lock was successfully acquired or %-EINTR if a |
| 1304 | * signal arrived. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1305 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1306 | int __sched mutex_lock_interruptible(struct mutex *lock) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1307 | { |
Ingo Molnar | c544bdb | 2006-01-10 22:10:36 +0100 | [diff] [blame] | 1308 | might_sleep(); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1309 | |
| 1310 | if (__mutex_trylock_fast(lock)) |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 1311 | return 0; |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1312 | |
| 1313 | return __mutex_lock_interruptible_slowpath(lock); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1314 | } |
| 1315 | |
| 1316 | EXPORT_SYMBOL(mutex_lock_interruptible); |
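
/*
 * Usage sketch: in ioctl/read/write paths the -EINTR from a pending signal
 * is conventionally turned into -ERESTARTSYS so the syscall is restarted
 * transparently.  "struct foo_dev" and its lock are made up for illustration.
 */
static long foo_ioctl_locked(struct foo_dev *fd)
{
	if (mutex_lock_interruptible(&fd->lock))
		return -ERESTARTSYS;

	/* ... touch device state ... */

	mutex_unlock(&fd->lock);
	return 0;
}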
| 1317 | |
Matthew Wilcox | 45dbac0 | 2018-03-15 04:58:12 -0700 | [diff] [blame] | 1318 | /** |
| 1319 | * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals. |
| 1320 | * @lock: The mutex to be acquired. |
| 1321 | * |
| 1322 | * Lock the mutex like mutex_lock(). If a signal which will be fatal to |
| 1323 | * the current process is delivered while the process is sleeping, this |
| 1324 | * function will return without acquiring the mutex. |
| 1325 | * |
| 1326 | * Context: Process context. |
| 1327 | * Return: 0 if the lock was successfully acquired or %-EINTR if a |
| 1328 | * fatal signal arrived. |
| 1329 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1330 | int __sched mutex_lock_killable(struct mutex *lock) |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 1331 | { |
| 1332 | might_sleep(); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1333 | |
| 1334 | if (__mutex_trylock_fast(lock)) |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 1335 | return 0; |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1336 | |
| 1337 | return __mutex_lock_killable_slowpath(lock); |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 1338 | } |
| 1339 | EXPORT_SYMBOL(mutex_lock_killable); |
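
/*
 * Usage sketch: mutex_lock_killable() suits paths where ordinary signals
 * must not abort the wait, but a SIGKILLed task should not be left stuck on
 * the mutex.  "struct foo_dev" is a made-up structure for illustration.
 */
static int foo_flush(struct foo_dev *fd)
{
	int ret;

	ret = mutex_lock_killable(&fd->lock);
	if (ret)
		return ret;	/* only a fatal signal gets us here */

	/* ... flush pending work ... */

	mutex_unlock(&fd->lock);
	return 0;
}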
| 1340 | |
Matthew Wilcox | 45dbac0 | 2018-03-15 04:58:12 -0700 | [diff] [blame] | 1341 | /** |
| 1342 | * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O |
| 1343 | * @lock: The mutex to be acquired. |
| 1344 | * |
| 1345 | * Lock the mutex like mutex_lock(). While the task is waiting for this |
| 1346 | * mutex, it will be accounted as being in the IO wait state by the |
| 1347 | * scheduler. |
| 1348 | * |
| 1349 | * Context: Process context. |
| 1350 | */ |
Tejun Heo | 1460cb6 | 2016-10-28 12:58:11 -0400 | [diff] [blame] | 1351 | void __sched mutex_lock_io(struct mutex *lock) |
| 1352 | { |
| 1353 | int token; |
| 1354 | |
| 1355 | token = io_schedule_prepare(); |
| 1356 | mutex_lock(lock); |
| 1357 | io_schedule_finish(token); |
| 1358 | } |
| 1359 | EXPORT_SYMBOL_GPL(mutex_lock_io); |
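
/*
 * Usage sketch: a path that sleeps on a mutex which is effectively held
 * across storage I/O can use mutex_lock_io() so the wait is accounted as
 * iowait by the scheduler.  "struct foo_dev" and "io_lock" are made up.
 */
static void foo_write_block(struct foo_dev *fd)
{
	mutex_lock_io(&fd->io_lock);	/* time spent waiting counts as iowait */

	/* ... submit the I/O and wait for completion ... */

	mutex_unlock(&fd->io_lock);
}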
| 1360 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1361 | static noinline void __sched |
| 1362 | __mutex_lock_slowpath(struct mutex *lock) |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 1363 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1364 | __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_); |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 1365 | } |
| 1366 | |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1367 | static noinline int __sched |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 1368 | __mutex_lock_killable_slowpath(struct mutex *lock) |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 1369 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1370 | return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_); |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 1371 | } |
| 1372 | |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1373 | static noinline int __sched |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 1374 | __mutex_lock_interruptible_slowpath(struct mutex *lock) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1375 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1376 | return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1377 | } |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1378 | |
| 1379 | static noinline int __sched |
| 1380 | __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
| 1381 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1382 | return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL, |
| 1383 | _RET_IP_, ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1384 | } |
| 1385 | |
| 1386 | static noinline int __sched |
| 1387 | __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, |
| 1388 | struct ww_acquire_ctx *ctx) |
| 1389 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1390 | return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL, |
| 1391 | _RET_IP_, ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1392 | } |
| 1393 | |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 1394 | #endif |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1395 | |
Randy Dunlap | ef5dc12 | 2010-09-02 15:48:16 -0700 | [diff] [blame] | 1396 | /** |
| 1397 | * mutex_trylock - try to acquire the mutex, without waiting |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1398 | * @lock: the mutex to be acquired |
| 1399 | * |
| 1400 | * Try to acquire the mutex atomically. Returns 1 if the mutex |
| 1401 | * has been acquired successfully, and 0 on contention. |
| 1402 | * |
| 1403 | * NOTE: this function follows the spin_trylock() convention, so |
Randy Dunlap | ef5dc12 | 2010-09-02 15:48:16 -0700 | [diff] [blame] | 1404 | * it is negated from the down_trylock() return values! Be careful |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1405 | * about this when converting semaphore users to mutexes. |
| 1406 | * |
| 1407 | * This function must not be used in interrupt context. The |
| 1408 | * mutex must be released by the same task that acquired it. |
| 1409 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1410 | int __sched mutex_trylock(struct mutex *lock) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1411 | { |
Sebastian Andrzej Siewior | 6c11c6e | 2019-07-03 11:21:26 +0200 | [diff] [blame] | 1412 | bool locked; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 1413 | |
Sebastian Andrzej Siewior | 6c11c6e | 2019-07-03 11:21:26 +0200 | [diff] [blame] | 1414 | #ifdef CONFIG_DEBUG_MUTEXES |
| 1415 | DEBUG_LOCKS_WARN_ON(lock->magic != lock); |
| 1416 | #endif |
| 1417 | |
| 1418 | locked = __mutex_trylock(lock); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1419 | if (locked) |
| 1420 | mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 1421 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1422 | return locked; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1423 | } |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1424 | EXPORT_SYMBOL(mutex_trylock); |
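
/*
 * Usage sketch: opportunistic, non-blocking work.  On contention the caller
 * simply leaves the work to whoever currently holds the lock.  "struct
 * foo_dev" is made up for illustration; note this runs in process context,
 * as mutex_trylock() must not be used from interrupt context.
 */
static void foo_try_reclaim(struct foo_dev *fd)
{
	if (!mutex_trylock(&fd->lock))
		return;		/* someone else holds it and will reclaim */

	/* ... reclaim cached buffers ... */

	mutex_unlock(&fd->lock);
}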
Andrew Morton | a511e3f | 2009-04-29 15:59:58 -0700 | [diff] [blame] | 1425 | |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1426 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
| 1427 | int __sched |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1428 | ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1429 | { |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1430 | might_sleep(); |
| 1431 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1432 | if (__mutex_trylock_fast(&lock->base)) { |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 1433 | if (ctx) |
| 1434 | ww_mutex_set_context_fastpath(lock, ctx); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1435 | return 0; |
| 1436 | } |
| 1437 | |
| 1438 | return __ww_mutex_lock_slowpath(lock, ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1439 | } |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1440 | EXPORT_SYMBOL(ww_mutex_lock); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1441 | |
| 1442 | int __sched |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1443 | ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1444 | { |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1445 | might_sleep(); |
| 1446 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1447 | if (__mutex_trylock_fast(&lock->base)) { |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 1448 | if (ctx) |
| 1449 | ww_mutex_set_context_fastpath(lock, ctx); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1450 | return 0; |
| 1451 | } |
| 1452 | |
| 1453 | return __ww_mutex_lock_interruptible_slowpath(lock, ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1454 | } |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1455 | EXPORT_SYMBOL(ww_mutex_lock_interruptible); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1456 | |
| 1457 | #endif |
| 1458 | |
Andrew Morton | a511e3f | 2009-04-29 15:59:58 -0700 | [diff] [blame] | 1459 | /** |
| 1460 | * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 |
| 1461 | * @cnt: the atomic which we are to dec |
| 1462 | * @lock: the mutex to return holding if we dec to 0 |
| 1463 | * |
| 1464 | * Return: true and hold lock if we dec to 0, false otherwise |
| 1465 | */ |
| 1466 | int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) |
| 1467 | { |
| 1468 | /* dec if we can't possibly hit 0 */ |
| 1469 | if (atomic_add_unless(cnt, -1, 1)) |
| 1470 | return 0; |
| 1471 | /* we might hit 0, so take the lock */ |
| 1472 | mutex_lock(lock); |
| 1473 | if (!atomic_dec_and_test(cnt)) { |
| 1474 | /* when we actually did the dec, we didn't hit 0 */ |
| 1475 | mutex_unlock(lock); |
| 1476 | return 0; |
| 1477 | } |
| 1478 | /* we hit 0, and we hold the lock */ |
| 1479 | return 1; |
| 1480 | } |
| 1481 | EXPORT_SYMBOL(atomic_dec_and_mutex_lock); |
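
/*
 * Usage sketch: dropping the last reference to an object that lives on a
 * mutex-protected list, without taking the mutex in the common (non-final)
 * case.  "struct foo", "foo_list_lock" and foo_put() are made-up names.
 */
struct foo {
	atomic_t refcount;
	struct list_head node;
	/* ... payload ... */
};

static DEFINE_MUTEX(foo_list_lock);

static void foo_put(struct foo *f)
{
	if (!atomic_dec_and_mutex_lock(&f->refcount, &foo_list_lock))
		return;		/* not the last reference */

	/* last reference dropped: foo_list_lock is now held */
	list_del(&f->node);
	mutex_unlock(&foo_list_lock);
	kfree(f);
}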