Thomas Gleixner | 457c899 | 2019-05-19 13:08:55 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 2 | /* |
Peter Zijlstra | 67a6de4 | 2013-11-08 08:26:39 +0100 | [diff] [blame] | 3 | * kernel/locking/mutex.c |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 4 | * |
| 5 | * Mutexes: blocking mutual exclusion locks |
| 6 | * |
| 7 | * Started by Ingo Molnar: |
| 8 | * |
| 9 | * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
| 10 | * |
| 11 | * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and |
| 12 | * David Howells for suggestions and improvements. |
| 13 | * |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 14 | * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline |
| 15 | * from the -rt tree, where it was originally implemented for rtmutexes |
| 16 | * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale |
| 17 | * and Sven Dietrich.) |
| 18 | * |
Mauro Carvalho Chehab | 387b146 | 2019-04-10 08:32:41 -0300 | [diff] [blame] | 19 | * Also see Documentation/locking/mutex-design.rst. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 20 | */ |
| 21 | #include <linux/mutex.h> |
Maarten Lankhorst | 1b375dc | 2013-07-05 09:29:32 +0200 | [diff] [blame] | 22 | #include <linux/ww_mutex.h> |
Ingo Molnar | 174cd4b | 2017-02-02 19:15:33 +0100 | [diff] [blame] | 23 | #include <linux/sched/signal.h> |
Clark Williams | 8bd75c7 | 2013-02-07 09:47:07 -0600 | [diff] [blame] | 24 | #include <linux/sched/rt.h> |
Ingo Molnar | 84f001e | 2017-02-01 16:36:40 +0100 | [diff] [blame] | 25 | #include <linux/sched/wake_q.h> |
Ingo Molnar | b17b015 | 2017-02-08 18:51:35 +0100 | [diff] [blame] | 26 | #include <linux/sched/debug.h> |
Paul Gortmaker | 9984de1 | 2011-05-23 14:51:41 -0400 | [diff] [blame] | 27 | #include <linux/export.h> |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 28 | #include <linux/spinlock.h> |
| 29 | #include <linux/interrupt.h> |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 30 | #include <linux/debug_locks.h> |
Davidlohr Bueso | 7a215f8 | 2015-01-30 01:14:25 -0800 | [diff] [blame] | 31 | #include <linux/osq_lock.h> |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 32 | |
Thomas Gleixner | bb630f9 | 2021-08-15 23:29:01 +0200 | [diff] [blame] | 33 | #ifndef CONFIG_PREEMPT_RT |
Thomas Gleixner | a321fb9 | 2021-08-17 16:17:38 +0200 | [diff] [blame] | 34 | #include "mutex.h" |
| 35 | |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 36 | #ifdef CONFIG_DEBUG_MUTEXES |
Peter Zijlstra | e6b4457 | 2021-06-30 17:35:20 +0200 | [diff] [blame] | 37 | # define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 38 | #else |
Peter Zijlstra | e6b4457 | 2021-06-30 17:35:20 +0200 | [diff] [blame] | 39 | # define MUTEX_WARN_ON(cond) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 40 | #endif |
| 41 | |
Ingo Molnar | ef5d470 | 2006-07-03 00:24:55 -0700 | [diff] [blame] | 42 | void |
| 43 | __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 44 | { |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 45 | atomic_long_set(&lock->owner, 0); |
Thomas Gleixner | ebf4c55 | 2021-08-15 23:28:36 +0200 | [diff] [blame] | 46 | raw_spin_lock_init(&lock->wait_lock); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 47 | INIT_LIST_HEAD(&lock->wait_list); |
Waiman Long | 2bd2c92 | 2013-04-17 15:23:13 -0400 | [diff] [blame] | 48 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
Jason Low | 4d9d951 | 2014-07-14 10:27:50 -0700 | [diff] [blame] | 49 | osq_lock_init(&lock->osq); |
Waiman Long | 2bd2c92 | 2013-04-17 15:23:13 -0400 | [diff] [blame] | 50 | #endif |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 51 | |
Ingo Molnar | ef5d470 | 2006-07-03 00:24:55 -0700 | [diff] [blame] | 52 | debug_mutex_init(lock, name, key); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 53 | } |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 54 | EXPORT_SYMBOL(__mutex_init); |
| 55 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 56 | /* |
| 57 | * @owner: contains a 'struct task_struct *' pointing to the current lock owner; |
| 58 | * NULL means not owned. Since task_struct pointers are aligned to |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 59 | * at least L1_CACHE_BYTES, we have low bits to store extra state. |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 60 | * |
| 61 | * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup. |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 62 | * Bit1 indicates unlock needs to hand the lock to the top-waiter. |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 63 | * Bit2 indicates handoff has been done and we're waiting for pickup. |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 64 | */ |
| 65 | #define MUTEX_FLAG_WAITERS 0x01 |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 66 | #define MUTEX_FLAG_HANDOFF 0x02 |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 67 | #define MUTEX_FLAG_PICKUP 0x04 |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 68 | |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 69 | #define MUTEX_FLAGS 0x07 |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 70 | |
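/*
 * Illustrative sketch only (not part of the original source): with a
 * waiter queued and a handoff requested, the owner word packs the task
 * pointer and the flag bits like so:
 *
 *	owner = (unsigned long)task | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF;
 *	__owner_task(owner)  == task	(flag bits masked off)
 *	__owner_flags(owner) == 0x03	(WAITERS | HANDOFF)
 *
 * This packing only works because task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, leaving the low bits free for MUTEX_FLAGS.
 */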
Mukesh Ojha | 5f35d5a | 2019-07-31 20:35:03 +0530 | [diff] [blame] | 71 | /* |
| 72 | * Internal helper function; C doesn't allow us to hide it :/ |
| 73 | * |
| 74 | * DO NOT USE (outside of mutex code). |
| 75 | */ |
| 76 | static inline struct task_struct *__mutex_owner(struct mutex *lock) |
| 77 | { |
Mukesh Ojha | a037d26 | 2019-07-31 20:35:04 +0530 | [diff] [blame] | 78 | return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS); |
Mukesh Ojha | 5f35d5a | 2019-07-31 20:35:03 +0530 | [diff] [blame] | 79 | } |
| 80 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 81 | static inline struct task_struct *__owner_task(unsigned long owner) |
| 82 | { |
| 83 | return (struct task_struct *)(owner & ~MUTEX_FLAGS); |
| 84 | } |
| 85 | |
Mukesh Ojha | 5f35d5a | 2019-07-31 20:35:03 +0530 | [diff] [blame] | 86 | bool mutex_is_locked(struct mutex *lock) |
| 87 | { |
| 88 | return __mutex_owner(lock) != NULL; |
| 89 | } |
| 90 | EXPORT_SYMBOL(mutex_is_locked); |
| 91 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 92 | static inline unsigned long __owner_flags(unsigned long owner) |
| 93 | { |
| 94 | return owner & MUTEX_FLAGS; |
| 95 | } |
| 96 | |
Maarten Lankhorst | 12235da | 2021-09-09 11:32:18 +0200 | [diff] [blame] | 97 | /* |
| 98 | * Returns: __mutex_owner(lock) on failure or NULL on success. |
| 99 | */ |
Peter Zijlstra | ad90880 | 2021-06-30 17:35:19 +0200 | [diff] [blame] | 100 | static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff) |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 101 | { |
| 102 | unsigned long owner, curr = (unsigned long)current; |
| 103 | |
| 104 | owner = atomic_long_read(&lock->owner); |
| 105 | for (;;) { /* must loop, can race against a flag */ |
Peter Zijlstra | ab4e4d9 | 2021-06-30 17:35:17 +0200 | [diff] [blame] | 106 | unsigned long flags = __owner_flags(owner); |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 107 | unsigned long task = owner & ~MUTEX_FLAGS; |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 108 | |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 109 | if (task) { |
Peter Zijlstra | ad90880 | 2021-06-30 17:35:19 +0200 | [diff] [blame] | 110 | if (flags & MUTEX_FLAG_PICKUP) { |
| 111 | if (task != curr) |
| 112 | break; |
| 113 | flags &= ~MUTEX_FLAG_PICKUP; |
| 114 | } else if (handoff) { |
| 115 | if (flags & MUTEX_FLAG_HANDOFF) |
| 116 | break; |
| 117 | flags |= MUTEX_FLAG_HANDOFF; |
| 118 | } else { |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 119 | break; |
Peter Zijlstra | ad90880 | 2021-06-30 17:35:19 +0200 | [diff] [blame] | 120 | } |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 121 | } else { |
Peter Zijlstra | e6b4457 | 2021-06-30 17:35:20 +0200 | [diff] [blame] | 122 | MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP)); |
Peter Zijlstra | ad90880 | 2021-06-30 17:35:19 +0200 | [diff] [blame] | 123 | task = curr; |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 124 | } |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 125 | |
Peter Zijlstra | ad90880 | 2021-06-30 17:35:19 +0200 | [diff] [blame] | 126 | if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) { |
| 127 | if (task == curr) |
| 128 | return NULL; |
| 129 | break; |
| 130 | } |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 131 | } |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 132 | |
| 133 | return __owner_task(owner); |
| 134 | } |
| 135 | |
| 136 | /* |
Peter Zijlstra | ad90880 | 2021-06-30 17:35:19 +0200 | [diff] [blame] | 137 | * Trylock; or, when @handoff is set and the lock is owned, request a handoff by setting MUTEX_FLAG_HANDOFF. |
| 138 | */ |
| 139 | static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff) |
| 140 | { |
| 141 | return !__mutex_trylock_common(lock, handoff); |
| 142 | } |
| 143 | |
| 144 | /* |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 145 | * Actual trylock that will work on any unlocked state. |
| 146 | */ |
| 147 | static inline bool __mutex_trylock(struct mutex *lock) |
| 148 | { |
Peter Zijlstra | ad90880 | 2021-06-30 17:35:19 +0200 | [diff] [blame] | 149 | return !__mutex_trylock_common(lock, false); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 150 | } |
| 151 | |
| 152 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
| 153 | /* |
| 154 | * Lockdep annotations are contained to the slow paths for simplicity. |
| 155 | * There is nothing that would stop spreading the lockdep annotations outwards |
| 156 | * except more code. |
| 157 | */ |
| 158 | |
| 159 | /* |
| 160 | * Optimistic trylock that only works in the uncontended case. Make sure to |
| 161 | * follow with a __mutex_trylock() before failing. |
| 162 | */ |
| 163 | static __always_inline bool __mutex_trylock_fast(struct mutex *lock) |
| 164 | { |
| 165 | unsigned long curr = (unsigned long)current; |
Peter Zijlstra | c427f69 | 2018-04-05 11:05:35 +0200 | [diff] [blame] | 166 | unsigned long zero = 0UL; |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 167 | |
Peter Zijlstra | c427f69 | 2018-04-05 11:05:35 +0200 | [diff] [blame] | 168 | if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 169 | return true; |
| 170 | |
| 171 | return false; |
| 172 | } |
| 173 | |
| 174 | static __always_inline bool __mutex_unlock_fast(struct mutex *lock) |
| 175 | { |
| 176 | unsigned long curr = (unsigned long)current; |
| 177 | |
Peter Zijlstra | ab4e4d9 | 2021-06-30 17:35:17 +0200 | [diff] [blame] | 178 | return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 179 | } |
| 180 | #endif |
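/*
 * Illustrative sketch of the fast-path transitions above (uncontended
 * case, i.e. no flag bits set in ->owner):
 *
 *	mutex_lock():	owner 0UL -> (unsigned long)current	(ACQUIRE)
 *	mutex_unlock():	owner (unsigned long)current -> 0UL	(RELEASE)
 *
 * Any flag bit in ->owner makes the cmpxchg fail, which pushes the
 * operation into the slow path where waiters and handoff are handled.
 */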
| 181 | |
| 182 | static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag) |
| 183 | { |
| 184 | atomic_long_or(flag, &lock->owner); |
| 185 | } |
| 186 | |
| 187 | static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag) |
| 188 | { |
| 189 | atomic_long_andnot(flag, &lock->owner); |
| 190 | } |
| 191 | |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 192 | static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter) |
| 193 | { |
| 194 | return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter; |
| 195 | } |
| 196 | |
| 197 | /* |
Thomas Hellstrom | 08295b3 | 2018-06-15 10:17:38 +0200 | [diff] [blame] | 198 | * Add @waiter to a given location in the lock wait_list and set the |
| 199 | * FLAG_WAITERS flag if it's the first waiter. |
| 200 | */ |
Zqiang | 3a010c4 | 2021-05-17 11:40:05 +0800 | [diff] [blame] | 201 | static void |
Thomas Hellstrom | 08295b3 | 2018-06-15 10:17:38 +0200 | [diff] [blame] | 202 | __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, |
| 203 | struct list_head *list) |
| 204 | { |
| 205 | debug_mutex_add_waiter(lock, waiter, current); |
| 206 | |
| 207 | list_add_tail(&waiter->list, list); |
| 208 | if (__mutex_waiter_is_first(lock, waiter)) |
| 209 | __mutex_set_flag(lock, MUTEX_FLAG_WAITERS); |
| 210 | } |
| 211 | |
Zqiang | 3a010c4 | 2021-05-17 11:40:05 +0800 | [diff] [blame] | 212 | static void |
| 213 | __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter) |
| 214 | { |
| 215 | list_del(&waiter->list); |
| 216 | if (likely(list_empty(&lock->wait_list))) |
| 217 | __mutex_clear_flag(lock, MUTEX_FLAGS); |
| 218 | |
| 219 | debug_mutex_remove_waiter(lock, waiter, current); |
| 220 | } |
| 221 | |
Thomas Hellstrom | 08295b3 | 2018-06-15 10:17:38 +0200 | [diff] [blame] | 222 | /* |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 223 | * Give up ownership to a specific task; when @task = NULL, this is equivalent |
Ingo Molnar | e2db759 | 2021-03-22 02:35:05 +0100 | [diff] [blame] | 224 | * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 225 | * WAITERS. Provides RELEASE semantics like a regular unlock; the |
| 226 | * __mutex_trylock() provides matching ACQUIRE semantics for the handoff. |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 227 | */ |
| 228 | static void __mutex_handoff(struct mutex *lock, struct task_struct *task) |
| 229 | { |
| 230 | unsigned long owner = atomic_long_read(&lock->owner); |
| 231 | |
| 232 | for (;;) { |
Peter Zijlstra | ab4e4d9 | 2021-06-30 17:35:17 +0200 | [diff] [blame] | 233 | unsigned long new; |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 234 | |
Peter Zijlstra | e6b4457 | 2021-06-30 17:35:20 +0200 | [diff] [blame] | 235 | MUTEX_WARN_ON(__owner_task(owner) != current); |
| 236 | MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP); |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 237 | |
| 238 | new = (owner & MUTEX_FLAG_WAITERS); |
| 239 | new |= (unsigned long)task; |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 240 | if (task) |
| 241 | new |= MUTEX_FLAG_PICKUP; |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 242 | |
Peter Zijlstra | ab4e4d9 | 2021-06-30 17:35:17 +0200 | [diff] [blame] | 243 | if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new)) |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 244 | break; |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 245 | } |
| 246 | } |
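/*
 * Illustrative handoff sequence (sketch only): with current owning the
 * lock and the top waiter having requested a handoff, unlock ends up
 * doing roughly:
 *
 *	owner == current | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 *	  __mutex_handoff(lock, top_waiter);
 *	owner == top_waiter | MUTEX_FLAG_WAITERS | MUTEX_FLAG_PICKUP
 *
 * The woken waiter then sees MUTEX_FLAG_PICKUP for itself in
 * __mutex_trylock_common() and takes the lock without competing for it.
 */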
| 247 | |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 248 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 249 | /* |
| 250 | * We split the mutex lock/unlock logic into separate fastpath and |
| 251 | * slowpath functions, to reduce the register pressure on the fastpath. |
| 252 | * We also put the fastpath first in the kernel image, to make sure the |
| 253 | * branch is predicted by the CPU as default-untaken. |
| 254 | */ |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 255 | static void __sched __mutex_lock_slowpath(struct mutex *lock); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 256 | |
Randy Dunlap | ef5dc12 | 2010-09-02 15:48:16 -0700 | [diff] [blame] | 257 | /** |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 258 | * mutex_lock - acquire the mutex |
| 259 | * @lock: the mutex to be acquired |
| 260 | * |
| 261 | * Lock the mutex exclusively for this task. If the mutex is not |
| 262 | * available right now, it will sleep until it can get it. |
| 263 | * |
| 264 | * The mutex must later on be released by the same task that |
| 265 | * acquired it. Recursive locking is not allowed. The task |
| 266 | * may not exit without first unlocking the mutex. Also, kernel |
Sharon Dvir | 139b6fd | 2015-02-01 23:47:32 +0200 | [diff] [blame] | 267 | * memory where the mutex resides must not be freed with |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 268 | * the mutex still locked. The mutex must first be initialized |
| 269 | * (or statically defined) before it can be locked. memset()-ing |
| 270 | * the mutex to 0 is not allowed. |
| 271 | * |
Mauro Carvalho Chehab | 7b4ff1a | 2017-05-11 10:17:45 -0300 | [diff] [blame] | 272 | * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging |
| 273 | * checks that will enforce the restrictions and will also do |
| 274 | * deadlock debugging) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 275 | * |
| 276 | * This function is similar to (but not equivalent to) down(). |
| 277 | */ |
H. Peter Anvin | b09d250 | 2009-04-01 17:21:56 -0700 | [diff] [blame] | 278 | void __sched mutex_lock(struct mutex *lock) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 279 | { |
Ingo Molnar | c544bdb | 2006-01-10 22:10:36 +0100 | [diff] [blame] | 280 | might_sleep(); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 281 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 282 | if (!__mutex_trylock_fast(lock)) |
| 283 | __mutex_lock_slowpath(lock); |
| 284 | } |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 285 | EXPORT_SYMBOL(mutex_lock); |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 286 | #endif |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 287 | |
Peter Zijlstra (Intel) | 2674bd1 | 2021-08-17 16:31:54 +0200 | [diff] [blame] | 288 | #include "ww_mutex.h" |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 289 | |
Waiman Long | 41fcb9f | 2013-04-17 15:23:11 -0400 | [diff] [blame] | 290 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
Nicolai Hähnle | c516df9 | 2016-12-21 19:46:38 +0100 | [diff] [blame] | 291 | |
Peter Zijlstra | ad90880 | 2021-06-30 17:35:19 +0200 | [diff] [blame] | 292 | /* |
| 293 | * Trylock variant that returns the owning task on failure. |
| 294 | */ |
| 295 | static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) |
| 296 | { |
| 297 | return __mutex_trylock_common(lock, false); |
| 298 | } |
| 299 | |
Nicolai Hähnle | c516df9 | 2016-12-21 19:46:38 +0100 | [diff] [blame] | 300 | static inline |
| 301 | bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, |
| 302 | struct mutex_waiter *waiter) |
| 303 | { |
| 304 | struct ww_mutex *ww; |
| 305 | |
| 306 | ww = container_of(lock, struct ww_mutex, base); |
| 307 | |
| 308 | /* |
| 309 | * If ww->ctx is set the contents are undefined; only |
| 310 | * by acquiring wait_lock is there a guarantee that |
| 311 | * they are valid when read. |
| 312 | * |
| 313 | * As such, when deadlock detection needs to be |
| 314 | * performed the optimistic spinning cannot be done. |
| 315 | * |
| 316 | * Check this in every inner iteration because we may |
| 317 | * be racing against another thread's ww_mutex_lock. |
| 318 | */ |
| 319 | if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) |
| 320 | return false; |
| 321 | |
| 322 | /* |
| 323 | * If we aren't on the wait list yet, cancel the spin |
| 324 | * if there are waiters. We want to avoid stealing the |
| 325 | * lock from a waiter with an earlier stamp, since the |
| 326 | * other thread may already own a lock that we also |
| 327 | * need. |
| 328 | */ |
| 329 | if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) |
| 330 | return false; |
| 331 | |
| 332 | /* |
| 333 | * Similarly, stop spinning if we are no longer the |
| 334 | * first waiter. |
| 335 | */ |
| 336 | if (waiter && !__mutex_waiter_is_first(lock, waiter)) |
| 337 | return false; |
| 338 | |
| 339 | return true; |
| 340 | } |
| 341 | |
Waiman Long | 41fcb9f | 2013-04-17 15:23:11 -0400 | [diff] [blame] | 342 | /* |
Nicolai Hähnle | 25f13b4 | 2016-12-21 19:46:37 +0100 | [diff] [blame] | 343 | * Look out! "owner" is an entirely speculative pointer access and not |
| 344 | * reliable. |
| 345 | * |
| 346 | * "noinline" so that this function shows up on perf profiles. |
Waiman Long | 41fcb9f | 2013-04-17 15:23:11 -0400 | [diff] [blame] | 347 | */ |
| 348 | static noinline |
Nicolai Hähnle | 25f13b4 | 2016-12-21 19:46:37 +0100 | [diff] [blame] | 349 | bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, |
Nicolai Hähnle | c516df9 | 2016-12-21 19:46:38 +0100 | [diff] [blame] | 350 | struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) |
Waiman Long | 41fcb9f | 2013-04-17 15:23:11 -0400 | [diff] [blame] | 351 | { |
Jason Low | 01ac33c | 2015-04-08 12:39:19 -0700 | [diff] [blame] | 352 | bool ret = true; |
Jason Low | be1f7bf | 2015-02-02 13:59:27 -0800 | [diff] [blame] | 353 | |
Yanfei Xu | 6c2787f | 2021-10-13 21:41:52 +0800 | [diff] [blame] | 354 | lockdep_assert_preemption_disabled(); |
| 355 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 356 | while (__mutex_owner(lock) == owner) { |
Jason Low | be1f7bf | 2015-02-02 13:59:27 -0800 | [diff] [blame] | 357 | /* |
| 358 | * Ensure we emit the owner->on_cpu dereference _after_ |
Yanfei Xu | 6c2787f | 2021-10-13 21:41:52 +0800 | [diff] [blame] | 359 | * checking that lock->owner still matches owner. We have already |
| 360 | * disabled preemption, which is equivalent to an RCU read-side |
| 361 | * critical section in the optimistic spinning code, so the |
| 362 | * task_struct structure won't go away during the spinning |
| 363 | * period. |
Jason Low | be1f7bf | 2015-02-02 13:59:27 -0800 | [diff] [blame] | 364 | */ |
| 365 | barrier(); |
| 366 | |
Pan Xinhui | 05ffc95 | 2016-11-02 05:08:30 -0400 | [diff] [blame] | 367 | /* |
| 368 | * Use vcpu_is_preempted() to detect lock holder preemption issues. |
| 369 | */ |
Kefeng Wang | c0bed69 | 2021-12-03 15:59:34 +0800 | [diff] [blame] | 370 | if (!owner_on_cpu(owner) || need_resched()) { |
Jason Low | be1f7bf | 2015-02-02 13:59:27 -0800 | [diff] [blame] | 371 | ret = false; |
| 372 | break; |
| 373 | } |
Waiman Long | 41fcb9f | 2013-04-17 15:23:11 -0400 | [diff] [blame] | 374 | |
Nicolai Hähnle | c516df9 | 2016-12-21 19:46:38 +0100 | [diff] [blame] | 375 | if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) { |
| 376 | ret = false; |
| 377 | break; |
Nicolai Hähnle | 25f13b4 | 2016-12-21 19:46:37 +0100 | [diff] [blame] | 378 | } |
| 379 | |
Christian Borntraeger | f2f09a4 | 2016-10-25 11:03:14 +0200 | [diff] [blame] | 380 | cpu_relax(); |
Waiman Long | 41fcb9f | 2013-04-17 15:23:11 -0400 | [diff] [blame] | 381 | } |
Waiman Long | 41fcb9f | 2013-04-17 15:23:11 -0400 | [diff] [blame] | 382 | |
Jason Low | be1f7bf | 2015-02-02 13:59:27 -0800 | [diff] [blame] | 383 | return ret; |
Waiman Long | 41fcb9f | 2013-04-17 15:23:11 -0400 | [diff] [blame] | 384 | } |
Waiman Long | 2bd2c92 | 2013-04-17 15:23:13 -0400 | [diff] [blame] | 385 | |
| 386 | /* |
| 387 | * Initial check for entering the mutex spinning loop |
| 388 | */ |
| 389 | static inline int mutex_can_spin_on_owner(struct mutex *lock) |
| 390 | { |
Peter Zijlstra | 1e40c2e | 2013-07-19 20:31:01 +0200 | [diff] [blame] | 391 | struct task_struct *owner; |
Waiman Long | 2bd2c92 | 2013-04-17 15:23:13 -0400 | [diff] [blame] | 392 | int retval = 1; |
| 393 | |
Yanfei Xu | 6c2787f | 2021-10-13 21:41:52 +0800 | [diff] [blame] | 394 | lockdep_assert_preemption_disabled(); |
| 395 | |
Jason Low | 46af29e | 2014-01-28 11:13:12 -0800 | [diff] [blame] | 396 | if (need_resched()) |
| 397 | return 0; |
| 398 | |
Yanfei Xu | 6c2787f | 2021-10-13 21:41:52 +0800 | [diff] [blame] | 399 | /* |
| 400 | * We already disabled preemption, which is equivalent to an RCU read-side |
| 401 | * critical section in the optimistic spinning code. Thus the task_struct |
| 402 | * structure won't go away during the spinning period. |
| 403 | */ |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 404 | owner = __mutex_owner(lock); |
Peter Zijlstra | 1e40c2e | 2013-07-19 20:31:01 +0200 | [diff] [blame] | 405 | if (owner) |
Kefeng Wang | c0bed69 | 2021-12-03 15:59:34 +0800 | [diff] [blame] | 406 | retval = owner_on_cpu(owner); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 407 | |
Waiman Long | 2bd2c92 | 2013-04-17 15:23:13 -0400 | [diff] [blame] | 408 | /* |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 409 | * If lock->owner is not set, the mutex has been released. Return true |
| 410 | * such that we'll trylock in the spin path, which is a faster option |
| 411 | * than the blocking slow path. |
Waiman Long | 2bd2c92 | 2013-04-17 15:23:13 -0400 | [diff] [blame] | 412 | */ |
| 413 | return retval; |
| 414 | } |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 415 | |
| 416 | /* |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 417 | * Optimistic spinning. |
| 418 | * |
| 419 | * We try to spin for acquisition when we find that the lock owner |
| 420 | * is currently running on a (different) CPU and while we don't |
| 421 | * need to reschedule. The rationale is that if the lock owner is |
| 422 | * running, it is likely to release the lock soon. |
| 423 | * |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 424 | * The mutex spinners are queued up using an MCS lock so that only one |
| 425 | * spinner can compete for the mutex. However, if mutex spinning isn't |
| 426 | * going to happen, there is no point in going through the lock/unlock |
| 427 | * overhead. |
| 428 | * |
| 429 | * Returns true when the lock was taken, otherwise false, indicating |
| 430 | * that we need to jump to the slowpath and sleep. |
Waiman Long | b341afb | 2016-08-26 19:35:09 -0400 | [diff] [blame] | 431 | * |
| 432 | * The waiter flag is set to true if the spinner is a waiter in the wait |
| 433 | * queue. The waiter-spinner will spin on the lock directly and concurrently |
| 434 | * with the spinner at the head of the OSQ, if present, until the owner is |
| 435 | * changed to itself. |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 436 | */ |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 437 | static __always_inline bool |
| 438 | mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 439 | struct mutex_waiter *waiter) |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 440 | { |
Waiman Long | b341afb | 2016-08-26 19:35:09 -0400 | [diff] [blame] | 441 | if (!waiter) { |
| 442 | /* |
| 443 | * The purpose of the mutex_can_spin_on_owner() function is |
| 444 | * to eliminate the overhead of osq_lock() and osq_unlock() |
| 445 | * in case spinning isn't possible. As a waiter-spinner |
| 446 | * is not going to take the OSQ lock anyway, there is no need |
| 447 | * to call mutex_can_spin_on_owner(). |
| 448 | */ |
| 449 | if (!mutex_can_spin_on_owner(lock)) |
| 450 | goto fail; |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 451 | |
Waiman Long | b341afb | 2016-08-26 19:35:09 -0400 | [diff] [blame] | 452 | /* |
| 453 | * In order to avoid a stampede of mutex spinners trying to |
| 454 | * acquire the mutex all at once, the spinners need to take an |
| 455 | * MCS (queued) lock before spinning on the owner field. |
| 456 | */ |
| 457 | if (!osq_lock(&lock->osq)) |
| 458 | goto fail; |
| 459 | } |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 460 | |
Waiman Long | b341afb | 2016-08-26 19:35:09 -0400 | [diff] [blame] | 461 | for (;;) { |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 462 | struct task_struct *owner; |
| 463 | |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 464 | /* Try to acquire the mutex... */ |
| 465 | owner = __mutex_trylock_or_owner(lock); |
| 466 | if (!owner) |
| 467 | break; |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 468 | |
| 469 | /* |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 470 | * There's an owner; wait for it to either |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 471 | * release the lock or go to sleep. |
| 472 | */ |
Nicolai Hähnle | c516df9 | 2016-12-21 19:46:38 +0100 | [diff] [blame] | 473 | if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter)) |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 474 | goto fail_unlock; |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 475 | |
| 476 | /* |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 477 | * The cpu_relax() call is a compiler barrier which forces |
| 478 | * everything in this loop to be re-loaded. We don't need |
| 479 | * memory barriers as we'll eventually observe the right |
| 480 | * values at the cost of a few extra spins. |
| 481 | */ |
Christian Borntraeger | f2f09a4 | 2016-10-25 11:03:14 +0200 | [diff] [blame] | 482 | cpu_relax(); |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 483 | } |
| 484 | |
Waiman Long | b341afb | 2016-08-26 19:35:09 -0400 | [diff] [blame] | 485 | if (!waiter) |
| 486 | osq_unlock(&lock->osq); |
| 487 | |
| 488 | return true; |
| 489 | |
| 490 | |
| 491 | fail_unlock: |
| 492 | if (!waiter) |
| 493 | osq_unlock(&lock->osq); |
| 494 | |
| 495 | fail: |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 496 | /* |
| 497 | * If we fell out of the spin path because of need_resched(), |
| 498 | * reschedule now, before we try-lock the mutex. This avoids getting |
| 499 | * scheduled out right after we obtained the mutex. |
| 500 | */ |
Peter Zijlstra | 6f942a1 | 2014-09-24 10:18:46 +0200 | [diff] [blame] | 501 | if (need_resched()) { |
| 502 | /* |
| 503 | * We _should_ have TASK_RUNNING here, but just in case |
| 504 | * we do not, make it so; otherwise we might get stuck. |
| 505 | */ |
| 506 | __set_current_state(TASK_RUNNING); |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 507 | schedule_preempt_disabled(); |
Peter Zijlstra | 6f942a1 | 2014-09-24 10:18:46 +0200 | [diff] [blame] | 508 | } |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 509 | |
| 510 | return false; |
| 511 | } |
| 512 | #else |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 513 | static __always_inline bool |
| 514 | mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 515 | struct mutex_waiter *waiter) |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 516 | { |
| 517 | return false; |
| 518 | } |
Waiman Long | 41fcb9f | 2013-04-17 15:23:11 -0400 | [diff] [blame] | 519 | #endif |
| 520 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 521 | static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 522 | |
Randy Dunlap | ef5dc12 | 2010-09-02 15:48:16 -0700 | [diff] [blame] | 523 | /** |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 524 | * mutex_unlock - release the mutex |
| 525 | * @lock: the mutex to be released |
| 526 | * |
| 527 | * Unlock a mutex that has been locked by this task previously. |
| 528 | * |
| 529 | * This function must not be used in interrupt context. Unlocking |
| 530 | * of a mutex that is not locked is not allowed. |
| 531 | * |
| 532 | * This function is similar to (but not equivalent to) up(). |
| 533 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 534 | void __sched mutex_unlock(struct mutex *lock) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 535 | { |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 536 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
| 537 | if (__mutex_unlock_fast(lock)) |
| 538 | return; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 539 | #endif |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 540 | __mutex_unlock_slowpath(lock, _RET_IP_); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 541 | } |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 542 | EXPORT_SYMBOL(mutex_unlock); |
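/*
 * Minimal usage sketch (hypothetical code, not part of this file): the
 * identifiers example_lock, example_count and example_update() are made
 * up purely for illustration.
 */
#if 0	/* example only */
static DEFINE_MUTEX(example_lock);
static int example_count;

static void example_update(void)
{
	mutex_lock(&example_lock);	/* may sleep until the lock is free */
	example_count++;		/* exclusive access to shared state */
	mutex_unlock(&example_lock);	/* must be released by the same task */
}
#endif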
| 543 | |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 544 | /** |
| 545 | * ww_mutex_unlock - release the w/w mutex |
| 546 | * @lock: the mutex to be released |
| 547 | * |
| 548 | * Unlock a mutex that has been locked by this task previously with any of the |
| 549 | * ww_mutex_lock* functions (with or without an acquire context). It is |
| 550 | * forbidden to release the locks after releasing the acquire context. |
| 551 | * |
| 552 | * This function must not be used in interrupt context. Unlocking |
| 553 | * of an unlocked mutex is not allowed. |
| 554 | */ |
| 555 | void __sched ww_mutex_unlock(struct ww_mutex *lock) |
| 556 | { |
Peter Zijlstra (Intel) | aaa77de | 2021-08-17 16:19:04 +0200 | [diff] [blame] | 557 | __ww_mutex_unlock(lock); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 558 | mutex_unlock(&lock->base); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 559 | } |
| 560 | EXPORT_SYMBOL(ww_mutex_unlock); |
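/*
 * Minimal wait/wound usage sketch (hypothetical code, not part of this
 * file): example_lock_pair() and example_ww_class are made-up names. The
 * caller is assumed to have done ww_acquire_init(ctx, &example_ww_class)
 * and to call ww_acquire_done()/ww_acquire_fini() afterwards.
 */
#if 0	/* example only */
static DEFINE_WW_CLASS(example_ww_class);

static int example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
			     struct ww_acquire_ctx *ctx)
{
	int ret;

	ret = ww_mutex_lock(a, ctx);
	if (ret)
		return ret;

	ret = ww_mutex_lock(b, ctx);
	while (ret == -EDEADLK) {
		/*
		 * We lost the age ordering: drop what we hold, then
		 * sleep-acquire the contended lock and retry the other one.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, ctx);
		swap(a, b);
		ret = ww_mutex_lock(b, ctx);
	}
	if (ret)
		ww_mutex_unlock(a);

	return ret;
}
#endif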
| 561 | |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 562 | /* |
| 563 | * Lock a mutex (possibly interruptible), slowpath: |
| 564 | */ |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 565 | static __always_inline int __sched |
Peter Zijlstra | 2f064a5 | 2021-06-11 10:28:17 +0200 | [diff] [blame] | 566 | __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass, |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 567 | struct lockdep_map *nest_lock, unsigned long ip, |
Tetsuo Handa | b026750 | 2013-10-17 19:45:29 +0900 | [diff] [blame] | 568 | struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 569 | { |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 570 | struct mutex_waiter waiter; |
Waiman Long | a40ca56 | 2016-08-26 19:35:08 -0400 | [diff] [blame] | 571 | struct ww_mutex *ww; |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 572 | int ret; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 573 | |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 574 | if (!use_ww_ctx) |
| 575 | ww_ctx = NULL; |
| 576 | |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 577 | might_sleep(); |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 578 | |
Peter Zijlstra | e6b4457 | 2021-06-30 17:35:20 +0200 | [diff] [blame] | 579 | MUTEX_WARN_ON(lock->magic != lock); |
Sebastian Andrzej Siewior | 6c11c6e | 2019-07-03 11:21:26 +0200 | [diff] [blame] | 580 | |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 581 | ww = container_of(lock, struct ww_mutex, base); |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 582 | if (ww_ctx) { |
Chris Wilson | 0422e83 | 2016-05-26 21:08:17 +0100 | [diff] [blame] | 583 | if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) |
| 584 | return -EALREADY; |
Thomas Hellstrom | 08295b3 | 2018-06-15 10:17:38 +0200 | [diff] [blame] | 585 | |
| 586 | /* |
| 587 | * Reset the wounded flag after a kill. No other process can |
| 588 | * race and wound us here since they can't have a valid owner |
| 589 | * pointer if we don't have any locks held. |
| 590 | */ |
| 591 | if (ww_ctx->acquired == 0) |
| 592 | ww_ctx->wounded = 0; |
Peter Zijlstra | cf702ed | 2021-08-15 23:28:38 +0200 | [diff] [blame] | 593 | |
| 594 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 595 | nest_lock = &ww_ctx->dep_map; |
| 596 | #endif |
Chris Wilson | 0422e83 | 2016-05-26 21:08:17 +0100 | [diff] [blame] | 597 | } |
| 598 | |
Peter Zijlstra | 41719b0 | 2009-01-14 15:36:26 +0100 | [diff] [blame] | 599 | preempt_disable(); |
Peter Zijlstra | e4c70a6 | 2011-05-24 17:12:03 -0700 | [diff] [blame] | 600 | mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); |
Frederic Weisbecker | c022602 | 2009-12-02 20:49:16 +0100 | [diff] [blame] | 601 | |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 602 | if (__mutex_trylock(lock) || |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 603 | mutex_optimistic_spin(lock, ww_ctx, NULL)) { |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 604 | /* got the lock, yay! */ |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 605 | lock_acquired(&lock->dep_map, ip); |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 606 | if (ww_ctx) |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 607 | ww_mutex_set_context_fastpath(ww, ww_ctx); |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 608 | preempt_enable(); |
| 609 | return 0; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 610 | } |
Davidlohr Bueso | 7691651 | 2014-07-30 13:41:53 -0700 | [diff] [blame] | 611 | |
Thomas Gleixner | ebf4c55 | 2021-08-15 23:28:36 +0200 | [diff] [blame] | 612 | raw_spin_lock(&lock->wait_lock); |
Jason Low | 1e820c9 | 2014-06-11 11:37:21 -0700 | [diff] [blame] | 613 | /* |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 614 | * After waiting to acquire the wait_lock, try again. |
Jason Low | 1e820c9 | 2014-06-11 11:37:21 -0700 | [diff] [blame] | 615 | */ |
Nicolai Hähnle | 659cf9f | 2016-12-21 19:46:36 +0100 | [diff] [blame] | 616 | if (__mutex_trylock(lock)) { |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 617 | if (ww_ctx) |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 618 | __ww_mutex_check_waiters(lock, ww_ctx); |
Nicolai Hähnle | 659cf9f | 2016-12-21 19:46:36 +0100 | [diff] [blame] | 619 | |
Davidlohr Bueso | ec83f42 | 2013-06-28 13:13:18 -0700 | [diff] [blame] | 620 | goto skip_wait; |
Nicolai Hähnle | 659cf9f | 2016-12-21 19:46:36 +0100 | [diff] [blame] | 621 | } |
Davidlohr Bueso | ec83f42 | 2013-06-28 13:13:18 -0700 | [diff] [blame] | 622 | |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 623 | debug_mutex_lock_common(lock, &waiter); |
Peter Zijlstra | c0afb0f | 2021-08-15 23:28:39 +0200 | [diff] [blame] | 624 | waiter.task = current; |
Sebastian Andrzej Siewior | b857174 | 2021-08-19 21:30:30 +0200 | [diff] [blame] | 625 | if (use_ww_ctx) |
Peter Zijlstra | c0afb0f | 2021-08-15 23:28:39 +0200 | [diff] [blame] | 626 | waiter.ww_ctx = ww_ctx; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 627 | |
Nicolai Hähnle | 6baa5c6 | 2016-12-21 19:46:34 +0100 | [diff] [blame] | 628 | lock_contended(&lock->dep_map, ip); |
| 629 | |
| 630 | if (!use_ww_ctx) { |
| 631 | /* add waiting tasks to the end of the waitqueue (FIFO): */ |
Thomas Hellstrom | 08295b3 | 2018-06-15 10:17:38 +0200 | [diff] [blame] | 632 | __mutex_add_waiter(lock, &waiter, &lock->wait_list); |
Nicolai Hähnle | 6baa5c6 | 2016-12-21 19:46:34 +0100 | [diff] [blame] | 633 | } else { |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 634 | /* |
| 635 | * Add in stamp order, waking up waiters that must kill |
| 636 | * themselves. |
| 637 | */ |
Nicolai Hähnle | 6baa5c6 | 2016-12-21 19:46:34 +0100 | [diff] [blame] | 638 | ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx); |
| 639 | if (ret) |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 640 | goto err_early_kill; |
Nicolai Hähnle | 6baa5c6 | 2016-12-21 19:46:34 +0100 | [diff] [blame] | 641 | } |
| 642 | |
Davidlohr Bueso | 642fa44 | 2017-01-03 13:43:14 -0800 | [diff] [blame] | 643 | set_current_state(state); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 644 | for (;;) { |
Peter Zijlstra | 048661a | 2021-06-30 17:35:18 +0200 | [diff] [blame] | 645 | bool first; |
| 646 | |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 647 | /* |
| 648 | * Once we hold wait_lock, we're serialized against |
| 649 | * mutex_unlock() handing the lock off to us; do a trylock |
| 650 | * before testing the error conditions to make sure we pick up |
| 651 | * the handoff. |
| 652 | */ |
Peter Zijlstra | e274795 | 2017-01-11 14:17:48 +0100 | [diff] [blame] | 653 | if (__mutex_trylock(lock)) |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 654 | goto acquired; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 655 | |
| 656 | /* |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 657 | * Check for signals and kill conditions while holding |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 658 | * wait_lock. This ensures the lock cancellation is ordered |
| 659 | * against mutex_unlock() and wake-ups do not go missing. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 660 | */ |
Davidlohr Bueso | 3bb5f4a | 2019-01-03 15:28:44 -0800 | [diff] [blame] | 661 | if (signal_pending_state(state, current)) { |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 662 | ret = -EINTR; |
| 663 | goto err; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 664 | } |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 665 | |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 666 | if (ww_ctx) { |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 667 | ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 668 | if (ret) |
| 669 | goto err; |
| 670 | } |
| 671 | |
Thomas Gleixner | ebf4c55 | 2021-08-15 23:28:36 +0200 | [diff] [blame] | 672 | raw_spin_unlock(&lock->wait_lock); |
Thomas Gleixner | bd2f553 | 2011-03-21 12:33:18 +0100 | [diff] [blame] | 673 | schedule_preempt_disabled(); |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 674 | |
Peter Zijlstra | 048661a | 2021-06-30 17:35:18 +0200 | [diff] [blame] | 675 | first = __mutex_waiter_is_first(lock, &waiter); |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 676 | |
Davidlohr Bueso | 642fa44 | 2017-01-03 13:43:14 -0800 | [diff] [blame] | 677 | set_current_state(state); |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 678 | /* |
| 679 | * Here we order against unlock; we must either see it change |
| 680 | * state back to RUNNING and fall through the next schedule(), |
| 681 | * or we must see its unlock and acquire. |
| 682 | */ |
Peter Zijlstra | ad90880 | 2021-06-30 17:35:19 +0200 | [diff] [blame] | 683 | if (__mutex_trylock_or_handoff(lock, first) || |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 684 | (first && mutex_optimistic_spin(lock, ww_ctx, &waiter))) |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 685 | break; |
| 686 | |
Thomas Gleixner | ebf4c55 | 2021-08-15 23:28:36 +0200 | [diff] [blame] | 687 | raw_spin_lock(&lock->wait_lock); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 688 | } |
Thomas Gleixner | ebf4c55 | 2021-08-15 23:28:36 +0200 | [diff] [blame] | 689 | raw_spin_lock(&lock->wait_lock); |
Peter Zijlstra | 5bbd7e6 | 2016-09-02 13:42:12 +0200 | [diff] [blame] | 690 | acquired: |
Davidlohr Bueso | 642fa44 | 2017-01-03 13:43:14 -0800 | [diff] [blame] | 691 | __set_current_state(TASK_RUNNING); |
Davidlohr Bueso | 51587bc | 2015-01-19 17:39:21 -0800 | [diff] [blame] | 692 | |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 693 | if (ww_ctx) { |
Thomas Hellstrom | 08295b3 | 2018-06-15 10:17:38 +0200 | [diff] [blame] | 694 | /* |
| 695 | * Wound-Wait; if we stole the lock (!first_waiter), check the |
| 696 | * waiters, as anyone might want to wound us. |
| 697 | */ |
| 698 | if (!ww_ctx->is_wait_die && |
| 699 | !__mutex_waiter_is_first(lock, &waiter)) |
| 700 | __ww_mutex_check_waiters(lock, ww_ctx); |
| 701 | } |
| 702 | |
Zqiang | 3a010c4 | 2021-05-17 11:40:05 +0800 | [diff] [blame] | 703 | __mutex_remove_waiter(lock, &waiter); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 704 | |
Davidlohr Bueso | ec83f42 | 2013-06-28 13:13:18 -0700 | [diff] [blame] | 705 | debug_mutex_free_waiter(&waiter); |
| 706 | |
| 707 | skip_wait: |
| 708 | /* got the lock - cleanup and rejoice! */ |
| 709 | lock_acquired(&lock->dep_map, ip); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 710 | |
Waiman Long | 5de2055 | 2021-03-16 11:31:16 -0400 | [diff] [blame] | 711 | if (ww_ctx) |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 712 | ww_mutex_lock_acquired(ww, ww_ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 713 | |
Thomas Gleixner | ebf4c55 | 2021-08-15 23:28:36 +0200 | [diff] [blame] | 714 | raw_spin_unlock(&lock->wait_lock); |
Peter Zijlstra | 41719b0 | 2009-01-14 15:36:26 +0100 | [diff] [blame] | 715 | preempt_enable(); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 716 | return 0; |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 717 | |
| 718 | err: |
Davidlohr Bueso | 642fa44 | 2017-01-03 13:43:14 -0800 | [diff] [blame] | 719 | __set_current_state(TASK_RUNNING); |
Zqiang | 3a010c4 | 2021-05-17 11:40:05 +0800 | [diff] [blame] | 720 | __mutex_remove_waiter(lock, &waiter); |
Peter Ziljstra | 55f036c | 2018-06-15 10:07:12 +0200 | [diff] [blame] | 721 | err_early_kill: |
Thomas Gleixner | ebf4c55 | 2021-08-15 23:28:36 +0200 | [diff] [blame] | 722 | raw_spin_unlock(&lock->wait_lock); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 723 | debug_mutex_free_waiter(&waiter); |
Qian Cai | 5facae4 | 2019-09-19 12:09:40 -0400 | [diff] [blame] | 724 | mutex_release(&lock->dep_map, ip); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 725 | preempt_enable(); |
| 726 | return ret; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 727 | } |
| 728 | |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 729 | static int __sched |
Peter Zijlstra | 2f064a5 | 2021-06-11 10:28:17 +0200 | [diff] [blame] | 730 | __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 731 | struct lockdep_map *nest_lock, unsigned long ip) |
| 732 | { |
| 733 | return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false); |
| 734 | } |
| 735 | |
| 736 | static int __sched |
Peter Zijlstra | 2f064a5 | 2021-06-11 10:28:17 +0200 | [diff] [blame] | 737 | __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, |
Peter Zijlstra | cf702ed | 2021-08-15 23:28:38 +0200 | [diff] [blame] | 738 | unsigned long ip, struct ww_acquire_ctx *ww_ctx) |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 739 | { |
Peter Zijlstra | cf702ed | 2021-08-15 23:28:38 +0200 | [diff] [blame] | 740 | return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true); |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 741 | } |
| 742 | |
Maarten Lankhorst | 12235da | 2021-09-09 11:32:18 +0200 | [diff] [blame] | 743 | /** |
| 744 | * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context |
| 745 | * @ww: mutex to lock |
| 746 | * @ww_ctx: optional w/w acquire context |
| 747 | * |
| 748 | * Trylocks a mutex with the optional acquire context; no deadlock detection is |
| 749 | * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. |
| 750 | * |
| 751 | * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx is |
| 752 | * specified, -EALREADY handling may happen in calls to ww_mutex_trylock. |
| 753 | * |
| 754 | * A mutex acquired with this function must be released with ww_mutex_unlock. |
| 755 | */ |
| 756 | int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx) |
| 757 | { |
| 758 | if (!ww_ctx) |
| 759 | return mutex_trylock(&ww->base); |
| 760 | |
| 761 | MUTEX_WARN_ON(ww->base.magic != &ww->base); |
| 762 | |
| 763 | /* |
| 764 | * Reset the wounded flag after a kill. No other process can |
| 765 | * race and wound us here, since they can't have a valid owner |
| 766 | * pointer if we don't have any locks held. |
| 767 | */ |
| 768 | if (ww_ctx->acquired == 0) |
| 769 | ww_ctx->wounded = 0; |
| 770 | |
| 771 | if (__mutex_trylock(&ww->base)) { |
| 772 | ww_mutex_set_context_fastpath(ww, ww_ctx); |
| 773 | mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_); |
| 774 | return 1; |
| 775 | } |
| 776 | |
| 777 | return 0; |
| 778 | } |
| 779 | EXPORT_SYMBOL(ww_mutex_trylock); |
| 780 | |
Ingo Molnar | ef5d470 | 2006-07-03 00:24:55 -0700 | [diff] [blame] | 781 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 782 | void __sched |
| 783 | mutex_lock_nested(struct mutex *lock, unsigned int subclass) |
| 784 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 785 | __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_); |
Ingo Molnar | ef5d470 | 2006-07-03 00:24:55 -0700 | [diff] [blame] | 786 | } |
| 787 | |
| 788 | EXPORT_SYMBOL_GPL(mutex_lock_nested); |
NeilBrown | d63a5a7 | 2006-12-08 02:36:17 -0800 | [diff] [blame] | 789 | |
Peter Zijlstra | e4c70a6 | 2011-05-24 17:12:03 -0700 | [diff] [blame] | 790 | void __sched |
| 791 | _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) |
| 792 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 793 | __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_); |
Peter Zijlstra | e4c70a6 | 2011-05-24 17:12:03 -0700 | [diff] [blame] | 794 | } |
Peter Zijlstra | e4c70a6 | 2011-05-24 17:12:03 -0700 | [diff] [blame] | 795 | EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); |
| 796 | |
NeilBrown | d63a5a7 | 2006-12-08 02:36:17 -0800 | [diff] [blame] | 797 | int __sched |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 798 | mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) |
| 799 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 800 | return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_); |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 801 | } |
| 802 | EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); |
| 803 | |
| 804 | int __sched |
NeilBrown | d63a5a7 | 2006-12-08 02:36:17 -0800 | [diff] [blame] | 805 | mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) |
| 806 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 807 | return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_); |
NeilBrown | d63a5a7 | 2006-12-08 02:36:17 -0800 | [diff] [blame] | 808 | } |
NeilBrown | d63a5a7 | 2006-12-08 02:36:17 -0800 | [diff] [blame] | 809 | EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 810 | |
Tejun Heo | 1460cb6 | 2016-10-28 12:58:11 -0400 | [diff] [blame] | 811 | void __sched |
| 812 | mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) |
| 813 | { |
| 814 | int token; |
| 815 | |
| 816 | might_sleep(); |
| 817 | |
| 818 | token = io_schedule_prepare(); |
| 819 | __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, |
| 820 | subclass, NULL, _RET_IP_, NULL, 0); |
| 821 | io_schedule_finish(token); |
| 822 | } |
| 823 | EXPORT_SYMBOL_GPL(mutex_lock_io_nested); |
| 824 | |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 825 | static inline int |
| 826 | ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
| 827 | { |
| 828 | #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH |
| 829 | unsigned tmp; |
| 830 | |
| 831 | if (ctx->deadlock_inject_countdown-- == 0) { |
| 832 | tmp = ctx->deadlock_inject_interval; |
| 833 | if (tmp > UINT_MAX/4) |
| 834 | tmp = UINT_MAX; |
| 835 | else |
| 836 | tmp = tmp*2 + tmp + tmp/2; |
| 837 | |
| 838 | ctx->deadlock_inject_interval = tmp; |
| 839 | ctx->deadlock_inject_countdown = tmp; |
| 840 | ctx->contending_lock = lock; |
| 841 | |
| 842 | ww_mutex_unlock(lock); |
| 843 | |
| 844 | return -EDEADLK; |
| 845 | } |
| 846 | #endif |
| 847 | |
| 848 | return 0; |
| 849 | } |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 850 | |
| 851 | int __sched |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 852 | ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 853 | { |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 854 | int ret; |
| 855 | |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 856 | might_sleep(); |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 857 | ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, |
Peter Zijlstra | cf702ed | 2021-08-15 23:28:38 +0200 | [diff] [blame] | 858 | 0, _RET_IP_, ctx); |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 859 | if (!ret && ctx && ctx->acquired > 1) |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 860 | return ww_mutex_deadlock_injection(lock, ctx); |
| 861 | |
| 862 | return ret; |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 863 | } |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 864 | EXPORT_SYMBOL_GPL(ww_mutex_lock); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 865 | |
| 866 | int __sched |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 867 | ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 868 | { |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 869 | int ret; |
| 870 | |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 871 | might_sleep(); |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 872 | ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, |
Peter Zijlstra | cf702ed | 2021-08-15 23:28:38 +0200 | [diff] [blame] | 873 | 0, _RET_IP_, ctx); |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 874 | |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 875 | if (!ret && ctx && ctx->acquired > 1) |
Daniel Vetter | 2301002 | 2013-06-20 13:31:17 +0200 | [diff] [blame] | 876 | return ww_mutex_deadlock_injection(lock, ctx); |
| 877 | |
| 878 | return ret; |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 879 | } |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 880 | EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 881 | |
Ingo Molnar | ef5d470 | 2006-07-03 00:24:55 -0700 | [diff] [blame] | 882 | #endif |
| 883 | |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 884 | /* |
| 885 | * Release the lock, slowpath: |
| 886 | */ |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 887 | static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 888 | { |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 889 | struct task_struct *next = NULL; |
Waiman Long | 194a6b5 | 2016-11-17 11:46:38 -0500 | [diff] [blame] | 890 | DEFINE_WAKE_Q(wake_q); |
Peter Zijlstra | b9c16a0 | 2017-01-17 16:06:09 +0100 | [diff] [blame] | 891 | unsigned long owner; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 892 | |
Qian Cai | 5facae4 | 2019-09-19 12:09:40 -0400 | [diff] [blame] | 893 | mutex_release(&lock->dep_map, ip); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 894 | |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 895 | /* |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 896 | * Release the lock before (potentially) taking the spinlock such that |
| 897 | * other contenders can get on with things ASAP. |
| 898 | * |
| 899 | 	 * Except when HANDOFF is set; in that case we must not clear the owner |
| 900 | 	 * field, but instead hand the lock off to the top waiter. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 901 | */ |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 902 | owner = atomic_long_read(&lock->owner); |
| 903 | for (;;) { |
Peter Zijlstra | e6b4457 | 2021-06-30 17:35:20 +0200 | [diff] [blame] | 904 | MUTEX_WARN_ON(__owner_task(owner) != current); |
| 905 | MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP); |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 906 | |
| 907 | if (owner & MUTEX_FLAG_HANDOFF) |
| 908 | break; |
| 909 | |
Peter Zijlstra | ab4e4d9 | 2021-06-30 17:35:17 +0200 | [diff] [blame] | 910 | if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) { |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 911 | if (owner & MUTEX_FLAG_WAITERS) |
| 912 | break; |
| 913 | |
| 914 | return; |
| 915 | } |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 916 | } |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 917 | |
Thomas Gleixner | ebf4c55 | 2021-08-15 23:28:36 +0200 | [diff] [blame] | 918 | raw_spin_lock(&lock->wait_lock); |
Jason Low | 1d8fe7d | 2014-01-28 11:13:14 -0800 | [diff] [blame] | 919 | debug_mutex_unlock(lock); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 920 | if (!list_empty(&lock->wait_list)) { |
| 921 | /* get the first entry from the wait-list: */ |
| 922 | struct mutex_waiter *waiter = |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 923 | list_first_entry(&lock->wait_list, |
| 924 | struct mutex_waiter, list); |
| 925 | |
| 926 | next = waiter->task; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 927 | |
| 928 | debug_mutex_wake_waiter(lock, waiter); |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 929 | wake_q_add(&wake_q, next); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 930 | } |
| 931 | |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 932 | if (owner & MUTEX_FLAG_HANDOFF) |
| 933 | __mutex_handoff(lock, next); |
| 934 | |
Thomas Gleixner | ebf4c55 | 2021-08-15 23:28:36 +0200 | [diff] [blame] | 935 | raw_spin_unlock(&lock->wait_lock); |
Peter Zijlstra | 9d659ae | 2016-08-23 14:40:16 +0200 | [diff] [blame] | 936 | |
Davidlohr Bueso | 1329ce6 | 2016-01-24 18:23:43 -0800 | [diff] [blame] | 937 | wake_up_q(&wake_q); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 938 | } |
| 939 | |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 940 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
Ingo Molnar | 9a11b49a | 2006-07-03 00:24:33 -0700 | [diff] [blame] | 941 | /* |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 942 | * Here come the less common (and hence less performance-critical) APIs: |
| 943 | * mutex_lock_interruptible() and mutex_trylock(). |
| 944 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 945 | static noinline int __sched |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 946 | __mutex_lock_killable_slowpath(struct mutex *lock); |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 947 | |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 948 | static noinline int __sched |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 949 | __mutex_lock_interruptible_slowpath(struct mutex *lock); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 950 | |
Randy Dunlap | ef5dc12 | 2010-09-02 15:48:16 -0700 | [diff] [blame] | 951 | /** |
Matthew Wilcox | 45dbac0 | 2018-03-15 04:58:12 -0700 | [diff] [blame] | 952 | * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals. |
| 953 | * @lock: The mutex to be acquired. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 954 | * |
Matthew Wilcox | 45dbac0 | 2018-03-15 04:58:12 -0700 | [diff] [blame] | 955 | * Lock the mutex like mutex_lock(). If a signal is delivered while the |
| 956 | * process is sleeping, this function will return without acquiring the |
| 957 | * mutex. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 958 | * |
Matthew Wilcox | 45dbac0 | 2018-03-15 04:58:12 -0700 | [diff] [blame] | 959 | * Context: Process context. |
| 960 | * Return: 0 if the lock was successfully acquired or %-EINTR if a |
| 961 | * signal arrived. |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 962 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 963 | int __sched mutex_lock_interruptible(struct mutex *lock) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 964 | { |
Ingo Molnar | c544bdb | 2006-01-10 22:10:36 +0100 | [diff] [blame] | 965 | might_sleep(); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 966 | |
| 967 | if (__mutex_trylock_fast(lock)) |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 968 | return 0; |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 969 | |
| 970 | return __mutex_lock_interruptible_slowpath(lock); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 971 | } |
| 972 | |
| 973 | EXPORT_SYMBOL(mutex_lock_interruptible); |
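/*
 * Illustrative sketch, not part of this file: a typical caller of
 * mutex_lock_interruptible(), e.g. a read/ioctl path that should not
 * sleep uninterruptibly. struct my_dev and do_read_locked() are
 * hypothetical; translating the failure to -ERESTARTSYS (so the syscall
 * can be restarted) is a common convention, not a requirement.
 */
static ssize_t my_dev_read(struct my_dev *dev, char __user *buf, size_t len)
{
	ssize_t ret;

	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;	/* a signal arrived while sleeping */

	ret = do_read_locked(dev, buf, len);
	mutex_unlock(&dev->lock);
	return ret;
}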
| 974 | |
Matthew Wilcox | 45dbac0 | 2018-03-15 04:58:12 -0700 | [diff] [blame] | 975 | /** |
| 976 | * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals. |
| 977 | * @lock: The mutex to be acquired. |
| 978 | * |
| 979 | * Lock the mutex like mutex_lock(). If a signal which will be fatal to |
| 980 | * the current process is delivered while the process is sleeping, this |
| 981 | * function will return without acquiring the mutex. |
| 982 | * |
| 983 | * Context: Process context. |
| 984 | * Return: 0 if the lock was successfully acquired or %-EINTR if a |
| 985 | * fatal signal arrived. |
| 986 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 987 | int __sched mutex_lock_killable(struct mutex *lock) |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 988 | { |
| 989 | might_sleep(); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 990 | |
| 991 | if (__mutex_trylock_fast(lock)) |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 992 | return 0; |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 993 | |
| 994 | return __mutex_lock_killable_slowpath(lock); |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 995 | } |
| 996 | EXPORT_SYMBOL(mutex_lock_killable); |
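/*
 * Illustrative sketch, not part of this file: mutex_lock_killable() fits
 * paths that should ignore ordinary signals but must not leave a task
 * stuck once it has been fatally signalled (e.g. OOM-killed). struct
 * my_ctx and do_op_locked() are hypothetical.
 */
static int my_op(struct my_ctx *ctx)
{
	int err;

	err = mutex_lock_killable(&ctx->lock);
	if (err)
		return err;		/* -EINTR: fatal signal pending */

	err = do_op_locked(ctx);
	mutex_unlock(&ctx->lock);
	return err;
}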
| 997 | |
Matthew Wilcox | 45dbac0 | 2018-03-15 04:58:12 -0700 | [diff] [blame] | 998 | /** |
| 999 | * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O |
| 1000 | * @lock: The mutex to be acquired. |
| 1001 | * |
| 1002 | * Lock the mutex like mutex_lock(). While the task is waiting for this |
| 1003 | * mutex, it will be accounted as being in the IO wait state by the |
| 1004 | * scheduler. |
| 1005 | * |
| 1006 | * Context: Process context. |
| 1007 | */ |
Tejun Heo | 1460cb6 | 2016-10-28 12:58:11 -0400 | [diff] [blame] | 1008 | void __sched mutex_lock_io(struct mutex *lock) |
| 1009 | { |
| 1010 | int token; |
| 1011 | |
| 1012 | token = io_schedule_prepare(); |
| 1013 | mutex_lock(lock); |
| 1014 | io_schedule_finish(token); |
| 1015 | } |
| 1016 | EXPORT_SYMBOL_GPL(mutex_lock_io); |
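/*
 * Illustrative sketch, not part of this file: mutex_lock_io() suits locks
 * that are effectively held across I/O, so waiters are charged as iowait
 * rather than as plain sleepers. struct my_bdev, its io_lock field and
 * submit_and_wait() are hypothetical.
 */
static void my_bdev_flush(struct my_bdev *bdev)
{
	mutex_lock_io(&bdev->io_lock);	/* waiter shows up as iowait */
	submit_and_wait(bdev);
	mutex_unlock(&bdev->io_lock);
}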
| 1017 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1018 | static noinline void __sched |
| 1019 | __mutex_lock_slowpath(struct mutex *lock) |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 1020 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1021 | __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_); |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 1022 | } |
| 1023 | |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1024 | static noinline int __sched |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 1025 | __mutex_lock_killable_slowpath(struct mutex *lock) |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 1026 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1027 | return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_); |
Liam R. Howlett | ad77653 | 2007-12-06 17:37:59 -0500 | [diff] [blame] | 1028 | } |
| 1029 | |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1030 | static noinline int __sched |
Maarten Lankhorst | a41b56e | 2013-06-20 13:31:05 +0200 | [diff] [blame] | 1031 | __mutex_lock_interruptible_slowpath(struct mutex *lock) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1032 | { |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1033 | return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_); |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1034 | } |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1035 | |
| 1036 | static noinline int __sched |
| 1037 | __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
| 1038 | { |
Peter Zijlstra | cf702ed | 2021-08-15 23:28:38 +0200 | [diff] [blame] | 1039 | return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1040 | _RET_IP_, ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1041 | } |
| 1042 | |
| 1043 | static noinline int __sched |
| 1044 | __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, |
| 1045 | struct ww_acquire_ctx *ctx) |
| 1046 | { |
Peter Zijlstra | cf702ed | 2021-08-15 23:28:38 +0200 | [diff] [blame] | 1047 | return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, |
Peter Zijlstra | 427b182 | 2016-12-23 10:36:00 +0100 | [diff] [blame] | 1048 | _RET_IP_, ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1049 | } |
| 1050 | |
Peter Zijlstra | e4564f7 | 2007-10-11 22:11:12 +0200 | [diff] [blame] | 1051 | #endif |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1052 | |
Randy Dunlap | ef5dc12 | 2010-09-02 15:48:16 -0700 | [diff] [blame] | 1053 | /** |
| 1054 | * mutex_trylock - try to acquire the mutex, without waiting |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1055 | * @lock: the mutex to be acquired |
| 1056 | * |
| 1057 | * Try to acquire the mutex atomically. Returns 1 if the mutex |
| 1058 | * has been acquired successfully, and 0 on contention. |
| 1059 | * |
| 1060 | * NOTE: this function follows the spin_trylock() convention, so |
Randy Dunlap | ef5dc12 | 2010-09-02 15:48:16 -0700 | [diff] [blame] | 1061 | * it is negated from the down_trylock() return values! Be careful |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1062 | * about this when converting semaphore users to mutexes. |
| 1063 | * |
| 1064 | * This function must not be used in interrupt context. The |
| 1065 | * mutex must be released by the same task that acquired it. |
| 1066 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 1067 | int __sched mutex_trylock(struct mutex *lock) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1068 | { |
Sebastian Andrzej Siewior | 6c11c6e | 2019-07-03 11:21:26 +0200 | [diff] [blame] | 1069 | bool locked; |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 1070 | |
Peter Zijlstra | e6b4457 | 2021-06-30 17:35:20 +0200 | [diff] [blame] | 1071 | MUTEX_WARN_ON(lock->magic != lock); |
Sebastian Andrzej Siewior | 6c11c6e | 2019-07-03 11:21:26 +0200 | [diff] [blame] | 1072 | |
| 1073 | locked = __mutex_trylock(lock); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1074 | if (locked) |
| 1075 | mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 1076 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1077 | return locked; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1078 | } |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 1079 | EXPORT_SYMBOL(mutex_trylock); |
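/*
 * Illustrative sketch, not part of this file: because mutex_trylock()
 * returns 1 on success (the opposite of down_trylock()), the success path
 * goes inside the if(). try_flush(), struct my_cache and flush_locked()
 * are hypothetical.
 */
static bool try_flush(struct my_cache *cache)
{
	if (!mutex_trylock(&cache->lock))
		return false;		/* contended; try again later */

	flush_locked(cache);
	mutex_unlock(&cache->lock);
	return true;
}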
Andrew Morton | a511e3f | 2009-04-29 15:59:58 -0700 | [diff] [blame] | 1080 | |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1081 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
| 1082 | int __sched |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1083 | ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1084 | { |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1085 | might_sleep(); |
| 1086 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1087 | if (__mutex_trylock_fast(&lock->base)) { |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 1088 | if (ctx) |
| 1089 | ww_mutex_set_context_fastpath(lock, ctx); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1090 | return 0; |
| 1091 | } |
| 1092 | |
| 1093 | return __ww_mutex_lock_slowpath(lock, ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1094 | } |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1095 | EXPORT_SYMBOL(ww_mutex_lock); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1096 | |
| 1097 | int __sched |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1098 | ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1099 | { |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1100 | might_sleep(); |
| 1101 | |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1102 | if (__mutex_trylock_fast(&lock->base)) { |
Nicolai Hähnle | ea9e0fb | 2016-12-21 19:46:32 +0100 | [diff] [blame] | 1103 | if (ctx) |
| 1104 | ww_mutex_set_context_fastpath(lock, ctx); |
Peter Zijlstra | 3ca0ff5 | 2016-08-23 13:36:04 +0200 | [diff] [blame] | 1105 | return 0; |
| 1106 | } |
| 1107 | |
| 1108 | return __ww_mutex_lock_interruptible_slowpath(lock, ctx); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1109 | } |
Nicolai Hähnle | c5470b2 | 2016-12-21 19:46:33 +0100 | [diff] [blame] | 1110 | EXPORT_SYMBOL(ww_mutex_lock_interruptible); |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1111 | |
Thomas Gleixner | bb630f9 | 2021-08-15 23:29:01 +0200 | [diff] [blame] | 1112 | #endif /* !CONFIG_DEBUG_LOCK_ALLOC */ |
| 1113 | #endif /* !CONFIG_PREEMPT_RT */ |
Maarten Lankhorst | 040a0a3 | 2013-06-24 10:30:04 +0200 | [diff] [blame] | 1114 | |
Andrew Morton | a511e3f | 2009-04-29 15:59:58 -0700 | [diff] [blame] | 1115 | /** |
| 1116 |  * atomic_dec_and_mutex_lock - return holding the mutex if the count drops to 0 |
| 1117 |  * @cnt: the atomic counter to decrement |
| 1118 |  * @lock: the mutex to return holding if @cnt drops to 0 |
| 1119 |  * |
| 1120 |  * Return: true, with @lock held, if @cnt dropped to 0; false otherwise. |
| 1121 | */ |
| 1122 | int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) |
| 1123 | { |
| 1124 | /* dec if we can't possibly hit 0 */ |
| 1125 | if (atomic_add_unless(cnt, -1, 1)) |
| 1126 | return 0; |
| 1127 | /* we might hit 0, so take the lock */ |
| 1128 | mutex_lock(lock); |
| 1129 | if (!atomic_dec_and_test(cnt)) { |
| 1130 | /* when we actually did the dec, we didn't hit 0 */ |
| 1131 | mutex_unlock(lock); |
| 1132 | return 0; |
| 1133 | } |
| 1134 | /* we hit 0, and we hold the lock */ |
| 1135 | return 1; |
| 1136 | } |
| 1137 | EXPORT_SYMBOL(atomic_dec_and_mutex_lock); |
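/*
 * Illustrative sketch, not part of this file: the usual refcount-drop
 * pattern for atomic_dec_and_mutex_lock(). The mutex is only taken for
 * the final put, so the common path stays lock-free. my_obj_put(), the
 * refcnt/list_lock/node fields and the containing list are hypothetical.
 */
static void my_obj_put(struct my_obj *obj)
{
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj->list_lock))
		return;			/* not the last reference */

	/* Last reference: we hold obj->list_lock here. */
	list_del(&obj->node);
	mutex_unlock(&obj->list_lock);
	kfree(obj);
}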