/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

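/*
 * Usage sketch (the 'my_mutex' and 'obj' names below are hypothetical,
 * for illustration only): a mutex is either defined statically or
 * initialized before first use - never memset() to zero:
 *
 *	static DEFINE_MUTEX(my_mutex);
 *
 * or, for a dynamically allocated object:
 *
 *	mutex_init(&obj->lock);
 */
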
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

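/*
 * Usage sketch (my_dev and its 'lock' and 'state' members are
 * hypothetical, for illustration only): the same task locks,
 * touches the protected data, then unlocks:
 *
 *	struct my_dev {
 *		struct mutex	lock;
 *		int		state;
 *	};
 *
 *	static void my_dev_set_state(struct my_dev *dev, int state)
 *	{
 *		mutex_lock(&dev->lock);
 *		dev->state = state;
 *		mutex_unlock(&dev->lock);
 *	}
 */
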
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners trying to acquire the
 * mutex more or less simultaneously, the spinners need to take an MCS
 * lock first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
	struct mspin_node *next;
	int		   locked;	/* 1 if lock acquired */
};
#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}

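/*
 * The lock/unlock pair above is used with a node allocated on the
 * spinner's stack, as in __mutex_lock_common() below:
 *
 *	struct mspin_node node;
 *
 *	mspin_lock(MLOCK(lock), &node);
 *	... spin on lock->owner ...
 *	mspin_unlock(MLOCK(lock), &node);
 *
 * The node lives only for the duration of the spin, so no dynamic
 * allocation is needed, and each spinner busy-waits on its own
 * node->locked word rather than on a single shared location.
 */
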
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking
	 * that lock->owner still matches owner. If that fails, owner might
	 * point to freed memory; if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	int retval = 1;

	rcu_read_lock();
	if (lock->owner)
		retval = lock->owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the owner may have just acquired
	 * the mutex and not set the field yet, or the mutex may have
	 * been released.
	 */
	return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner here:
	 * the slow path is always taken in that case, and it clears the
	 * owner field after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using an MCS lock so that only
	 * one spinner can compete for the mutex. However, if mutex spinning
	 * isn't going to happen, there is no point in going through the
	 * lock/unlock overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, we will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

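/*
 * Usage sketch for the _nested() annotation (double_lock() and the
 * 'm' field are hypothetical, for illustration only): when two locks
 * of the same lockdep class must be held at once, the inner one gets
 * a distinct subclass so that lockdep does not report false recursion:
 *
 *	static void double_lock(struct my_obj *a, struct my_obj *b)
 *	{
 *		mutex_lock(&a->m);
 *		mutex_lock_nested(&b->m, SINGLE_DEPTH_NESTING);
 *	}
 *
 * A stable locking order between 'a' and 'b' remains the caller's
 * responsibility; the subclass only affects lockdep's class checks.
 */
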
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);

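/*
 * Usage sketch (dev->lock is hypothetical, for illustration only):
 * the nonzero return must be handled, typically by bailing out with
 * -ERESTARTSYS/-EINTR back towards user space:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 */
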
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

| 593 | |
Randy Dunlap | ef5dc12 | 2010-09-02 15:48:16 -0700 | [diff] [blame] | 594 | /** |
| 595 | * mutex_trylock - try to acquire the mutex, without waiting |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 596 | * @lock: the mutex to be acquired |
| 597 | * |
| 598 | * Try to acquire the mutex atomically. Returns 1 if the mutex |
| 599 | * has been acquired successfully, and 0 on contention. |
| 600 | * |
| 601 | * NOTE: this function follows the spin_trylock() convention, so |
Randy Dunlap | ef5dc12 | 2010-09-02 15:48:16 -0700 | [diff] [blame] | 602 | * it is negated from the down_trylock() return values! Be careful |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 603 | * about this when converting semaphore users to mutexes. |
| 604 | * |
| 605 | * This function must not be used in interrupt context. The |
| 606 | * mutex must be released by the same task that acquired it. |
| 607 | */ |
Harvey Harrison | 7ad5b3a | 2008-02-08 04:19:53 -0800 | [diff] [blame] | 608 | int __sched mutex_trylock(struct mutex *lock) |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 609 | { |
Peter Zijlstra | 0d66bf6 | 2009-01-12 14:01:47 +0100 | [diff] [blame] | 610 | int ret; |
| 611 | |
| 612 | ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath); |
| 613 | if (ret) |
| 614 | mutex_set_owner(lock); |
| 615 | |
| 616 | return ret; |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 617 | } |
Ingo Molnar | 6053ee3 | 2006-01-09 15:59:19 -0800 | [diff] [blame] | 618 | EXPORT_SYMBOL(mutex_trylock); |
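
/*
 * Usage sketch (dev->lock is hypothetical, for illustration only) -
 * note that the convention is inverted relative to down_trylock(),
 * where 0 means success:
 *
 *	if (mutex_trylock(&dev->lock)) {
 *		... fast path, lock held ...
 *		mutex_unlock(&dev->lock);
 *	} else {
 *		... contended: fall back, defer or retry ...
 *	}
 */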

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true, holding the lock, if we decremented the counter to 0;
 * return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
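
/*
 * Usage sketch (my_obj, its 'refcount' and 'node' members, the global
 * 'obj_list_lock' and my_obj_free() are hypothetical, for illustration
 * only): drop a reference and, only when it hits zero, take the list
 * lock to unlink and free the object:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		my_obj_free(obj);
 *	}
 */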