/* SPDX-License-Identifier: GPL-2.0 */
/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache-line bouncing that common test-and-set
 * spin-lock implementations incur. See the usage sketch at the end of
 * this file.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
	int count;  /* nesting count, see qspinlock.c */
};
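
/*
 * A sketch of the queue shape (illustrative only): @lock always points at
 * the tail node. With a holder and two waiters queued, the list looks like
 *
 *	*lock --------------------------------------------+
 *	                                                  v
 *	holder's node --> waiter A node --> waiter B node (tail)
 *	                  locked = 0        locked = 0
 *
 * Each waiter spins on its own node->locked; the unlocker writes only the
 * immediate successor's node->locked, so waiters never share a cache line
 * while spinning.
 */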

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_cond_load_acquire() provides the acquire semantics
 * required so that subsequent operations happen after the
 * lock is acquired. Additionally, some architectures such as
 * ARM64 would like to do spin-waiting instead of purely
 * spinning, and smp_cond_load_acquire() provides that behavior.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	smp_cond_load_acquire(l, VAL);					\
} while (0)
#endif
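
/*
 * An architecture can supply its own contended-wait loop from
 * <asm/mcs_spinlock.h> (included above), which is why the definition is
 * guarded. A minimal sketch of what such an override might look like
 * (illustrative, not taken from a real arch implementation):
 *
 *	#define arch_mcs_spin_lock_contended(l)				\
 *	do {								\
 *		while (!READ_ONCE(*(l)))				\
 *			cpu_relax();					\
 *		smp_acquire__after_ctrl_dep();				\
 *	} while (0)
 *
 * Here smp_acquire__after_ctrl_dep() upgrades the control dependency of
 * the READ_ONCE() loop to the ACQUIRE ordering the default provides.
 */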

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have been completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: on many architectures (x86 being an exception), the
 * smp_load_acquire/smp_store_release pair is not sufficient to form
 * a full memory barrier across cpus for mcs_unlock and mcs_lock.
 * For applications that need a full barrier across multiple cpus
 * with an mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock()
 * should be used after mcs_lock, as sketched below.
 */
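/*
 * For example (a sketch; @lock and @node are caller-provided):
 *
 *	mcs_spin_lock(&lock, &node);
 *	smp_mb__after_unlock_lock();	-- now fully ordered against the
 *					   previous mcs_spin_unlock()
 *	-- ...critical section...
 */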

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to that node to this function in addition to the lock.
 * If the lock has already been acquired, then this will spin on
 * node->locked until the previous lock holder sets it in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next = NULL;

	/*
	 * We rely on the full barrier with global transitivity implied by the
	 * below xchg() to order the initialization stores above against any
	 * observation of @node. And to provide the ACQUIRE ordering associated
	 * with a LOCK primitive.
	 */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * Each thread spins only on its own node->locked value for
		 * lock acquisition. However, since this thread can
		 * immediately acquire the lock and does not proceed to spin
		 * on its own node->locked, this value won't be used. If a
		 * debug mode is needed to audit lock status, then set
		 * node->locked here.
		 */
		return;
	}
	WRITE_ONCE(prev->next, node);

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = READ_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (likely(cmpxchg_release(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}

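/*
 * Usage sketch (illustrative; 'example_lock' and the calling context are
 * hypothetical, not part of this header):
 *
 *	static struct mcs_spinlock *example_lock;	-- NULL == unlocked
 *
 *	void example_critical_section(void)
 *	{
 *		struct mcs_spinlock node;	-- on-stack, hence cpu-local
 *
 *		mcs_spin_lock(&example_lock, &node);
 *		-- ...critical section, at most one cpu here at a time...
 *		mcs_spin_unlock(&example_lock, &node);
 *	}
 *
 * The same node must remain valid from lock to unlock; an on-stack or
 * per-cpu node satisfies this.
 */
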
#endif /* __LINUX_MCS_SPINLOCK_H */