/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of the
 * MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair and of having each CPU that
 * is trying to acquire the lock spin on a local variable.
 * It avoids the expensive cache-line bouncing that common test-and-set
 * spin-lock implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
};

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		arch_mutex_cpu_relax();					\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure that all
 * operations in the critical section have completed before the lock is
 * released.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif

/*
 * Note: on many architectures (x86 being an exception), the
 * smp_load_acquire()/smp_store_release() pair used by mcs_unlock and
 * mcs_lock is not sufficient to form a full memory barrier across CPUs.
 * Callers that need a full barrier across an mcs_unlock/mcs_lock pair
 * should use smp_mb__after_unlock_lock() after mcs_lock; see the sketch
 * following mcs_spin_lock() below.
 */

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a pointer to that node to this function in addition to the lock.
 * If the lock has already been acquired, then this will spin on
 * node->locked until the previous lock holder sets node->locked to 1
 * in mcs_spin_unlock().
 *
 * We don't inline mcs_spin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	/* Atomically queue this node at the tail of the wait list. */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * A thread only spins on its own node->locked value while
		 * waiting for the lock. Since this thread acquired the lock
		 * immediately and never spins on its node->locked, that
		 * value will not be used. If a debug mode is needed to
		 * audit lock status, then set node->locked here.
		 */
		return;
	}
	/* Link behind the previous tail so it can pass the lock to us. */
	ACCESS_ONCE(prev->next) = node;

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}
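
/*
 * A minimal sketch (not part of the original API; the helper name is
 * hypothetical) of the full-barrier pairing described in the note above:
 * where an mcs_unlock/mcs_lock pair must provide full memory-barrier
 * semantics across CPUs, follow the acquisition with
 * smp_mb__after_unlock_lock().
 */
static inline
void mcs_spin_lock_mb(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	mcs_spin_lock(lock, node);
	/* Upgrade the acquire to a full barrier on architectures that need it. */
	smp_mb__after_unlock_lock();
}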

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * No successor is queued yet: release the lock by resetting
		 * it to NULL. If the cmpxchg() fails, a new waiter has just
		 * queued itself, so fall through and wait for it to link in.
		 */
		if (likely(cmpxchg(lock, node, NULL) == node))
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}
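
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * header): the caller provides an on-stack queue node and passes the
 * same node to both the lock and the unlock operations.
 */
static struct mcs_spinlock *example_lock;	/* NULL == unlocked */

static inline void example_critical_section(void)
{
	struct mcs_spinlock node;	/* local node; we spin on node.locked */

	mcs_spin_lock(&example_lock, &node);
	/* ... critical section ... */
	mcs_spin_unlock(&example_lock, &node);
}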

/*
 * Cancellable version of the MCS lock above.
 *
 * Intended for adaptive spinning of sleeping locks:
 * mutex_lock()/rwsem_down_{read,write}() etc.
 */

struct optimistic_spin_queue {
	struct optimistic_spin_queue *next, *prev;
	int locked; /* 1 if lock acquired */
};

extern bool osq_lock(struct optimistic_spin_queue **lock);
extern void osq_unlock(struct optimistic_spin_queue **lock);

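/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * unlike mcs_spin_lock(), osq_lock() can fail, returning false when the
 * spin is cancelled (e.g. when the spinner should go to sleep instead),
 * so callers must handle both outcomes.
 */
static inline bool example_optimistic_spin(struct optimistic_spin_queue **lock)
{
	if (!osq_lock(lock))
		return false;	/* cancelled: fall back to the slow path */

	/* ... optimistically spin on the sleeping lock's owner ... */

	osq_unlock(lock);
	return true;
}
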
#endif /* __LINUX_MCS_SPINLOCK_H */