/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/memblock.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */

#define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
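
/*
 * The locked byte normally holds _Q_LOCKED_VAL (1). Writing _Q_SLOW_VAL (3)
 * into it marks the lock as "slow": the unlock fastpath's cmpxchg from
 * _Q_LOCKED_VAL to 0 then fails, which sends __pv_queued_spin_unlock() into
 * the slowpath where the waiter is looked up in the hash table and kicked.
 */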

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown for non-overcommitted guests with this
 * aggressive wait-early mechanism.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK	0xff
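
/*
 * With a mask of 0xff, pv_wait_early() only samples the previous node's
 * state when the low byte of the spin loop counter is zero, i.e. once
 * every 256 iterations of the SPIN_THRESHOLD spin loop in pv_wait_node().
 */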

/*
 * Queue node uses: vcpu_running & vcpu_halted.
 * Queue head uses: vcpu_running & vcpu_hashed.
 */
enum vcpu_state {
	vcpu_running = 0,
	vcpu_halted,		/* Used only in pv_wait_node */
	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
};
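
/*
 * State transitions, as implemented below:
 *
 *   vcpu_running -> vcpu_halted	pv_wait_node(), before calling pv_wait()
 *   vcpu_halted  -> vcpu_running	pv_wait_node(), after waking up
 *   vcpu_halted  -> vcpu_hashed	pv_kick_node(), which also hashes the lock
 *   vcpu_running -> vcpu_hashed	pv_wait_head_or_lock(), before pv_wait()
 *   vcpu_hashed  -> vcpu_running	pv_wait_head_or_lock(), at each retry
 */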

struct pv_node {
	struct mcs_spinlock	mcs;
	int			cpu;
	u8			state;
};

/*
 * Hybrid PV queued/unfair lock
 *
 * By replacing the regular queued_spin_trylock() with the function below,
 * it will be called once when a lock waiter enters the PV slowpath before
 * being queued.
 *
 * The pending bit is set by the queue head vCPU of the MCS wait queue in
 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
 * When that bit becomes visible to the incoming waiters, no lock stealing
 * is allowed. The function will return immediately to make the waiters
 * enter the MCS wait queue. So lock starvation shouldn't happen as long
 * as the queued mode vCPUs are actively running to set the pending bit
 * and hence disabling lock stealing.
 *
 * When the pending bit isn't set, the lock waiters will stay in the unfair
 * mode spinning on the lock unless the MCS wait queue is empty. In this
 * case, the lock waiters will enter the queued mode slowpath trying to
 * become the queue head and set the pending bit.
 *
 * This hybrid PV queued/unfair lock combines the best attributes of a
 * queued lock (no lock starvation) and an unfair lock (good performance
 * on not heavily contended locks).
 */
#define queued_spin_trylock(l)	pv_hybrid_queued_unfair_trylock(l)
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
	/*
	 * Stay in unfair lock mode as long as queued mode waiters are
	 * present in the MCS wait queue but the pending bit isn't set.
	 */
	for (;;) {
		int val = atomic_read(&lock->val);

		if (!(val & _Q_LOCKED_PENDING_MASK) &&
		    (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
			lockevent_inc(pv_lock_stealing);
			return true;
		}
		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
			break;

		cpu_relax();
	}

	return false;
}

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
	WRITE_ONCE(lock->pending, 1);
}

/*
 * The pending bit check in pv_hybrid_queued_unfair_trylock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
 * lock just to be sure that it will get it.
 */
static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	return !READ_ONCE(lock->locked) &&
	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
	atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	for (;;) {
		int old, new;

		if (val & _Q_LOCKED_MASK)
			break;

		/*
		 * Try to clear pending bit & set locked bit
		 */
		old = val;
		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
		val = atomic_cmpxchg_acquire(&lock->val, old, new);

		if (val == old)
			return 1;
	}
	return 0;
}
#endif /* _Q_PENDING_BITS == 8 */
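
/*
 * Note that with 8-bit locked and pending fields, the 16-bit locked_pending
 * halfword lets a single cmpxchg_acquire() move the lock from "pending set,
 * unlocked" (_Q_PENDING_VAL) to "pending clear, locked" (_Q_LOCKED_VAL) in
 * one step, which is what the _Q_PENDING_BITS == 8 variant of
 * trylock_clear_pending() above relies on.
 */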

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 */
struct pv_hash_entry {
	struct qspinlock *lock;
	struct pv_node   *node;
};

#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))
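
/*
 * For example, with 64-bit pointers each pv_hash_entry is 16 bytes, so a
 * 64-byte cacheline holds PV_HE_PER_LINE = 4 entries and a 4k page gives
 * PV_HE_MIN = 256 entries; with 32-bit pointers (8-byte entries) the same
 * page yields 512 entries, matching the minimums quoted above.
 */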

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
	int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

	if (pv_hash_size < PV_HE_MIN)
		pv_hash_size = PV_HE_MIN;

	/*
	 * Allocate space from bootmem which should be page-size aligned
	 * and hence cacheline aligned.
	 */
	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
					       sizeof(struct pv_hash_entry),
					       pv_hash_size, 0,
					       HASH_EARLY | HASH_ZERO,
					       &pv_lock_hash_bits, NULL,
					       pv_hash_size, pv_hash_size);
}

#define for_each_hash_entry(he, offset, hash)						\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
	     offset < (1 << pv_lock_hash_bits);						\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
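
/*
 * The iterator above rounds the hash value down to the first entry of its
 * cacheline group and then probes linearly, wrapping around the table
 * modulo 1 << pv_lock_hash_bits, so a full cacheline simply spills into
 * the next one.
 */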

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	int hopcnt = 0;

	for_each_hash_entry(he, offset, hash) {
		hopcnt++;
		if (!cmpxchg(&he->lock, NULL, lock)) {
			WRITE_ONCE(he->node, node);
			lockevent_pv_hop(hopcnt);
			return &he->lock;
		}
	}
	/*
	 * Hard assume there is a free entry for us.
	 *
	 * This is guaranteed by ensuring every blocked lock only ever consumes
	 * a single entry, and since we only have 4 nesting levels per CPU
	 * and allocated 4*nr_possible_cpus(), this must be so.
	 *
	 * The single entry is guaranteed by having the lock owner unhash
	 * before it releases.
	 */
	BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
	unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
	struct pv_hash_entry *he;
	struct pv_node *node;

	for_each_hash_entry(he, offset, hash) {
		if (READ_ONCE(he->lock) == lock) {
			node = READ_ONCE(he->node);
			WRITE_ONCE(he->lock, NULL);
			return node;
		}
	}
	/*
	 * Hard assume we'll find an entry.
	 *
	 * This guarantees a limited lookup time and is itself guaranteed by
	 * having the lock owner do the unhash -- IFF the unlock sees the
	 * SLOW flag, there MUST be a hash entry.
	 */
	BUG();
}

/*
 * Return true when it is time to check the previous node and that node is
 * not in a running state.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
	if ((loop & PV_PREV_CHECK_MASK) != 0)
		return false;

	return READ_ONCE(prev->state) != vcpu_running;
}

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));

	pn->cpu = smp_processor_id();
	pn->state = vcpu_running;
}

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in the hash table on
 * its behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct pv_node *pp = (struct pv_node *)prev;
	int loop;
	bool wait_early;

	for (;;) {
		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(node->locked))
				return;
			if (pv_wait_early(pp, loop)) {
				wait_early = true;
				break;
			}
			cpu_relax();
		}

		/*
		 * Order pn->state vs pn->locked thusly:
		 *
		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
		 *     MB			      MB
		 * [L] pn->locked		[RmW] pn->state = vcpu_hashed
		 *
		 * Matches the cmpxchg() from pv_kick_node().
		 */
		smp_store_mb(pn->state, vcpu_halted);

		if (!READ_ONCE(node->locked)) {
			lockevent_inc(pv_wait_node);
			lockevent_cond_inc(pv_wait_early, wait_early);
			pv_wait(&pn->state, vcpu_halted);
		}

		/*
		 * If pv_kick_node() changed us to vcpu_hashed, retain that
		 * value so that pv_wait_head_or_lock() knows to not also try
		 * to hash this lock.
		 */
		cmpxchg(&pn->state, vcpu_halted, vcpu_running);

		/*
		 * If the locked flag is still not set after wakeup, it is a
		 * spurious wakeup and the vCPU should wait again. However,
		 * there is a pretty high overhead for CPU halting and kicking.
		 * So it is better to spin for a while in the hope that the
		 * MCS lock will be released soon.
		 */
		lockevent_cond_inc(pv_spurious_wakeup,
				   !READ_ONCE(node->locked));
	}

	/*
	 * By now our node->locked should be 1 and our caller will not actually
	 * spin-wait for it. We do however rely on our caller to do a
	 * load-acquire for us.
	 */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
 * such that they're waiting in pv_wait_head_or_lock(); this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	/*
	 * If the vCPU is indeed halted, advance its state to match that of
	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
	 * observe its next->locked value and advance itself.
	 *
	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
	 *
	 * The write to next->locked in arch_mcs_spin_unlock_contended()
	 * must be ordered before the read of pn->state in the cmpxchg()
	 * below for the code to work correctly. To guarantee full ordering
	 * irrespective of the success or failure of the cmpxchg(),
	 * a relaxed version with explicit barrier is used. The control
	 * dependency will order the reading of pn->state before any
	 * subsequent writes.
	 */
	smp_mb__before_atomic();
	if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
	    != vcpu_halted)
		return;

	/*
	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
	 *
	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
	 * the hash table later on at unlock time, no atomic instruction is
	 * needed.
	 */
	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
	(void)pv_hash(lock, pn);
}

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct qspinlock **lp = NULL;
	int waitcnt = 0;
	int loop;

	/*
	 * If pv_kick_node() already advanced our state, we don't need to
	 * insert ourselves into the hash table anymore.
	 */
	if (READ_ONCE(pn->state) == vcpu_hashed)
		lp = (struct qspinlock **)1;
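
	/*
	 * The (struct qspinlock **)1 value is only a non-NULL sentinel: the
	 * "if (!lp)" test below then skips the pv_hash() call, since
	 * pv_kick_node() has already hashed this lock on our behalf. The
	 * sentinel is never dereferenced.
	 */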

	/*
	 * Tracking # of slowpath locking operations
	 */
	lockevent_inc(lock_slowpath);

	for (;; waitcnt++) {
		/*
		 * Set correct vCPU state to be used by queue node wait-early
		 * mechanism.
		 */
		WRITE_ONCE(pn->state, vcpu_running);

		/*
		 * Set the pending bit in the active lock spinning loop to
		 * disable lock stealing before attempting to acquire the lock.
		 */
		set_pending(lock);
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				goto gotlock;
			cpu_relax();
		}
		clear_pending(lock);

		if (!lp) { /* ONCE */
			lp = pv_hash(lock, pn);

			/*
			 * We must hash before setting _Q_SLOW_VAL, such that
			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
			 * we'll be sure to be able to observe our hash entry.
			 *
			 *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
			 *       MB                           RMB
			 *   [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
			 *
			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
			 */
			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
				/*
				 * The lock was free and now we own the lock.
				 * Change the lock value back to _Q_LOCKED_VAL
				 * and remove the lock from the hash table.
				 */
				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
				WRITE_ONCE(*lp, NULL);
				goto gotlock;
			}
		}
		WRITE_ONCE(pn->state, vcpu_hashed);
		lockevent_inc(pv_wait_head);
		lockevent_cond_inc(pv_wait_again, waitcnt);
		pv_wait(&lock->locked, _Q_SLOW_VAL);

		/*
		 * Because of lock stealing, the queue head vCPU may not be
		 * able to acquire the lock before it has to wait again.
		 */
	}

	/*
	 * The cmpxchg() or xchg() call before coming here provides the
	 * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
	 * here is to indicate to the compiler that the value will always
	 * be nonzero to enable better code optimization.
	 */
gotlock:
	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
	struct pv_node *node;

	if (unlikely(locked != _Q_SLOW_VAL)) {
		WARN(!debug_locks_silent,
		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
		     (unsigned long)lock, atomic_read(&lock->val));
		return;
	}

	/*
	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
	 * so we need a barrier to order the read of the node data in
	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
	 *
	 * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
	 */
	smp_rmb();

	/*
	 * Since the above failed to release, this must be the SLOW path.
	 * Therefore start by looking up the blocked node and unhashing it.
	 */
	node = pv_unhash(lock);

	/*
	 * Now that we have a reference to the (likely) blocked pv_node,
	 * release the lock.
	 */
	smp_store_release(&lock->locked, 0);

	/*
	 * At this point the memory pointed at by lock can be freed/reused,
	 * however we can still use the pv_node to kick the CPU.
	 * The other vCPU may not really be halted, but kicking an active
	 * vCPU is harmless other than the additional latency in completing
	 * the unlock.
	 */
	lockevent_inc(pv_kick_unlock);
	pv_kick(node->cpu);
}

/*
 * Include the architecture specific callee-save thunk of the
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
 * function close to each other sharing consecutive instruction cachelines.
 * Alternatively, an architecture specific version of __pv_queued_spin_unlock()
 * can be defined.
 */
#include <asm/qspinlock_paravirt.h>

#ifndef __pv_queued_spin_unlock
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 locked;

	/*
	 * We must not unlock if SLOW, because in that case we must first
	 * unhash. Otherwise it would be possible to have multiple @lock
	 * entries, which would be BAD.
	 */
	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
	if (likely(locked == _Q_LOCKED_VAL))
		return;

	__pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */