/*
 * Queued read/write locks
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/qrwlock.h>

/*
 * This internal data structure is used for optimizing access to some of
 * the subfields within the atomic_t cnts.
 */
struct __qrwlock {
	union {
		atomic_t cnts;
		struct {
#ifdef __LITTLE_ENDIAN
			u8 wmode;	/* Writer mode   */
			u8 rcnts[3];	/* Reader counts */
#else
			u8 rcnts[3];	/* Reader counts */
			u8 wmode;	/* Writer mode   */
#endif
		};
	};
	arch_spinlock_t	lock;
};
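/*
 * Illustrative sketch only (not used by the code below): assuming the
 * generic asm-generic/qrwlock.h definitions (_QW_WAITING = 0x01,
 * _QW_LOCKED = 0xff, _QW_WMASK = 0xff, _QR_SHIFT = 8 and
 * _QR_BIAS = 1 << _QR_SHIFT), the 32-bit cnts word overlays the struct
 * above roughly as:
 *
 *	+-----------------------------+--------+
 *	| reader count (bits 31..8)   | wmode  |
 *	+-----------------------------+--------+
 *
 * Each active reader contributes _QR_BIAS to cnts, while the low byte
 * (wmode) holds the writer state: 0 = no writer, _QW_WAITING = a writer
 * is queued, _QW_LOCKED = a writer holds the lock. A hypothetical decode
 * (for illustration only) would be:
 *
 *	u32 readers = cnts >> _QR_SHIFT;	readers currently holding or
 *						speculatively taking the lock
 *	u32 wmode   = cnts & _QW_WMASK;		writer state byte
 */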

/**
 * rspin_until_writer_unlock - inc reader count & spin until writer is gone
 * @lock : Pointer to queue rwlock structure
 * @cnts : Current queue rwlock lock value
 *
 * In interrupt context or at the head of the queue, the reader will just
 * increment the reader count & wait until the writer releases the lock.
 */
static __always_inline void
rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
		cpu_relax_lowlatency();
		cnts = atomic_read_acquire(&lock->cnts);
	}
}

/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * @cnts: Current qrwlock lock value
 */
void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet).
		 * The rspin_until_writer_unlock() function returns immediately
		 * in this case. Otherwise, they will spin (with ACQUIRE
		 * semantics) until the lock is available without waiting in
		 * the queue.
		 */
		rspin_until_writer_unlock(lock, cnts);
		return;
	}
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->lock);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
	rspin_until_writer_unlock(lock, cnts);

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
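
/*
 * For context, the reader fast path that funnels into the slowpath above
 * lives in asm-generic/qrwlock.h; it looks roughly like the following
 * (a sketch, not a verbatim copy of that header):
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		u32 cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;
 *		queued_read_lock_slowpath(lock, cnts);
 *	}
 *
 * This is why the slowpath subtracts _QR_BIAS again for readers that are
 * not in interrupt context: the speculative reader count added by the fast
 * path must be dropped before the reader queues up behind lock->lock.
 */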

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	u32 cnts;

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/*
	 * Set the waiting flag to notify readers that a writer is pending,
	 * or wait for a previous writer to go away.
	 */
	for (;;) {
		struct __qrwlock *l = (struct __qrwlock *)lock;

		if (!READ_ONCE(l->wmode) &&
		    (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
			break;

		cpu_relax_lowlatency();
	}

	/* When no more readers, set the locked flag */
	for (;;) {
		cnts = atomic_read(&lock->cnts);
		if ((cnts == _QW_WAITING) &&
		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
					    _QW_LOCKED) == _QW_WAITING))
			break;

		cpu_relax_lowlatency();
	}
unlock:
	arch_spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
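
/*
 * Likewise, the write-side fast path in asm-generic/qrwlock.h is roughly
 * the following (a sketch, not a verbatim copy of that header):
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 *			return;
 *		queued_write_lock_slowpath(lock);
 *	}
 *
 * The slowpath above therefore only runs once the uncontended cmpxchg has
 * failed: it queues on lock->lock, advertises itself to readers via the
 * _QW_WAITING byte, and only then upgrades cnts to _QW_LOCKED once the
 * reader count has drained to zero.
 */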