// SPDX-License-Identifier: GPL-2.0-only

/*
 * RT-specific reader/writer semaphores and reader/writer locks
 *
 * down_write/write_lock()
 *  1) Lock rtmutex
 *  2) Remove the reader BIAS to force readers into the slow path
 *  3) Wait until all readers have left the critical section
 *  4) Mark it write locked
 *
 * up_write/write_unlock()
 *  1) Remove the write locked marker
 *  2) Set the reader BIAS, so readers can use the fast path again
 *  3) Unlock rtmutex, to release blocked readers
 *
 * down_read/read_lock()
 *  1) Try fast path acquisition (reader BIAS is set)
 *  2) Take rtmutex::wait_lock, which protects the writelocked flag
 *  3) If !writelocked, acquire it for read
 *  4) If writelocked, block on rtmutex
 *  5) unlock rtmutex, goto 1)
 *
 * up_read/read_unlock()
 *  1) Try fast path release (reader count != 1)
 *  2) Wake the writer waiting in down_write()/write_lock() #3
 *
 * down_read/read_lock()#3 has the consequence that rw semaphores and rw
 * locks on RT are not writer fair, but writers, which should be avoided in
 * RT tasks (think mmap_sem), are subject to the rtmutex priority/DL
 * inheritance mechanism.
 *
 * It's possible to make the rw primitives writer fair by keeping a list of
 * active readers. A blocked writer would force all newly incoming readers
 * to block on the rtmutex, but the rtmutex would have to be proxy locked
 * for one reader after the other. We can't use multi-reader inheritance
 * because there is no way to support that with SCHED_DEADLINE.
 * Implementing the one-by-one reader boosting/handover mechanism is a
 * major surgery for a very dubious value.
 *
 * The risk of writer starvation is there, but the pathological use cases
 * which trigger it are not necessarily the typical RT workloads.
 *
 * Fast-path orderings:
 * The lock/unlock of readers can run in fast paths: lock and unlock are only
 * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
 * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
 * and _release() (or stronger).
 *
 * Common code shared between RT rw_semaphore and rwlock
 */
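
/*
 * A minimal caller-side sketch of the scheme above, assuming the regular
 * rw_semaphore API from <linux/rwsem.h>, which is backed by this code on
 * PREEMPT_RT. example_sem, example_read() and example_write() are
 * hypothetical and only annotate which of the numbered steps above each
 * call ends up in.
 */
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static void example_read(void)
{
        /* down_read() #1: atomic fast path while the reader BIAS is set */
        down_read(&example_sem);

        /* read-side critical section */

        /* up_read() #1/#2: fast path, or wake the writer blocked in #3 */
        up_read(&example_sem);
}

static void example_write(void)
{
        /* down_write() #1-#4: lock rtmutex, drop BIAS, wait for readers */
        down_write(&example_sem);

        /* write-side critical section */

        /* up_write() #1-#3: clear the marker, restore BIAS, unlock rtmutex */
        up_write(&example_sem);
}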

static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
        int r;

        /*
         * Increment the reader count if rwb->readers < 0, i.e. READER_BIAS
         * is set.
         */
        for (r = atomic_read(&rwb->readers); r < 0;) {
                if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
                        return 1;
        }
        return 0;
}
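
/*
 * Stand-alone sketch of the rwb->readers encoding behind the trylock above,
 * assuming the READER_BIAS/WRITER_BIAS values from rwbase_rt.h (1U << 31
 * and 1U << 30). This is a plain userspace illustration, not kernel code.
 */
#include <stdio.h>

#define EX_READER_BIAS  (1U << 31)
#define EX_WRITER_BIAS  (1U << 30)

int main(void)
{
        /* Unlocked: the BIAS is set, so the count is negative */
        int readers = (int)EX_READER_BIAS;

        printf("unlocked       %d fast path: %d\n", readers, readers < 0);

        /* Two readers took the fast path: the count is still negative */
        readers += 2;
        printf("two readers    %d fast path: %d\n", readers, readers < 0);

        /* A writer removed the BIAS: only the active readers stay counted */
        readers -= EX_READER_BIAS;
        printf("writer waiting %d fast path: %d\n", readers, readers < 0);

        /* Both readers left ... */
        readers -= 2;
        /* ... and the writer marks the lock write locked */
        readers = EX_WRITER_BIAS;
        printf("write locked   %d fast path: %d\n", readers, readers < 0);

        return 0;
}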
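/*
 * Slow path of the read lock, entered once the fast path above failed
 * because a writer removed the reader BIAS. Serialized against the writer
 * by rtmutex::wait_lock.
 */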
static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
                                      unsigned int state)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        int ret;

        raw_spin_lock_irq(&rtm->wait_lock);
        /*
         * Allow readers, as long as the writer has not completely
         * acquired the semaphore for write.
         */
        if (atomic_read(&rwb->readers) != WRITER_BIAS) {
                atomic_inc(&rwb->readers);
                raw_spin_unlock_irq(&rtm->wait_lock);
                return 0;
        }

        /*
         * Call into the slow lock path with the rtmutex->wait_lock
         * held, so this can't result in the following race:
         *
         * Reader1                 Reader2                 Writer
         *                         down_read()
         *                                                 down_write()
         *                                                 rtmutex_lock(m)
         *                                                 wait()
         * down_read()
         * unlock(m->wait_lock)
         *                         up_read()
         *                         wake(Writer)
         *                                                 lock(m->wait_lock)
         *                                                 sem->writelocked=true
         *                                                 unlock(m->wait_lock)
         *
         *                                                 up_write()
         *                                                 sem->writelocked=false
         *                                                 rtmutex_unlock(m)
         *                         down_read()
         *                                                 down_write()
         *                                                 rtmutex_lock(m)
         *                                                 wait()
         * rtmutex_lock(m)
         *
         * That would put Reader1 behind the writer waiting on
         * Reader2 to call up_read(), which might be unbounded.
         */

        /*
         * For rwlocks this returns 0 unconditionally, so the below
         * !ret conditionals are optimized out.
         */
        ret = rwbase_rtmutex_slowlock_locked(rtm, state);

        /*
         * On success the rtmutex is held, so there can't be a writer
         * active. Increment the reader count and immediately drop the
         * rtmutex again.
         *
         * rtmutex->wait_lock has to be unlocked in any case of course.
         */
        if (!ret)
                atomic_inc(&rwb->readers);
        raw_spin_unlock_irq(&rtm->wait_lock);
        if (!ret)
                rwbase_rtmutex_unlock(rtm);
        return ret;
}

static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
                                            unsigned int state)
{
        if (rwbase_read_trylock(rwb))
                return 0;

        return __rwbase_read_lock(rwb, state);
}

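/*
 * Slow path of the read unlock: the last reader is leaving while a writer
 * waits for the critical section to drain, so wake that writer.
 */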
static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
                                         unsigned int state)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        struct task_struct *owner;
        DEFINE_RT_WAKE_Q(wqh);

        raw_spin_lock_irq(&rtm->wait_lock);
        /*
         * Wake the writer, i.e. the rtmutex owner. It might release the
         * rtmutex concurrently in the fast path (due to a signal), but to
         * clean up rwb->readers it needs to acquire rtm->wait_lock. The
         * worst case that can happen is a spurious wakeup.
         */
        owner = rt_mutex_owner(rtm);
        if (owner)
                rt_mutex_wake_q_add_task(&wqh, owner, state);

        /* Pairs with the preempt_enable in rt_mutex_wake_up_q() */
        preempt_disable();
        raw_spin_unlock_irq(&rtm->wait_lock);
        rt_mutex_wake_up_q(&wqh);
}

static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
                                               unsigned int state)
{
        /*
         * rwb->readers can only hit 0 when a writer is waiting for the
         * active readers to leave the critical section.
         *
         * dec_and_test() is fully ordered, provides RELEASE.
         */
        if (unlikely(atomic_dec_and_test(&rwb->readers)))
                __rwbase_read_unlock(rwb, state);
}

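/*
 * Put back READER_BIAS minus @bias and release the rtmutex. Called with
 * rtmutex::wait_lock held; drops it.
 */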
static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
                                         unsigned long flags)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;

        /*
         * _release() is needed in case a reader is in the fast path, pairing
         * with atomic_try_cmpxchg_acquire() in rwbase_read_trylock().
         */
        (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        rwbase_rtmutex_unlock(rtm);
}

static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        __rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}

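/* Downgrade a held write lock to a read lock without releasing it. */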
static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        /* Release it and account current as reader */
        __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}

static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
        /* Can do without CAS because we're serialized by wait_lock. */
        lockdep_assert_held(&rwb->rtmutex.wait_lock);

        /*
         * _acquire is needed in case a reader is in the fast path, pairing
         * with rwbase_read_unlock(); provides ACQUIRE.
         */
        if (!atomic_read_acquire(&rwb->readers)) {
                atomic_set(&rwb->readers, WRITER_BIAS);
                return 1;
        }

        return 0;
}

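/*
 * Write lock: acquire the rtmutex, remove the reader BIAS to force readers
 * into the slow path, then wait until all active readers have left the
 * critical section.
 */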
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
                                     unsigned int state)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        /* Take the rtmutex as a first step */
        if (rwbase_rtmutex_lock_state(rtm, state))
                return -EINTR;

        /* Force readers into slow path */
        atomic_sub(READER_BIAS, &rwb->readers);

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        if (__rwbase_write_trylock(rwb))
                goto out_unlock;

        rwbase_set_and_save_current_state(state);
        for (;;) {
                /* Optimized out for rwlocks */
                if (rwbase_signal_pending_state(state, current)) {
                        rwbase_restore_current_state();
                        __rwbase_write_unlock(rwb, 0, flags);
                        return -EINTR;
                }

                if (__rwbase_write_trylock(rwb))
                        break;

                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
                rwbase_schedule();
                raw_spin_lock_irqsave(&rtm->wait_lock, flags);

                set_current_state(state);
        }
        rwbase_restore_current_state();

out_unlock:
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        return 0;
}

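/*
 * Opportunistic write lock: succeeds only if the rtmutex can be taken
 * without blocking and no readers are active.
 */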
static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        if (!rwbase_rtmutex_trylock(rtm))
                return 0;

        atomic_sub(READER_BIAS, &rwb->readers);

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        if (__rwbase_write_trylock(rwb)) {
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
                return 1;
        }
        __rwbase_write_unlock(rwb, 0, flags);
        return 0;
}