// SPDX-License-Identifier: GPL-2.0
/* kernel/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture by
 * Waiman Long <longman@redhat.com> and
 * Peter Zijlstra <peterz@infradead.org>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>

#ifndef CONFIG_PREEMPT_RT
#include "lock_events.h"

/*
 * The least significant 2 bits of the owner value have the following
 * meanings when set.
 *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
 *  - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
 *
 * When the rwsem is reader-owned and a spinning writer has timed out,
 * the nonspinnable bit will be set to disable optimistic spinning.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field will largely be left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 *
 * Fast-path reader optimistic lock stealing is supported when the rwsem
 * was previously owned by a writer and the following conditions are met:
 *  - rwsem is not currently writer owned
 *  - the handoff bit isn't set.
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_NONSPINNABLE	(1UL << 1)
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
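
/*
 * Illustrative sketch (hypothetical helpers, not used by the code below):
 * how an owner word is composed and decoded with the flag bits above. A
 * reader-owned rwsem stores the last locker's task_struct pointer with the
 * RWSEM_READER_OWNED bit set; masking off RWSEM_OWNER_FLAGS_MASK recovers
 * the pointer.
 */
static inline unsigned long rwsem_example_compose_owner(struct task_struct *tsk)
{
	/* e.g. tsk == 0xffff888012345600 yields 0xffff888012345601 */
	return (unsigned long)tsk | RWSEM_READER_OWNED;
}

static inline struct task_struct *rwsem_example_decode_owner(unsigned long val)
{
	/* strip flag bits 0-1 to recover the task_struct pointer */
	return (struct task_struct *)(val & ~RWSEM_OWNER_FLAGS_MASK);
}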

#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(unsigned long) sem->magic,			\
		atomic_long_read(&(sem)->owner), (long)current,	\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif

/*
 * On 64-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-62 - 55-bit reader count
 * Bit  63   - read fail bit
 *
 * On 32-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-30 - 23-bit reader count
 * Bit  31   - read fail bit
 *
 * It is not likely that the most significant bit (read fail bit) will ever
 * be set. This guard bit is still checked anyway in the down_read() fastpath
 * just in case we need to use up more of the reader bits for other purposes
 * in the future.
 *
 * atomic_long_fetch_add() is used to obtain the reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain the writer lock.
 *
 * There are three places where the lock handoff bit may be set or cleared.
 * 1) rwsem_mark_wake() for readers.
 * 2) rwsem_try_write_lock() for writers.
 * 3) Error path of rwsem_down_write_slowpath().
 *
 * For all the above cases, wait_lock will be held. A writer must also
 * be the first one in the wait_list to be eligible for setting the handoff
 * bit. So concurrent setting/clearing of the handoff bit is not possible.
 */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_FLAG_HANDOFF	(1UL << 2)
#define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))

#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
				 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
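
/*
 * Worked example (illustrative only): with the definitions above, a count
 * of 0x306 decodes as RWSEM_FLAG_WAITERS | RWSEM_FLAG_HANDOFF plus a
 * reader count of 3 (0x300 >> RWSEM_READER_SHIFT): three readers hold the
 * lock while waiters are queued and a handoff is pending. A hypothetical
 * helper expressing the decode:
 */
static inline long rwsem_example_reader_count(long count)
{
	/* extract bits 8 and up, i.e. the number of active readers */
	return (count & RWSEM_READER_MASK) >> RWSEM_READER_SHIFT;
}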

/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Reads from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, (long)current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, 0);
}

/*
 * Test the flags in the owner field.
 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
{
	return atomic_long_read(&sem->owner) & flags;
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously; it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 *
 * The reader non-spinnable bit is preserved.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
		(atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE);

	atomic_long_set(&sem->owner, val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}

/*
 * Return true if the rwsem is owned by a reader.
 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
	/*
	 * Check the count to see if it is write-locked.
	 */
	long count = atomic_long_read(&sem->count);

	if (count & RWSEM_WRITER_MASK)
		return false;
#endif
	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that a task
 * pointer left in the owner field of a reader-owned rwsem is the real
 * owner or one of the real owners. The only exception is when the unlock
 * is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif

/*
 * Set the RWSEM_NONSPINNABLE bit if the RWSEM_READER_OWNED flag
 * remains set. Otherwise, the operation will be aborted.
 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	do {
		if (!(owner & RWSEM_READER_OWNED))
			break;
		if (owner & RWSEM_NONSPINNABLE)
			break;
	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
					  owner | RWSEM_NONSPINNABLE));
}

static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
{
	*cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);

	if (WARN_ON_ONCE(*cntp < 0))
		rwsem_set_nonspinnable(sem);

	if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
		rwsem_set_reader_owned(sem);
		return true;
	}

	return false;
}

static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
{
	long tmp = RWSEM_UNLOCKED_VALUE;

	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
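
/*
 * For instance (illustrative note, not an additional code path): an
 * uncontended down_write() succeeds via rwsem_write_trylock() above. The
 * cmpxchg moves count from RWSEM_UNLOCKED_VALUE (0) to RWSEM_WRITER_LOCKED
 * (0x1) with acquire semantics and the owner field is set to the current
 * task. Any nonzero count - readers, waiters or handoff - fails the
 * cmpxchg and sends the writer into the slowpath.
 */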

/*
 * Return just the real task structure pointer of the owner
 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
{
	return (struct task_struct *)
		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Return the real task structure pointer of the owner and the embedded
 * flags in the owner. pflags must be non-NULL.
 */
static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	*pflags = owner & RWSEM_OWNER_FLAGS_MASK;
	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED bit isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
 */
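
/*
 * A minimal sketch (hypothetical helper, not used below) expressing the
 * three reader-owned conditions above as code; the real checks are
 * open-coded at their use sites, with is_rwsem_reader_owned() covering
 * the owner-flag part.
 */
static inline bool rwsem_example_reader_owned(long count, unsigned long owner)
{
	return !(count & RWSEM_WRITER_LOCKED) &&	/* (1) no writer */
	       (count & RWSEM_READER_MASK) &&		/* (2) reader bits */
	       (owner & RWSEM_READER_OWNED);		/* (3) owner flag */
}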

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
#ifdef CONFIG_DEBUG_RWSEMS
	sem->magic = sem;
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
}
EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
	unsigned long timeout;
};
#define rwsem_first_waiter(sem) \
	list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

enum writer_wait_state {
	WRITER_NOT_FIRST,	/* Writer is not first in wait list */
	WRITER_FIRST,		/* Writer is first in wait list */
	WRITER_HANDOFF		/* Writer is first & handoff needed */
};

/*
 * The typical HZ value is either 250 or 1000. So set the minimum waiting
 * time to at least 4ms or 1 jiffy (if it is higher than 4ms) in the wait
 * queue before initiating the handoff protocol.
 */
#define RWSEM_WAIT_TIMEOUT	DIV_ROUND_UP(HZ, 250)
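
/*
 * For example, DIV_ROUND_UP(HZ, 250) evaluates to 1 jiffy (4ms) with
 * HZ=250 and to 4 jiffies (also 4ms) with HZ=1000; with HZ=100 it rounds
 * up to 1 jiffy (10ms), the "1 jiffy if it is higher than 4ms" case.
 */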

/*
 * Magic number to batch-wakeup waiting readers, even when writers are
 * also present in the queue. This both limits the amount of work the
 * waking thread must do and also prevents any potential counter overflow,
 * however unlikely.
 */
#define MAX_READERS_WAKEUP	0x100
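
/*
 * Arithmetic check (illustrative): one wakeup batch adds at most
 * MAX_READERS_WAKEUP * RWSEM_READER_BIAS = 0x100 * 0x100 = 0x10000 to the
 * count, far below even the 23-bit reader field of a 32-bit kernel, which
 * is what keeps the overflow concern theoretical.
 */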

/*
 * Handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   have been set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
			    enum rwsem_wake_type wake_type,
			    struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	lockdep_assert_held(&sem->wait_lock);

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = rwsem_first_waiter(sem);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by
			 * the caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}

	/*
	 * No reader wakeup if there are too many of them already.
	 */
	if (unlikely(atomic_long_read(&sem->count) < 0))
		return;

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		struct task_struct *owner;

		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
			/*
			 * When we've been waiting "too" long (for writers
			 * to give up the lock), request a HANDOFF to
			 * force the issue.
			 */
			if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
			    time_after(jiffies, waiter->timeout)) {
				adjustment -= RWSEM_FLAG_HANDOFF;
				lockevent_inc(rwsem_rlock_handoff);
			}

			atomic_long_add(-adjustment, &sem->count);
			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 * The reader nonspinnable bit seen at slowpath entry of
		 * the reader is copied over.
		 */
		owner = waiter->task;
		__rwsem_set_reader_owned(sem, owner);
	}

	/*
	 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
	 * queue. We know that woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 *
	 * This is an adaptation of the phase-fair R/W locks where, in the
	 * reader phase (first waiter is a reader), all readers are eligible
	 * to acquire the lock at the same time irrespective of their order
	 * in the queue. The writers acquire the lock according to their
	 * order in the queue.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. This
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
	INIT_LIST_HEAD(&wlist);
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			continue;

		woken++;
		list_move_tail(&waiter->list, &wlist);

		/*
		 * Limit # of readers that can be woken up per wakeup call.
		 */
		if (unlikely(woken >= MAX_READERS_WAKEUP))
			break;
	}

	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_FLAG_WAITERS;
	}

	/*
	 * When we've woken a reader, we no longer need to force writers
	 * to give up the lock and we can clear HANDOFF.
	 */
	if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
		adjustment -= RWSEM_FLAG_HANDOFF;

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);

	/* 2nd pass */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_slowpath() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}
}

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 *
 * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
 * bit is set or the lock is acquired with handoff bit cleared.
 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
					enum writer_wait_state wstate)
{
	long count, new;

	lockdep_assert_held(&sem->wait_lock);

	count = atomic_long_read(&sem->count);
	do {
		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);

		if (has_handoff && wstate == WRITER_NOT_FIRST)
			return false;

		new = count;

		if (count & RWSEM_LOCK_MASK) {
			if (has_handoff || (wstate != WRITER_HANDOFF))
				return false;

			new |= RWSEM_FLAG_HANDOFF;
		} else {
			new |= RWSEM_WRITER_LOCKED;
			new &= ~RWSEM_FLAG_HANDOFF;

			if (list_is_singular(&sem->wait_list))
				new &= ~RWSEM_FLAG_WAITERS;
		}
	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));

	/*
	 * We have either acquired the lock with handoff bit cleared or
	 * set the handoff bit.
	 */
	if (new & RWSEM_FLAG_HANDOFF)
		return false;

	rwsem_set_owner(sem);
	return true;
}

/*
 * The rwsem_spin_on_owner() function returns the following 4 values
 * depending on the lock owner state.
 *   OWNER_NULL  : owner is currently NULL
 *   OWNER_WRITER: when owner changes and is a writer
 *   OWNER_READER: when owner changes and the new owner may be a reader.
 *   OWNER_NONSPINNABLE:
 *		   when optimistic spinning has to stop because either the
 *		   owner stops running, is unknown, or its timeslice has
 *		   been used up.
 */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count | RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_lock);
			return true;
		}
	}
	return false;
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Due to the lock holder preemption issue, we skip spinning if the
	 * task is not on a CPU or its CPU is preempted.
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	unsigned long flags;
	bool ret = true;

	if (need_resched()) {
		lockevent_inc(rwsem_opt_fail);
		return false;
	}

	preempt_disable();
	/*
	 * Disabling preemption is equivalent to an RCU read-side critical
	 * section, thus the task_struct structure won't go away.
	 */
	owner = rwsem_owner_flags(sem, &flags);
	/*
	 * Don't check the read-owner as the entry may be stale.
	 */
	if ((flags & RWSEM_NONSPINNABLE) ||
	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
		ret = false;
	preempt_enable();

	lockevent_cond_inc(rwsem_opt_fail, !ret);
	return ret;
}

#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)

static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags)
{
	if (flags & RWSEM_NONSPINNABLE)
		return OWNER_NONSPINNABLE;

	if (flags & RWSEM_READER_OWNED)
		return OWNER_READER;

	return owner ? OWNER_WRITER : OWNER_NULL;
}

static noinline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *new, *owner;
	unsigned long flags, new_flags;
	enum owner_state state;

	lockdep_assert_preemption_disabled();

	owner = rwsem_owner_flags(sem, &flags);
	state = rwsem_owner_state(owner, flags);
	if (state != OWNER_WRITER)
		return state;

	for (;;) {
		/*
		 * When a waiting writer sets the handoff flag, it may spin
		 * on the owner as well. Once that writer acquires the lock,
		 * we can spin on it. So we don't need to quit even when the
		 * handoff bit is set.
		 */
		new = rwsem_owner_flags(sem, &new_flags);
		if ((new != owner) || (new_flags != flags)) {
			state = rwsem_owner_state(new, new_flags);
			break;
		}

		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking sem->owner still matches owner. If that fails,
		 * owner might point to free()d memory. If it still matches,
		 * our spinning context already disabled preemption, which is
		 * equivalent to an RCU read-side critical section and
		 * ensures the memory stays valid.
		 */
		barrier();

		if (need_resched() || !owner_on_cpu(owner)) {
			state = OWNER_NONSPINNABLE;
			break;
		}

		cpu_relax();
	}

	return state;
}

/*
 * Calculate reader-owned rwsem spinning threshold for writer
 *
 * The more readers own the rwsem, the longer it will take for them to
 * wind down and free the rwsem. So the empirical formula used to
 * determine the actual spinning time limit here is:
 *
 *   Spinning threshold = (10 + nr_readers/2)us
 *
 * The limit is capped to a maximum of 25us (30 readers). This is just
 * a heuristic and is subject to change in the future.
 */
static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);
	int readers = count >> RWSEM_READER_SHIFT;
	u64 delta;

	if (readers > 30)
		readers = 30;
	delta = (20 + readers) * NSEC_PER_USEC / 2;

	return sched_clock() + delta;
}
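
/*
 * Sanity check of the formula above (illustrative): delta is in
 * nanoseconds, so readers = 0 gives (20 + 0) * 1000 / 2 = 10us, and the
 * cap of readers = 30 gives (20 + 30) * 1000 / 2 = 25us, matching the
 * (10 + nr_readers/2)us heuristic and its 25us ceiling.
 */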

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;
	int prev_owner_state = OWNER_NULL;
	int loop = 0;
	u64 rspin_threshold = 0;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock and spinning time has exceeded limit.
	 */
	for (;;) {
		enum owner_state owner_state;

		owner_state = rwsem_spin_on_owner(sem);
		if (!(owner_state & OWNER_SPINNABLE))
			break;

		/*
		 * Try to acquire the lock
		 */
		taken = rwsem_try_write_lock_unqueued(sem);

		if (taken)
			break;

		/*
		 * Time-based reader-owned rwsem optimistic spinning
		 */
		if (owner_state == OWNER_READER) {
Waiman Long7d43f1c2019-05-20 16:59:13 -0400779 /*
780 * Re-initialize rspin_threshold every time when
781 * the owner state changes from non-reader to reader.
782 * This allows a writer to steal the lock in between
783 * 2 reader phases and have the threshold reset at
784 * the beginning of the 2nd reader phase.
785 */
			if (prev_owner_state != OWNER_READER) {
				if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
					break;
				rspin_threshold = rwsem_rspin_threshold(sem);
				loop = 0;
			}

			/*
			 * Check time threshold once every 16 iterations to
			 * avoid calling sched_clock() too frequently so
			 * as to reduce the average latency between the times
			 * when the lock becomes free and when the spinner
			 * is ready to do a trylock.
			 */
			else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
				rwsem_set_nonspinnable(sem);
				lockevent_inc(rwsem_opt_nospin);
				break;
			}
		}

		/*
		 * An RT task cannot do optimistic spinning if it cannot
		 * be sure the lock holder is running or live-lock may
		 * happen if the current task and the lock holder happen
		 * to run on the same CPU. However, aborting optimistic
		 * spinning while a NULL owner is detected may miss some
		 * opportunity where spinning can continue without causing
		 * a problem.
		 *
		 * There are 2 possible cases where an RT task may be able
		 * to continue spinning.
		 *
		 * 1) The lock owner is in the process of releasing the
		 *    lock, sem->owner is cleared but the lock has not
		 *    been released yet.
		 * 2) The lock was free and owner cleared, but another
		 *    task just comes in and acquires the lock before
		 *    we try to get it. The new owner may be a spinnable
		 *    writer.
		 *
		 * To take advantage of the two scenarios listed above, the RT
		 * task is made to retry one more time to see if it can
		 * acquire the lock or continue spinning on the new owning
		 * writer. Of course, if the time lag is long enough or the
		 * new owner is not a writer or spinnable, the RT task will
		 * quit spinning.
		 *
		 * If the owner is a writer, the need_resched() check is
		 * done inside rwsem_spin_on_owner(). If the owner is not
		 * a writer, the need_resched() check needs to be done here.
		 */
		if (owner_state != OWNER_WRITER) {
			if (need_resched())
				break;
			if (rt_task(current) &&
			   (prev_owner_state != OWNER_WRITER))
				break;
		}
		prev_owner_state = owner_state;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	lockevent_cond_inc(rwsem_opt_fail, !taken);
	return taken;
}

/*
 * Clear the owner's RWSEM_NONSPINNABLE bit if it is set. This should
 * only be called when the reader count reaches 0.
 */
static inline void clear_nonspinnable(struct rw_semaphore *sem)
{
	if (rwsem_test_oflags(sem, RWSEM_NONSPINNABLE))
		atomic_long_andnot(RWSEM_NONSPINNABLE, &sem->owner);
}

#else
static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline void clear_nonspinnable(struct rw_semaphore *sem) { }

static inline enum owner_state
rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	return OWNER_NONSPINNABLE;
}
#endif

/*
 * Wait for the read lock to be granted
 */
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int state)
{
	long adjustment = -RWSEM_READER_BIAS;
	long rcnt = (count >> RWSEM_READER_SHIFT);
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);
	bool wake = false;

	/*
	 * To prevent a constant stream of readers from starving a sleeping
	 * waiter, don't attempt optimistic lock stealing if the lock is
	 * currently owned by readers.
	 */
	if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
	    (rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
		goto queue;

	/*
	 * Reader optimistic lock stealing.
	 */
	if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF))) {
		rwsem_set_reader_owned(sem);
		lockevent_inc(rwsem_rlock_steal);

		/*
		 * Wake up other readers in the wait queue if it is
		 * the first reader.
		 */
		if ((rcnt == 1) && (count & RWSEM_FLAG_WAITERS)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (!list_empty(&sem->wait_list))
				rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
						&wake_q);
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
		}
		return sem;
	}

queue:
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer or has the handoff bit set, this reader can
		 * exit the slowpath and return immediately as its
		 * RWSEM_READER_BIAS has already been set in the count.
		 */
		if (!(atomic_long_read(&sem->count) &
		     (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
			/* Provide lock ACQUIRE */
			smp_acquire__after_ctrl_dep();
			raw_spin_unlock_irq(&sem->wait_lock);
			rwsem_set_reader_owned(sem);
			lockevent_inc(rwsem_rlock_fast);
			return sem;
		}
		adjustment += RWSEM_FLAG_WAITERS;
	}
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (!(count & RWSEM_LOCK_MASK)) {
		clear_nonspinnable(sem);
		wake = true;
	}
	if (wake || (!(count & RWSEM_WRITER_MASK) &&
		    (adjustment & RWSEM_FLAG_WAITERS)))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	for (;;) {
		set_current_state(state);
		if (!smp_load_acquire(&waiter.task)) {
			/* Matches rwsem_mark_wake()'s smp_store_release(). */
			break;
		}
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
			break;
		}
		schedule();
		lockevent_inc(rwsem_sleep_reader);
	}

	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock);
	return sem;

out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list)) {
		atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
				   &sem->count);
	}
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	lockevent_inc(rwsem_rlock_fail);
	return ERR_PTR(-EINTR);
}

/*
 * Wait until we successfully acquire the write lock
 */
static struct rw_semaphore *
rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
{
	long count;
	enum writer_wait_state wstate;
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_can_spin_on_owner(sem) && rwsem_optimistic_spin(sem)) {
		/* rwsem_optimistic_spin() implies ACQUIRE on success */
		return sem;
	}

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock */
	if (wstate == WRITER_NOT_FIRST) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and:
		 *  1) there are no active locks, wake the front
		 *     queued process(es) as the handoff bit might be set.
		 *  2) there are no active writers and some readers, the lock
		 *     must be read owned; so we try to wake any read lock
		 *     waiters that were queued ahead of us.
		 */
		if (count & RWSEM_WRITER_MASK)
			goto wait;

		rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
					? RWSEM_WAKE_READERS
					: RWSEM_WAKE_ANY, &wake_q);

		if (!wake_q_empty(&wake_q)) {
			/*
			 * We want to minimize wait_lock hold time especially
			 * when a large number of readers are to be woken up.
			 */
			raw_spin_unlock_irq(&sem->wait_lock);
			wake_up_q(&wake_q);
			wake_q_init(&wake_q);	/* Used again, reinit */
			raw_spin_lock_irq(&sem->wait_lock);
		}
	} else {
		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
	}

wait:
	/* wait until we successfully acquire the lock */
	set_current_state(state);
	for (;;) {
		if (rwsem_try_write_lock(sem, wstate)) {
			/* rwsem_try_write_lock() implies ACQUIRE on success */
			break;
		}

		raw_spin_unlock_irq(&sem->wait_lock);

		/*
		 * After setting the handoff bit and failing to acquire
		 * the lock, attempt to spin on owner to accelerate lock
		 * transfer. If the previous owner is an on-cpu writer and it
		 * has just released the lock, OWNER_NULL will be returned.
		 * In this case, we attempt to acquire the lock again
		 * without sleeping.
		 */
		if (wstate == WRITER_HANDOFF) {
			enum owner_state owner_state;

			preempt_disable();
			owner_state = rwsem_spin_on_owner(sem);
			preempt_enable();

			if (owner_state == OWNER_NULL)
				goto trylock_again;
		}

		/* Block until there are no active lockers. */
		for (;;) {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			lockevent_inc(rwsem_sleep_writer);
			set_current_state(state);
			/*
			 * If HANDOFF bit is set, unconditionally do
			 * a trylock.
			 */
			if (wstate == WRITER_HANDOFF)
				break;

			if ((wstate == WRITER_NOT_FIRST) &&
			    (rwsem_first_waiter(sem) == &waiter))
				wstate = WRITER_FIRST;

			count = atomic_long_read(&sem->count);
			if (!(count & RWSEM_LOCK_MASK))
				break;

			/*
			 * The setting of the handoff bit is deferred
			 * until rwsem_try_write_lock() is called.
			 */
			if ((wstate == WRITER_FIRST) && (rt_task(current) ||
			    time_after(jiffies, waiter.timeout))) {
				wstate = WRITER_HANDOFF;
				lockevent_inc(rwsem_wlock_handoff);
				break;
			}
		}
trylock_again:
		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);
	lockevent_inc(rwsem_wlock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);

	if (unlikely(wstate == WRITER_HANDOFF))
		atomic_long_add(-RWSEM_FLAG_HANDOFF, &sem->count);

	if (list_empty(&sem->wait_list))
		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
	else
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);
	lockevent_inc(rwsem_wlock_fail);

	return ERR_PTR(-EINTR);
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}

/*
 * lock for reading
 */
static inline int __down_read_common(struct rw_semaphore *sem, int state)
{
	long count;

	if (!rwsem_read_trylock(sem, &count)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	}
	return 0;
}

static inline void __down_read(struct rw_semaphore *sem)
{
	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
}

static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
	return __down_read_common(sem, TASK_INTERRUPTIBLE);
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	return __down_read_common(sem, TASK_KILLABLE);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);

	/*
	 * Optimize for the case when the rwsem is not locked at all.
	 */
	tmp = RWSEM_UNLOCKED_VALUE;
	do {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					tmp + RWSEM_READER_BIAS)) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
	} while (!(tmp & RWSEM_READ_FAILED_MASK));
	return 0;
}
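
/*
 * Illustrative use of the read-side entry points built on the helpers
 * above (a sketch; down_read(), down_read_killable() and up_read() are
 * the public wrappers declared in <linux/rwsem.h>):
 *
 *	static DECLARE_RWSEM(example_rwsem);
 *
 *	down_read(&example_rwsem);
 *	... read-side critical section, may sleep ...
 *	up_read(&example_rwsem);
 */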

/*
 * lock for writing
 */
static inline int __down_write_common(struct rw_semaphore *sem, int state)
{
	if (unlikely(!rwsem_write_trylock(sem))) {
		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
			return -EINTR;
	}

	return 0;
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	return __down_write_common(sem, TASK_KILLABLE);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	return rwsem_write_trylock(sem);
}
1294
1295/*
1296 * unlock after reading
1297 */
Peter Zijlstra7f264822019-10-30 20:30:41 +01001298static inline void __up_read(struct rw_semaphore *sem)
Waiman Long5dec94d2019-05-20 16:59:03 -04001299{
1300 long tmp;
1301
Davidlohr Buesofce45cd2019-07-28 21:47:35 -07001302 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
Waiman Long94a97172019-05-20 16:59:12 -04001303 DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
Davidlohr Buesofce45cd2019-07-28 21:47:35 -07001304
Waiman Long5dec94d2019-05-20 16:59:03 -04001305 rwsem_clear_reader_owned(sem);
1306 tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
Waiman Longa15ea1a2019-05-20 16:59:15 -04001307 DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
Waiman Long6cef7ff62019-05-20 16:59:04 -04001308 if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
Waiman Long7d43f1c2019-05-20 16:59:13 -04001309 RWSEM_FLAG_WAITERS)) {
Waiman Long617f3ef2020-11-20 23:14:16 -05001310 clear_nonspinnable(sem);
xuyehand4e50762021-07-06 12:50:43 +08001311 rwsem_wake(sem);
Waiman Long7d43f1c2019-05-20 16:59:13 -04001312 }
Waiman Long5dec94d2019-05-20 16:59:03 -04001313}
1314
1315/*
1316 * unlock after writing
1317 */
Peter Zijlstra7f264822019-10-30 20:30:41 +01001318static inline void __up_write(struct rw_semaphore *sem)
Waiman Long5dec94d2019-05-20 16:59:03 -04001319{
Waiman Long6cef7ff62019-05-20 16:59:04 -04001320 long tmp;
1321
Davidlohr Buesofce45cd2019-07-28 21:47:35 -07001322 DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
	/*
	 * sem->owner may differ from current if the ownership is transferred
	 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bit.
	 */
	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
			    !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);

	rwsem_clear_owner(sem);
	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
	tmp = atomic_long_fetch_add_release(
		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp & RWSEM_FLAG_WAITERS)
		rwsem_downgrade_wake(sem);
}
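
/*
 * Illustrative sketch (editor's example, not kernel code): the single
 * atomic in __downgrade_write() trades write ownership for one reader
 * count, so the lock is never observed as free in between:
 *
 *	before:	count == RWSEM_WRITER_LOCKED	(possibly | RWSEM_FLAG_WAITERS)
 *	after:	count == RWSEM_READER_BIAS	(flag bits preserved)
 *
 * Any readers queued at the front are then woken via rwsem_downgrade_wake().
 */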

#else /* !CONFIG_PREEMPT_RT */

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

#define rwbase_set_and_save_current_state(state)	\
	set_current_state(state)

#define rwbase_restore_current_state()			\
	__set_current_state(TASK_RUNNING)

#define rwbase_rtmutex_lock_state(rtm, state)		\
	__rt_mutex_lock(rtm, state)

#define rwbase_rtmutex_slowlock_locked(rtm, state)	\
	__rt_mutex_slowlock_locked(rtm, NULL, state)

#define rwbase_rtmutex_unlock(rtm)			\
	__rt_mutex_unlock(rtm)

#define rwbase_rtmutex_trylock(rtm)			\
	__rt_mutex_trylock(rtm)

#define rwbase_signal_pending_state(state, current)	\
	signal_pending_state(state, current)

#define rwbase_schedule()				\
	schedule()

#include "rwbase_rt.c"

void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
	init_rwbase_rt(&(sem)->rwbase);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
}
EXPORT_SYMBOL(__init_rwsem);
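
/*
 * Usage sketch (editor's example, not part of this file): init_rwsem()
 * from <linux/rwsem.h> expands to a call to __init_rwsem(); static
 * initialization uses DECLARE_RWSEM() instead:
 *
 *	static DECLARE_RWSEM(static_sem);	// compile-time init
 *
 *	struct rw_semaphore dyn_sem;
 *	init_rwsem(&dyn_sem);			// run-time init
 */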

static inline void __down_read(struct rw_semaphore *sem)
{
	rwbase_read_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
	return rwbase_read_lock(&sem->rwbase, TASK_INTERRUPTIBLE);
}

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	return rwbase_read_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	return rwbase_read_trylock(&sem->rwbase);
}

static inline void __up_read(struct rw_semaphore *sem)
{
	rwbase_read_unlock(&sem->rwbase, TASK_NORMAL);
}

static inline void __sched __down_write(struct rw_semaphore *sem)
{
	rwbase_write_lock(&sem->rwbase, TASK_UNINTERRUPTIBLE);
}

static inline int __sched __down_write_killable(struct rw_semaphore *sem)
{
	return rwbase_write_lock(&sem->rwbase, TASK_KILLABLE);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	return rwbase_write_trylock(&sem->rwbase);
}

static inline void __up_write(struct rw_semaphore *sem)
{
	rwbase_write_unlock(&sem->rwbase);
}

static inline void __downgrade_write(struct rw_semaphore *sem)
{
	rwbase_write_downgrade(&sem->rwbase);
}

/* Debug stubs for the common API */
#define DEBUG_RWSEMS_WARN_ON(c, sem)

static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
}

static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
	int count = atomic_read(&sem->rwbase.readers);

	return count < 0 && count != READER_BIAS;
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read);
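
/*
 * Usage sketch (editor's example, not part of this file): a typical
 * reader-side critical section; "shared_data" is a hypothetical name:
 *
 *	static DECLARE_RWSEM(data_sem);
 *
 *	down_read(&data_sem);
 *	val = shared_data;	// multiple readers may run concurrently
 *	up_read(&data_sem);
 */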

/*
 * lock for reading, interruptible by any signal
 */
int __sched down_read_interruptible(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_interruptible);

/*
 * lock for reading, interruptible by fatal signals only
 */
int __sched down_read_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable);
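
/*
 * Usage sketch (editor's example): the killable/interruptible variants
 * can fail with -EINTR, in which case the lock is not held and the caller
 * must not touch the protected data:
 *
 *	if (down_read_killable(&data_sem))
 *		return -EINTR;	// fatal signal arrived while sleeping
 *	val = shared_data;
 *	up_read(&data_sem);
 */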

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
EXPORT_SYMBOL(down_read_trylock);
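
/*
 * Usage sketch (editor's example): note the return convention differs
 * from functions that return 0 on success -- here 1 means acquired:
 *
 *	if (down_read_trylock(&data_sem)) {
 *		val = shared_data;
 *		up_read(&data_sem);
 *	} else {
 *		// contended: fall back, e.g. defer the work
 *	}
 */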

/*
 * lock for writing
 */
void __sched down_write(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write);
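
/*
 * Usage sketch (editor's example): the writer side excludes both readers
 * and other writers:
 *
 *	down_write(&data_sem);
 *	shared_data = new_val;	// sole owner here
 *	up_write(&data_sem);
 */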

/*
 * lock for writing, interruptible by fatal signals only
 */
int __sched down_write_killable(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int down_write_trylock(struct rw_semaphore *sem)
{
	int ret = __down_write_trylock(sem);

	if (ret == 1)
		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL(down_write_trylock);

/*
 * release a read lock
 */
void up_read(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read);

/*
 * release a write lock
 */
void up_write(struct rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);
	__up_write(sem);
}
EXPORT_SYMBOL(up_write);

/*
 * downgrade write lock to read lock
 */
void downgrade_write(struct rw_semaphore *sem)
{
	lock_downgrade(&sem->dep_map, _RET_IP_);
	__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);
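
/*
 * Usage sketch (editor's example): a common pattern is to build state
 * under the write lock, then downgrade so readers can proceed while the
 * caller keeps read access without a release/reacquire window; the
 * helpers are hypothetical:
 *
 *	down_write(&data_sem);
 *	shared_data = expensive_setup();
 *	downgrade_write(&data_sem);
 *	use_read_only(shared_data);
 *	up_read(&data_sem);
 */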

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
EXPORT_SYMBOL(down_read_nested);
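
/*
 * Usage sketch (editor's example): the _nested() variants tell lockdep
 * that holding two rwsems of the same lock class is intentional, e.g.
 * when locking a parent and a child object in a fixed order:
 *
 *	down_read(&parent->sem);
 *	down_read_nested(&child->sem, SINGLE_DEPTH_NESTING);
 *	...
 *	up_read(&child->sem);
 *	up_read(&parent->sem);
 */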

int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_read_killable_nested);

void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
{
	might_sleep();
	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(_down_write_nest_lock);

void down_read_non_owner(struct rw_semaphore *sem)
{
	might_sleep();
	__down_read(sem);
	__rwsem_set_reader_owned(sem, NULL);
}
EXPORT_SYMBOL(down_read_non_owner);
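
/*
 * Usage sketch (editor's example): the _non_owner() variants record no
 * owner task, so the read lock may be released from a different context
 * than the one that acquired it; pairs with up_read_non_owner() below:
 *
 *	down_read_non_owner(&data_sem);	// e.g. before handing off to
 *	...				// another task or completion path
 *	up_read_non_owner(&data_sem);	// from that other context
 */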

void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
}
EXPORT_SYMBOL(down_write_nested);

int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
				  __down_write_killable)) {
		rwsem_release(&sem->dep_map, _RET_IP_);
		return -EINTR;
	}

	return 0;
}
EXPORT_SYMBOL(down_write_killable_nested);

void up_read_non_owner(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);

#endif