/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */

/*
 * Writer states & reader shift and bias.
 */
#define _QW_WAITING	0x100		/* A writer is waiting	   */
#define _QW_LOCKED	0x0ff		/* A writer holds the lock */
#define _QW_WMASK	0x1ff		/* Writer mask		   */
#define _QR_SHIFT	9		/* Reader count shift	   */
#define _QR_BIAS	(1U << _QR_SHIFT)
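
/*
 * Layout of the 32-bit lock->cnts word implied by the masks above:
 *
 *	bits  0-7 : writer byte (0xff while a writer holds the lock)
 *	bit   8   : set while a writer is waiting for the lock
 *	bits  9-31: number of active readers, counted in _QR_BIAS units
 */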

/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (likely(!(cnts & _QW_WMASK))) {
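		/*
		 * No writer is present: optimistically add a reader,
		 * then recheck that a writer did not slip in meanwhile.
		 */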
		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
		if (likely(!(cnts & _QW_WMASK)))
			return 1;
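		/* A writer appeared after the increment; back the reader out. */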
		atomic_sub(_QR_BIAS, &lock->cnts);
	}
	return 0;
}

/**
 * queued_write_trylock - try to acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
	int cnts;

	cnts = atomic_read(&lock->cnts);
	if (unlikely(cnts))
		return 0;

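	/* The lock was free (cnts == 0): try to swap in the writer byte. */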
	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
						 _QW_LOCKED));
}

/**
 * queued_read_lock - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
	int cnts;

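	/* Fast path: add a reader and check for an active or waiting writer. */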
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}

/**
 * queued_write_lock - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
	int cnts = 0;
	/* Optimize for the unfair lock case where the fair flag is 0. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
		return;

	queued_write_lock_slowpath(lock);
}

/**
 * queued_read_unlock - release read lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
	/*
	 * Atomically decrement the reader count
	 */
	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
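	/* Clear only the writer byte; any reader count in the upper bits stays intact. */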
	smp_store_release(&lock->wlocked, 0);
}

/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queue rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
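	/* Both reader and writer slowpaths queue up on lock->wait_lock. */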
	return arch_spin_is_locked(&lock->wait_lock);
}

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queue rwlock functions.
 */
#define arch_read_lock(l)		queued_read_lock(l)
#define arch_write_lock(l)		queued_write_lock(l)
#define arch_read_trylock(l)		queued_read_trylock(l)
#define arch_write_trylock(l)		queued_write_trylock(l)
#define arch_read_unlock(l)		queued_read_unlock(l)
#define arch_write_unlock(l)		queued_write_unlock(l)
#define arch_rwlock_is_contended(l)	queued_rwlock_is_contended(l)
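
/*
 * Sketch of how an architecture picks these up (the exact Kconfig and
 * header wiring varies per architecture): the arch selects
 * ARCH_USE_QUEUED_RWLOCKS and includes this file from its asm/spinlock.h,
 * after which the generic rwlock entry points resolve to the queued
 * implementations, e.g.:
 *
 *	arch_rwlock_t lock = __ARCH_RW_LOCK_UNLOCKED;
 *
 *	arch_read_lock(&lock);		// -> queued_read_lock()
 *	arch_read_unlock(&lock);	// -> queued_read_unlock()
 *	arch_write_lock(&lock);		// -> queued_write_lock()
 *	arch_write_unlock(&lock);	// -> queued_write_unlock()
 */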

#endif /* __ASM_GENERIC_QRWLOCK_H */