/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure, as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline and cause
 * cacheline bouncing problems.
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners, as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the CPU.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};
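
/*
 * Illustration of the layout advice above (a sketch only: struct my_object
 * and its fields are invented, not kernel code). Padding keeps another
 * frequently-written field off the cacheline holding count and owner:
 *
 *	struct my_object {
 *		struct rw_semaphore lock;
 *		long pad[8];
 *		atomic_t hot_counter;
 *	};
 */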

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != 0;
}
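
/*
 * A common pattern (sketch; my_update and obj are invented names) is to
 * use this as a sanity check in a function that must be called with the
 * rwsem already held:
 *
 *	static void my_update(struct my_object *obj)
 *	{
 *		WARN_ON(!rwsem_is_locked(&obj->lock));
 *		...
 *	}
 */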

#define RWSEM_UNLOCKED_VALUE		0L
#define __RWSEM_INIT_COUNT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)			\
	, .dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	}
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifdef CONFIG_DEBUG_RWSEMS
# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
#else
# define __DEBUG_RWSEM_INITIALIZER(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_INIT_COUNT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
	  __RWSEM_OPT_INIT(name)				\
	  __DEBUG_RWSEM_INITIALIZER(name)			\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
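
/*
 * Usage sketch (my_object and my_static_rwsem are invented names): a rwsem
 * may be initialized statically or at run time, but must be initialized
 * before first use.
 *
 *	static DECLARE_RWSEM(my_static_rwsem);
 *
 *	struct my_object *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	if (obj)
 *		init_rwsem(&obj->lock);
 */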

/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic, meant to be called by somebody already holding
 * the rwsem to see if somebody wanting access of an incompatible type is
 * waiting on the lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}
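
/*
 * Sketch of the intended use (my_more_work/my_do_some_work are invented
 * helpers): a long-running reader can periodically yield the lock when
 * other tasks are queued behind it.
 *
 *	down_read(&obj->lock);
 *	while (my_more_work(obj)) {
 *		my_do_some_work(obj);
 *		if (rwsem_is_contended(&obj->lock)) {
 *			up_read(&obj->lock);
 *			cond_resched();
 *			down_read(&obj->lock);
 *		}
 *	}
 *	up_read(&obj->lock);
 */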

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);
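
/*
 * A minimal usage sketch tying the above together (obj and new_value are
 * invented for illustration). A writer can publish an update, then
 * downgrade to a read lock to keep the result stable while it is used:
 *
 *	down_write(&obj->lock);
 *	obj->value = new_value;
 *	downgrade_write(&obj->lock);
 *	my_consume(obj->value);
 *	up_read(&obj->lock);
 *
 * The killable variants return 0 on success and -EINTR if a fatal signal
 * arrives while waiting, so their results must be checked:
 *
 *	if (down_write_killable(&obj->lock))
 *		return -EINTR;
 *	obj->value = new_value;
 *	up_write(&obj->lock);
 */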

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)

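/*
 * For example (a sketch with invented parent/child locks): when two rwsems
 * of the same lock class must be held at once in a fixed parent->child
 * order, the inner one can be annotated with a distinct subclass so that
 * lockdep does not report it as a recursion:
 *
 *	down_write(&parent->lock);
 *	down_write_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	up_write(&child->lock);
 *	up_write(&parent->lock);
 */
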
/*
 * Take/release a lock when the task that releases it is not the task
 * that took it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_write_nest_lock(sem, nest_lock)		down_write(sem)
# define down_write_nested(sem, subclass)		down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)			down_read(sem)
# define up_read_non_owner(sem)				up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */