// SPDX-License-Identifier: GPL-2.0
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore which enforces
 * an alternate policy, namely:
 * 1) Supports lock wait timeouts
 * 2) Write waiter has priority
 * 3) Downgrading is not supported
 *
 * Implementation notes:
 * 1) Upper half of semaphore count is a wait count (differs from rwsem
 *    in that rwsem normalizes the upper half to the wait bias)
 * 2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>


#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
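
/*
 * Worked example of the count encoding (a minimal sketch, kept under
 * "#if 0" so it is never built; it assumes a 64-bit userspace compile
 * and uses <stdio.h>, which is of course not kernel code). Two granted
 * readers plus one writer sleeping in down_write_failed() -- a sleeping
 * writer has backed out its active bias and contributes only
 * LDSEM_WAIT_BIAS -- yield a negative count with an active part of 2.
 */
#if 0
#include <stdio.h>

int main(void)
{
	long count = 2 * LDSEM_READ_BIAS + LDSEM_WAIT_BIAS;

	printf("active part = %ld\n", count & LDSEM_ACTIVE_MASK);	/* 2 */
	printf("waiters pending = %d\n", count < 0);			/* 1 */
	return 0;
}
#endif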

struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, LDSEM_UNLOCKED);
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/*
	 * Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
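	/*
	 * Worked example (illustrative): with three waiting readers,
	 * adjust == 3 * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS), so the
	 * single add below removes three wait biases and adds three
	 * active biases, converting all three waiters into active
	 * readers in one atomic step.
	 */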
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
	count = atomic_long_add_return(adjust, &sem->count);
	do {
		if (count > 0)
			break;
		if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
			return;
	} while (1);

	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		smp_store_release(&waiter->task, NULL);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}

static inline int writer_trylock(struct ld_semaphore *sem)
{
	/*
	 * Only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
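	/*
	 * Worked example (illustrative): if one reader is still active,
	 * the add below leaves the active part at 2 rather than
	 * LDSEM_ACTIVE_BIAS, so the cmpxchg backs the bias out and the
	 * trylock fails; with no active locks the add yields exactly
	 * LDSEM_ACTIVE_BIAS and the writer now owns the lock.
	 */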
	long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
			return 0;
	} while (1);
}

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}

/*
 * Handle the lock release when processes blocked on it can now run.
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/*
	 * Try to reverse the lock attempt, but if the count has changed
	 * so that reversing fails, check whether the read lock was
	 * granted instead (no waiters remain), and early-out if so.
	 */
	do {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
			count += adjust;
			break;
		}
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;

	waiter.task = current;
	get_task_struct(current);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!smp_load_acquire(&waiter.task))
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}

	__set_current_state(TASK_RUNNING);

	if (!timeout) {
		/*
		 * The lock wait timed out, but check whether this task was
		 * just granted lock ownership - if so, pretend there was
		 * no timeout; otherwise, clean up the lock wait.
		 */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
			sem->wait_readers--;
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}

	return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);

	/*
	 * Try to reverse the lock attempt but if the count has changed
	 * so that reversing fails, check if the lock is now owned,
	 * and early-out if so.
	 */
	do {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);

	waiter.task = current;

	set_current_state(TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		locked = writer_trylock(sem);
		if (locked)
			break;
	}

	if (!locked)
		atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
	list_del(&waiter.list);

	/*
	 * In case of timeout, wake up every reader that gave the right of
	 * way to the writer. This prevents the readers from being split
	 * into two groups: one holding the semaphore and another sleeping
	 * (in the case of no contention with a writer).
	 */
	if (!locked && list_empty(&sem->write_wait))
		__ldsem_wake_readers(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	__set_current_state(TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}


static int __ldsem_down_read_nested(struct ld_semaphore *sem,
				    int subclass, long timeout)
{
	long count;

	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
	if (count <= 0) {
		lock_contended(&sem->dep_map, _RET_IP_);
		if (!down_read_failed(sem, count, timeout)) {
			rwsem_release(&sem->dep_map, _RET_IP_);
			return 0;
		}
	}
	lock_acquired(&sem->dep_map, _RET_IP_);
	return 1;
}

static int __ldsem_down_write_nested(struct ld_semaphore *sem,
				     int subclass, long timeout)
{
	long count;

	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_contended(&sem->dep_map, _RET_IP_);
		if (!down_write_failed(sem, count, timeout)) {
			rwsem_release(&sem->dep_map, _RET_IP_);
			return 0;
		}
	}
	lock_acquired(&sem->dep_map, _RET_IP_);
	return 1;
}


/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}
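
/*
 * Usage sketch (illustrative only, kept under "#if 0"): how a caller
 * such as the tty core might take the read side with a timeout. The
 * example_ldisc_ref() name, the 5*HZ timeout and the -EBUSY return are
 * assumptions for illustration, not tty core code.
 */
#if 0
static int example_ldisc_ref(struct tty_struct *tty)
{
	if (!ldsem_down_read(&tty->ldisc_sem, 5 * HZ))
		return -EBUSY;	/* lock wait timed out */
	/* ... tty->ldisc may be used safely here ... */
	ldsem_up_read(&tty->ldisc_sem);
	return 0;
}
#endif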

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (count >= 0) {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
			rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
			lock_acquired(&sem->dep_map, _RET_IP_);
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}
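
/*
 * Usage sketch (illustrative only, kept under "#if 0"): exclusive access
 * while changing the line discipline. A failed (timed out) down_write
 * holds no lock, so the caller can simply back off. The function name,
 * timeout and error code are assumptions, not the tty core's actual
 * locking helpers.
 */
#if 0
static int example_ldisc_change(struct tty_struct *tty)
{
	if (!ldsem_down_write(&tty->ldisc_sem, 5 * HZ))
		return -EBUSY;	/* writer could not exclude readers in time */
	/* ... swap tty->ldisc while readers and writers are excluded ... */
	ldsem_up_write(&tty->ldisc_sem);
	return 0;
}
#endif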

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
			rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
			lock_acquired(&sem->dep_map, _RET_IP_);
			return 1;
		}
	}
	return 0;
}

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	rwsem_release(&sem->dep_map, _RET_IP_);

	count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	rwsem_release(&sem->dep_map, _RET_IP_);

	count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
	if (count < 0)
		ldsem_wake(sem);
}


#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif