// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>

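/*
 * For reference when reading the slowpaths below: the lock word
 * (lock->cnts) packs the writer state into the low bits and keeps the
 * reader count in the bits above it. Roughly, per the definitions in
 * include/asm-generic/qrwlock.h:
 *
 *	_QW_LOCKED  - a writer currently holds the lock
 *	_QW_WAITING - a writer is queued and waiting
 *	_QR_BIAS    - the increment representing one reader
 *
 * The fast paths manipulate this word directly; only contended
 * acquisitions fall through to the slowpaths in this file.
 */
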
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
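	/*
	 * The reader fast path has already added _QR_BIAS speculatively
	 * before calling in here. Drop that reference again so a queued
	 * writer is not held up while this reader waits its turn; the
	 * bias is re-taken below once the wait_lock is held.
	 */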
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
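
/*
 * For context, the reader fast path that funnels into the slowpath above
 * looks roughly like the following (paraphrased from
 * include/asm-generic/qrwlock.h; the exact code may differ between kernel
 * versions):
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		int cnts;
 *
 *		cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;		// no writer: lock acquired
 *
 *		// The slowpath sorts out the speculative _QR_BIAS.
 *		queued_read_lock_slowpath(lock);
 *	}
 */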

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	int cnts;

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!(cnts = atomic_read(&lock->cnts)) &&
	    atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_or(_QW_WAITING, &lock->cnts);

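	/*
	 * Note on ordering: the wait below can spin with relaxed semantics
	 * because the subsequent successful try_cmpxchg has ACQUIRE
	 * semantics, and that is what orders the writer's critical section
	 * after the lock acquisition.
	 */
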
	/* When no more readers or writers, set the locked flag */
	do {
		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
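
/*
 * For context, the writer fast path that falls back to the slowpath above
 * looks roughly like the following (paraphrased from
 * include/asm-generic/qrwlock.h; the exact code may differ between kernel
 * versions):
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		int cnts = 0;
 *
 *		// Acquire in one shot if the lock word is completely idle.
 *		if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
 *						      _QW_LOCKED)))
 *			return;
 *
 *		queued_write_lock_slowpath(lock);
 *	}
 */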