// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>

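/*
 * Lock word layout (lock->cnts), per include/asm-generic/qrwlock.h:
 *
 *	_QW_LOCKED  0x0ff  - a writer holds the lock
 *	_QW_WAITING 0x100  - a writer is waiting
 *	_QW_WMASK   0x1ff  - writer mode mask (locked or waiting)
 *	_QR_BIAS    1 << 9 - increment for one active reader
 *
 * The reader count lives in the bits above _QR_SHIFT (9); the low
 * bits encode writer state.
 */
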
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
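	/*
	 * The fast path speculatively added _QR_BIAS before calling in
	 * here (see queued_read_lock() in include/asm-generic/qrwlock.h);
	 * back that increment out before joining the wait queue.
	 */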
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
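	/*
	 * Holding the wait_lock means we are now at the head of the wait
	 * queue, so it is safe to re-add our reader count before spinning
	 * for the writer to leave.
	 */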
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
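
/*
 * For context: this slowpath is reached from the fast path in
 * include/asm-generic/qrwlock.h, which (paraphrased sketch, not part
 * of this file) looks roughly like:
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		int cnts;
 *
 *		cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;
 *
 *		queued_read_lock_slowpath(lock);
 *	}
 */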

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	int cnts;

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!(cnts = atomic_read(&lock->cnts)) &&
	    atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_or(_QW_WAITING, &lock->cnts);
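	/*
	 * From this point on, fast-path readers see _QW_WAITING via
	 * _QW_WMASK and fall into the slowpath behind the wait_lock;
	 * only interrupt-context readers, which test _QW_LOCKED alone,
	 * can still overtake this writer.
	 */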

	/* Wait until no reader or writer is present, then set the locked flag */
	do {
		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
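
/*
 * For context: the corresponding write fast path in
 * include/asm-generic/qrwlock.h (paraphrased sketch, not part of this
 * file) is roughly:
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		int cnts = 0;
 *
 *		if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
 *						      _QW_LOCKED)))
 *			return;
 *
 *		queued_write_lock_slowpath(lock);
 *	}
 */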