/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif
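/*
 * Illustrative note (added commentary, not from the original header):
 * per asm-generic/qspinlock_types.h, the 32-bit lock word read above
 * is laid out roughly as follows when NR_CPUS < 16K:
 *
 *	bits  0- 7: locked byte (_Q_LOCKED_VAL while held)
 *	bit      8: pending bit (a single spinning waiter)
 *	bits  9-15: unused
 *	bits 16-31: tail (MCS queue index and CPU of the last waiter)
 *
 * Hence any non-zero value means "locked or about to be locked",
 * which is why queued_spin_is_locked() tests the whole word and why
 * queued_spin_is_contended() below masks off _Q_LOCKED_MASK to look
 * only at the waiter bits.
 */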

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to prevent the lockref code from stealing
 * the lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflict with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
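/*
 * Illustrative sketch (assumed caller, loosely paraphrasing
 * lib/lockref.c; not part of this header): the lock is passed by
 * value because callers operate on a snapshot of a word that embeds
 * the lock, e.g.:
 *
 *	struct lockref old;
 *	old.lock_count = READ_ONCE(lockref->lock_count);  // lock + count
 *	if (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		// no owner and no queued waiters in the snapshot:
 *		// safe to attempt a cmpxchg of the whole word
 *	}
 */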

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	/*
	 * Peek at the lock word first so that an obviously held lock
	 * does not cost a cacheline-dirtying cmpxchg.
	 */
	int val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
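/*
 * Illustrative usage sketch (assumed caller code, not from this
 * header): trylock returns non-zero on success and never spins, so a
 * typical opportunistic pattern via the arch_spin_* wrappers below is:
 *
 *	if (arch_spin_trylock(&lock)) {
 *		do_work();		// lock acquired without waiting
 *		arch_spin_unlock(&lock);
 *	} else {
 *		fallback();		// word was non-zero: held or queued
 *	}
 */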

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#endif
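/*
 * Illustrative note (added commentary, not from the original header):
 * the fast path above is a single 0 -> _Q_LOCKED_VAL cmpxchg with
 * acquire ordering. On failure, atomic_try_cmpxchg_acquire() updates
 * @val with the value it actually observed, so the slow path in
 * kernel/locking/qspinlock.c starts from a fresh snapshot and then
 * sets the pending bit or queues on a per-CPU MCS node. Roughly:
 *
 *	val = 0;
 *	if (!atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL))
 *		queued_spin_lock_slowpath(lock, val);	// contended case
 */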

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
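/*
 * Illustrative note (added commentary, not from the original header):
 * only the owner clears the lock, and the locked field occupies a
 * byte of its own in the lock word, so unlock can be a plain
 * byte-sized store; the release ordering is what publishes the
 * critical section, e.g.:
 *
 *	shared_data = 42;			// critical-section store
 *	smp_store_release(&lock->locked, 0);	// data visible before the 0
 */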

#ifndef virt_spin_lock
/*
 * Hook for virtualized environments; this generic stub keeps the
 * native queued-lock path. An architecture may override it to fall
 * back to a simpler lock when running as a guest, where a preempted
 * vCPU in the MCS queue would stall all later waiters.
 */
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
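/*
 * Illustrative sketch (hypothetical arch/foo paths, not from this
 * file): an architecture opts in by adopting the generic lock type
 * and then pulling in this header, overriding any hooks beforehand:
 *
 *	// arch/foo/include/asm/spinlock_types.h
 *	#include <asm-generic/qspinlock_types.h>
 *
 *	// arch/foo/include/asm/spinlock.h
 *	#include <asm/qspinlock.h>	    // may define queued_spin_unlock
 *	#include <asm-generic/qspinlock.h>  // provides the arch_spin_*() ops
 */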

#endif /* __ASM_GENERIC_QSPINLOCK_H */