/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
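
/*
 * Note: per asm-generic/qspinlock_types.h, the 32-bit lock word is split
 * into a locked byte, a pending bit and a tail field identifying the last
 * queued CPU (exact field widths depend on NR_CPUS), so any non-zero value
 * means the lock is held and/or has waiters.
 */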

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to keep the lockref code from stealing the
 * lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
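
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * because queued_spin_value_unlocked() takes the lock by value, callers
 * such as lockref can test a value they have already read (for instance
 * as part of a wider cmpxchg word) instead of dereferencing the live lock:
 *
 *	struct qspinlock snap = *lock;
 *
 *	if (queued_spin_value_unlocked(snap)) {
 *		... the snapshot showed no owner and no queued waiters ...
 *	}
 */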

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
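
/*
 * Illustrative sketch (hypothetical caller, not part of this header; obj and
 * the helper functions are made-up names): a common pattern takes the lock
 * opportunistically and falls back to other work when it is busy:
 *
 *	if (queued_spin_trylock(&obj->lock)) {
 *		update_object(obj);
 *		queued_spin_unlock(&obj->lock);
 *	} else {
 *		queue_deferred_update(obj);
 *	}
 */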

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
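
/*
 * Note: the fast path above assumes an uncontended lock (val == 0) and tries
 * to swing it straight to _Q_LOCKED_VAL with acquire semantics. If that
 * cmpxchg fails, @val holds the value actually observed and is handed to
 * queued_spin_lock_slowpath() (kernel/locking/qspinlock.c), which resolves
 * contention via the pending bit and a per-CPU MCS wait queue.
 */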

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
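
/*
 * Note: the store-release in queued_spin_unlock() pairs with the acquire in
 * queued_spin_lock()/queued_spin_trylock(), so writes made inside the
 * critical section are visible to the next owner. Only the locked byte is
 * cleared; any pending bit or waiter tail is left for the queued CPUs to
 * resolve among themselves.
 */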

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
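
/*
 * virt_spin_lock() is a hook for virtualized guests: an architecture header
 * may define it (before including this file) so that the slowpath can fall
 * back to a simple test-and-set lock when fair queueing behaves poorly under
 * vCPU preemption. A minimal sketch of such an override, assuming a
 * hypothetical hypervisor check:
 *
 *	#define virt_spin_lock virt_spin_lock
 *	static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 *	{
 *		if (!hypothetical_running_on_hypervisor())
 *			return false;
 *
 *		do {
 *			while (atomic_read(&lock->val))
 *				cpu_relax();
 *		} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);
 *
 *		return true;
 *	}
 */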

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
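
/*
 * Illustrative sketch (hypothetical caller, not part of this header;
 * stats_lock, stats_count and stats_inc() are made-up names): on an
 * architecture that selects queued spinlocks, the ordinary spinlock API
 * ends up in the functions above:
 *
 *	static DEFINE_SPINLOCK(stats_lock);
 *	static unsigned long stats_count;
 *
 *	void stats_inc(void)
 *	{
 *		spin_lock(&stats_lock);
 *		stats_count++;
 *		spin_unlock(&stats_lock);
 *	}
 */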

#endif /* __ASM_GENERIC_QSPINLOCK_H */