blob: c47e8361bfb5c5342f4483d4b226acee656c0235 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/debug_locks.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>

20/*
Ingo Molnar23f78d4a2006-06-27 02:54:53 -070021 * This is the control structure for tasks blocked on a rt_mutex,
22 * which is allocated on the kernel stack on of the blocked task.
23 *
Peter Zijlstrafb00aca2013-11-07 14:43:43 +010024 * @tree_entry: pi node to enqueue into the mutex waiters tree
25 * @pi_tree_entry: pi node to enqueue into the mutex owner waiters tree
Ingo Molnar23f78d4a2006-06-27 02:54:53 -070026 * @task: task reference to the blocked task
Thomas Gleixner37350e32021-03-26 16:29:37 +010027 * @lock: Pointer to the rt_mutex on which the waiter blocks
Thomas Gleixnerc014ef62021-08-15 23:28:06 +020028 * @wake_state: Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
Thomas Gleixner37350e32021-03-26 16:29:37 +010029 * @prio: Priority of the waiter
30 * @deadline: Deadline of the waiter if applicable
Peter Zijlstraadd46132021-08-15 23:28:58 +020031 * @ww_ctx: WW context pointer
Ingo Molnar23f78d4a2006-06-27 02:54:53 -070032 */
33struct rt_mutex_waiter {
Thomas Gleixner37350e32021-03-26 16:29:37 +010034 struct rb_node tree_entry;
35 struct rb_node pi_tree_entry;
Ingo Molnar23f78d4a2006-06-27 02:54:53 -070036 struct task_struct *task;
Peter Zijlstra830e6ac2021-08-15 23:27:58 +020037 struct rt_mutex_base *lock;
Thomas Gleixnerc014ef62021-08-15 23:28:06 +020038 unsigned int wake_state;
Thomas Gleixner37350e32021-03-26 16:29:37 +010039 int prio;
40 u64 deadline;
Peter Zijlstraadd46132021-08-15 23:28:58 +020041 struct ww_acquire_ctx *ww_ctx;
Ingo Molnar23f78d4a2006-06-27 02:54:53 -070042};
43
Thomas Gleixnerb576e642021-08-15 23:28:08 +020044/**
45 * rt_wake_q_head - Wrapper around regular wake_q_head to support
46 * "sleeping" spinlocks on RT
Thomas Gleixner456cfbc2021-08-15 23:28:11 +020047 * @head: The regular wake_q_head for sleeping lock variants
48 * @rtlock_task: Task pointer for RT lock (spin/rwlock) wakeups
Thomas Gleixnerb576e642021-08-15 23:28:08 +020049 */
50struct rt_wake_q_head {
51 struct wake_q_head head;
Thomas Gleixner456cfbc2021-08-15 23:28:11 +020052 struct task_struct *rtlock_task;
Thomas Gleixnerb576e642021-08-15 23:28:08 +020053};
54
55#define DEFINE_RT_WAKE_Q(name) \
56 struct rt_wake_q_head name = { \
57 .head = WAKE_Q_HEAD_INITIALIZER(name.head), \
Thomas Gleixner456cfbc2021-08-15 23:28:11 +020058 .rtlock_task = NULL, \
Thomas Gleixnerb576e642021-08-15 23:28:08 +020059 }
60
Ingo Molnar23f78d4a2006-06-27 02:54:53 -070061/*
Thomas Gleixner531ae4b2021-08-15 23:27:57 +020062 * PI-futex support (proxy locking functions, etc.):
63 */
Peter Zijlstra830e6ac2021-08-15 23:27:58 +020064extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
Thomas Gleixner531ae4b2021-08-15 23:27:57 +020065 struct task_struct *proxy_owner);
Peter Zijlstra830e6ac2021-08-15 23:27:58 +020066extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
67extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
Thomas Gleixner531ae4b2021-08-15 23:27:57 +020068 struct rt_mutex_waiter *waiter,
69 struct task_struct *task);
Peter Zijlstra830e6ac2021-08-15 23:27:58 +020070extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
Thomas Gleixner531ae4b2021-08-15 23:27:57 +020071 struct rt_mutex_waiter *waiter,
72 struct task_struct *task);
Peter Zijlstra830e6ac2021-08-15 23:27:58 +020073extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
Thomas Gleixner531ae4b2021-08-15 23:27:57 +020074 struct hrtimer_sleeper *to,
75 struct rt_mutex_waiter *waiter);
Peter Zijlstra830e6ac2021-08-15 23:27:58 +020076extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
Thomas Gleixner531ae4b2021-08-15 23:27:57 +020077 struct rt_mutex_waiter *waiter);
78
Peter Zijlstra830e6ac2021-08-15 23:27:58 +020079extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
80extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);
Thomas Gleixner531ae4b2021-08-15 23:27:57 +020081
Peter Zijlstra830e6ac2021-08-15 23:27:58 +020082extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
83extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
Thomas Gleixner7980aa32021-08-15 23:28:09 +020084 struct rt_wake_q_head *wqh);
Thomas Gleixner531ae4b2021-08-15 23:27:57 +020085
Thomas Gleixner7980aa32021-08-15 23:28:09 +020086extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
Thomas Gleixner531ae4b2021-08-15 23:27:57 +020087
88/*
Thomas Gleixner37350e32021-03-26 16:29:37 +010089 * Must be guarded because this header is included from rcu/tree_plugin.h
90 * unconditionally.
Ingo Molnar23f78d4a2006-06-27 02:54:53 -070091 */
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -040092#ifdef CONFIG_RT_MUTEXES
Peter Zijlstra830e6ac2021-08-15 23:27:58 +020093static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
Ingo Molnar23f78d4a2006-06-27 02:54:53 -070094{
Davidlohr Buesoa23ba902017-09-08 16:15:01 -070095 return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
Ingo Molnar23f78d4a2006-06-27 02:54:53 -070096}
97
Thomas Gleixnerc3123c42021-08-25 12:33:12 +020098/*
99 * Lockless speculative check whether @waiter is still the top waiter on
100 * @lock. This is solely comparing pointers and not derefencing the
101 * leftmost entry which might be about to vanish.
102 */
103static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
104 struct rt_mutex_waiter *waiter)
105{
106 struct rb_node *leftmost = rb_first_cached(&lock->waiters);
107
108 return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter;
109}
110
Peter Zijlstra830e6ac2021-08-15 23:27:58 +0200111static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
Ingo Molnar23f78d4a2006-06-27 02:54:53 -0700112{
Peter Zijlstrac28d62c2018-03-27 14:14:38 +0200113 struct rb_node *leftmost = rb_first_cached(&lock->waiters);
114 struct rt_mutex_waiter *w = NULL;
Ingo Molnar23f78d4a2006-06-27 02:54:53 -0700115
Peter Zijlstrac28d62c2018-03-27 14:14:38 +0200116 if (leftmost) {
117 w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);
118 BUG_ON(w->lock != lock);
119 }
Ingo Molnar23f78d4a2006-06-27 02:54:53 -0700120 return w;
121}
122
123static inline int task_has_pi_waiters(struct task_struct *p)
124{
Davidlohr Buesoa23ba902017-09-08 16:15:01 -0700125 return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
Ingo Molnar23f78d4a2006-06-27 02:54:53 -0700126}
127
Thomas Gleixner37350e32021-03-26 16:29:37 +0100128static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
Ingo Molnar23f78d4a2006-06-27 02:54:53 -0700129{
Thomas Gleixner37350e32021-03-26 16:29:37 +0100130 return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
131 pi_tree_entry);
Ingo Molnar23f78d4a2006-06-27 02:54:53 -0700132}
133
Lai Jiangshan81612392011-01-14 17:09:41 +0800134#define RT_MUTEX_HAS_WAITERS 1UL
Ingo Molnar23f78d4a2006-06-27 02:54:53 -0700135
Peter Zijlstra830e6ac2021-08-15 23:27:58 +0200136static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
Ingo Molnar23f78d4a2006-06-27 02:54:53 -0700137{
Thomas Gleixner1be5d4f2016-11-30 21:04:42 +0000138 unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
139
Thomas Gleixnerb5016e82016-11-30 21:04:44 +0000140 return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
Ingo Molnar23f78d4a2006-06-27 02:54:53 -0700141}
142
Ingo Molnarc87e2832006-06-27 02:54:58 -0700143/*
Thomas Gleixner8930ed82014-05-22 03:25:47 +0000144 * Constants for rt mutex functions which have a selectable deadlock
145 * detection.
146 *
147 * RT_MUTEX_MIN_CHAINWALK: Stops the lock chain walk when there are
148 * no further PI adjustments to be made.
149 *
150 * RT_MUTEX_FULL_CHAINWALK: Invoke deadlock detection with a full
151 * walk of the lock chain.
152 */
153enum rtmutex_chainwalk {
154 RT_MUTEX_MIN_CHAINWALK,
155 RT_MUTEX_FULL_CHAINWALK,
156};
157
Peter Zijlstra830e6ac2021-08-15 23:27:58 +0200158static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
Thomas Gleixnerf5a98862021-03-26 16:29:38 +0100159{
Thomas Gleixnerf5a98862021-03-26 16:29:38 +0100160 raw_spin_lock_init(&lock->wait_lock);
161 lock->waiters = RB_ROOT_CACHED;
Peter Zijlstra830e6ac2021-08-15 23:27:58 +0200162 lock->owner = NULL;
Thomas Gleixnerf5a98862021-03-26 16:29:38 +0100163}
164
Thomas Gleixnerf41dcc12021-03-26 16:29:39 +0100165/* Debug functions */
Peter Zijlstra830e6ac2021-08-15 23:27:58 +0200166static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
Thomas Gleixnerf41dcc12021-03-26 16:29:39 +0100167{
168 if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
169 DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
170}
171
Peter Zijlstra830e6ac2021-08-15 23:27:58 +0200172static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
Thomas Gleixnerf41dcc12021-03-26 16:29:39 +0100173{
174 if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
175 DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
176}
177
178static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
179{
180 if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
181 memset(waiter, 0x11, sizeof(*waiter));
182}
183
184static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
185{
186 if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
187 memset(waiter, 0x22, sizeof(*waiter));
188}
Thomas Gleixner36cf3b52007-07-15 23:41:20 -0700189
Thomas Gleixner531ae4b2021-08-15 23:27:57 +0200190static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
191{
192 debug_rt_mutex_init_waiter(waiter);
193 RB_CLEAR_NODE(&waiter->pi_tree_entry);
194 RB_CLEAR_NODE(&waiter->tree_entry);
Thomas Gleixnerc014ef62021-08-15 23:28:06 +0200195 waiter->wake_state = TASK_NORMAL;
Thomas Gleixner531ae4b2021-08-15 23:27:57 +0200196 waiter->task = NULL;
197}
198
Thomas Gleixner1c143c42021-08-15 23:28:25 +0200199static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
Thomas Gleixnerc014ef62021-08-15 23:28:06 +0200200{
201 rt_mutex_init_waiter(waiter);
202 waiter->wake_state = TASK_RTLOCK_WAIT;
203}
204
Thomas Gleixner531ae4b2021-08-15 23:27:57 +0200205#else /* CONFIG_RT_MUTEXES */
206/* Used in rcu/tree_plugin.h */
Peter Zijlstra830e6ac2021-08-15 23:27:58 +0200207static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
Thomas Gleixner531ae4b2021-08-15 23:27:57 +0200208{
209 return NULL;
210}
211#endif /* !CONFIG_RT_MUTEXES */
212
Ingo Molnar23f78d4a2006-06-27 02:54:53 -0700213#endif