/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);
	int (*put)(struct intel_wakeref *wf);
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct delayed_work work;
};

struct intel_wakeref_lockclass {
	struct lock_class_key mutex;
	struct lock_class_key work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct intel_wakeref_lockclass __key;			\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)

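/*
 * Illustrative sketch only (not part of this header): a user typically
 * embeds a struct intel_wakeref, supplies get/put callbacks and then
 * initialises it once. The names "foo", "foo_get" and "foo_put" below
 * are hypothetical.
 *
 *	static int foo_get(struct intel_wakeref *wf) { ...power up... return 0; }
 *	static int foo_put(struct intel_wakeref *wf) { ...power down... return 0; }
 *
 *	static const struct intel_wakeref_ops foo_wakeref_ops = {
 *		.get = foo_get,
 *		.put = foo_put,
 *	};
 *
 *	intel_wakeref_init(&foo->wakeref, foo->rpm, &foo_wakeref_ops);
 */
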
int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call the ops->get() callback underneath
 * the wakeref mutex.
 *
 * Note that ops->get() is allowed to fail, in which case the runtime-pm
 * wakeref will be released, the acquisition unwound, and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	might_sleep();
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}

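/*
 * Illustrative sketch only: a typical user brackets work that requires the
 * device to stay awake between a get and a put. "foo" is hypothetical.
 *
 *	err = intel_wakeref_get(&foo->wakeref);
 *	if (err)
 *		return err;
 *
 *	... do work that requires the HW to remain awake ...
 *
 *	intel_wakeref_put(&foo->wakeref);
 */
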
/**
 * __intel_wakeref_get: Acquire the wakeref, again
 * @wf: the wakeref
 *
 * Increment the wakeref counter; only valid if the wakeref is already held
 * by the caller.
 *
 * See intel_wakeref_get().
 */
static inline void
__intel_wakeref_get(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	atomic_inc(&wf->count);
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the wakeref is already
 * active.
 *
 * Returns: true if the wakeref was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}

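/*
 * Illustrative sketch only: opportunistically piggy-back on an already
 * active wakeref without waking the device. "foo" is hypothetical.
 *
 *	if (intel_wakeref_get_if_active(&foo->wakeref)) {
 *		... the HW is already awake, do optional housekeeping ...
 *		intel_wakeref_put(&foo->wakeref);
 *	}
 */
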
enum {
	INTEL_WAKEREF_PUT_ASYNC_BIT = 0,
	__INTEL_WAKEREF_PUT_LAST_BIT__
};

/**
 * __intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 * @flags: control flags (INTEL_WAKEREF_PUT_ASYNC, INTEL_WAKEREF_PUT_DELAY)
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the ops->put() callback is
 * called underneath the wakeref mutex.
 *
 * Note that ops->put() is allowed to fail, in which case the runtime-pm
 * wakeref is retained and an error reported.
 */
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
#define INTEL_WAKEREF_PUT_ASYNC BIT(INTEL_WAKEREF_PUT_ASYNC_BIT)
#define INTEL_WAKEREF_PUT_DELAY \
	GENMASK(BITS_PER_LONG - 1, __INTEL_WAKEREF_PUT_LAST_BIT__)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf, flags);
}

static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	might_sleep();
	__intel_wakeref_put(wf, 0);
}

static inline void
intel_wakeref_put_async(struct intel_wakeref *wf)
{
	__intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
}

static inline void
intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
{
	__intel_wakeref_put(wf,
			    INTEL_WAKEREF_PUT_ASYNC |
			    FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}

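/*
 * Illustrative sketch only: when the caller may not sleep, the final put can
 * be punted to the delayed worker, optionally after a delay (assumed here to
 * be in jiffies, matching the delayed work). "foo" is hypothetical.
 *
 *	intel_wakeref_put_async(&foo->wakeref);
 *
 * or, to keep the device awake a little longer before parking:
 *
 *	intel_wakeref_put_delay(&foo->wakeref, msecs_to_jiffies(100));
 */
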
/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

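/*
 * Illustrative sketch only: holding the mutex stabilises the wakeref state
 * (no get/put callback can run underneath it), which is useful when sampling
 * whether the device is awake. "foo" is hypothetical.
 *
 *	intel_wakeref_lock(&foo->wakeref);
 *	awake = intel_wakeref_is_active(&foo->wakeref);
 *	intel_wakeref_unlock(&foo->wakeref);
 */
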
/**
 * intel_wakeref_unlock_wait: Wait until the active callback is complete
 * @wf: the wakeref
 *
 * Waits until the active callback (running under @wf->mutex, possibly on
 * another CPU) has completed.
 */
static inline void
intel_wakeref_unlock_wait(struct intel_wakeref *wf)
{
	mutex_lock(&wf->mutex);
	mutex_unlock(&wf->mutex);
	flush_delayed_work(&wf->work);
}

/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

/**
 * __intel_wakeref_defer_park: Defer the current park callback
 * @wf: the wakeref
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	lockdep_assert_held(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for the earlier asynchronous release of the wakeref. Note
 * this will wait for any third party as well, so make sure you only wait
 * when you have control over the wakeref and trust no one else is acquiring
 * it.
 *
 * Return: 0 on success, error code if killed.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

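/*
 * Illustrative sketch only: typically used on a teardown or suspend path to
 * flush a pending asynchronous put before proceeding. "foo" is hypothetical.
 *
 *	intel_wakeref_put_async(&foo->wakeref);
 *	...
 *	err = intel_wakeref_wait_for_idle(&foo->wakeref);
 *	if (err)
 *		return err;
 */
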
struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

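/*
 * Illustrative sketch only: keep the device out of runtime suspend for at
 * least another 250ms after, say, a userspace access, then cancel the grace
 * period explicitly once it is no longer wanted. "foo" is hypothetical.
 *
 *	intel_wakeref_auto(&foo->userfault_wakeref, msecs_to_jiffies(250));
 *	...
 *	intel_wakeref_auto(&foo->userfault_wakeref, 0);
 */
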
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

#endif /* INTEL_WAKEREF_H */