/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>

struct intel_runtime_pm;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;
	intel_wakeref_t wakeref;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct lock_class_key *key);
#define intel_wakeref_init(wf) do {					\
	static struct lock_class_key __key;				\
									\
	__intel_wakeref_init((wf), &__key);				\
} while (0)
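
/*
 * Example usage (a minimal sketch; the embedding "engine" object is
 * hypothetical and not defined by this header):
 *
 *	struct example_engine {
 *		struct intel_wakeref wakeref;
 *	} *engine;
 *
 *	intel_wakeref_init(&engine->wakeref);
 */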

int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
			      struct intel_wakeref *wf,
			      int (*fn)(struct intel_wakeref *wf));
int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
			     struct intel_wakeref *wf,
			     int (*fn)(struct intel_wakeref *wf));

/**
 * intel_wakeref_get: Acquire the wakeref
 * @rpm: the intel_runtime_pm device layer
 * @wf: the wakeref
 * @fn: callback for acquiring the wakeref, called only on first acquire.
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call @fn underneath the wakeref
 * mutex.
 *
 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 * will be released and the acquisition unwound, and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_runtime_pm *rpm,
		  struct intel_wakeref *wf,
		  int (*fn)(struct intel_wakeref *wf))
{
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(rpm, wf, fn);

	return 0;
}

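/*
 * Example (a minimal sketch; engine_unpark() and the rpm pointer are
 * hypothetical, not provided by this header):
 *
 *	static int engine_unpark(struct intel_wakeref *wf)
 *	{
 *		... power up the hardware, set wf->wakeref ...
 *		return 0;
 *	}
 *
 *	err = intel_wakeref_get(rpm, &engine->wakeref, engine_unpark);
 *	if (err)
 *		return err;
 */
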
/**
 * intel_wakeref_put: Release the wakeref
 * @rpm: the intel_runtime_pm device layer
 * @wf: the wakeref
 * @fn: callback for releasing the wakeref, called only on final release.
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the @fn callback is called
 * underneath the wakeref mutex.
 *
 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 * is retained and an error reported.
 *
 * Returns: 0 if the wakeref was released successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_put(struct intel_runtime_pm *rpm,
		  struct intel_wakeref *wf,
		  int (*fn)(struct intel_wakeref *wf))
{
	if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
		return __intel_wakeref_put_last(rpm, wf, fn);

	return 0;
}

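/*
 * Example (continuing the sketch above; engine_park() is hypothetical):
 *
 *	static int engine_park(struct intel_wakeref *wf)
 *	{
 *		... power down the hardware, release wf->wakeref ...
 *		return 0;
 *	}
 *
 *	err = intel_wakeref_put(rpm, &engine->wakeref, engine_park);
 */
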
/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_active(struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

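/*
 * Example (sketch): hold the mutex to take a stable snapshot, preventing
 * acquire/release transitions while the state is inspected:
 *
 *	intel_wakeref_lock(&engine->wakeref);
 *	if (intel_wakeref_active(&engine->wakeref))
 *		... inspect state that is only valid while awake ...
 *	intel_wakeref_unlock(&engine->wakeref);
 */
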
struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * The runtime-pm core uses a suspend delay after the last wakeref
 * is released before triggering runtime suspend of the device. That
 * delay is configurable via sysfs with little regard to the device
 * characteristics. Instead, we want to tune the autosuspend based on our
 * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 * timeout.
 *
 * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 * suspend immediately.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

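/*
 * Example lifecycle (sketch; the rpm pointer and the 250ms value are
 * illustrative only):
 *
 *	struct intel_wakeref_auto wf;
 *
 *	intel_wakeref_auto_init(&wf, rpm);
 *	intel_wakeref_auto(&wf, msecs_to_jiffies(250));
 *	... hardware is kept awake for at least another 250ms ...
 *	intel_wakeref_auto(&wf, 0);
 *	... cancels the delay, allowing suspend immediately ...
 *	intel_wakeref_auto_fini(&wf);
 */
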
#endif /* INTEL_WAKEREF_H */