/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_print.h>

/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine-grained locking, and the atomic ioctl where
 * userspace can indirectly control locking order, it becomes necessary
 * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
 * the locking is more distributed around the driver code, we want a bit
 * of extra utility/tracking out of our acquire-ctx. This is provided
 * by &struct drm_modeset_lock and &struct drm_modeset_acquire_ctx.
 *
 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.rst
 *
 * The basic usage pattern is to::
 *
 *     drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
 *     retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *         ret = drm_modeset_lock(lock, ctx)
 *         if (ret == -EDEADLK) {
 *             ret = drm_modeset_backoff(ctx);
 *             if (!ret)
 *                 goto retry;
 *         }
 *         if (ret)
 *             goto out;
 *     }
 *     ... do stuff ...
 *     out:
 *     drm_modeset_drop_locks(ctx);
 *     drm_modeset_acquire_fini(ctx);
 *
 * For convenience this control flow is implemented in
 * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
 * where all modeset locks need to be taken through drm_modeset_lock_all_ctx();
 * see the sketch after this comment.
 *
 * If all that is needed is a single modeset lock, then the &struct
 * drm_modeset_acquire_ctx is not needed and the locking can be simplified
 * by passing a NULL instead of ctx in the drm_modeset_lock() call or
 * calling drm_modeset_lock_single_interruptible(). To unlock afterwards
 * call drm_modeset_unlock().
 *
 * On top of these per-object locks using &ww_mutex there's also an overall
 * &drm_mode_config.mutex, for protecting everything else. Mostly this means
 * probe state of connectors, and preventing hotplug add/removal of connectors.
 *
 * Finally there's a bunch of dedicated locks to protect drm core internal
 * lists and lookup data structures.
 */
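
/*
 * A minimal sketch of the convenience-macro flavour mentioned above, assuming
 * the DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) and
 * DRM_MODESET_LOCK_ALL_END(dev, ctx, ret) signatures from
 * include/drm/drm_modeset_lock.h; the retry/backoff control flow from the
 * pattern above is hidden inside the macro pair:
 *
 *     struct drm_modeset_acquire_ctx ctx;
 *     int ret;
 *
 *     DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
 *     ... do stuff with all modeset locks held ...
 *     DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
 */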

static DEFINE_WW_CLASS(crtc_ww_class);

#if IS_ENABLED(CONFIG_DRM_DEBUG_MODESET_LOCK)
static noinline depot_stack_handle_t __drm_stack_depot_save(void)
{
	unsigned long entries[8];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
	struct drm_printer p = drm_debug_printer("drm_modeset_lock");
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	nr_entries = stack_depot_fetch(stack_depot, &entries);
	stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 2);

	drm_printf(&p, "attempting to lock a contended lock without backoff:\n%s", buf);

	kfree(buf);
}

static void __drm_stack_depot_init(void)
{
	stack_depot_init();
}
#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
static depot_stack_handle_t __drm_stack_depot_save(void)
{
	return 0;
}
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
}
static void __drm_stack_depot_init(void)
{
}
#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */

/**
 * drm_modeset_lock_all - take all modeset locks
 * @dev: DRM device
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented. Locks must be dropped by calling the
 * drm_modeset_unlock_all() function.
 *
 * This function is deprecated. It allocates a lock acquisition context and
 * stores it in &drm_device.mode_config. This facilitates conversion of
 * existing code because it removes the need to manually deal with the
 * acquisition context, but it is also brittle because the context is global
 * and care must be taken not to nest calls. New code should use the
 * drm_modeset_lock_all_ctx() function and pass in the context explicitly.
 */
void drm_modeset_lock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
	if (WARN_ON(!ctx))
		return;

	mutex_lock(&config->mutex);

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, ctx);
	if (ret < 0) {
		if (ret == -EDEADLK) {
			drm_modeset_backoff(ctx);
			goto retry;
		}

		drm_modeset_acquire_fini(ctx);
		kfree(ctx);
		return;
	}
	ww_acquire_done(&ctx->ww_ctx);

	WARN_ON(config->acquire_ctx);

	/*
	 * We hold the locks now, so it is safe to stash the acquisition
	 * context for drm_modeset_unlock_all().
	 */
	config->acquire_ctx = ctx;

	drm_warn_on_modeset_not_all_locked(dev);
}
EXPORT_SYMBOL(drm_modeset_lock_all);

/**
 * drm_modeset_unlock_all - drop all modeset locks
 * @dev: DRM device
 *
 * This function drops all modeset locks taken by a previous call to the
 * drm_modeset_lock_all() function.
 *
 * This function is deprecated. It uses the lock acquisition context stored
 * in &drm_device.mode_config. This facilitates conversion of existing
 * code because it removes the need to manually deal with the acquisition
 * context, but it is also brittle because the context is global and care must
 * be taken not to nest calls. New code should pass the acquisition context
 * directly to the drm_modeset_drop_locks() function.
 */
void drm_modeset_unlock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	config->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);

	mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);

/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
	if (oops_in_progress)
		return;

	drm_for_each_crtc(crtc, dev)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);

/**
 * drm_modeset_acquire_init - initialize acquire context
 * @ctx: the acquire context
 * @flags: 0 or %DRM_MODESET_ACQUIRE_INTERRUPTIBLE
 *
 * When passing %DRM_MODESET_ACQUIRE_INTERRUPTIBLE to @flags,
 * all calls to drm_modeset_lock() will perform an interruptible
 * wait.
 */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
		uint32_t flags)
{
	memset(ctx, 0, sizeof(*ctx));
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);

	if (flags & DRM_MODESET_ACQUIRE_INTERRUPTIBLE)
		ctx->interruptible = true;
}
EXPORT_SYMBOL(drm_modeset_acquire_init);

/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);

/**
 * drm_modeset_drop_locks - drop all locks
 * @ctx: the acquire context
 *
 * Drop all locks currently held against this acquire context.
 */
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
	if (WARN_ON(ctx->contended))
		__drm_stack_depot_print(ctx->stack_depot);

	while (!list_empty(&ctx->locked)) {
		struct drm_modeset_lock *lock;

		lock = list_first_entry(&ctx->locked,
				struct drm_modeset_lock, head);

		drm_modeset_unlock(lock);
	}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);

static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	if (WARN_ON(ctx->contended))
		__drm_stack_depot_print(ctx->stack_depot);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex, NULL))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/*
		 * We already hold the lock. This is fine: for atomic we
		 * need to be able to drm_modeset_lock() things without
		 * having to keep track of what is already locked or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
		ctx->stack_depot = __drm_stack_depot_save();
	}

	return ret;
}

/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (i.e. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 *
 * This function returns 0 on success, or -ERESTARTSYS if this context
 * is initialized with %DRM_MODESET_ACQUIRE_INTERRUPTIBLE and the
 * wait has been interrupted.
 */
int drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_modeset_lock *contended = ctx->contended;

	ctx->contended = NULL;
	ctx->stack_depot = 0;

	if (WARN_ON(!contended))
		return 0;

	drm_modeset_drop_locks(ctx);

	return modeset_lock(contended, ctx, ctx->interruptible, true);
}
EXPORT_SYMBOL(drm_modeset_backoff);

/**
 * drm_modeset_lock_init - initialize lock
 * @lock: lock to init
 */
void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
	ww_mutex_init(&lock->mutex, &crtc_ww_class);
	INIT_LIST_HEAD(&lock->head);
	__drm_stack_depot_init();
}
EXPORT_SYMBOL(drm_modeset_lock_init);

/**
 * drm_modeset_lock - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * If @ctx is not NULL, then its ww acquire context is used and the
 * lock will be tracked by the context and can be released by calling
 * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
 * deadlock scenario has been detected and it is an error to attempt
 * to take any more locks without first calling drm_modeset_backoff().
 *
 * If @ctx is not NULL and initialized with
 * %DRM_MODESET_ACQUIRE_INTERRUPTIBLE, this function will fail with
 * -ERESTARTSYS when interrupted.
 *
 * If @ctx is NULL then the function call behaves like a normal,
 * uninterruptible non-nesting mutex_lock() call.
 */
int drm_modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, ctx->interruptible, false);

	ww_mutex_lock(&lock->mutex, NULL);
	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);
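
/*
 * Example (a minimal sketch, not taken from an in-tree driver): taking a
 * CRTC's lock plus its primary plane's lock under one acquire context, with
 * the -EDEADLK backoff dance. The example_crtc_op() helper is hypothetical,
 * purely for illustration.
 *
 *     struct drm_modeset_acquire_ctx ctx;
 *     int ret;
 *
 *     drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
 * retry:
 *     ret = drm_modeset_lock(&crtc->mutex, &ctx);
 *     if (!ret)
 *         ret = drm_modeset_lock(&crtc->primary->mutex, &ctx);
 *     if (ret == -EDEADLK) {
 *         ret = drm_modeset_backoff(&ctx);
 *         if (!ret)
 *             goto retry;
 *     }
 *     if (!ret)
 *         ret = example_crtc_op(crtc);
 *
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */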

/**
 * drm_modeset_lock_single_interruptible - take a single modeset lock
 * @lock: lock to take
 *
 * This function behaves as drm_modeset_lock() with a NULL context,
 * but performs interruptible waits.
 *
 * This function returns 0 on success, or -ERESTARTSYS when interrupted.
 */
int drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock)
{
	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_single_interruptible);
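
/*
 * Example (a minimal sketch): when only one lock is needed, no acquire
 * context is required and a simple take/drop pair suffices. The wait is
 * interruptible, so -ERESTARTSYS must be handled:
 *
 *     ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
 *     if (ret)
 *         return ret;
 *     ... inspect or update state protected by crtc->mutex ...
 *     drm_modeset_unlock(&crtc->mutex);
 */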

/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);

/**
 * drm_modeset_lock_all_ctx - take all modeset locks
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented.
 *
 * Unlike drm_modeset_lock_all(), it doesn't take the &drm_mode_config.mutex
 * since that lock isn't required for modeset state changes. Callers which
 * need to grab that lock too need to do so outside of the acquire context
 * @ctx.
 *
 * Locks acquired with this function should be released by calling the
 * drm_modeset_drop_locks() function on @ctx.
 *
 * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int drm_modeset_lock_all_ctx(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_private_obj *privobj;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	int ret;

	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
	if (ret)
		return ret;

	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_plane(plane, dev) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_privobj(privobj, dev) {
		ret = drm_modeset_lock(&privobj->lock, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_ctx);