Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
| 3 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
| 4 | * Copyright (c) 2009-2010, Code Aurora Forum. |
| 5 | * Copyright 2016 Intel Corp. |
| 6 | * |
| 7 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 8 | * copy of this software and associated documentation files (the "Software"), |
| 9 | * to deal in the Software without restriction, including without limitation |
| 10 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 11 | * and/or sell copies of the Software, and to permit persons to whom the |
| 12 | * Software is furnished to do so, subject to the following conditions: |
| 13 | * |
| 14 | * The above copyright notice and this permission notice (including the next |
| 15 | * paragraph) shall be included in all copies or substantial portions of the |
| 16 | * Software. |
| 17 | * |
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 21 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 22 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 23 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 24 | * OTHER DEALINGS IN THE SOFTWARE. |
| 25 | */ |
| 26 | |
| 27 | #ifndef _DRM_DRV_H_ |
| 28 | #define _DRM_DRV_H_ |
| 29 | |
| 30 | #include <linux/list.h> |
| 31 | #include <linux/irqreturn.h> |
| 32 | |
Daniel Vetter | c07dcd6 | 2017-08-02 13:56:02 +0200 | [diff] [blame] | 33 | #include <drm/drm_device.h> |
| 34 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 35 | struct drm_file; |
| 36 | struct drm_gem_object; |
| 37 | struct drm_master; |
| 38 | struct drm_minor; |
| 39 | struct dma_buf_attachment; |
| 40 | struct drm_display_mode; |
| 41 | struct drm_mode_create_dumb; |
Noralf Trønnes | 45d58b4 | 2017-11-07 20:13:40 +0100 | [diff] [blame] | 42 | struct drm_printer; |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 43 | |
/**
 * enum drm_driver_feature - feature flags
 *
 * See &drm_driver.driver_features, drm_device.driver_features and
 * drm_core_check_feature().
 */
enum drm_driver_feature {
	/**
	 * @DRIVER_GEM:
	 *
	 * Driver uses the GEM memory manager. This should be set for all modern
	 * drivers.
	 */
	DRIVER_GEM = BIT(0),
	/**
	 * @DRIVER_MODESET:
	 *
	 * Driver supports mode setting interfaces (KMS).
	 */
	DRIVER_MODESET = BIT(1),
	/**
	 * @DRIVER_RENDER:
	 *
	 * Driver supports dedicated render nodes. See also the :ref:`section on
	 * render nodes <drm_render_node>` for details.
	 */
	DRIVER_RENDER = BIT(3),
	/**
	 * @DRIVER_ATOMIC:
	 *
	 * Driver supports the full atomic modesetting userspace API. Drivers
	 * which only use atomic internally, but do not support the full
	 * userspace API (e.g. not all properties converted to atomic, or
	 * multi-plane updates are not guaranteed to be tear-free) should not
	 * set this flag.
	 */
	DRIVER_ATOMIC = BIT(4),
	/**
	 * @DRIVER_SYNCOBJ:
	 *
	 * Driver supports &drm_syncobj for explicit synchronization of command
	 * submission.
	 */
	DRIVER_SYNCOBJ = BIT(5),
	/**
	 * @DRIVER_SYNCOBJ_TIMELINE:
	 *
	 * Driver supports the timeline flavor of &drm_syncobj for explicit
	 * synchronization of command submission.
	 */
	DRIVER_SYNCOBJ_TIMELINE = BIT(6),

	/* IMPORTANT: Below are all the legacy flags, add new ones above. */

	/**
	 * @DRIVER_USE_AGP:
	 *
	 * Set up DRM AGP support, see drm_agp_init(), the DRM core will manage
	 * AGP resources. New drivers don't need this.
	 */
	DRIVER_USE_AGP = BIT(25),
	/**
	 * @DRIVER_LEGACY:
	 *
	 * Denote a legacy driver using shadow attach. Do not use.
	 */
	DRIVER_LEGACY = BIT(26),
	/**
	 * @DRIVER_PCI_DMA:
	 *
	 * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace
	 * will be enabled. Only for legacy drivers. Do not use.
	 */
	DRIVER_PCI_DMA = BIT(27),
	/**
	 * @DRIVER_SG:
	 *
	 * Driver can perform scatter/gather DMA, allocation and mapping of
	 * scatter/gather buffers will be enabled. Only for legacy drivers. Do
	 * not use.
	 */
	DRIVER_SG = BIT(28),

	/**
	 * @DRIVER_HAVE_DMA:
	 *
	 * Driver supports DMA, the userspace DMA API will be supported. Only
	 * for legacy drivers. Do not use.
	 */
	DRIVER_HAVE_DMA = BIT(29),
	/**
	 * @DRIVER_HAVE_IRQ:
	 *
	 * Legacy irq support. Only for legacy drivers. Do not use.
	 *
	 * New drivers can either use the drm_irq_install() and
	 * drm_irq_uninstall() helper functions, or roll their own irq support
	 * code by calling request_irq() directly.
	 */
	DRIVER_HAVE_IRQ = BIT(30),
	/**
	 * @DRIVER_KMS_LEGACY_CONTEXT:
	 *
	 * Used only by nouveau for backwards compatibility with existing
	 * userspace. Do not use.
	 */
	DRIVER_KMS_LEGACY_CONTEXT = BIT(31),
};
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 152 | |
| 153 | /** |
| 154 | * struct drm_driver - DRM driver structure |
| 155 | * |
Luca Ceresoli | 60e6ecf | 2019-03-13 16:35:37 +0100 | [diff] [blame] | 156 | * This structure represent the common code for a family of cards. There will be |
| 157 | * one &struct drm_device for each card present in this family. It contains lots |
| 158 | * of vfunc entries, and a pile of those probably should be moved to more |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 159 | * appropriate places like &drm_mode_config_funcs or into a new operations |
| 160 | * structure for GEM drivers. |
| 161 | */ |
| 162 | struct drm_driver { |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 163 | /** |
| 164 | * @load: |
| 165 | * |
| 166 | * Backward-compatible driver callback to complete |
| 167 | * initialization steps after the driver is registered. For |
| 168 | * this reason, may suffer from race conditions and its use is |
| 169 | * deprecated for new drivers. It is therefore only supported |
| 170 | * for existing drivers not yet converted to the new scheme. |
| 171 | * See drm_dev_init() and drm_dev_register() for proper and |
| 172 | * race-free way to set up a &struct drm_device. |
| 173 | * |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 174 | * This is deprecated, do not use! |
| 175 | * |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 176 | * Returns: |
| 177 | * |
| 178 | * Zero on success, non-zero value on failure. |
| 179 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 180 | int (*load) (struct drm_device *, unsigned long flags); |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 181 | |
| 182 | /** |
| 183 | * @open: |
| 184 | * |
| 185 | * Driver callback when a new &struct drm_file is opened. Useful for |
| 186 | * setting up driver-private data structures like buffer allocators, |
| 187 | * execution contexts or similar things. Such driver-private resources |
| 188 | * must be released again in @postclose. |
| 189 | * |
| 190 | * Since the display/modeset side of DRM can only be owned by exactly |
| 191 | * one &struct drm_file (see &drm_file.is_master and &drm_device.master) |
| 192 | * there should never be a need to set up any modeset related resources |
| 193 | * in this callback. Doing so would be a driver design bug. |
| 194 | * |
| 195 | * Returns: |
| 196 | * |
| 197 | * 0 on success, a negative error code on failure, which will be |
| 198 | * promoted to userspace as the result of the open() system call. |
| 199 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 200 | int (*open) (struct drm_device *, struct drm_file *); |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 201 | |
| 202 | /** |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 203 | * @postclose: |
| 204 | * |
| 205 | * One of the driver callbacks when a new &struct drm_file is closed. |
| 206 | * Useful for tearing down driver-private data structures allocated in |
| 207 | * @open like buffer allocators, execution contexts or similar things. |
| 208 | * |
| 209 | * Since the display/modeset side of DRM can only be owned by exactly |
| 210 | * one &struct drm_file (see &drm_file.is_master and &drm_device.master) |
| 211 | * there should never be a need to tear down any modeset related |
| 212 | * resources in this callback. Doing so would be a driver design bug. |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 213 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 214 | void (*postclose) (struct drm_device *, struct drm_file *); |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 215 | |
| 216 | /** |
| 217 | * @lastclose: |
| 218 | * |
| 219 | * Called when the last &struct drm_file has been closed and there's |
| 220 | * currently no userspace client for the &struct drm_device. |
| 221 | * |
| 222 | * Modern drivers should only use this to force-restore the fbdev |
| 223 | * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked(). |
| 224 | * Anything else would indicate there's something seriously wrong. |
| 225 | * Modern drivers can also use this to execute delayed power switching |
| 226 | * state changes, e.g. in conjunction with the :ref:`vga_switcheroo` |
| 227 | * infrastructure. |
| 228 | * |
Daniel Vetter | 45c3d21 | 2017-05-08 10:26:33 +0200 | [diff] [blame] | 229 | * This is called after @postclose hook has been called. |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 230 | * |
| 231 | * NOTE: |
| 232 | * |
| 233 | * All legacy drivers use this callback to de-initialize the hardware. |
| 234 | * This is purely because of the shadow-attach model, where the DRM |
| 235 | * kernel driver does not really own the hardware. Instead ownership is |
| 236 | * handled with the help of userspace through an inherently racy dance |
| 237 | * to set/unset the VT into raw mode. |
| 238 | * |
| 239 | * Legacy drivers initialize the hardware in the @firstopen callback, |
| 240 | * which isn't even called for modern drivers. |
| 241 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 242 | void (*lastclose) (struct drm_device *); |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 243 | |
| 244 | /** |
| 245 | * @unload: |
| 246 | * |
| 247 | * Reverse the effects of the driver load callback. Ideally, |
| 248 | * the clean up performed by the driver should happen in the |
| 249 | * reverse order of the initialization. Similarly to the load |
| 250 | * hook, this handler is deprecated and its usage should be |
| 251 | * dropped in favor of an open-coded teardown function at the |
Aishwarya Pant | 9a96f55 | 2017-09-26 13:58:49 +0530 | [diff] [blame] | 252 | * driver layer. See drm_dev_unregister() and drm_dev_put() |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 253 | * for the proper way to remove a &struct drm_device. |
| 254 | * |
| 255 | * The unload() hook is called right after unregistering |
| 256 | * the device. |
| 257 | * |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 258 | */ |
Gabriel Krisman Bertazi | 11b3c20 | 2017-01-06 15:57:31 -0200 | [diff] [blame] | 259 | void (*unload) (struct drm_device *); |
Chris Wilson | f30c925 | 2017-02-02 09:36:32 +0000 | [diff] [blame] | 260 | |
| 261 | /** |
| 262 | * @release: |
| 263 | * |
| 264 | * Optional callback for destroying device data after the final |
| 265 | * reference is released, i.e. the device is being destroyed. Drivers |
| 266 | * using this callback are responsible for calling drm_dev_fini() |
| 267 | * to finalize the device and then freeing the struct themselves. |
| 268 | */ |
| 269 | void (*release) (struct drm_device *); |
| 270 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 271 | /** |
Daniel Vetter | 16584b2 | 2017-05-31 11:22:53 +0200 | [diff] [blame] | 272 | * @irq_handler: |
| 273 | * |
| 274 | * Interrupt handler called when using drm_irq_install(). Not used by |
| 275 | * drivers which implement their own interrupt handling. |
| 276 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 277 | irqreturn_t(*irq_handler) (int irq, void *arg); |
Daniel Vetter | 16584b2 | 2017-05-31 11:22:53 +0200 | [diff] [blame] | 278 | |
| 279 | /** |
| 280 | * @irq_preinstall: |
| 281 | * |
| 282 | * Optional callback used by drm_irq_install() which is called before |
| 283 | * the interrupt handler is registered. This should be used to clear out |
| 284 | * any pending interrupts (from e.g. firmware based drivers) and reset |
| 285 | * the interrupt handling registers. |
| 286 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 287 | void (*irq_preinstall) (struct drm_device *dev); |
Daniel Vetter | 16584b2 | 2017-05-31 11:22:53 +0200 | [diff] [blame] | 288 | |
| 289 | /** |
| 290 | * @irq_postinstall: |
| 291 | * |
| 292 | * Optional callback used by drm_irq_install() which is called after |
| 293 | * the interrupt handler is registered. This should be used to enable |
| 294 | * interrupt generation in the hardware. |
| 295 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 296 | int (*irq_postinstall) (struct drm_device *dev); |
Daniel Vetter | 16584b2 | 2017-05-31 11:22:53 +0200 | [diff] [blame] | 297 | |
| 298 | /** |
| 299 | * @irq_uninstall: |
| 300 | * |
| 301 | * Optional callback used by drm_irq_uninstall() which is called before |
| 302 | * the interrupt handler is unregistered. This should be used to disable |
| 303 | * interrupt generation in the hardware. |
| 304 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 305 | void (*irq_uninstall) (struct drm_device *dev); |
| 306 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 307 | /** |
Daniel Vetter | 6c4789e | 2016-11-14 12:58:20 +0100 | [diff] [blame] | 308 | * @master_set: |
| 309 | * |
| 310 | * Called whenever the minor master is set. Only used by vmwgfx. |
| 311 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 312 | int (*master_set)(struct drm_device *dev, struct drm_file *file_priv, |
| 313 | bool from_open); |
Daniel Vetter | 6c4789e | 2016-11-14 12:58:20 +0100 | [diff] [blame] | 314 | /** |
| 315 | * @master_drop: |
| 316 | * |
| 317 | * Called whenever the minor master is dropped. Only used by vmwgfx. |
| 318 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 319 | void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); |
| 320 | |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 321 | /** |
| 322 | * @debugfs_init: |
| 323 | * |
| 324 | * Allows drivers to create driver-specific debugfs files. |
| 325 | */ |
Wambui Karuga | 7ce84471 | 2020-03-10 16:31:21 +0300 | [diff] [blame^] | 326 | void (*debugfs_init)(struct drm_minor *minor); |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 327 | |
| 328 | /** |
| 329 | * @gem_free_object: destructor for drm_gem_objects |
| 330 | * |
| 331 | * This is deprecated and should not be used by new drivers. Use |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 332 | * &drm_gem_object_funcs.free instead. |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 333 | */ |
| 334 | void (*gem_free_object) (struct drm_gem_object *obj); |
| 335 | |
| 336 | /** |
| 337 | * @gem_free_object_unlocked: destructor for drm_gem_objects |
| 338 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 339 | * This is deprecated and should not be used by new drivers. Use |
| 340 | * &drm_gem_object_funcs.free instead. |
| 341 | * Compared to @gem_free_object this is not encumbered with |
| 342 | * &drm_device.struct_mutex legacy locking schemes. |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 343 | */ |
| 344 | void (*gem_free_object_unlocked) (struct drm_gem_object *obj); |
| 345 | |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 346 | /** |
| 347 | * @gem_open_object: |
| 348 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 349 | * This callback is deprecated in favour of &drm_gem_object_funcs.open. |
| 350 | * |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 351 | * Driver hook called upon gem handle creation |
| 352 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 353 | int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 354 | |
| 355 | /** |
| 356 | * @gem_close_object: |
| 357 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 358 | * This callback is deprecated in favour of &drm_gem_object_funcs.close. |
| 359 | * |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 360 | * Driver hook called upon gem handle release |
| 361 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 362 | void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); |
| 363 | |
| 364 | /** |
Noralf Trønnes | 45d58b4 | 2017-11-07 20:13:40 +0100 | [diff] [blame] | 365 | * @gem_print_info: |
| 366 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 367 | * This callback is deprecated in favour of |
| 368 | * &drm_gem_object_funcs.print_info. |
| 369 | * |
Noralf Trønnes | 45d58b4 | 2017-11-07 20:13:40 +0100 | [diff] [blame] | 370 | * If driver subclasses struct &drm_gem_object, it can implement this |
| 371 | * optional hook for printing additional driver specific info. |
| 372 | * |
| 373 | * drm_printf_indent() should be used in the callback passing it the |
| 374 | * indent argument. |
| 375 | * |
| 376 | * This callback is called from drm_gem_print_info(). |
| 377 | */ |
| 378 | void (*gem_print_info)(struct drm_printer *p, unsigned int indent, |
| 379 | const struct drm_gem_object *obj); |
| 380 | |
| 381 | /** |
Chris Wilson | 218adc1 | 2016-11-25 12:34:27 +0000 | [diff] [blame] | 382 | * @gem_create_object: constructor for gem objects |
| 383 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 384 | * Hook for allocating the GEM object struct, for use by the CMA and |
| 385 | * SHMEM GEM helpers. |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 386 | */ |
| 387 | struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, |
| 388 | size_t size); |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 389 | /** |
| 390 | * @prime_handle_to_fd: |
| 391 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 392 | * Main PRIME export function. Should be implemented with |
| 393 | * drm_gem_prime_handle_to_fd() for GEM based drivers. |
| 394 | * |
| 395 | * For an in-depth discussion see :ref:`PRIME buffer sharing |
| 396 | * documentation <prime_buffer_sharing>`. |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 397 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 398 | int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, |
| 399 | uint32_t handle, uint32_t flags, int *prime_fd); |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 400 | /** |
| 401 | * @prime_fd_to_handle: |
| 402 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 403 | * Main PRIME import function. Should be implemented with |
| 404 | * drm_gem_prime_fd_to_handle() for GEM based drivers. |
| 405 | * |
| 406 | * For an in-depth discussion see :ref:`PRIME buffer sharing |
| 407 | * documentation <prime_buffer_sharing>`. |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 408 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 409 | int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, |
| 410 | int prime_fd, uint32_t *handle); |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 411 | /** |
| 412 | * @gem_prime_export: |
| 413 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 414 | * Export hook for GEM drivers. Deprecated in favour of |
| 415 | * &drm_gem_object_funcs.export. |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 416 | */ |
Daniel Vetter | e4fa845 | 2019-06-14 22:35:25 +0200 | [diff] [blame] | 417 | struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj, |
| 418 | int flags); |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 419 | /** |
| 420 | * @gem_prime_import: |
| 421 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 422 | * Import hook for GEM drivers. |
Noralf Trønnes | f001488 | 2018-11-10 15:56:43 +0100 | [diff] [blame] | 423 | * |
| 424 | * This defaults to drm_gem_prime_import() if not set. |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 425 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 426 | struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, |
| 427 | struct dma_buf *dma_buf); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 428 | |
| 429 | /** |
| 430 | * @gem_prime_pin: |
| 431 | * |
| 432 | * Deprecated hook in favour of &drm_gem_object_funcs.pin. |
| 433 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 434 | int (*gem_prime_pin)(struct drm_gem_object *obj); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 435 | |
| 436 | /** |
| 437 | * @gem_prime_unpin: |
| 438 | * |
| 439 | * Deprecated hook in favour of &drm_gem_object_funcs.unpin. |
| 440 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 441 | void (*gem_prime_unpin)(struct drm_gem_object *obj); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 442 | |
| 443 | |
| 444 | /** |
| 445 | * @gem_prime_get_sg_table: |
| 446 | * |
| 447 | * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table. |
| 448 | */ |
| 449 | struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); |
| 450 | |
| 451 | /** |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 452 | * @gem_prime_import_sg_table: |
| 453 | * |
| 454 | * Optional hook used by the PRIME helper functions |
| 455 | * drm_gem_prime_import() respectively drm_gem_prime_import_dev(). |
| 456 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 457 | struct drm_gem_object *(*gem_prime_import_sg_table)( |
| 458 | struct drm_device *dev, |
| 459 | struct dma_buf_attachment *attach, |
| 460 | struct sg_table *sgt); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 461 | /** |
| 462 | * @gem_prime_vmap: |
| 463 | * |
| 464 | * Deprecated vmap hook for GEM drivers. Please use |
| 465 | * &drm_gem_object_funcs.vmap instead. |
| 466 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 467 | void *(*gem_prime_vmap)(struct drm_gem_object *obj); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 468 | |
| 469 | /** |
| 470 | * @gem_prime_vunmap: |
| 471 | * |
| 472 | * Deprecated vunmap hook for GEM drivers. Please use |
| 473 | * &drm_gem_object_funcs.vunmap instead. |
| 474 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 475 | void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 476 | |
| 477 | /** |
| 478 | * @gem_prime_mmap: |
| 479 | * |
| 480 | * mmap hook for GEM drivers, used to implement dma-buf mmap in the |
| 481 | * PRIME helpers. |
| 482 | * |
| 483 | * FIXME: There's way too much duplication going on here, and also moved |
| 484 | * to &drm_gem_object_funcs. |
| 485 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 486 | int (*gem_prime_mmap)(struct drm_gem_object *obj, |
| 487 | struct vm_area_struct *vma); |
| 488 | |
Daniel Vetter | 4f93624 | 2016-11-14 12:58:21 +0100 | [diff] [blame] | 489 | /** |
| 490 | * @dumb_create: |
| 491 | * |
| 492 | * This creates a new dumb buffer in the driver's backing storage manager (GEM, |
| 493 | * TTM or something else entirely) and returns the resulting buffer handle. This |
| 494 | * handle can then be wrapped up into a framebuffer modeset object. |
| 495 | * |
| 496 | * Note that userspace is not allowed to use such objects for render |
| 497 | * acceleration - drivers must create their own private ioctls for such a use |
| 498 | * case. |
| 499 | * |
| 500 | * Width, height and depth are specified in the &drm_mode_create_dumb |
| 501 | * argument. The callback needs to fill the handle, pitch and size for |
| 502 | * the created buffer. |
| 503 | * |
| 504 | * Called by the user via ioctl. |
| 505 | * |
| 506 | * Returns: |
| 507 | * |
| 508 | * Zero on success, negative errno on failure. |
| 509 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 510 | int (*dumb_create)(struct drm_file *file_priv, |
| 511 | struct drm_device *dev, |
| 512 | struct drm_mode_create_dumb *args); |
Daniel Vetter | 4f93624 | 2016-11-14 12:58:21 +0100 | [diff] [blame] | 513 | /** |
| 514 | * @dumb_map_offset: |
| 515 | * |
| 516 | * Allocate an offset in the drm device node's address space to be able to |
Daniel Vetter | 39dea70 | 2018-11-27 10:19:21 +0100 | [diff] [blame] | 517 | * memory map a dumb buffer. |
| 518 | * |
| 519 | * The default implementation is drm_gem_create_mmap_offset(). GEM based |
| 520 | * drivers must not overwrite this. |
Daniel Vetter | 4f93624 | 2016-11-14 12:58:21 +0100 | [diff] [blame] | 521 | * |
| 522 | * Called by the user via ioctl. |
| 523 | * |
| 524 | * Returns: |
| 525 | * |
| 526 | * Zero on success, negative errno on failure. |
| 527 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 528 | int (*dumb_map_offset)(struct drm_file *file_priv, |
| 529 | struct drm_device *dev, uint32_t handle, |
| 530 | uint64_t *offset); |
Daniel Vetter | 4f93624 | 2016-11-14 12:58:21 +0100 | [diff] [blame] | 531 | /** |
| 532 | * @dumb_destroy: |
| 533 | * |
| 534 | * This destroys the userspace handle for the given dumb backing storage buffer. |
| 535 | * Since buffer objects must be reference counted in the kernel a buffer object |
| 536 | * won't be immediately freed if a framebuffer modeset object still uses it. |
| 537 | * |
| 538 | * Called by the user via ioctl. |
| 539 | * |
Daniel Vetter | 39dea70 | 2018-11-27 10:19:21 +0100 | [diff] [blame] | 540 | * The default implementation is drm_gem_dumb_destroy(). GEM based drivers |
| 541 | * must not overwrite this. |
| 542 | * |
Daniel Vetter | 4f93624 | 2016-11-14 12:58:21 +0100 | [diff] [blame] | 543 | * Returns: |
| 544 | * |
| 545 | * Zero on success, negative errno on failure. |
| 546 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 547 | int (*dumb_destroy)(struct drm_file *file_priv, |
| 548 | struct drm_device *dev, |
| 549 | uint32_t handle); |
| 550 | |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 551 | /** |
| 552 | * @gem_vm_ops: Driver private ops for this object |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 553 | * |
| 554 | * For GEM drivers this is deprecated in favour of |
| 555 | * &drm_gem_object_funcs.vm_ops. |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 556 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 557 | const struct vm_operations_struct *gem_vm_ops; |
| 558 | |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 559 | /** @major: driver major number */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 560 | int major; |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 561 | /** @minor: driver minor number */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 562 | int minor; |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 563 | /** @patchlevel: driver patch level */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 564 | int patchlevel; |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 565 | /** @name: driver name */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 566 | char *name; |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 567 | /** @desc: driver description */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 568 | char *desc; |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 569 | /** @date: driver date */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 570 | char *date; |
| 571 | |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 572 | /** |
| 573 | * @driver_features: |
| 574 | * Driver features, see &enum drm_driver_feature. Drivers can disable |
| 575 | * some features on a per-instance basis using |
| 576 | * &drm_device.driver_features. |
| 577 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 578 | u32 driver_features; |
Daniel Vetter | bb2eaba | 2017-05-31 11:20:45 +0200 | [diff] [blame] | 579 | |
| 580 | /** |
| 581 | * @ioctls: |
| 582 | * |
| 583 | * Array of driver-private IOCTL description entries. See the chapter on |
| 584 | * :ref:`IOCTL support in the userland interfaces |
| 585 | * chapter<drm_driver_ioctl>` for the full details. |
| 586 | */ |
| 587 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 588 | const struct drm_ioctl_desc *ioctls; |
Daniel Vetter | bb2eaba | 2017-05-31 11:20:45 +0200 | [diff] [blame] | 589 | /** @num_ioctls: Number of entries in @ioctls. */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 590 | int num_ioctls; |
Daniel Vetter | bb2eaba | 2017-05-31 11:20:45 +0200 | [diff] [blame] | 591 | |
| 592 | /** |
| 593 | * @fops: |
| 594 | * |
| 595 | * File operations for the DRM device node. See the discussion in |
| 596 | * :ref:`file operations<drm_driver_fops>` for in-depth coverage and |
| 597 | * some examples. |
| 598 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 599 | const struct file_operations *fops; |
| 600 | |
Daniel Vetter | 0683c0a | 2017-01-25 07:26:54 +0100 | [diff] [blame] | 601 | /* Everything below here is for legacy driver, never use! */ |
| 602 | /* private: */ |
| 603 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 604 | /* List of devices hanging off this driver with stealth attach. */ |
| 605 | struct list_head legacy_dev_list; |
Daniel Vetter | 0683c0a | 2017-01-25 07:26:54 +0100 | [diff] [blame] | 606 | int (*firstopen) (struct drm_device *); |
Daniel Vetter | 45c3d21 | 2017-05-08 10:26:33 +0200 | [diff] [blame] | 607 | void (*preclose) (struct drm_device *, struct drm_file *file_priv); |
Daniel Vetter | 0683c0a | 2017-01-25 07:26:54 +0100 | [diff] [blame] | 608 | int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); |
| 609 | int (*dma_quiescent) (struct drm_device *); |
| 610 | int (*context_dtor) (struct drm_device *dev, int context); |
Thomas Zimmermann | f397d66 | 2020-01-23 14:59:42 +0100 | [diff] [blame] | 611 | u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); |
| 612 | int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); |
| 613 | void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); |
Daniel Vetter | 0683c0a | 2017-01-25 07:26:54 +0100 | [diff] [blame] | 614 | int dev_priv_size; |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 615 | }; |
| 616 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 617 | int drm_dev_init(struct drm_device *dev, |
| 618 | struct drm_driver *driver, |
| 619 | struct device *parent); |
Noralf Trønnes | 9b1f1b6 | 2019-02-25 15:42:27 +0100 | [diff] [blame] | 620 | int devm_drm_dev_init(struct device *parent, |
| 621 | struct drm_device *dev, |
| 622 | struct drm_driver *driver); |
Chris Wilson | f30c925 | 2017-02-02 09:36:32 +0000 | [diff] [blame] | 623 | void drm_dev_fini(struct drm_device *dev); |
| 624 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 625 | struct drm_device *drm_dev_alloc(struct drm_driver *driver, |
| 626 | struct device *parent); |
| 627 | int drm_dev_register(struct drm_device *dev, unsigned long flags); |
| 628 | void drm_dev_unregister(struct drm_device *dev); |
| 629 | |
Aishwarya Pant | 9a96f55 | 2017-09-26 13:58:49 +0530 | [diff] [blame] | 630 | void drm_dev_get(struct drm_device *dev); |
| 631 | void drm_dev_put(struct drm_device *dev); |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 632 | void drm_put_dev(struct drm_device *dev); |
Noralf Trønnes | bee330f | 2018-03-28 10:38:35 +0300 | [diff] [blame] | 633 | bool drm_dev_enter(struct drm_device *dev, int *idx); |
| 634 | void drm_dev_exit(int idx); |
Daniel Vetter | c07dcd6 | 2017-08-02 13:56:02 +0200 | [diff] [blame] | 635 | void drm_dev_unplug(struct drm_device *dev); |
| 636 | |
| 637 | /** |
| 638 | * drm_dev_is_unplugged - is a DRM device unplugged |
| 639 | * @dev: DRM device |
| 640 | * |
| 641 | * This function can be called to check whether a hotpluggable is unplugged. |
| 642 | * Unplugging itself is singalled through drm_dev_unplug(). If a device is |
| 643 | * unplugged, these two functions guarantee that any store before calling |
| 644 | * drm_dev_unplug() is visible to callers of this function after it completes |
Daniel Vetter | 168982d | 2019-01-29 09:56:43 +0100 | [diff] [blame] | 645 | * |
| 646 | * WARNING: This function fundamentally races against drm_dev_unplug(). It is |
| 647 | * recommended that drivers instead use the underlying drm_dev_enter() and |
| 648 | * drm_dev_exit() function pairs. |
Daniel Vetter | c07dcd6 | 2017-08-02 13:56:02 +0200 | [diff] [blame] | 649 | */ |
Noralf Trønnes | bee330f | 2018-03-28 10:38:35 +0300 | [diff] [blame] | 650 | static inline bool drm_dev_is_unplugged(struct drm_device *dev) |
Daniel Vetter | c07dcd6 | 2017-08-02 13:56:02 +0200 | [diff] [blame] | 651 | { |
Noralf Trønnes | bee330f | 2018-03-28 10:38:35 +0300 | [diff] [blame] | 652 | int idx; |
| 653 | |
| 654 | if (drm_dev_enter(dev, &idx)) { |
| 655 | drm_dev_exit(idx); |
| 656 | return false; |
| 657 | } |
| 658 | |
| 659 | return true; |
Daniel Vetter | c07dcd6 | 2017-08-02 13:56:02 +0200 | [diff] [blame] | 660 | } |
| 661 | |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 662 | /** |
Jani Nikula | 12a1d4e | 2020-01-23 14:48:00 +0200 | [diff] [blame] | 663 | * drm_core_check_all_features - check driver feature flags mask |
| 664 | * @dev: DRM device to check |
| 665 | * @features: feature flag(s) mask |
| 666 | * |
| 667 | * This checks @dev for driver features, see &drm_driver.driver_features, |
| 668 | * &drm_device.driver_features, and the various &enum drm_driver_feature flags. |
| 669 | * |
| 670 | * Returns true if all features in the @features mask are supported, false |
| 671 | * otherwise. |
| 672 | */ |
| 673 | static inline bool drm_core_check_all_features(const struct drm_device *dev, |
| 674 | u32 features) |
| 675 | { |
| 676 | u32 supported = dev->driver->driver_features & dev->driver_features; |
| 677 | |
| 678 | return features && (supported & features) == features; |
| 679 | } |
| 680 | |
| 681 | /** |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 682 | * drm_core_check_feature - check driver feature flags |
| 683 | * @dev: DRM device to check |
| 684 | * @feature: feature flag |
| 685 | * |
Ville Syrjälä | 18ace11 | 2018-09-13 16:16:21 +0300 | [diff] [blame] | 686 | * This checks @dev for driver features, see &drm_driver.driver_features, |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 687 | * &drm_device.driver_features, and the various &enum drm_driver_feature flags. |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 688 | * |
| 689 | * Returns true if the @feature is supported, false otherwise. |
| 690 | */ |
Jani Nikula | 12a1d4e | 2020-01-23 14:48:00 +0200 | [diff] [blame] | 691 | static inline bool drm_core_check_feature(const struct drm_device *dev, |
| 692 | enum drm_driver_feature feature) |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 693 | { |
Jani Nikula | 12a1d4e | 2020-01-23 14:48:00 +0200 | [diff] [blame] | 694 | return drm_core_check_all_features(dev, feature); |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 695 | } |
| 696 | |
| 697 | /** |
| 698 | * drm_drv_uses_atomic_modeset - check if the driver implements |
| 699 | * atomic_commit() |
| 700 | * @dev: DRM device |
| 701 | * |
| 702 | * This check is useful if drivers do not have DRIVER_ATOMIC set but |
| 703 | * have atomic modesetting internally implemented. |
| 704 | */ |
| 705 | static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) |
| 706 | { |
| 707 | return drm_core_check_feature(dev, DRIVER_ATOMIC) || |
Dave Airlie | 5707833 | 2018-09-18 16:20:18 +1000 | [diff] [blame] | 708 | (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 709 | } |
| 710 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 711 | |
Dave Airlie | 6320745 | 2016-11-30 14:18:51 +1000 | [diff] [blame] | 712 | int drm_dev_set_unique(struct drm_device *dev, const char *name); |
| 713 | |
| 714 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 715 | #endif |