Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
| 3 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
| 4 | * Copyright (c) 2009-2010, Code Aurora Forum. |
| 5 | * Copyright 2016 Intel Corp. |
| 6 | * |
| 7 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 8 | * copy of this software and associated documentation files (the "Software"), |
| 9 | * to deal in the Software without restriction, including without limitation |
| 10 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 11 | * and/or sell copies of the Software, and to permit persons to whom the |
| 12 | * Software is furnished to do so, subject to the following conditions: |
| 13 | * |
| 14 | * The above copyright notice and this permission notice (including the next |
| 15 | * paragraph) shall be included in all copies or substantial portions of the |
| 16 | * Software. |
| 17 | * |
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 21 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 22 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 23 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 24 | * OTHER DEALINGS IN THE SOFTWARE. |
| 25 | */ |
| 26 | |
| 27 | #ifndef _DRM_DRV_H_ |
| 28 | #define _DRM_DRV_H_ |
| 29 | |
| 30 | #include <linux/list.h> |
| 31 | #include <linux/irqreturn.h> |
| 32 | |
Daniel Vetter | c07dcd6 | 2017-08-02 13:56:02 +0200 | [diff] [blame] | 33 | #include <drm/drm_device.h> |
| 34 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 35 | struct drm_file; |
| 36 | struct drm_gem_object; |
| 37 | struct drm_master; |
| 38 | struct drm_minor; |
| 39 | struct dma_buf_attachment; |
| 40 | struct drm_display_mode; |
| 41 | struct drm_mode_create_dumb; |
Noralf Trønnes | 45d58b4 | 2017-11-07 20:13:40 +0100 | [diff] [blame] | 42 | struct drm_printer; |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 43 | |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 44 | /** |
| 45 | * enum drm_driver_feature - feature flags |
| 46 | * |
| 47 | * See &drm_driver.driver_features, drm_device.driver_features and |
| 48 | * drm_core_check_feature(). |
| 49 | */ |
| 50 | enum drm_driver_feature { |
| 51 | /** |
| 52 | * @DRIVER_GEM: |
| 53 | * |
| 54 | * Driver use the GEM memory manager. This should be set for all modern |
| 55 | * drivers. |
| 56 | */ |
| 57 | DRIVER_GEM = BIT(0), |
| 58 | /** |
| 59 | * @DRIVER_MODESET: |
| 60 | * |
| 61 | * Driver supports mode setting interfaces (KMS). |
| 62 | */ |
| 63 | DRIVER_MODESET = BIT(1), |
| 64 | /** |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 65 | * @DRIVER_RENDER: |
| 66 | * |
| 67 | * Driver supports dedicated render nodes. See also the :ref:`section on |
| 68 | * render nodes <drm_render_node>` for details. |
| 69 | */ |
| 70 | DRIVER_RENDER = BIT(3), |
| 71 | /** |
| 72 | * @DRIVER_ATOMIC: |
| 73 | * |
| 74 | * Driver supports the full atomic modesetting userspace API. Drivers |
| 75 | * which only use atomic internally, but do not support the full |
| 76 | * userspace API (e.g. not all properties converted to atomic, or |
| 77 | * multi-plane updates are not guaranteed to be tear-free) should not |
| 78 | * set this flag. |
| 79 | */ |
| 80 | DRIVER_ATOMIC = BIT(4), |
| 81 | /** |
| 82 | * @DRIVER_SYNCOBJ: |
| 83 | * |
| 84 | * Driver supports &drm_syncobj for explicit synchronization of command |
| 85 | * submission. |
| 86 | */ |
| 87 | DRIVER_SYNCOBJ = BIT(5), |
Lionel Landwerlin | 060cebb | 2019-04-16 13:57:50 +0100 | [diff] [blame] | 88 | /** |
| 89 | * @DRIVER_SYNCOBJ_TIMELINE: |
| 90 | * |
| 91 | * Driver supports the timeline flavor of &drm_syncobj for explicit |
| 92 | * synchronization of command submission. |
| 93 | */ |
| 94 | DRIVER_SYNCOBJ_TIMELINE = BIT(6), |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 95 | |
| 96 | /* IMPORTANT: Below are all the legacy flags, add new ones above. */ |
| 97 | |
| 98 | /** |
| 99 | * @DRIVER_USE_AGP: |
| 100 | * |
| 101 | * Set up DRM AGP support, see drm_agp_init(), the DRM core will manage |
| 102 | * AGP resources. New drivers don't need this. |
| 103 | */ |
Daniel Vetter | 1ff4948 | 2019-01-29 11:42:48 +0100 | [diff] [blame] | 104 | DRIVER_USE_AGP = BIT(25), |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 105 | /** |
| 106 | * @DRIVER_LEGACY: |
| 107 | * |
| 108 | * Denote a legacy driver using shadow attach. Do not use. |
| 109 | */ |
Daniel Vetter | 1ff4948 | 2019-01-29 11:42:48 +0100 | [diff] [blame] | 110 | DRIVER_LEGACY = BIT(26), |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 111 | /** |
| 112 | * @DRIVER_PCI_DMA: |
| 113 | * |
| 114 | * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace |
| 115 | * will be enabled. Only for legacy drivers. Do not use. |
| 116 | */ |
Daniel Vetter | 1ff4948 | 2019-01-29 11:42:48 +0100 | [diff] [blame] | 117 | DRIVER_PCI_DMA = BIT(27), |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 118 | /** |
| 119 | * @DRIVER_SG: |
| 120 | * |
| 121 | * Driver can perform scatter/gather DMA, allocation and mapping of |
| 122 | * scatter/gather buffers will be enabled. Only for legacy drivers. Do |
| 123 | * not use. |
| 124 | */ |
Daniel Vetter | 1ff4948 | 2019-01-29 11:42:48 +0100 | [diff] [blame] | 125 | DRIVER_SG = BIT(28), |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 126 | |
| 127 | /** |
| 128 | * @DRIVER_HAVE_DMA: |
| 129 | * |
| 130 | * Driver supports DMA, the userspace DMA API will be supported. Only |
| 131 | * for legacy drivers. Do not use. |
| 132 | */ |
Daniel Vetter | 1ff4948 | 2019-01-29 11:42:48 +0100 | [diff] [blame] | 133 | DRIVER_HAVE_DMA = BIT(29), |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 134 | /** |
| 135 | * @DRIVER_HAVE_IRQ: |
| 136 | * |
| 137 | * Legacy irq support. Only for legacy drivers. Do not use. |
| 138 | * |
| 139 | * New drivers can either use the drm_irq_install() and |
| 140 | * drm_irq_uninstall() helper functions, or roll their own irq support |
| 141 | * code by calling request_irq() directly. |
| 142 | */ |
Daniel Vetter | 1ff4948 | 2019-01-29 11:42:48 +0100 | [diff] [blame] | 143 | DRIVER_HAVE_IRQ = BIT(30), |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 144 | /** |
| 145 | * @DRIVER_KMS_LEGACY_CONTEXT: |
| 146 | * |
| 147 | * Used only by nouveau for backwards compatibility with existing |
| 148 | * userspace. Do not use. |
| 149 | */ |
| 150 | DRIVER_KMS_LEGACY_CONTEXT = BIT(31), |
| 151 | }; |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 152 | |
| 153 | /** |
| 154 | * struct drm_driver - DRM driver structure |
| 155 | * |
Luca Ceresoli | 60e6ecf | 2019-03-13 16:35:37 +0100 | [diff] [blame] | 156 | * This structure represents the common code for a family of cards. There will be |
| 157 | * one &struct drm_device for each card present in this family. It contains lots |
| 158 | * of vfunc entries, and a pile of those probably should be moved to more |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 159 | * appropriate places like &drm_mode_config_funcs or into a new operations |
| 160 | * structure for GEM drivers. |
| 161 | */ |
| 162 | struct drm_driver { |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 163 | /** |
| 164 | * @load: |
| 165 | * |
Daniel Vetter | 4c8e84b8 | 2020-09-02 09:26:27 +0200 | [diff] [blame] | 166 | * Backward-compatible driver callback to complete initialization steps |
| 167 | * after the driver is registered. For this reason, it may suffer from |
| 168 | * race conditions and its use is deprecated for new drivers. It is |
| 169 | * therefore only supported for existing drivers not yet converted to |
| 170 | * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for |
| 171 | * proper and race-free way to set up a &struct drm_device. |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 172 | * |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 173 | * This is deprecated, do not use! |
| 174 | * |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 175 | * Returns: |
| 176 | * |
| 177 | * Zero on success, non-zero value on failure. |
| 178 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 179 | int (*load) (struct drm_device *, unsigned long flags); |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 180 | |
| 181 | /** |
| 182 | * @open: |
| 183 | * |
| 184 | * Driver callback when a new &struct drm_file is opened. Useful for |
| 185 | * setting up driver-private data structures like buffer allocators, |
| 186 | * execution contexts or similar things. Such driver-private resources |
| 187 | * must be released again in @postclose. |
| 188 | * |
| 189 | * Since the display/modeset side of DRM can only be owned by exactly |
| 190 | * one &struct drm_file (see &drm_file.is_master and &drm_device.master) |
| 191 | * there should never be a need to set up any modeset related resources |
| 192 | * in this callback. Doing so would be a driver design bug. |
| 193 | * |
| 194 | * Returns: |
| 195 | * |
| 196 | * 0 on success, a negative error code on failure, which will be |
| 197 | * promoted to userspace as the result of the open() system call. |
| 198 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 199 | int (*open) (struct drm_device *, struct drm_file *); |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 200 | |
| 201 | /** |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 202 | * @postclose: |
| 203 | * |
| 204 | * One of the driver callbacks when a new &struct drm_file is closed. |
| 205 | * Useful for tearing down driver-private data structures allocated in |
| 206 | * @open like buffer allocators, execution contexts or similar things. |
| 207 | * |
| 208 | * Since the display/modeset side of DRM can only be owned by exactly |
| 209 | * one &struct drm_file (see &drm_file.is_master and &drm_device.master) |
| 210 | * there should never be a need to tear down any modeset related |
| 211 | * resources in this callback. Doing so would be a driver design bug. |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 212 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 213 | void (*postclose) (struct drm_device *, struct drm_file *); |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 214 | |
| 215 | /** |
| 216 | * @lastclose: |
| 217 | * |
| 218 | * Called when the last &struct drm_file has been closed and there's |
| 219 | * currently no userspace client for the &struct drm_device. |
| 220 | * |
| 221 | * Modern drivers should only use this to force-restore the fbdev |
| 222 | * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked(). |
| 223 | * Anything else would indicate there's something seriously wrong. |
| 224 | * Modern drivers can also use this to execute delayed power switching |
| 225 | * state changes, e.g. in conjunction with the :ref:`vga_switcheroo` |
| 226 | * infrastructure. |
| 227 | * |
Daniel Vetter | 45c3d21 | 2017-05-08 10:26:33 +0200 | [diff] [blame] | 228 | * This is called after @postclose hook has been called. |
Daniel Vetter | b93658f | 2017-03-08 15:12:44 +0100 | [diff] [blame] | 229 | * |
| 230 | * NOTE: |
| 231 | * |
| 232 | * All legacy drivers use this callback to de-initialize the hardware. |
| 233 | * This is purely because of the shadow-attach model, where the DRM |
| 234 | * kernel driver does not really own the hardware. Instead ownership is |
| 235 | * handled with the help of userspace through an inherently racy dance |
| 236 | * to set/unset the VT into raw mode. |
| 237 | * |
| 238 | * Legacy drivers initialize the hardware in the @firstopen callback, |
| 239 | * which isn't even called for modern drivers. |
| 240 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 241 | void (*lastclose) (struct drm_device *); |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 242 | |
| 243 | /** |
| 244 | * @unload: |
| 245 | * |
| 246 | * Reverse the effects of the driver load callback. Ideally, |
| 247 | * the clean up performed by the driver should happen in the |
| 248 | * reverse order of the initialization. Similarly to the load |
| 249 | * hook, this handler is deprecated and its usage should be |
| 250 | * dropped in favor of an open-coded teardown function at the |
Aishwarya Pant | 9a96f55 | 2017-09-26 13:58:49 +0530 | [diff] [blame] | 251 | * driver layer. See drm_dev_unregister() and drm_dev_put() |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 252 | * for the proper way to remove a &struct drm_device. |
| 253 | * |
| 254 | * The unload() hook is called right after unregistering |
| 255 | * the device. |
| 256 | * |
Gabriel Krisman Bertazi | 5692650 | 2017-01-02 12:20:08 -0200 | [diff] [blame] | 257 | */ |
Gabriel Krisman Bertazi | 11b3c20 | 2017-01-06 15:57:31 -0200 | [diff] [blame] | 258 | void (*unload) (struct drm_device *); |
Chris Wilson | f30c925 | 2017-02-02 09:36:32 +0000 | [diff] [blame] | 259 | |
| 260 | /** |
| 261 | * @release: |
| 262 | * |
| 263 | * Optional callback for destroying device data after the final |
Daniel Vetter | d33b58d | 2020-03-23 15:49:24 +0100 | [diff] [blame] | 264 | * reference is released, i.e. the device is being destroyed. |
Daniel Vetter | 9e1ed9f | 2020-03-23 15:49:50 +0100 | [diff] [blame] | 265 | * |
| 266 | * This is deprecated, clean up all memory allocations associated with a |
| 267 | * &drm_device using drmm_add_action(), drmm_kmalloc() and related |
| 268 | * managed resources functions. |
Chris Wilson | f30c925 | 2017-02-02 09:36:32 +0000 | [diff] [blame] | 269 | */ |
| 270 | void (*release) (struct drm_device *); |
| 271 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 272 | /** |
Daniel Vetter | 16584b2 | 2017-05-31 11:22:53 +0200 | [diff] [blame] | 273 | * @irq_handler: |
| 274 | * |
| 275 | * Interrupt handler called when using drm_irq_install(). Not used by |
| 276 | * drivers which implement their own interrupt handling. |
| 277 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 278 | irqreturn_t(*irq_handler) (int irq, void *arg); |
Daniel Vetter | 16584b2 | 2017-05-31 11:22:53 +0200 | [diff] [blame] | 279 | |
| 280 | /** |
| 281 | * @irq_preinstall: |
| 282 | * |
| 283 | * Optional callback used by drm_irq_install() which is called before |
| 284 | * the interrupt handler is registered. This should be used to clear out |
| 285 | * any pending interrupts (from e.g. firmware based drivers) and reset |
| 286 | * the interrupt handling registers. |
| 287 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 288 | void (*irq_preinstall) (struct drm_device *dev); |
Daniel Vetter | 16584b2 | 2017-05-31 11:22:53 +0200 | [diff] [blame] | 289 | |
| 290 | /** |
| 291 | * @irq_postinstall: |
| 292 | * |
| 293 | * Optional callback used by drm_irq_install() which is called after |
| 294 | * the interrupt handler is registered. This should be used to enable |
| 295 | * interrupt generation in the hardware. |
| 296 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 297 | int (*irq_postinstall) (struct drm_device *dev); |
Daniel Vetter | 16584b2 | 2017-05-31 11:22:53 +0200 | [diff] [blame] | 298 | |
| 299 | /** |
| 300 | * @irq_uninstall: |
| 301 | * |
| 302 | * Optional callback used by drm_irq_uninstall() which is called before |
| 303 | * the interrupt handler is unregistered. This should be used to disable |
| 304 | * interrupt generation in the hardware. |
| 305 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 306 | void (*irq_uninstall) (struct drm_device *dev); |
| 307 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 308 | /** |
Daniel Vetter | 6c4789e | 2016-11-14 12:58:20 +0100 | [diff] [blame] | 309 | * @master_set: |
| 310 | * |
| 311 | * Called whenever the minor master is set. Only used by vmwgfx. |
| 312 | */ |
Emil Velikov | 907f532 | 2020-05-30 13:46:39 +0100 | [diff] [blame] | 313 | void (*master_set)(struct drm_device *dev, struct drm_file *file_priv, |
| 314 | bool from_open); |
Daniel Vetter | 6c4789e | 2016-11-14 12:58:20 +0100 | [diff] [blame] | 315 | /** |
| 316 | * @master_drop: |
| 317 | * |
| 318 | * Called whenever the minor master is dropped. Only used by vmwgfx. |
| 319 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 320 | void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); |
| 321 | |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 322 | /** |
| 323 | * @debugfs_init: |
| 324 | * |
| 325 | * Allows drivers to create driver-specific debugfs files. |
| 326 | */ |
Wambui Karuga | 7ce84471 | 2020-03-10 16:31:21 +0300 | [diff] [blame] | 327 | void (*debugfs_init)(struct drm_minor *minor); |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 328 | |
| 329 | /** |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 330 | * @gem_free_object_unlocked: deconstructor for drm_gem_objects |
| 331 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 332 | * This is deprecated and should not be used by new drivers. Use |
| 333 | * &drm_gem_object_funcs.free instead. |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 334 | */ |
| 335 | void (*gem_free_object_unlocked) (struct drm_gem_object *obj); |
| 336 | |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 337 | /** |
| 338 | * @gem_open_object: |
| 339 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 340 | * This callback is deprecated in favour of &drm_gem_object_funcs.open. |
| 341 | * |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 342 | * Driver hook called upon gem handle creation |
| 343 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 344 | int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 345 | |
| 346 | /** |
| 347 | * @gem_close_object: |
| 348 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 349 | * This callback is deprecated in favour of &drm_gem_object_funcs.close. |
| 350 | * |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 351 | * Driver hook called upon gem handle release |
| 352 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 353 | void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); |
| 354 | |
| 355 | /** |
Chris Wilson | 218adc1 | 2016-11-25 12:34:27 +0000 | [diff] [blame] | 356 | * @gem_create_object: constructor for gem objects |
| 357 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 358 | * Hook for allocating the GEM object struct, for use by the CMA and |
| 359 | * SHMEM GEM helpers. |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 360 | */ |
| 361 | struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, |
| 362 | size_t size); |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 363 | /** |
| 364 | * @prime_handle_to_fd: |
| 365 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 366 | * Main PRIME export function. Should be implemented with |
| 367 | * drm_gem_prime_handle_to_fd() for GEM based drivers. |
| 368 | * |
| 369 | * For an in-depth discussion see :ref:`PRIME buffer sharing |
| 370 | * documentation <prime_buffer_sharing>`. |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 371 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 372 | int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, |
| 373 | uint32_t handle, uint32_t flags, int *prime_fd); |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 374 | /** |
| 375 | * @prime_fd_to_handle: |
| 376 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 377 | * Main PRIME import function. Should be implemented with |
| 378 | * drm_gem_prime_fd_to_handle() for GEM based drivers. |
| 379 | * |
| 380 | * For an in-depth discussion see :ref:`PRIME buffer sharing |
| 381 | * documentation <prime_buffer_sharing>`. |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 382 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 383 | int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, |
| 384 | int prime_fd, uint32_t *handle); |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 385 | /** |
| 386 | * @gem_prime_export: |
| 387 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 388 | * Export hook for GEM drivers. Deprecated in favour of |
| 389 | * &drm_gem_object_funcs.export. |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 390 | */ |
Daniel Vetter | e4fa845 | 2019-06-14 22:35:25 +0200 | [diff] [blame] | 391 | struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj, |
| 392 | int flags); |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 393 | /** |
| 394 | * @gem_prime_import: |
| 395 | * |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 396 | * Import hook for GEM drivers. |
Noralf Trønnes | f001488 | 2018-11-10 15:56:43 +0100 | [diff] [blame] | 397 | * |
| 398 | * This defaults to drm_gem_prime_import() if not set. |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 399 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 400 | struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, |
| 401 | struct dma_buf *dma_buf); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 402 | |
| 403 | /** |
| 404 | * @gem_prime_pin: |
| 405 | * |
| 406 | * Deprecated hook in favour of &drm_gem_object_funcs.pin. |
| 407 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 408 | int (*gem_prime_pin)(struct drm_gem_object *obj); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 409 | |
| 410 | /** |
| 411 | * @gem_prime_unpin: |
| 412 | * |
| 413 | * Deprecated hook in favour of &drm_gem_object_funcs.unpin. |
| 414 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 415 | void (*gem_prime_unpin)(struct drm_gem_object *obj); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 416 | |
| 417 | |
| 418 | /** |
| 419 | * @gem_prime_get_sg_table: |
| 420 | * |
| 421 | * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table. |
| 422 | */ |
| 423 | struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); |
| 424 | |
| 425 | /** |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 426 | * @gem_prime_import_sg_table: |
| 427 | * |
| 428 | * Optional hook used by the PRIME helper functions |
| 429 | * drm_gem_prime_import() and drm_gem_prime_import_dev(). |
| 430 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 431 | struct drm_gem_object *(*gem_prime_import_sg_table)( |
| 432 | struct drm_device *dev, |
| 433 | struct dma_buf_attachment *attach, |
| 434 | struct sg_table *sgt); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 435 | /** |
| 436 | * @gem_prime_vmap: |
| 437 | * |
| 438 | * Deprecated vmap hook for GEM drivers. Please use |
| 439 | * &drm_gem_object_funcs.vmap instead. |
| 440 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 441 | void *(*gem_prime_vmap)(struct drm_gem_object *obj); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 442 | |
| 443 | /** |
| 444 | * @gem_prime_vunmap: |
| 445 | * |
| 446 | * Deprecated vunmap hook for GEM drivers. Please use |
| 447 | * &drm_gem_object_funcs.vunmap instead. |
| 448 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 449 | void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 450 | |
| 451 | /** |
| 452 | * @gem_prime_mmap: |
| 453 | * |
| 454 | * mmap hook for GEM drivers, used to implement dma-buf mmap in the |
| 455 | * PRIME helpers. |
| 456 | * |
| 457 | * FIXME: There's way too much duplication going on here, and also moved |
| 458 | * to &drm_gem_object_funcs. |
| 459 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 460 | int (*gem_prime_mmap)(struct drm_gem_object *obj, |
| 461 | struct vm_area_struct *vma); |
| 462 | |
Daniel Vetter | 4f93624 | 2016-11-14 12:58:21 +0100 | [diff] [blame] | 463 | /** |
| 464 | * @dumb_create: |
| 465 | * |
| 466 | * This creates a new dumb buffer in the driver's backing storage manager (GEM, |
| 467 | * TTM or something else entirely) and returns the resulting buffer handle. This |
| 468 | * handle can then be wrapped up into a framebuffer modeset object. |
| 469 | * |
| 470 | * Note that userspace is not allowed to use such objects for render |
| 471 | * acceleration - drivers must create their own private ioctls for such a use |
| 472 | * case. |
| 473 | * |
| 474 | * Width, height and depth are specified in the &drm_mode_create_dumb |
| 475 | * argument. The callback needs to fill the handle, pitch and size for |
| 476 | * the created buffer. |
| 477 | * |
| 478 | * Called by the user via ioctl. |
| 479 | * |
| 480 | * Returns: |
| 481 | * |
| 482 | * Zero on success, negative errno on failure. |
| 483 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 484 | int (*dumb_create)(struct drm_file *file_priv, |
| 485 | struct drm_device *dev, |
| 486 | struct drm_mode_create_dumb *args); |
Daniel Vetter | 4f93624 | 2016-11-14 12:58:21 +0100 | [diff] [blame] | 487 | /** |
| 488 | * @dumb_map_offset: |
| 489 | * |
| 490 | * Allocate an offset in the drm device node's address space to be able to |
Daniel Vetter | 39dea70 | 2018-11-27 10:19:21 +0100 | [diff] [blame] | 491 | * memory map a dumb buffer. |
| 492 | * |
| 493 | * The default implementation is drm_gem_create_mmap_offset(). GEM based |
| 494 | * drivers must not overwrite this. |
Daniel Vetter | 4f93624 | 2016-11-14 12:58:21 +0100 | [diff] [blame] | 495 | * |
| 496 | * Called by the user via ioctl. |
| 497 | * |
| 498 | * Returns: |
| 499 | * |
| 500 | * Zero on success, negative errno on failure. |
| 501 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 502 | int (*dumb_map_offset)(struct drm_file *file_priv, |
| 503 | struct drm_device *dev, uint32_t handle, |
| 504 | uint64_t *offset); |
Daniel Vetter | 4f93624 | 2016-11-14 12:58:21 +0100 | [diff] [blame] | 505 | /** |
| 506 | * @dumb_destroy: |
| 507 | * |
| 508 | * This destroys the userspace handle for the given dumb backing storage buffer. |
| 509 | * Since buffer objects must be reference counted in the kernel a buffer object |
| 510 | * won't be immediately freed if a framebuffer modeset object still uses it. |
| 511 | * |
| 512 | * Called by the user via ioctl. |
| 513 | * |
Daniel Vetter | 39dea70 | 2018-11-27 10:19:21 +0100 | [diff] [blame] | 514 | * The default implementation is drm_gem_dumb_destroy(). GEM based drivers |
| 515 | * must not overwrite this. |
| 516 | * |
Daniel Vetter | 4f93624 | 2016-11-14 12:58:21 +0100 | [diff] [blame] | 517 | * Returns: |
| 518 | * |
| 519 | * Zero on success, negative errno on failure. |
| 520 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 521 | int (*dumb_destroy)(struct drm_file *file_priv, |
| 522 | struct drm_device *dev, |
| 523 | uint32_t handle); |
| 524 | |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 525 | /** |
| 526 | * @gem_vm_ops: Driver private ops for this object |
Daniel Vetter | 805dc614 | 2019-06-20 14:46:15 +0200 | [diff] [blame] | 527 | * |
| 528 | * For GEM drivers this is deprecated in favour of |
| 529 | * &drm_gem_object_funcs.vm_ops. |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 530 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 531 | const struct vm_operations_struct *gem_vm_ops; |
| 532 | |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 533 | /** @major: driver major number */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 534 | int major; |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 535 | /** @minor: driver minor number */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 536 | int minor; |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 537 | /** @patchlevel: driver patch level */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 538 | int patchlevel; |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 539 | /** @name: driver name */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 540 | char *name; |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 541 | /** @desc: driver description */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 542 | char *desc; |
Sean Paul | d1b6c62 | 2017-07-20 13:47:43 -0400 | [diff] [blame] | 543 | /** @date: driver date */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 544 | char *date; |
| 545 | |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 546 | /** |
| 547 | * @driver_features: |
| 548 | * Driver features, see &enum drm_driver_feature. Drivers can disable |
| 549 | * some features on a per-instance basis using |
| 550 | * &drm_device.driver_features. |
| 551 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 552 | u32 driver_features; |
Daniel Vetter | bb2eaba | 2017-05-31 11:20:45 +0200 | [diff] [blame] | 553 | |
| 554 | /** |
| 555 | * @ioctls: |
| 556 | * |
| 557 | * Array of driver-private IOCTL description entries. See the chapter on |
| 558 | * :ref:`IOCTL support in the userland interfaces |
| 559 | * chapter<drm_driver_ioctl>` for the full details. |
| 560 | */ |
| 561 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 562 | const struct drm_ioctl_desc *ioctls; |
Daniel Vetter | bb2eaba | 2017-05-31 11:20:45 +0200 | [diff] [blame] | 563 | /** @num_ioctls: Number of entries in @ioctls. */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 564 | int num_ioctls; |
Daniel Vetter | bb2eaba | 2017-05-31 11:20:45 +0200 | [diff] [blame] | 565 | |
| 566 | /** |
| 567 | * @fops: |
| 568 | * |
| 569 | * File operations for the DRM device node. See the discussion in |
| 570 | * :ref:`file operations<drm_driver_fops>` for in-depth coverage and |
| 571 | * some examples. |
| 572 | */ |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 573 | const struct file_operations *fops; |
| 574 | |
Daniel Vetter | 0683c0a | 2017-01-25 07:26:54 +0100 | [diff] [blame] | 575 | /* Everything below here is for legacy driver, never use! */ |
| 576 | /* private: */ |
| 577 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 578 | /* List of devices hanging off this driver with stealth attach. */ |
| 579 | struct list_head legacy_dev_list; |
Daniel Vetter | 0683c0a | 2017-01-25 07:26:54 +0100 | [diff] [blame] | 580 | int (*firstopen) (struct drm_device *); |
Daniel Vetter | 45c3d21 | 2017-05-08 10:26:33 +0200 | [diff] [blame] | 581 | void (*preclose) (struct drm_device *, struct drm_file *file_priv); |
Daniel Vetter | 0683c0a | 2017-01-25 07:26:54 +0100 | [diff] [blame] | 582 | int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); |
| 583 | int (*dma_quiescent) (struct drm_device *); |
| 584 | int (*context_dtor) (struct drm_device *dev, int context); |
Thomas Zimmermann | f397d66 | 2020-01-23 14:59:42 +0100 | [diff] [blame] | 585 | u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe); |
| 586 | int (*enable_vblank)(struct drm_device *dev, unsigned int pipe); |
| 587 | void (*disable_vblank)(struct drm_device *dev, unsigned int pipe); |
Daniel Vetter | 0683c0a | 2017-01-25 07:26:54 +0100 | [diff] [blame] | 588 | int dev_priv_size; |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 589 | }; |
| 590 | |
/*
 * Internal helper backing the devm_drm_dev_alloc() macro below: allocates a
 * driver structure of @size bytes with the embedded &struct drm_device living
 * at @offset. Do not call directly, use devm_drm_dev_alloc() instead.
 */
void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
			   size_t size, size_t offset);
| 593 | |
/**
 * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
 * @parent: Parent device object
 * @driver: DRM driver
 * @type: the type of the struct which contains struct &drm_device
 * @member: the name of the &drm_device within @type.
 *
 * This allocates and initializes a new DRM device. No device registration is
 * done. Call drm_dev_register() to advertise the device to user space and
 * register it with other core subsystems. This should be done last in the
 * device initialization sequence to make sure userspace can't access an
 * inconsistent state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_get() and
 * drm_dev_put() to take and drop further ref-counts.
 *
 * It is recommended that drivers embed &struct drm_device into their own device
 * structure.
 *
 * Note that this manages the lifetime of the resulting &drm_device
 * automatically using devres. The DRM device initialized with this function is
 * automatically put on driver detach using drm_dev_put().
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
#define devm_drm_dev_alloc(parent, driver, type, member) \
	((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
				       offsetof(type, member)))
| 623 | |
/* Allocate a new DRM device instance; no device registration is done. */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent);
/* Advertise the device to user space and register it with other core subsystems. */
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);

/* Take and drop ref-counts on a &drm_device (initial ref-count is 1). */
void drm_dev_get(struct drm_device *dev);
void drm_dev_put(struct drm_device *dev);
/* NOTE(review): presumably a legacy unregister-and-put helper - confirm in drm_drv.c */
void drm_put_dev(struct drm_device *dev);
/*
 * Bracket access to device resources: drm_dev_enter() returns true (and a
 * token in @idx for drm_dev_exit()) while the device is present, false once
 * it has been unplugged via drm_dev_unplug().
 */
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
void drm_dev_unplug(struct drm_device *dev);
| 635 | |
/**
 * drm_dev_is_unplugged - is a DRM device unplugged
 * @dev: DRM device
 *
 * This function can be called to check whether a hotpluggable device is
 * unplugged. Unplugging itself is signalled through drm_dev_unplug(). If a
 * device is unplugged, these two functions guarantee that any store before
 * calling drm_dev_unplug() is visible to callers of this function after it
 * completes.
 *
 * WARNING: This function fundamentally races against drm_dev_unplug(). It is
 * recommended that drivers instead use the underlying drm_dev_enter() and
 * drm_dev_exit() function pairs.
 */
static inline bool drm_dev_is_unplugged(struct drm_device *dev)
{
	int idx;
	bool present = drm_dev_enter(dev, &idx);

	/* Only probing - release the critical section right away. */
	if (present)
		drm_dev_exit(idx);

	return !present;
}
| 660 | |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 661 | /** |
Jani Nikula | 12a1d4e | 2020-01-23 14:48:00 +0200 | [diff] [blame] | 662 | * drm_core_check_all_features - check driver feature flags mask |
| 663 | * @dev: DRM device to check |
| 664 | * @features: feature flag(s) mask |
| 665 | * |
| 666 | * This checks @dev for driver features, see &drm_driver.driver_features, |
| 667 | * &drm_device.driver_features, and the various &enum drm_driver_feature flags. |
| 668 | * |
| 669 | * Returns true if all features in the @features mask are supported, false |
| 670 | * otherwise. |
| 671 | */ |
| 672 | static inline bool drm_core_check_all_features(const struct drm_device *dev, |
| 673 | u32 features) |
| 674 | { |
| 675 | u32 supported = dev->driver->driver_features & dev->driver_features; |
| 676 | |
| 677 | return features && (supported & features) == features; |
| 678 | } |
| 679 | |
| 680 | /** |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 681 | * drm_core_check_feature - check driver feature flags |
| 682 | * @dev: DRM device to check |
| 683 | * @feature: feature flag |
| 684 | * |
Ville Syrjälä | 18ace11 | 2018-09-13 16:16:21 +0300 | [diff] [blame] | 685 | * This checks @dev for driver features, see &drm_driver.driver_features, |
Daniel Vetter | 0e2a933 | 2019-01-29 11:42:47 +0100 | [diff] [blame] | 686 | * &drm_device.driver_features, and the various &enum drm_driver_feature flags. |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 687 | * |
| 688 | * Returns true if the @feature is supported, false otherwise. |
| 689 | */ |
Jani Nikula | 12a1d4e | 2020-01-23 14:48:00 +0200 | [diff] [blame] | 690 | static inline bool drm_core_check_feature(const struct drm_device *dev, |
| 691 | enum drm_driver_feature feature) |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 692 | { |
Jani Nikula | 12a1d4e | 2020-01-23 14:48:00 +0200 | [diff] [blame] | 693 | return drm_core_check_all_features(dev, feature); |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 694 | } |
| 695 | |
| 696 | /** |
| 697 | * drm_drv_uses_atomic_modeset - check if the driver implements |
| 698 | * atomic_commit() |
| 699 | * @dev: DRM device |
| 700 | * |
| 701 | * This check is useful if drivers do not have DRIVER_ATOMIC set but |
| 702 | * have atomic modesetting internally implemented. |
| 703 | */ |
| 704 | static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) |
| 705 | { |
| 706 | return drm_core_check_feature(dev, DRIVER_ATOMIC) || |
Dave Airlie | 5707833 | 2018-09-18 16:20:18 +1000 | [diff] [blame] | 707 | (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); |
Daniel Vetter | 3479fc2 | 2018-07-09 10:40:02 +0200 | [diff] [blame] | 708 | } |
| 709 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 710 | |
/* Set the device's unique name; NOTE(review): exact semantics (copy vs. reference, when it may be called) are defined in drm_drv.c - confirm there. */
int drm_dev_set_unique(struct drm_device *dev, const char *name);
| 712 | |
| 713 | |
Daniel Vetter | 85e634b | 2016-11-14 12:58:19 +0100 | [diff] [blame] | 714 | #endif |