/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_FILE_H_
#define _DRM_FILE_H_

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/idr.h>

#include <uapi/drm/drm.h>

#include <drm/drm_prime.h>

struct dma_fence;
struct drm_file;
struct drm_device;
struct device;
struct file;

/*
 * FIXME: Not sure we want to have drm_minor here in the end, but to avoid
 * header include loops we need it here for now.
 */

/* Note that the order of this enum is ABI (it determines
 * /dev/dri/renderD* numbers).
 */
enum drm_minor_type {
	DRM_MINOR_PRIMARY,
	DRM_MINOR_CONTROL,
	DRM_MINOR_RENDER,
};

/**
 * struct drm_minor - DRM device minor structure
 *
 * This structure represents a DRM minor number for device nodes in /dev.
 * It is entirely opaque to drivers and should never be inspected directly.
 * Drivers should instead only interact with &struct drm_file and of course
 * &struct drm_device, which is also where driver-private data and resources
 * can be attached.
 */
struct drm_minor {
	/* private: */
	int index;			/* Minor device number */
	int type;			/* Minor type: primary, control or render */
	struct device *kdev;		/* Linux device */
	struct drm_device *dev;

	struct dentry *debugfs_root;

	struct list_head debugfs_list;
	struct mutex debugfs_lock; /* Protects debugfs_list. */
};
82
Daniel Vetterb93658f2017-03-08 15:12:44 +010083/**
84 * struct drm_pending_event - Event queued up for userspace to read
85 *
86 * This represents a DRM event. Drivers can use this as a generic completion
87 * mechanism, which supports kernel-internal &struct completion, &struct dma_fence
88 * and also the DRM-specific &struct drm_event delivery mechanism.
89 */
Daniel Vettera8f8b1d2017-03-08 15:12:42 +010090struct drm_pending_event {
Daniel Vetterb93658f2017-03-08 15:12:44 +010091 /**
92 * @completion:
93 *
94 * Optional pointer to a kernel internal completion signalled when
95 * drm_send_event() is called, useful to internally synchronize with
96 * nonblocking operations.
97 */
Daniel Vettera8f8b1d2017-03-08 15:12:42 +010098 struct completion *completion;
Daniel Vetterb93658f2017-03-08 15:12:44 +010099
100 /**
101 * @completion_release:
102 *
103 * Optional callback currently only used by the atomic modeset helpers
104 * to clean up the reference count for the structure @completion is
105 * stored in.
106 */
Daniel Vettera8f8b1d2017-03-08 15:12:42 +0100107 void (*completion_release)(struct completion *completion);
Daniel Vetterb93658f2017-03-08 15:12:44 +0100108
	/**
	 * @event:
	 *
	 * Pointer to the actual event that should be sent to userspace to be
	 * read using drm_read(). This is optional, since nowadays events are
	 * also used to signal kernel-internal threads with @completion or to
	 * unblock DMA transactions using @fence.
	 */
	struct drm_event *event;

	/**
	 * @fence:
	 *
	 * Optional DMA fence to unblock other hardware transactions which
	 * depend upon the nonblocking DRM operation this event represents.
	 */
	struct dma_fence *fence;

	/**
	 * @file_priv:
	 *
	 * &struct drm_file where @event should be delivered to. Only set when
	 * @event is set.
	 */
	struct drm_file *file_priv;

	/**
	 * @link:
	 *
	 * Double-linked list to keep track of this event. Can be used by the
	 * driver up to the point when it calls drm_send_event(); after that
	 * this list entry is owned by the core for its own book-keeping.
	 */
	struct list_head link;

	/**
	 * @pending_link:
	 *
	 * Entry on &drm_file.pending_event_list, to keep track of all pending
	 * events for @file_priv, to allow correct unwinding of them when
	 * userspace closes the file before the event is delivered.
	 */
	struct list_head pending_link;
};
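
/*
 * Example (illustrative sketch only, not part of this header): a driver
 * typically wraps &struct drm_pending_event together with the uapi payload it
 * wants userspace to read, reserves event space up front in the nonblocking
 * ioctl and delivers the event once the hardware is done. The foo_flip_event
 * wrapper, FOO_EVENT_FLIP_COMPLETE and struct drm_event_foo_flip below are
 * hypothetical; drm_event_reserve_init() and drm_send_event_locked() are
 * declared further down in this header.
 *
 *	struct foo_flip_event {
 *		struct drm_pending_event base;
 *		struct drm_event_foo_flip event;	// starts with struct drm_event
 *	};
 *
 *	static int foo_queue_flip(struct drm_device *dev,
 *				  struct drm_file *file_priv)
 *	{
 *		struct foo_flip_event *e;
 *		int ret;
 *
 *		e = kzalloc(sizeof(*e), GFP_KERNEL);
 *		if (!e)
 *			return -ENOMEM;
 *
 *		e->event.base.type = FOO_EVENT_FLIP_COMPLETE;
 *		e->event.base.length = sizeof(e->event);
 *
 *		// Reserve &drm_file.event_space and tie the event to file_priv.
 *		ret = drm_event_reserve_init(dev, file_priv, &e->base,
 *					     &e->event.base);
 *		if (ret) {
 *			kfree(e);
 *			return ret;
 *		}
 *
 *		// ... kick off the hardware operation and stash e ...
 *		return 0;
 *	}
 *
 *	static void foo_complete_flip(struct drm_device *dev,
 *				      struct foo_flip_event *e)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&dev->event_lock, flags);
 *		drm_send_event_locked(dev, &e->base);
 *		spin_unlock_irqrestore(&dev->event_lock, flags);
 *	}
 */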

/**
 * struct drm_file - DRM file private data
 *
 * This structure tracks DRM state per open file descriptor.
 */
struct drm_file {
	/**
	 * @authenticated:
	 *
	 * Whether the client is allowed to submit rendering, which for legacy
	 * nodes means it must be authenticated.
	 *
	 * See also the :ref:`section on primary nodes and authentication
	 * <drm_primary_node>`.
	 */
	bool authenticated;

	/**
	 * @stereo_allowed:
	 *
	 * True when the client has asked us to expose stereo 3D mode flags.
	 */
	bool stereo_allowed;

	/**
	 * @universal_planes:
	 *
	 * True if client understands CRTC primary planes and cursor planes
	 * in the plane list. Automatically set when @atomic is set.
	 */
	bool universal_planes;

	/** @atomic: True if client understands atomic properties. */
	bool atomic;

	/**
	 * @aspect_ratio_allowed:
	 *
	 * True if the client can handle picture aspect ratios, and has
	 * requested to pass this information along with the mode.
	 */
	bool aspect_ratio_allowed;

	/**
	 * @writeback_connectors:
	 *
	 * True if the client understands writeback connectors.
	 */
	bool writeback_connectors;

	/**
	 * @was_master:
	 *
	 * This client has, or had, master capability. Protected by struct
	 * &drm_device.master_mutex.
	 *
	 * This is used to ensure that CAP_SYS_ADMIN is not enforced if the
	 * client is, or ever was, master.
	 */
	bool was_master;

	/**
	 * @is_master:
	 *
	 * This client is the creator of @master. Protected by struct
	 * &drm_device.master_mutex.
	 *
	 * See also the :ref:`section on primary nodes and authentication
	 * <drm_primary_node>`.
	 */
	bool is_master;

	/**
	 * @master:
	 *
	 * Master this node is currently associated with. Protected by struct
	 * &drm_device.master_mutex, and serialized by @master_lookup_lock.
	 *
	 * Only relevant if drm_is_primary_client() returns true. Note that
	 * this only matches &drm_device.master if the master is the currently
	 * active one.
	 *
	 * To update @master, both &drm_device.master_mutex and
	 * @master_lookup_lock need to be held, so holding either of them is
	 * sufficient for the read side.
	 *
	 * When dereferencing this pointer, either hold struct
	 * &drm_device.master_mutex for the duration of the pointer's use, or
	 * use drm_file_get_master() if struct &drm_device.master_mutex is not
	 * currently held and there is no other need to hold it. This prevents
	 * @master from being freed during use. See the example sketch after
	 * this struct for that pattern.
	 *
	 * See also @authenticated and @is_master and the :ref:`section on
	 * primary nodes and authentication <drm_primary_node>`.
	 */
	struct drm_master *master;

	/** @master_lookup_lock: Serializes @master. */
	spinlock_t master_lookup_lock;

	/** @pid: Process that opened this file. */
	struct pid *pid;

	/** @magic: Authentication magic, see @authenticated. */
	drm_magic_t magic;

	/**
	 * @lhead:
	 *
	 * List of all open files of a DRM device, linked into
	 * &drm_device.filelist. Protected by &drm_device.filelist_mutex.
	 */
	struct list_head lhead;

	/** @minor: &struct drm_minor for this file. */
	struct drm_minor *minor;

	/**
	 * @object_idr:
	 *
	 * Mapping of mm object handles to object pointers. Used by the GEM
	 * subsystem. Protected by @table_lock.
	 */
	struct idr object_idr;

	/** @table_lock: Protects @object_idr. */
	spinlock_t table_lock;

	/** @syncobj_idr: Mapping of sync object handles to object pointers. */
	struct idr syncobj_idr;
	/** @syncobj_table_lock: Protects @syncobj_idr. */
	spinlock_t syncobj_table_lock;

	/** @filp: Pointer to the core file structure. */
	struct file *filp;

	/**
	 * @driver_priv:
	 *
	 * Optional pointer for driver private data. Can be allocated in
	 * &drm_driver.open and should be freed in &drm_driver.postclose.
	 */
	void *driver_priv;

	/**
	 * @fbs:
	 *
	 * List of &struct drm_framebuffer associated with this file, using the
	 * &drm_framebuffer.filp_head entry.
	 *
	 * Protected by @fbs_lock. Note that the @fbs list holds a reference on
	 * the framebuffer object to prevent it from untimely disappearing.
	 */
	struct list_head fbs;

	/** @fbs_lock: Protects @fbs. */
	struct mutex fbs_lock;

	/**
	 * @blobs:
	 *
	 * User-created blob properties; this retains a reference on the
	 * property.
	 *
	 * Protected by &drm_mode_config.blob_lock.
	 */
	struct list_head blobs;

	/** @event_wait: Waitqueue for new events added to @event_list. */
	wait_queue_head_t event_wait;

	/**
	 * @pending_event_list:
	 *
	 * List of pending &struct drm_pending_event, used to clean up pending
	 * events in case this file gets closed before the event is signalled.
	 * Uses the &drm_pending_event.pending_link entry.
	 *
	 * Protected by &drm_device.event_lock.
	 */
	struct list_head pending_event_list;

	/**
	 * @event_list:
	 *
	 * List of &struct drm_pending_event, ready for delivery to userspace
	 * through drm_read(). Uses the &drm_pending_event.link entry.
	 *
	 * Protected by &drm_device.event_lock.
	 */
	struct list_head event_list;

	/**
	 * @event_space:
	 *
	 * Available event space to prevent userspace from exhausting kernel
	 * memory. Currently limited to the fairly arbitrary value of 4KB.
	 */
	int event_space;

	/** @event_read_lock: Serializes drm_read(). */
	struct mutex event_read_lock;

	/**
	 * @prime:
	 *
	 * Per-file buffer caches used by the PRIME buffer sharing code.
	 */
	struct drm_prime_file_private prime;

	/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
	unsigned long lock_count; /* DRI1 legacy lock count */
#endif
};
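
/*
 * Example (illustrative sketch only, not part of this header): safely looking
 * at &drm_file.master from a context that does not hold
 * &drm_device.master_mutex, as described in the @master documentation above.
 * drm_file_get_master() and drm_master_put() are assumed to come from
 * drm/drm_auth.h; foo_inspect_master() is a hypothetical driver helper.
 *
 *	#include <drm/drm_auth.h>
 *
 *	static void foo_with_master(struct drm_file *file_priv)
 *	{
 *		struct drm_master *master;
 *
 *		// Grab a reference so @master cannot be freed while in use.
 *		master = drm_file_get_master(file_priv);
 *		if (!master)
 *			return;
 *
 *		foo_inspect_master(master);	// hypothetical driver helper
 *
 *		drm_master_put(&master);
 *	}
 */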

/**
 * drm_is_primary_client - is this an open file of the primary node
 * @file_priv: DRM file
 *
 * Returns true if this is an open file of the primary node, i.e.
 * &drm_file.minor of @file_priv is a primary minor.
 *
 * See also the :ref:`section on primary nodes and authentication
 * <drm_primary_node>`.
 */
static inline bool drm_is_primary_client(const struct drm_file *file_priv)
{
	return file_priv->minor->type == DRM_MINOR_PRIMARY;
}

/**
 * drm_is_render_client - is this an open file of the render node
 * @file_priv: DRM file
 *
 * Returns true if this is an open file of the render node, i.e.
 * &drm_file.minor of @file_priv is a render minor.
 *
 * See also the :ref:`section on render nodes <drm_render_node>`.
 */
static inline bool drm_is_render_client(const struct drm_file *file_priv)
{
	return file_priv->minor->type == DRM_MINOR_RENDER;
}
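
/*
 * Example (illustrative sketch only, not part of this header): drivers can use
 * these helpers to restrict functionality to one node type, e.g. refusing a
 * modeset-related path on render nodes. foo_modeset_ioctl() and its argument
 * handling are hypothetical.
 *
 *	static int foo_modeset_ioctl(struct drm_device *dev, void *data,
 *				     struct drm_file *file_priv)
 *	{
 *		// Render nodes never carry modeset rights.
 *		if (drm_is_render_client(file_priv))
 *			return -EACCES;
 *
 *		// ... modeset work on behalf of a primary-node client ...
 *		return 0;
 *	}
 */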

int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
int drm_release_noglobal(struct inode *inode, struct file *filp);
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait);
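
/*
 * Example (illustrative sketch only, not part of this header): these helpers
 * are normally wired up as the driver's &struct file_operations, either by
 * hand as below or through a convenience macro such as DEFINE_DRM_GEM_FOPS()
 * from drm/drm_gem.h. drm_ioctl() and drm_compat_ioctl() are assumed to come
 * from drm/drm_ioctl.h and drm_gem_mmap() from drm/drm_gem.h; "foo" is a
 * placeholder driver name.
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.compat_ioctl	= drm_compat_ioctl,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *		.llseek		= noop_llseek,
 *		.mmap		= drm_gem_mmap,
 *	};
 */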
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e);
int drm_event_reserve_init(struct drm_device *dev,
			   struct drm_file *file_priv,
			   struct drm_pending_event *p,
			   struct drm_event *e);
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p);
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event_timestamp_locked(struct drm_device *dev,
				     struct drm_pending_event *e,
				     ktime_t timestamp);
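
/*
 * Example (illustrative sketch only, not part of this header): if the
 * nonblocking operation fails after the event has been reserved, the
 * reservation should be rolled back with drm_event_cancel_free() rather than
 * a plain kfree(), so the accounting in &drm_file.event_space stays balanced.
 * foo_flip_event and foo_hw_queue_flip() are hypothetical (see the sketch
 * after struct drm_pending_event above); it is assumed here that
 * drm_event_cancel_free() also frees the pending event, which works because
 * e->base is the first member of the wrapper.
 *
 *	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
 *	if (ret) {
 *		kfree(e);
 *		return ret;
 *	}
 *
 *	ret = foo_hw_queue_flip(dev, e);
 *	if (ret) {
 *		// Rolls the reservation back and releases the event.
 *		drm_event_cancel_free(dev, &e->base);
 *		return ret;
 *	}
 *
 *	return 0;
 */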

struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);

#ifdef CONFIG_MMU
struct drm_vma_offset_manager;
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr);
#endif /* CONFIG_MMU */

#endif /* _DRM_FILE_H_ */