/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/slab.h>

#include <drm/drm_drv.h>
#include <drm/drmP.h>

#include "drm_crtc_internal.h"
#include "drm_legacy.h"
#include "drm_internal.h"

/*
 * drm_debug: Enable debug output.
 * Bitmask of DRM_UT_x. See include/drm/drmP.h for details.
 */
unsigned int drm_debug = 0;
EXPORT_SYMBOL(drm_debug);

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
"\t\tBit 5 (0x20) will enable VBL messages (vblank code)");
module_param_named(debug, drm_debug, int, 0600);
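
/*
 * Usage sketch (not part of the code): the categories above are combined into
 * a bitmask and can be set at boot time via the "drm.debug=" command-line
 * parameter or at runtime through the writable module parameter, e.g. to
 * enable CORE, KMS and PRIME messages (0x01 | 0x04 | 0x08):
 *
 *	echo 0x0d > /sys/module/drm/parameters/debug
 */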

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

static struct dentry *drm_debugfs_root;

#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"

void drm_dev_printk(const struct device *dev, const char *level,
		    unsigned int category, const char *function_name,
		    const char *prefix, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (category != DRM_UT_NONE && !(drm_debug & category))
		return;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (dev)
		dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
			   &vaf);
	else
		printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_dev_printk);

void drm_printk(const char *level, unsigned int category,
		const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (category != DRM_UT_NONE && !(drm_debug & category))
		return;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	printk("%s" "[" DRM_NAME ":%ps]%s %pV",
	       level, __builtin_return_address(0),
	       strcmp(level, KERN_ERR) == 0 ? " *ERROR*" : "", &vaf);

	va_end(args);
}
EXPORT_SYMBOL(drm_printk);
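
/*
 * These helpers are not normally called directly; the DRM_DEBUG_*() and
 * DRM_ERROR() style macros in drmP.h expand to them with the appropriate
 * level and category. A direct call equivalent to a core debug message would
 * look roughly like this (sketch, the message text is made up):
 *
 *	drm_printk(KERN_DEBUG, DRM_UT_CORE, "minor %d registered\n", index);
 */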

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means, DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     unsigned int type)
{
	switch (type) {
	case DRM_MINOR_PRIMARY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_CONTROL:
		return &dev->control;
	default:
		return NULL;
	}
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&drm_minor_lock, flags);
	r = idr_alloc(&drm_minors_idr,
		      NULL,
		      64 * type,
		      64 * (type + 1),
		      GFP_NOWAIT);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
	idr_preload_end();

	if (r < 0)
		goto err_free;

	minor->index = r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev)) {
		r = PTR_ERR(minor->kdev);
		goto err_index;
	}

	*drm_minor_get_slot(dev, type) = minor;
	return 0;

err_index:
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);
err_free:
	kfree(minor);
	return r;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
	struct drm_minor **slot, *minor;
	unsigned long flags;

	slot = drm_minor_get_slot(dev, type);
	minor = *slot;
	if (!minor)
		return;

	put_device(minor->kdev);

	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_remove(&drm_minors_idr, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	kfree(minor);
	*slot = NULL;
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		return ret;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}

/*
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_unref(minor->dev);
}
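
/*
 * Usage sketch for the pair above (drm_stub_open() at the bottom of this file
 * is the in-file user): acquire pins minor->dev, release drops that
 * reference again.
 *
 *	minor = drm_minor_acquire(iminor(inode));
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	... minor->dev is guaranteed to stay valid here ...
 *	drm_minor_release(minor);
 */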

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by struct &drm_device. This
 * is allocated with drm_dev_alloc(), usually from bus-specific ->probe()
 * callbacks implemented by the driver. The driver then needs to initialize all
 * the various subsystems for the drm device like memory management, vblank
 * handling, modesetting support and initial output configuration plus obviously
 * initialize all the corresponding hardware bits. Finally when everything is up
 * and running and ready for userspace the device instance can be published
 * using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the ->load() callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_unref().
 *
 * Note that the lifetime rules for a &drm_device instance still carry a lot of
 * historical baggage. Hence use the reference counting provided by
 * drm_dev_ref() and drm_dev_unref() only carefully.
 *
 * Also note that embedding of &drm_device is currently not (yet) supported (but
 * it would be easy to add). Drivers can store driver-private data in the
 * dev_priv field of &drm_device.
 */

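/*
 * A minimal probe/remove sketch following the flow described above. The
 * foo_* names, the platform bus and the drvdata handling are hypothetical
 * placeholders, not part of the DRM core:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm;
 *		int ret;
 *
 *		drm = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
 *		if (IS_ERR(drm))
 *			return PTR_ERR(drm);
 *		platform_set_drvdata(pdev, drm);
 *
 *		... set up memory management, vblank, modesetting, hardware ...
 *
 *		ret = drm_dev_register(drm, 0);
 *		if (ret)
 *			drm_dev_unref(drm);
 *		return ret;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		... tear down driver state allocated in probe ...
 *		drm_dev_unref(drm);
 *		return 0;
 *	}
 */
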
static int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
	if (!name)
		return -EINVAL;

	kfree(dev->unique);
	dev->unique = kstrdup(name, GFP_KERNEL);

	return dev->unique ? 0 : -ENOMEM;
}

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely. Please use drm_dev_unregister() and drm_dev_unref() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

void drm_unplug_dev(struct drm_device *dev)
{
	/* for a USB device */
	drm_dev_unregister(dev);

	mutex_lock(&drm_global_mutex);

	drm_device_set_unplugged(dev);

	if (dev->open_count == 0) {
		drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
	.d_dname	= simple_dname,
};

static const struct super_operations drm_fs_sops = {
	.statfs		= simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_pseudo(fs_type,
			    "drm:",
			    &drm_fs_sops,
			    &drm_fs_dops,
			    0x010203ff);
}

static struct file_system_type drm_fs_type = {
	.name		= "drm",
	.owner		= THIS_MODULE,
	.mount		= drm_fs_mount,
	.kill_sb	= kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}
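
/*
 * Note: within this file the pair above is always balanced: drm_dev_init()
 * creates dev->anon_inode with drm_fs_inode_new(), and both its error path
 * and drm_dev_release() hand the inode back through drm_fs_inode_free().
 */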

/**
 * drm_dev_init - Initialise new DRM device
 * @dev: DRM device
 * @driver: DRM driver
 * @parent: Parent device object
 *
 * Initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems. This should be done last in the device
 * initialization sequence to make sure userspace can't access an inconsistent
 * state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * Note that for purely virtual devices @parent can be NULL.
 *
 * Drivers that do not want to allocate their own device struct
 * embedding struct &drm_device can call drm_dev_alloc() instead.
 *
 * RETURNS:
 * 0 on success, or error code on failure.
 */
int drm_dev_init(struct drm_device *dev,
		 struct drm_driver *driver,
		 struct device *parent)
{
	int ret;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->buf_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = drm_ht_create(&dev->map_hash, 12);
	if (ret)
		goto err_minors;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	/* Use the parent device name as DRM device unique identifier, but fall
	 * back to the driver name for virtual devices like vgem. */
	ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
	if (ret)
		goto err_setunique;

	return 0;

err_setunique:
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);
err_ctxbitmap:
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
err_minors:
	drm_minor_free(dev, DRM_MINOR_PRIMARY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->ctxlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_init);
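
/*
 * Sketch of drm_dev_init() used with a driver-owned structure that embeds
 * struct drm_device, as suggested by the kernel-doc above; the foo_device
 * type and foo_drm_driver are hypothetical:
 *
 *	struct foo_device {
 *		struct drm_device drm;
 *		...
 *	};
 *
 *	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	if (!foo)
 *		return -ENOMEM;
 *
 *	ret = drm_dev_init(&foo->drm, &foo_drm_driver, parent);
 *	if (ret) {
 *		kfree(foo);
 *		return ret;
 *	}
 */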

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * Allocate and initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems. This should be done last in the device
 * initialization sequence to make sure userspace can't access an inconsistent
 * state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * Note that for purely virtual devices @parent can be NULL.
 *
 * Drivers that wish to subclass or embed struct &drm_device into their
 * own struct should look at using drm_dev_init() instead.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	ret = drm_dev_init(dev, driver, parent);
	if (ret) {
		kfree(dev);
		return ERR_PTR(ret);
	}

	return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_PRIMARY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->ctxlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	kfree(dev->unique);
	kfree(dev);
}

/**
 * drm_dev_ref - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_unref() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_ref(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_ref);

/**
 * drm_dev_unref - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_unref(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_unref);

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise the device to
 * user-space and start normal device operation. @dev must be allocated via
 * drm_dev_alloc() previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the ->load() method after registering the device nodes, creating race
 * conditions. Usage of the ->load() method is therefore deprecated; drivers
 * must perform all initialization before calling drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	int ret;

	mutex_lock(&drm_global_mutex);

	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	ret = 0;
	goto out_unlock;

err_minors:
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_unref() to drop their final reference.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	struct drm_map_list *r_list, *list_temp;

	drm_lastclose(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	if (dev->agp)
		drm_pci_agp_destroy(dev);

	drm_vblank_cleanup(dev);

	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_legacy_rmmap(dev, r_list->map);

	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
}
EXPORT_SYMBOL(drm_dev_unregister);

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once setup, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 *  - The "DRM-Global" key/value database
 *  - Global ID management for connectors
 *  - DRM major number allocation
 *  - DRM minor management
 *  - DRM sysfs class
 *  - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	mutex_lock(&drm_global_mutex);
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor)) {
		err = PTR_ERR(minor);
		goto out_unlock;
	}

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out_release;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out_release:
	drm_minor_release(minor);
out_unlock:
	mutex_unlock(&drm_global_mutex);
	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static void drm_core_exit(void)
{
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
	drm_global_release();
}

static int __init drm_core_init(void)
{
	int ret;

	drm_global_init();
	drm_connector_ida_init();
	idr_init(&drm_minors_idr);

	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);
	if (!drm_debugfs_root) {
		ret = -ENOMEM;
		DRM_ERROR("Cannot create debugfs-root: %d\n", ret);
		goto error;
	}

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	DRM_INFO("Initialized\n");
	return 0;

error:
	drm_core_exit();
	return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);