/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
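
/*
 * Illustrative sketch: a minimal driver-side helper combining
 * drm_gem_object_init() with drm_gem_handle_create(), as shmem-backed
 * drivers typically do.  struct foo_obj and foo_gem_create() are
 * hypothetical names used only to show the pattern.
 *
 *	struct foo_obj {
 *		struct drm_gem_object base;
 *	};
 *
 *	static int foo_gem_create(struct drm_device *dev,
 *				  struct drm_file *file_priv,
 *				  size_t size, u32 *handle)
 *	{
 *		struct foo_obj *fobj;
 *		int ret;
 *
 *		fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		if (!fobj)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_object_init(dev, &fobj->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(fobj);
 *			return ret;
 *		}
 *
 *		ret = drm_gem_handle_create(file_priv, &fobj->base, handle);
 *		(drop the initial reference; the handle now keeps the object alive)
 *		drm_gem_object_unreference_unlocked(&fobj->base);
 *		return ret;
 *	}
 */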

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
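
/*
 * Illustrative sketch: drm_gem_private_object_init() is typically used when
 * the backing storage does not come from shmem, e.g. for an object built
 * around an imported dma-buf.  struct foo_obj and foo_prime_import_sg_table()
 * are hypothetical names; the calling pattern is the point.
 *
 *	struct drm_gem_object *
 *	foo_prime_import_sg_table(struct drm_device *dev,
 *				  struct dma_buf_attachment *attach,
 *				  struct sg_table *sgt)
 *	{
 *		struct foo_obj *fobj;
 *
 *		fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		if (!fobj)
 *			return ERR_PTR(-ENOMEM);
 *
 *		drm_gem_private_object_init(dev, &fobj->base,
 *					    PAGE_ALIGN(attach->dmabuf->size));
 *		fobj->sgt = sgt;
 *		return &fobj->base;
 *	}
 */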

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
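
/*
 * Illustrative sketch: a typical dumb_map_offset style helper pairs
 * drm_gem_create_mmap_offset() with drm_vma_node_offset_addr() to hand the
 * fake offset back to userspace.  foo_gem_map_offset() is a hypothetical
 * driver function, not part of this file.
 *
 *	static int foo_gem_map_offset(struct drm_file *file_priv,
 *				      struct drm_device *dev,
 *				      u32 handle, u64 *offset)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = drm_gem_object_lookup(dev, file_priv, handle);
 *		if (!obj)
 *			return -ENOENT;
 *
 *		ret = drm_gem_create_mmap_offset(obj);
 *		if (!ret)
 *			*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *		drm_gem_object_unreference_unlocked(obj);
 *		return ret;
 *	}
 */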

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = file_inode(obj->filp)->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
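
/*
 * Illustrative sketch: a driver that pins its shmem backing storage on
 * demand typically brackets drm_gem_get_pages() and drm_gem_put_pages()
 * like this.  struct foo_obj, foo_gem_pin() and foo_gem_unpin() are
 * hypothetical names.
 *
 *	static int foo_gem_pin(struct foo_obj *fobj)
 *	{
 *		struct page **pages;
 *
 *		pages = drm_gem_get_pages(&fobj->base);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		fobj->pages = pages;
 *		return 0;
 *	}
 *
 *	static void foo_gem_unpin(struct foo_obj *fobj)
 *	{
 *		(mark the pages dirty and accessed before dropping them)
 *		drm_gem_put_pages(&fobj->base, fobj->pages, true, true);
 *		fobj->pages = NULL;
 *	}
 */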

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
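
/*
 * Illustrative sketch: ioctl handlers usually bracket their work with
 * drm_gem_object_lookup() and drm_gem_object_unreference_unlocked(), since
 * the lookup returns an additional reference.  foo_ioctl_busy(),
 * struct drm_foo_busy and foo_object_is_busy() are hypothetical.
 *
 *	static int foo_ioctl_busy(struct drm_device *dev, void *data,
 *				  struct drm_file *file_priv)
 *	{
 *		struct drm_foo_busy *args = data;
 *		struct drm_gem_object *obj;
 *
 *		obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *		if (!obj)
 *			return -ENOENT;
 *
 *		args->busy = foo_object_is_busy(obj);
 *
 *		drm_gem_object_unreference_unlocked(obj);
 *		return 0;
 *	}
 */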

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
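
/*
 * Illustrative sketch of the userspace side of flink/open: one process
 * exports a handle as a global name with DRM_IOCTL_GEM_FLINK, and another
 * process turns that name back into a local handle with DRM_IOCTL_GEM_OPEN.
 * Error handling is omitted; fd is an open DRM device file descriptor.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	(flink.name can now be passed to another process)
 *
 *	struct drm_gem_open open_args = { .name = name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_args);
 *	(open_args.handle and open_args.size describe the object)
 */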

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
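
/*
 * Illustrative sketch: a driver's gem_free_object callback normally calls
 * drm_gem_object_release() to drop the shmem file and mmap offset before
 * freeing its own wrapper structure.  foo_gem_free_object() and struct
 * foo_obj are hypothetical names for such a callback.
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_obj *fobj = container_of(obj, struct foo_obj, base);
 *
 *		drm_gem_object_release(obj);
 *		kfree(fobj);
 *	}
 */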
776
Jesse Barnesab00b3e2009-02-11 14:01:46 -0800777void drm_gem_vm_open(struct vm_area_struct *vma)
778{
779 struct drm_gem_object *obj = vma->vm_private_data;
780
781 drm_gem_object_reference(obj);
Chris Wilson31dfbc92010-09-27 21:28:30 +0100782
783 mutex_lock(&obj->dev->struct_mutex);
Rob Clarkb06d66b2012-05-01 11:04:51 -0500784 drm_vm_open_locked(obj->dev, vma);
Chris Wilson31dfbc92010-09-27 21:28:30 +0100785 mutex_unlock(&obj->dev->struct_mutex);
Jesse Barnesab00b3e2009-02-11 14:01:46 -0800786}
787EXPORT_SYMBOL(drm_gem_vm_open);
788
789void drm_gem_vm_close(struct vm_area_struct *vma)
790{
791 struct drm_gem_object *obj = vma->vm_private_data;
Chris Wilsonb74ad5a2011-03-17 22:33:33 +0000792 struct drm_device *dev = obj->dev;
Jesse Barnesab00b3e2009-02-11 14:01:46 -0800793
Chris Wilsonb74ad5a2011-03-17 22:33:33 +0000794 mutex_lock(&dev->struct_mutex);
Rob Clarkb06d66b2012-05-01 11:04:51 -0500795 drm_vm_close_locked(obj->dev, vma);
Chris Wilson31dfbc92010-09-27 21:28:30 +0100796 drm_gem_object_unreference(obj);
Chris Wilsonb74ad5a2011-03-17 22:33:33 +0000797 mutex_unlock(&dev->struct_mutex);
Jesse Barnesab00b3e2009-02-11 14:01:46 -0800798}
799EXPORT_SYMBOL(drm_gem_vm_close);
800
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
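
/*
 * Illustrative sketch: a dma-buf/prime mmap callback, where the object is
 * already known and no fake-offset lookup is wanted, can call
 * drm_gem_mmap_obj() directly under struct_mutex.  foo_gem_prime_mmap() is
 * a hypothetical driver function.
 *
 *	static int foo_gem_prime_mmap(struct drm_gem_object *obj,
 *				      struct vm_area_struct *vma)
 *	{
 *		int ret;
 *
 *		mutex_lock(&obj->dev->struct_mutex);
 *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *		mutex_unlock(&obj->dev->struct_mutex);
 *
 *		return ret;
 *	}
 */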

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
					   vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
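
/*
 * Illustrative sketch: drivers wire drm_gem_mmap() into their
 * file_operations and provide gem_vm_ops in struct drm_driver, reusing
 * drm_gem_vm_open() and drm_gem_vm_close() for reference handling.
 * foo_gem_fault and the foo_* structures are hypothetical.
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *	};
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.gem_vm_ops = &foo_gem_vm_ops,
 *		.fops = &foo_driver_fops,
 *	};
 */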