/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
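
/*
 * Illustrative sketch (not part of the original file): a driver typically
 * embeds struct drm_gem_object in its own buffer-object structure and calls
 * drm_gem_object_init() from its create path. The "foo_bo" type and helper
 * below are hypothetical names used only for this example.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, round_up(size, PAGE_SIZE));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return bo;
 *	}
 */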

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	reservation_object_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
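
/*
 * Illustrative sketch (not part of the original file): since
 * drm_gem_dumb_map_offset() implements the &drm_driver.dumb_map_offset
 * callback directly, a GEM-based driver can point its driver structure at
 * it. The "foo_" names below are hypothetical.
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create		= foo_dumb_create,
 *		.dumb_map_offset	= drm_gem_dumb_map_offset,
 *	};
 */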

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
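
/*
 * Illustrative sketch (not part of the original file): a typical driver
 * create ioctl publishes the object with drm_gem_handle_create() and then
 * drops its local reference, leaving the handle as the only reference held
 * on behalf of userspace. "foo_bo_create" is the hypothetical helper from
 * the earlier sketch and "struct drm_foo_create" is a hypothetical ioctl
 * argument structure.
 *
 *	static int foo_create_ioctl(struct drm_device *dev, void *data,
 *				    struct drm_file *file_priv)
 *	{
 *		struct drm_foo_create *args = data;
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = foo_bo_create(dev, args->size);
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
 *		// drop the local reference; the handle now keeps the object alive
 *		drm_gem_object_put_unlocked(&bo->base);
 *
 *		return ret;
 *	}
 */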

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
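
/*
 * Illustrative sketch (not part of the original file): drm_gem_get_pages()
 * and drm_gem_put_pages() are meant to be used as a pair, for example when
 * building and later tearing down a page array for a shmem-backed object.
 * The error handling shown is a minimal example, not taken from a real
 * driver.
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	... use the page array, e.g. to build an sg-table ...
 *
 *	drm_gem_put_pages(obj, pages, true, true);
 */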

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj)
		drm_gem_object_get(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
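
/*
 * Illustrative sketch (not part of the original file): a successful
 * drm_gem_object_lookup() returns a reference, so every lookup must be
 * balanced with a put once the caller is done with the object.
 *
 *	obj = drm_gem_object_lookup(file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on obj ...
 *
 *	drm_gem_object_put_unlocked(obj);
 */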

/**
 * drm_gem_reservation_object_wait - Wait on a GEM object's reservation's
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than 0 on success.
 */
long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
				     bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
						  true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_reservation_object_wait);

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	reservation_object_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs) {
		obj->funcs->free(obj);
	} else if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;

	if (dev->driver->gem_free_object) {
		might_lock(&dev->struct_mutex);
		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				   &dev->struct_mutex))
			mutex_unlock(&dev->struct_mutex);
	} else {
		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (obj->funcs && obj->funcs->vm_ops)
		vma->vm_ops = obj->funcs->vm_ops;
	else if (dev->driver->gem_vm_ops)
		vma->vm_ops = dev->driver->gem_vm_ops;
	else
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
	else if (obj->dev->driver->gem_print_info)
		obj->dev->driver->gem_print_info(p, indent, obj);
}

/**
 * drm_gem_pin - Pin backing buffer in memory
 * @obj: GEM object
 *
 * Make sure the backing buffer is pinned in memory.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}
EXPORT_SYMBOL(drm_gem_pin);

/**
 * drm_gem_unpin - Unpin backing buffer from memory
 * @obj: GEM object
 *
 * Relax the requirement that the backing buffer is pinned in memory.
 */
void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_unpin);

/**
 * drm_gem_vmap - Map buffer into kernel virtual address space
 * @obj: GEM object
 *
 * Returns:
 * A virtual pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_vmap);

/**
 * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space
 * @obj: GEM object
 * @vaddr: Virtual address (can be NULL)
 */
void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_vunmap);
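
/*
 * Illustrative sketch (not part of the original file): drm_gem_vmap() either
 * returns a kernel virtual address or an ERR_PTR value, so callers check with
 * IS_ERR() and balance the mapping with drm_gem_vunmap().
 *
 *	void *vaddr;
 *
 *	vaddr = drm_gem_vmap(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	... access the buffer contents through vaddr ...
 *
 *	drm_gem_vunmap(obj, vaddr);
 */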

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then call
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				ww_mutex_unlock(&objs[j]->resv->lock);

			if (contended != -1 && contended >= i)
				ww_mutex_unlock(&objs[contended]->resv->lock);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		ww_mutex_unlock(&objs[i]->resv->lock);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
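
/*
 * Illustrative sketch (not part of the original file): a job-submission path
 * would typically lock all objects touched by the job, attach its fences,
 * and unlock again. "job", "foo_attach_fences" and the bos/bo_count fields
 * are hypothetical names used only for this example.
 *
 *	struct ww_acquire_ctx acquire_ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(job->bos, job->bo_count, &acquire_ctx);
 *	if (ret)
 *		return ret;
 *
 *	foo_attach_fences(job);
 *
 *	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);
 */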