/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}
105
Jesse Barnesa2c0a972008-11-05 10:31:53 -0800106void
107drm_gem_destroy(struct drm_device *dev)
108{
Jesse Barnesa2c0a972008-11-05 10:31:53 -0800109
Daniel Vetterb04a5902013-12-11 14:24:46 +0100110 drm_vma_offset_manager_destroy(dev->vma_offset_manager);
111 kfree(dev->vma_offset_manager);
112 dev->vma_offset_manager = NULL;
Jesse Barnesa2c0a972008-11-05 10:31:53 -0800113}
114
/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

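/*
 * Usage sketch (not part of this file): a shmem-backed driver's
 * &drm_driver.dumb_create callback typically pairs drm_gem_object_init()
 * with drm_gem_handle_create() as below.  A real driver embeds the object
 * in its own BO struct; the bare object and the pitch/size math here are
 * hypothetical, only to keep the sketch short.
 *
 *     static int my_dumb_create(struct drm_file *file, struct drm_device *dev,
 *                               struct drm_mode_create_dumb *args)
 *     {
 *             struct drm_gem_object *obj;
 *             int ret;
 *
 *             args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *             args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *             obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *             if (!obj)
 *                     return -ENOMEM;
 *
 *             ret = drm_gem_object_init(dev, obj, args->size);
 *             if (ret) {
 *                     kfree(obj);
 *                     return ret;
 *             }
 *
 *             ret = drm_gem_handle_create(file, obj, &args->handle);
 *             drm_gem_object_put_unlocked(obj); // the handle now owns a ref
 *             return ret;
 *     }
 */
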
/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        reservation_object_init(&obj->_resv);
        if (!obj->resv)
                obj->resv = &obj->_resv;

        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
        /*
         * Note: obj->dma_buf can't disappear as long as we still hold a
         * handle reference in obj->handle_count.
         */
        mutex_lock(&filp->prime.lock);
        if (obj->dma_buf) {
                drm_prime_remove_buf_handle_locked(&filp->prime,
                                                   obj->dma_buf);
        }
        mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        if (obj->funcs && obj->funcs->close)
                obj->funcs->close(obj, file_priv);
        else if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        /* Don't allow imported objects to be mapped */
        if (obj->import_attach) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (obj->funcs && obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
        } else if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

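/*
 * For illustration, the userspace side of the fake-offset scheme (a sketch,
 * not kernel code): the offset the driver's map ioctl returns, e.g.
 * DRM_IOCTL_MODE_MAP_DUMB filling &drm_mode_map_dumb.offset, is only
 * meaningful as the last argument of a subsequent mmap(2) on the DRM fd:
 *
 *     struct drm_mode_map_dumb map = { .handle = handle };
 *
 *     if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
 *             return -errno;
 *     ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                fd, map.offset);
 */
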
/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);
        __pagevec_release(pvec);
        cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        struct pagevec pvec;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        mapping_set_unevictable(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        mapping_clear_unevictable(mapping);
        pagevec_init(&pvec);
        while (i--) {
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;
        struct address_space *mapping;
        struct pagevec pvec;

        mapping = file_inode(obj->filp)->i_mapping;
        mapping_clear_unevictable(mapping);

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pagevec_init(&pvec);
        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

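/*
 * A minimal pin/unpin sketch pairing the two helpers above; "struct my_bo"
 * with a "pages" field is a hypothetical driver-side wrapper embedding a
 * drm_gem_object as "base", not core API:
 *
 *     static int my_bo_pin_pages(struct my_bo *bo)
 *     {
 *             struct page **pages = drm_gem_get_pages(&bo->base);
 *
 *             if (IS_ERR(pages))
 *                     return PTR_ERR(pages);
 *             bo->pages = pages;
 *             return 0;
 *     }
 *
 *     static void my_bo_unpin_pages(struct my_bo *bo)
 *     {
 *             // drop the references, marking pages dirty and accessed
 *             drm_gem_put_pages(&bo->base, bo->pages, true, true);
 *             bo->pages = NULL;
 *     }
 */
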
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
                          struct drm_gem_object **objs)
{
        int i, ret = 0;
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        for (i = 0; i < count; i++) {
                /* Check if we currently have a reference on the object */
                obj = idr_find(&filp->object_idr, handle[i]);
                if (!obj) {
                        ret = -ENOENT;
                        break;
                }
                drm_gem_object_get(obj);
                objs[i] = obj;
        }
        spin_unlock(&filp->table_lock);

        return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out)
{
        int ret;
        u32 *handles;
        struct drm_gem_object **objs;

        if (!count)
                return 0;

        objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
                              GFP_KERNEL | __GFP_ZERO);
        if (!objs)
                return -ENOMEM;

        handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                goto out;
        }

        if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in GEM handles\n");
                goto out;
        }

        ret = objects_lookup(filp, handles, count, objs);
        *objs_out = objs;

out:
        kvfree(handles);
        return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

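/*
 * Submit-path sketch (hypothetical ioctl args, not core code): look up all
 * BOs referenced by a job in one call, then drop the references when the
 * job no longer needs them.  The objs array itself is kvmalloc'ed, so it is
 * released with kvfree().
 *
 *     struct drm_gem_object **objs;
 *     int i, ret;
 *
 *     ret = drm_gem_objects_lookup(file_priv,
 *                                  u64_to_user_ptr(args->bo_handles),
 *                                  args->bo_handle_count, &objs);
 *     if (ret)
 *             return ret;
 *     // ... use the objects ...
 *     for (i = 0; i < args->bo_handle_count; i++)
 *             drm_gem_object_put_unlocked(objs[i]);
 *     kvfree(objs);
 */
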
/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj = NULL;

        objects_lookup(filp, &handle, 1, &obj);
        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_reservation_object_wait - Wait on a GEM object's reservation's
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
                                     bool wait_all, unsigned long timeout)
{
        long ret;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(filep, handle);
        if (!obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
                return -EINVAL;
        }

        ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
                                                  true, timeout);
        if (ret == 0)
                ret = -ETIME;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_reservation_object_wait);

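/*
 * A wait-BO ioctl sketch built on the helper above ("args" fields are
 * hypothetical): callers usually convert an absolute nanosecond timeout to
 * jiffies first, or pass 0 for a non-blocking poll.
 *
 *     unsigned long t = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *     return drm_gem_reservation_object_wait(file_priv, args->handle,
 *                                            true, t);
 */
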
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_put_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

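/*
 * Userspace view of the two ioctls above (a sketch): flink turns a local
 * handle into a global name, open turns a name from another process back
 * into a local handle.  GEM names hold no reference and are guessable
 * integers, which is why dma-buf fds are the preferred sharing mechanism.
 *
 *     struct drm_gem_flink flink = { .handle = handle };
 *     struct drm_gem_open open = { 0 };
 *
 *     drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);   // exporting process
 *     open.name = flink.name;
 *     drmIoctl(fd2, DRM_IOCTL_GEM_OPEN, &open);    // importing process
 *     // open.handle and open.size now describe the shared object
 */
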
/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init_base(&file_private->object_idr, 1);
        spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
                fput(obj->filp);

        reservation_object_fini(&obj->_resv);
        drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;

        if (obj->funcs) {
                obj->funcs->free(obj);
        } else if (dev->driver->gem_free_object_unlocked) {
                dev->driver->gem_free_object_unlocked(obj);
        } else if (dev->driver->gem_free_object) {
                WARN_ON(!mutex_is_locked(&dev->struct_mutex));

                dev->driver->gem_free_object(obj);
        }
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev;

        if (!obj)
                return;

        dev = obj->dev;

        if (dev->driver->gem_free_object) {
                might_lock(&dev->struct_mutex);
                if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
                                   &dev->struct_mutex))
                        mutex_unlock(&dev->struct_mutex);
        } else {
                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
        if (obj) {
                WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

                kref_put(&obj->refcount, drm_gem_object_free);
        }
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (obj->funcs && obj->funcs->vm_ops)
                vma->vm_ops = obj->funcs->vm_ops;
        else if (dev->driver->gem_vm_ops)
                vma->vm_ops = dev->driver->gem_vm_ops;
        else
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = obj;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_get(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_dev_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, priv)) {
                drm_gem_object_put_unlocked(obj);
                return -EACCES;
        }

        if (node->readonly) {
                if (vma->vm_flags & VM_WRITE) {
                        drm_gem_object_put_unlocked(obj);
                        return -EINVAL;
                }

                vma->vm_flags &= ~VM_MAYWRITE;
        }

        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                               vma);

        drm_gem_object_put_unlocked(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

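/*
 * Wiring sketch: drivers reach drm_gem_mmap() through their
 * &file_operations on the DRM device node ("my_driver" is a placeholder
 * name, the rest are the stock DRM file hooks):
 *
 *     static const struct file_operations my_driver_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .compat_ioctl = drm_compat_ioctl,
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .llseek = noop_llseek,
 *             .mmap = drm_gem_mmap,
 *     };
 */
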
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
                        const struct drm_gem_object *obj)
{
        drm_printf_indent(p, indent, "name=%d\n", obj->name);
        drm_printf_indent(p, indent, "refcount=%u\n",
                          kref_read(&obj->refcount));
        drm_printf_indent(p, indent, "start=%08lx\n",
                          drm_vma_node_start(&obj->vma_node));
        drm_printf_indent(p, indent, "size=%zu\n", obj->size);
        drm_printf_indent(p, indent, "imported=%s\n",
                          obj->import_attach ? "yes" : "no");

        if (obj->funcs && obj->funcs->print_info)
                obj->funcs->print_info(p, indent, obj);
        else if (obj->dev->driver->gem_print_info)
                obj->dev->driver->gem_print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
        if (obj->funcs && obj->funcs->pin)
                return obj->funcs->pin(obj);
        else if (obj->dev->driver->gem_prime_pin)
                return obj->dev->driver->gem_prime_pin(obj);
        else
                return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
        if (obj->funcs && obj->funcs->unpin)
                obj->funcs->unpin(obj);
        else if (obj->dev->driver->gem_prime_unpin)
                obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
        void *vaddr;

        if (obj->funcs && obj->funcs->vmap)
                vaddr = obj->funcs->vmap(obj);
        else if (obj->dev->driver->gem_prime_vmap)
                vaddr = obj->dev->driver->gem_prime_vmap(obj);
        else
                vaddr = ERR_PTR(-EOPNOTSUPP);

        if (!vaddr)
                vaddr = ERR_PTR(-ENOMEM);

        return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        if (!vaddr)
                return;

        if (obj->funcs && obj->funcs->vunmap)
                obj->funcs->vunmap(obj, vaddr);
        else if (obj->dev->driver->gem_prime_vunmap)
                obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}

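/*
 * The four helpers above are internal (declared in drm_internal.h) and back
 * the PRIME pin/vmap paths.  A calling sketch for the vmap pair; note the
 * error is always reported via ERR_PTR, never as NULL:
 *
 *     void *vaddr = drm_gem_vmap(obj);
 *
 *     if (IS_ERR(vaddr))
 *             return PTR_ERR(vaddr);  // NULL is mapped to -ENOMEM above
 *     // ... CPU access through vaddr ...
 *     drm_gem_vunmap(obj, vaddr);
 */
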
/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
                          struct ww_acquire_ctx *acquire_ctx)
{
        int contended = -1;
        int i, ret;

        ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
        if (contended != -1) {
                struct drm_gem_object *obj = objs[contended];

                ret = reservation_object_lock_slow_interruptible(obj->resv,
                                                                 acquire_ctx);
                if (ret) {
                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        for (i = 0; i < count; i++) {
                if (i == contended)
                        continue;

                ret = reservation_object_lock_interruptible(objs[i]->resv,
                                                            acquire_ctx);
                if (ret) {
                        int j;

                        for (j = 0; j < i; j++)
                                reservation_object_unlock(objs[j]->resv);

                        if (contended != -1 && contended >= i)
                                reservation_object_unlock(objs[contended]->resv);

                        if (ret == -EDEADLK) {
                                contended = i;
                                goto retry;
                        }

                        ww_acquire_done(acquire_ctx);
                        return ret;
                }
        }

        ww_acquire_done(acquire_ctx);

        return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
                            struct ww_acquire_ctx *acquire_ctx)
{
        int i;

        for (i = 0; i < count; i++)
                reservation_object_unlock(objs[i]->resv);

        ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

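/*
 * Typical submit-path pattern for the two functions above (a sketch; the
 * "job" fields are hypothetical driver state):
 *
 *     struct ww_acquire_ctx ctx;
 *     int ret;
 *
 *     ret = drm_gem_lock_reservations(job->bos, job->bo_count, &ctx);
 *     if (ret)
 *             return ret;
 *     // all reservations held: gather deps, queue the job, add fences
 *     drm_gem_unlock_reservations(job->bos, job->bo_count, &ctx);
 */
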
/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
                            struct dma_fence *fence)
{
        struct dma_fence *entry;
        unsigned long index;
        u32 id = 0;
        int ret;

        if (!fence)
                return 0;

        /* Deduplicate if we already depend on a fence from the same context.
         * This lets the size of the array of deps scale with the number of
         * engines involved, rather than the number of BOs.
         */
        xa_for_each(fence_array, index, entry) {
                if (entry->context != fence->context)
                        continue;

                if (dma_fence_is_later(fence, entry)) {
                        dma_fence_put(entry);
                        xa_store(fence_array, index, fence, GFP_KERNEL);
                } else {
                        dma_fence_put(fence);
                }
                return 0;
        }

        ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
        if (ret != 0)
                dma_fence_put(fence);

        return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
                                     struct drm_gem_object *obj,
                                     bool write)
{
        int ret;
        struct dma_fence **fences;
        unsigned int i, fence_count;

        if (!write) {
                struct dma_fence *fence =
                        reservation_object_get_excl_rcu(obj->resv);

                return drm_gem_fence_array_add(fence_array, fence);
        }

        ret = reservation_object_get_fences_rcu(obj->resv, NULL,
                                                &fence_count, &fences);
        if (ret || !fence_count)
                return ret;

        for (i = 0; i < fence_count; i++) {
                ret = drm_gem_fence_array_add(fence_array, fences[i]);
                if (ret)
                        break;
        }

        for (; i < fence_count; i++)
                dma_fence_put(fences[i]);
        kfree(fences);
        return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
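/*
 * Dependency-gathering sketch for a scheduler-based driver (the "job->deps"
 * xarray and the other job fields are hypothetical; this mirrors the way
 * v3d/panfrost-style drivers use the helpers above).  The xarray must be
 * allocation-capable since drm_gem_fence_array_add() uses xa_alloc():
 *
 *     xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
 *
 *     for (i = 0; i < job->bo_count; i++) {
 *             ret = drm_gem_fence_array_add_implicit(&job->deps,
 *                                                    job->bos[i],
 *                                                    job->write);
 *             if (ret)
 *                     goto fail;
 *     }
 *     // later, the scheduler's dependency hook pops fences via xa_erase()
 */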