/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);

        vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
                                          GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
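
/*
 * A minimal creation sketch for driver code built on this helper (purely
 * illustrative, not part of this file; "my_obj" and "my_gem_funcs" are
 * hypothetical driver names). The size must be page-aligned, or
 * drm_gem_private_object_init() will BUG:
 *
 *      struct my_obj *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *      int ret;
 *
 *      if (!bo)
 *              return ERR_PTR(-ENOMEM);
 *      bo->base.funcs = &my_gem_funcs;
 *      ret = drm_gem_object_init(dev, &bo->base, round_up(size, PAGE_SIZE));
 *      if (ret) {
 *              kfree(bo);
 *              return ERR_PTR(ret);
 *      }
 *      return bo;
 */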

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        dma_resv_init(&obj->_resv);
        if (!obj->resv)
                obj->resv = &obj->_resv;

        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
        /*
         * Note: obj->dma_buf can't disappear as long as we still hold a
         * handle reference in obj->handle_count.
         */
        mutex_lock(&filp->prime.lock);
        if (obj->dma_buf) {
                drm_prime_remove_buf_handle_locked(&filp->prime,
                                                   obj->dma_buf);
        }
        mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name.
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;

        if (obj->funcs->close)
                obj->funcs->close(obj, file_priv);

        drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 296 | |
| 297 | /** |
Rob Herring | abd4e74 | 2019-08-07 10:52:47 -0400 | [diff] [blame] | 298 | * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object |
Noralf Trønnes | db61152 | 2017-07-23 21:16:17 +0200 | [diff] [blame] | 299 | * @file: drm file-private structure containing the gem object |
| 300 | * @dev: corresponding drm_device |
| 301 | * @handle: gem object handle |
| 302 | * @offset: return location for the fake mmap offset |
| 303 | * |
| 304 | * This implements the &drm_driver.dumb_map_offset kms driver callback for |
| 305 | * drivers which use gem to manage their backing storage. |
| 306 | * |
| 307 | * Returns: |
| 308 | * 0 on success or a negative error code on failure. |
| 309 | */ |
Rob Herring | abd4e74 | 2019-08-07 10:52:47 -0400 | [diff] [blame] | 310 | int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, |
Noralf Trønnes | db61152 | 2017-07-23 21:16:17 +0200 | [diff] [blame] | 311 | u32 handle, u64 *offset) |
| 312 | { |
| 313 | struct drm_gem_object *obj; |
| 314 | int ret; |
| 315 | |
| 316 | obj = drm_gem_object_lookup(file, handle); |
| 317 | if (!obj) |
| 318 | return -ENOENT; |
| 319 | |
Noralf Trønnes | 90378e5 | 2017-08-17 18:21:30 +0200 | [diff] [blame] | 320 | /* Don't allow imported objects to be mapped */ |
| 321 | if (obj->import_attach) { |
| 322 | ret = -EINVAL; |
| 323 | goto out; |
| 324 | } |
| 325 | |
Noralf Trønnes | db61152 | 2017-07-23 21:16:17 +0200 | [diff] [blame] | 326 | ret = drm_gem_create_mmap_offset(obj); |
| 327 | if (ret) |
| 328 | goto out; |
| 329 | |
| 330 | *offset = drm_vma_node_offset_addr(&obj->vma_node); |
| 331 | out: |
Emil Velikov | be6ee10 | 2020-05-15 10:50:53 +0100 | [diff] [blame] | 332 | drm_gem_object_put(obj); |
Noralf Trønnes | db61152 | 2017-07-23 21:16:17 +0200 | [diff] [blame] | 333 | |
| 334 | return ret; |
| 335 | } |
Rob Herring | abd4e74 | 2019-08-07 10:52:47 -0400 | [diff] [blame] | 336 | EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset); |
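
/*
 * Since this helper matches the &drm_driver.dumb_map_offset prototype, a
 * GEM-based driver can usually plug it in directly (sketch; "my_driver" and
 * my_dumb_create() are hypothetical):
 *
 *      static const struct drm_driver my_driver = {
 *              .driver_features = DRIVER_GEM | DRIVER_MODESET,
 *              .dumb_create = my_dumb_create,
 *              .dumb_map_offset = drm_gem_dumb_map_offset,
 *      };
 */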
Noralf Trønnes | db61152 | 2017-07-23 21:16:17 +0200 | [diff] [blame] | 337 | |
| 338 | /** |
Daniel Vetter | 43387b3 | 2013-07-16 09:12:04 +0200 | [diff] [blame] | 339 | * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers |
Daniel Vetter | 89d61fc | 2014-01-21 12:39:00 +0100 | [diff] [blame] | 340 | * @file: drm file-private structure to remove the dumb handle from |
| 341 | * @dev: corresponding drm_device |
| 342 | * @handle: the dumb handle to remove |
Noralf Trønnes | 1dd3a060 | 2017-10-26 18:57:26 +0200 | [diff] [blame] | 343 | * |
Daniel Vetter | 940eba2 | 2017-01-25 07:26:46 +0100 | [diff] [blame] | 344 | * This implements the &drm_driver.dumb_destroy kms driver callback for drivers |
| 345 | * which use gem to manage their backing storage. |
Daniel Vetter | 43387b3 | 2013-07-16 09:12:04 +0200 | [diff] [blame] | 346 | */ |
| 347 | int drm_gem_dumb_destroy(struct drm_file *file, |
| 348 | struct drm_device *dev, |
| 349 | uint32_t handle) |
| 350 | { |
| 351 | return drm_gem_handle_delete(file, handle); |
| 352 | } |
| 353 | EXPORT_SYMBOL(drm_gem_dumb_destroy); |

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr. Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
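
/*
 * Typical tail of a driver's buffer-creation ioctl (sketch; "args" stands in
 * for the driver's ioctl argument struct and obj for a freshly created
 * object). The creation reference is dropped right away because the new
 * handle now holds its own reference:
 *
 *      ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *      drm_gem_object_put(obj);
 *      if (ret)
 *              return ret;
 *      return 0;
 */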

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
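
/*
 * End-to-end flow for the fake offset (illustrative only; which ioctl
 * reports the offset is driver-specific): the kernel side returns
 * drm_vma_node_offset_addr(&obj->vma_node) to userspace, and userspace then
 * passes that value as the mmap(2) offset on the DRM device fd:
 *
 *      ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 drm_fd, offset);
 */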

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);
        __pagevec_release(pvec);
        cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        struct pagevec pvec;
        int i, npages;

        if (WARN_ON(!obj->filp))
                return ERR_PTR(-EINVAL);

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        mapping_set_unevictable(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        mapping_clear_unevictable(mapping);
        pagevec_init(&pvec);
        while (i--) {
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
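
/*
 * Pin/use/unpin sketch (illustrative; error unwinding trimmed). Building an
 * sg-table with drm_prime_pages_to_sg() is just one possible use of the
 * returned array:
 *
 *      struct page **pages;
 *      struct sg_table *sgt;
 *
 *      pages = drm_gem_get_pages(obj);
 *      if (IS_ERR(pages))
 *              return PTR_ERR(pages);
 *      sgt = drm_prime_pages_to_sg(obj->dev, pages, obj->size >> PAGE_SHIFT);
 *      ...
 *      drm_gem_put_pages(obj, pages, true, true);
 */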

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;
        struct address_space *mapping;
        struct pagevec pvec;

        mapping = file_inode(obj->filp)->i_mapping;
        mapping_clear_unevictable(mapping);

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pagevec_init(&pvec);
        for (i = 0; i < npages; i++) {
                if (!pages[i])
                        continue;

                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
                          struct drm_gem_object **objs)
{
        int i, ret = 0;
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        for (i = 0; i < count; i++) {
                /* Check if we currently have a reference on the object */
                obj = idr_find(&filp->object_idr, handle[i]);
                if (!obj) {
                        ret = -ENOENT;
                        break;
                }
                drm_gem_object_get(obj);
                objs[i] = obj;
        }
        spin_unlock(&filp->table_lock);

        return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out)
{
        int ret;
        u32 *handles;
        struct drm_gem_object **objs;

        if (!count)
                return 0;

        objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
                              GFP_KERNEL | __GFP_ZERO);
        if (!objs)
                return -ENOMEM;

        *objs_out = objs;

        handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                goto out;
        }

        if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in GEM handles\n");
                goto out;
        }

        ret = objects_lookup(filp, handles, count, objs);
out:
        kvfree(handles);
        return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
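
/*
 * Sketch of use from a submit/execbuf-style ioctl ("args" fields are
 * hypothetical; the handle array arrives from userspace as a u64, and the
 * unwinding of a partially successful lookup is elided). The result array
 * comes from kvmalloc_array(), so drop each object reference and then free
 * the array with kvfree():
 *
 *      struct drm_gem_object **objs;
 *      int i, ret;
 *
 *      ret = drm_gem_objects_lookup(file_priv,
 *                                   u64_to_user_ptr(args->bo_handles),
 *                                   args->bo_count, &objs);
 *      if (ret)
 *              return ret;
 *      ... use the objects, then drop the references ...
 *      for (i = 0; i < args->bo_count; i++)
 *              drm_gem_object_put(objs[i]);
 *      kvfree(objs);
 */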

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj = NULL;

        objects_lookup(filp, &handle, 1, &obj);
        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
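
/*
 * The canonical lookup/use/put pattern in an ioctl handler (sketch; "args"
 * is a placeholder for the ioctl argument struct):
 *
 *      struct drm_gem_object *obj;
 *
 *      obj = drm_gem_object_lookup(file_priv, args->handle);
 *      if (!obj)
 *              return -ENOENT;
 *      ... operate on obj ...
 *      drm_gem_object_put(obj);
 *      return 0;
 */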

/**
 * drm_gem_dma_resv_wait - wait on the shared and/or exclusive fences of a
 * GEM object's reservation object
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns 0 on success, -ETIME if the wait timed out, -ERESTARTSYS if
 * interrupted, or another negative error code on failure.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
                           bool wait_all, unsigned long timeout)
{
        long ret;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(filep, handle);
        if (!obj) {
                DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
                return -EINVAL;
        }

        ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
                                        true, timeout);
        if (ret == 0)
                ret = -ETIME;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
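
/*
 * Sketch of a driver wait ioctl built on this helper ("args" fields are
 * hypothetical; converting an absolute nanosecond deadline with
 * drm_timeout_abs_to_jiffies() is one common choice):
 *
 *      unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *      return drm_gem_dma_resv_wait(file_priv, args->handle,
 *                                   args->wait_all, timeout);
 */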
Rob Herring | 1ba6271 | 2019-02-02 09:41:54 -0600 | [diff] [blame] | 795 | |
| 796 | /** |
Daniel Vetter | 89d61fc | 2014-01-21 12:39:00 +0100 | [diff] [blame] | 797 | * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl |
| 798 | * @dev: drm_device |
| 799 | * @data: ioctl data |
| 800 | * @file_priv: drm file-private structure |
| 801 | * |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 802 | * Releases the handle to an mm object. |
| 803 | */ |
| 804 | int |
| 805 | drm_gem_close_ioctl(struct drm_device *dev, void *data, |
| 806 | struct drm_file *file_priv) |
| 807 | { |
| 808 | struct drm_gem_close *args = data; |
| 809 | int ret; |
| 810 | |
Andrzej Hajda | 1bcecfa | 2014-09-30 16:49:56 +0200 | [diff] [blame] | 811 | if (!drm_core_check_feature(dev, DRIVER_GEM)) |
Chris Wilson | 69fdf42 | 2018-09-13 20:20:50 +0100 | [diff] [blame] | 812 | return -EOPNOTSUPP; |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 813 | |
| 814 | ret = drm_gem_handle_delete(file_priv, args->handle); |
| 815 | |
| 816 | return ret; |
| 817 | } |
| 818 | |
/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        obj = drm_gem_object_lookup(file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_put(obj);
        return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -EOPNOTSUPP;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_get(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;
        args->size = obj->size;

err:
        drm_gem_object_put(obj);
        return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init_base(&file_private->object_idr, 1);
        spin_lock_init(&file_private->table_lock);
}

Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 929 | /** |
Daniel Vetter | 89d61fc | 2014-01-21 12:39:00 +0100 | [diff] [blame] | 930 | * drm_gem_release - release file-private GEM resources |
| 931 | * @dev: drm_device which is being closed by userspace |
| 932 | * @file_private: drm file-private structure to clean up |
| 933 | * |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 934 | * Called at close time when the filp is going away. |
| 935 | * |
| 936 | * Releases any remaining references on objects held by this filp. |
| 937 | */ |
| 938 | void |
| 939 | drm_gem_release(struct drm_device *dev, struct drm_file *file_private) |
| 940 | { |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 941 | idr_for_each(&file_private->object_idr, |
Ben Skeggs | 304eda3 | 2011-06-09 00:24:59 +0000 | [diff] [blame] | 942 | &drm_gem_object_release_handle, file_private); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 943 | idr_destroy(&file_private->object_idr); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 944 | } |
| 945 | |
Daniel Vetter | f74418a | 2016-03-30 11:40:52 +0200 | [diff] [blame] | 946 | /** |
| 947 | * drm_gem_object_release - release GEM buffer object resources |
| 948 | * @obj: GEM buffer object |
| 949 | * |
| 950 | * This releases any structures and resources used by @obj and is the inverse of |
| 951 | * drm_gem_object_init(). |
| 952 | */ |
Daniel Vetter | fd632aa | 2010-04-09 19:05:05 +0000 | [diff] [blame] | 953 | void |
| 954 | drm_gem_object_release(struct drm_gem_object *obj) |
Luca Barbieri | c3ae90c | 2010-02-09 05:49:11 +0000 | [diff] [blame] | 955 | { |
Daniel Vetter | 319c933 | 2013-08-15 00:02:46 +0200 | [diff] [blame] | 956 | WARN_ON(obj->dma_buf); |
| 957 | |
Alan Cox | 62cb7011 | 2011-06-07 14:17:51 +0100 | [diff] [blame] | 958 | if (obj->filp) |
David Herrmann | 16d2831 | 2014-01-20 20:07:49 +0100 | [diff] [blame] | 959 | fput(obj->filp); |
David Herrmann | 7747234 | 2014-01-20 20:05:43 +0100 | [diff] [blame] | 960 | |
Christian König | 52791ee | 2019-08-11 10:06:32 +0200 | [diff] [blame] | 961 | dma_resv_fini(&obj->_resv); |
David Herrmann | 7747234 | 2014-01-20 20:05:43 +0100 | [diff] [blame] | 962 | drm_gem_free_mmap_offset(obj); |
Luca Barbieri | c3ae90c | 2010-02-09 05:49:11 +0000 | [diff] [blame] | 963 | } |
Daniel Vetter | fd632aa | 2010-04-09 19:05:05 +0000 | [diff] [blame] | 964 | EXPORT_SYMBOL(drm_gem_object_release); |
Luca Barbieri | c3ae90c | 2010-02-09 05:49:11 +0000 | [diff] [blame] | 965 | |
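/*
 * Editorial sketch: the usual shape of a driver's &drm_gem_object_funcs.free
 * hook, which pairs drm_gem_object_release() with freeing the embedding
 * structure ("struct my_bo" and my_gem_free() are hypothetical names):
 */
static void my_gem_free(struct drm_gem_object *obj)
{
        struct my_bo *bo = container_of(obj, struct my_bo, base);

        drm_gem_object_release(obj);    /* inverse of drm_gem_object_init() */
        kfree(bo);
}
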
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 966 | /** |
Daniel Vetter | 89d61fc | 2014-01-21 12:39:00 +0100 | [diff] [blame] | 967 | * drm_gem_object_free - free a GEM object |
| 968 | * @kref: kref of the object to free |
| 969 | * |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 970 | * Called after the last reference to the object has been lost. |
| 971 | * |
| 972 | * Frees the object. |
| 973 | */ |
| 974 | void |
| 975 | drm_gem_object_free(struct kref *kref) |
| 976 | { |
Daniel Vetter | 6ff774b | 2015-10-15 09:36:26 +0200 | [diff] [blame] | 977 | struct drm_gem_object *obj = |
| 978 | container_of(kref, struct drm_gem_object, refcount); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 979 | |
Thomas Zimmermann | d693def | 2020-09-23 12:21:59 +0200 | [diff] [blame] | 980 | if (WARN_ON(!obj->funcs->free)) |
| 981 | return; |
| 982 | |
| 983 | obj->funcs->free(obj); |
Eric Anholt | 673a394 | 2008-07-30 12:06:12 -0700 | [diff] [blame] | 984 | } |
| 985 | EXPORT_SYMBOL(drm_gem_object_free); |
| 986 | |
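/*
 * Editorial sketch: drm_gem_object_free() only dispatches to the object's
 * &drm_gem_object_funcs.free hook, so a driver wires it up at object
 * creation time (my_* names are hypothetical, continuing the sketch above):
 */
static const struct drm_gem_object_funcs my_gem_funcs = {
        .free = my_gem_free,
};

/* ...and, before calling drm_gem_object_init(): bo->base.funcs = &my_gem_funcs; */
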
Daniel Vetter | df2e090 | 2015-10-22 19:11:29 +0200 | [diff] [blame] | 987 | /** |
Emil Velikov | eecd7fd | 2020-05-15 10:50:51 +0100 | [diff] [blame] | 988 | * drm_gem_object_put_locked - release a GEM buffer object reference |
Daniel Vetter | 9f0ba53 | 2016-05-02 10:40:51 +0200 | [diff] [blame] | 989 | * @obj: GEM buffer object |
| 990 | * |
Daniel Vetter | 940eba2 | 2017-01-25 07:26:46 +0100 | [diff] [blame] | 991 | * This releases a reference to @obj. Callers must hold the |
| 992 | * &drm_device.struct_mutex lock when calling this function, even when the |
| 993 | * driver doesn't use &drm_device.struct_mutex for anything. |
Daniel Vetter | 9f0ba53 | 2016-05-02 10:40:51 +0200 | [diff] [blame] | 994 | * |
| 995 | * For drivers not encumbered with legacy locking use |
Emil Velikov | be6ee10 | 2020-05-15 10:50:53 +0100 | [diff] [blame] | 996 | * drm_gem_object_put() instead. |
Daniel Vetter | 9f0ba53 | 2016-05-02 10:40:51 +0200 | [diff] [blame] | 997 | */ |
| 998 | void |
Emil Velikov | eecd7fd | 2020-05-15 10:50:51 +0100 | [diff] [blame] | 999 | drm_gem_object_put_locked(struct drm_gem_object *obj) |
Daniel Vetter | 9f0ba53 | 2016-05-02 10:40:51 +0200 | [diff] [blame] | 1000 | { |
| 1001 | if (obj) { |
| 1002 | WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); |
| 1003 | |
| 1004 | kref_put(&obj->refcount, drm_gem_object_free); |
| 1005 | } |
| 1006 | } |
Emil Velikov | eecd7fd | 2020-05-15 10:50:51 +0100 | [diff] [blame] | 1007 | EXPORT_SYMBOL(drm_gem_object_put_locked); |
Daniel Vetter | 9f0ba53 | 2016-05-02 10:40:51 +0200 | [diff] [blame] | 1008 | |
| 1009 | /** |
Daniel Vetter | df2e090 | 2015-10-22 19:11:29 +0200 | [diff] [blame] | 1010 | * drm_gem_vm_open - vma->vm_ops->open implementation for GEM |
| 1011 | * @vma: VM area structure |
| 1012 | * |
| 1013 | * This function implements the #vm_operations_struct open() callback for GEM |
| 1014 | * drivers. This must be used together with drm_gem_vm_close(). |
| 1015 | */ |
Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1016 | void drm_gem_vm_open(struct vm_area_struct *vma) |
| 1017 | { |
| 1018 | struct drm_gem_object *obj = vma->vm_private_data; |
| 1019 | |
Thierry Reding | e6b6271 | 2017-02-28 15:46:41 +0100 | [diff] [blame] | 1020 | drm_gem_object_get(obj); |
Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1021 | } |
| 1022 | EXPORT_SYMBOL(drm_gem_vm_open); |
| 1023 | |
Daniel Vetter | df2e090 | 2015-10-22 19:11:29 +0200 | [diff] [blame] | 1024 | /** |
| 1025 | * drm_gem_vm_close - vma->vm_ops->close implementation for GEM |
| 1026 | * @vma: VM area structure |
| 1027 | * |
| 1028 | * This function implements the #vm_operations_struct close() callback for GEM |
| 1029 | * drivers. This must be used together with drm_gem_vm_open(). |
| 1030 | */ |
Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1031 | void drm_gem_vm_close(struct vm_area_struct *vma) |
| 1032 | { |
| 1033 | struct drm_gem_object *obj = vma->vm_private_data; |
Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1034 | |
Emil Velikov | be6ee10 | 2020-05-15 10:50:53 +0100 | [diff] [blame] | 1035 | drm_gem_object_put(obj); |
Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1036 | } |
| 1037 | EXPORT_SYMBOL(drm_gem_vm_close); |
| 1038 | |
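/*
 * Editorial sketch: a driver with a fault handler typically pairs it with
 * these two helpers so that every VMA, however it was created, holds its
 * own object reference (my_gem_fault is hypothetical):
 */
static const struct vm_operations_struct my_gem_vm_ops = {
        .fault = my_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
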
Laurent Pinchart | 1c5aafa | 2013-04-16 14:14:52 +0200 | [diff] [blame] | 1039 | /** |
| 1040 | * drm_gem_mmap_obj - memory map a GEM object |
| 1041 | * @obj: the GEM object to map |
| 1042 | * @obj_size: the object size to be mapped, in bytes |
| 1043 | * @vma: VMA for the area to be mapped |
| 1044 | * |
Thomas Zimmermann | d693def | 2020-09-23 12:21:59 +0200 | [diff] [blame] | 1045 | * Set up the VMA to prepare mapping of the GEM object using the GEM object's |
| 1046 | * vm_ops. Depending on their requirements, GEM objects can either |
| 1047 | * provide a fault handler in their vm_ops (in which case any accesses to |
Laurent Pinchart | 1c5aafa | 2013-04-16 14:14:52 +0200 | [diff] [blame] | 1048 | * the object will be trapped, to perform migration, GTT binding, surface |
| 1049 | * register allocation, or performance monitoring), or mmap the buffer memory |
| 1050 | * synchronously after calling drm_gem_mmap_obj. |
| 1051 | * |
| 1052 | * This function is mainly intended to implement the DMABUF mmap operation, when |
| 1053 | * the GEM object is not looked up based on its fake offset. To implement the |
| 1054 | * DRM mmap operation, drivers should use the drm_gem_mmap() function. |
| 1055 | * |
David Herrmann | ca481c9 | 2013-08-25 18:28:58 +0200 | [diff] [blame] | 1056 | * drm_gem_mmap_obj() assumes the user is granted access to the buffer while |
| 1057 | * drm_gem_mmap() prevents unprivileged users from mapping random objects. So |
| 1058 | * callers must verify access restrictions before calling this helper. |
| 1059 | * |
Laurent Pinchart | 1c5aafa | 2013-04-16 14:14:52 +0200 | [diff] [blame] | 1060 | * Returns 0 on success or -EINVAL if the object size is smaller than the VMA |
Thomas Zimmermann | d693def | 2020-09-23 12:21:59 +0200 | [diff] [blame] | 1061 | * size, or if no vm_ops are provided. |
Laurent Pinchart | 1c5aafa | 2013-04-16 14:14:52 +0200 | [diff] [blame] | 1062 | */ |
| 1063 | int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, |
| 1064 | struct vm_area_struct *vma) |
| 1065 | { |
Gerd Hoffmann | c40069c | 2019-10-16 13:51:53 +0200 | [diff] [blame] | 1066 | int ret; |
Laurent Pinchart | 1c5aafa | 2013-04-16 14:14:52 +0200 | [diff] [blame] | 1067 | |
| 1068 | /* Check for valid size. */ |
| 1069 | if (obj_size < vma->vm_end - vma->vm_start) |
| 1070 | return -EINVAL; |
| 1071 | |
Laurent Pinchart | 1c5aafa | 2013-04-16 14:14:52 +0200 | [diff] [blame] | 1072 | /* Take a ref for this mapping of the object, so that the fault |
| 1073 | * handler can dereference the mmap offset's pointer to the object. |
| 1074 | * This reference is cleaned up by the corresponding vm_close |
| 1075 | * (which should happen whether the vma was created by this call, or |
| 1076 | * by a vm_open due to mremap or partial unmap or whatever). |
| 1077 | */ |
Thierry Reding | e6b6271 | 2017-02-28 15:46:41 +0100 | [diff] [blame] | 1078 | drm_gem_object_get(obj); |
Laurent Pinchart | 1c5aafa | 2013-04-16 14:14:52 +0200 | [diff] [blame] | 1079 | |
Daniel Vetter | f49a51b | 2020-10-27 22:49:22 +0100 | [diff] [blame] | 1080 | vma->vm_private_data = obj; |
| 1081 | |
Thomas Zimmermann | d693def | 2020-09-23 12:21:59 +0200 | [diff] [blame] | 1082 | if (obj->funcs->mmap) { |
Gerd Hoffmann | 9786b65 | 2019-11-13 14:56:12 +0100 | [diff] [blame] | 1083 | ret = obj->funcs->mmap(obj, vma); |
| 1084 | if (ret) { |
Emil Velikov | be6ee10 | 2020-05-15 10:50:53 +0100 | [diff] [blame] | 1085 | drm_gem_object_put(obj); |
Gerd Hoffmann | 9786b65 | 2019-11-13 14:56:12 +0100 | [diff] [blame] | 1086 | return ret; |
| 1087 | } |
| 1088 | WARN_ON(!(vma->vm_flags & VM_DONTEXPAND)); |
| 1089 | } else { |
Thomas Zimmermann | d693def | 2020-09-23 12:21:59 +0200 | [diff] [blame] | 1090 | if (obj->funcs->vm_ops) |
Gerd Hoffmann | 9786b65 | 2019-11-13 14:56:12 +0100 | [diff] [blame] | 1091 | vma->vm_ops = obj->funcs->vm_ops; |
Gerd Hoffmann | 9786b65 | 2019-11-13 14:56:12 +0100 | [diff] [blame] | 1092 | else { |
Emil Velikov | be6ee10 | 2020-05-15 10:50:53 +0100 | [diff] [blame] | 1093 | drm_gem_object_put(obj); |
Gerd Hoffmann | 9786b65 | 2019-11-13 14:56:12 +0100 | [diff] [blame] | 1094 | return -EINVAL; |
| 1095 | } |
| 1096 | |
| 1097 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; |
| 1098 | vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
| 1099 | vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); |
| 1100 | } |
| 1101 | |
Laurent Pinchart | 1c5aafa | 2013-04-16 14:14:52 +0200 | [diff] [blame] | 1102 | return 0; |
| 1103 | } |
| 1104 | EXPORT_SYMBOL(drm_gem_mmap_obj); |
Jesse Barnes | ab00b3e | 2009-02-11 14:01:46 -0800 | [diff] [blame] | 1105 | |
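/*
 * Editorial sketch of the DMABUF use case mentioned above: mapping a
 * GEM-backed dma-buf directly, without the fake-offset lookup. The exporter
 * is assumed to have verified access already; my_gem_dmabuf_mmap() is a
 * hypothetical callback.
 */
static int my_gem_dmabuf_mmap(struct dma_buf *dma_buf,
                              struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = dma_buf->priv;

        return drm_gem_mmap_obj(obj, obj->size, vma);
}
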
Jesse Barnes | a2c0a97 | 2008-11-05 10:31:53 -0800 | [diff] [blame] | 1106 | /** |
| 1107 | * drm_gem_mmap - memory map routine for GEM objects |
| 1108 | * @filp: DRM file pointer |
| 1109 | * @vma: VMA for the area to be mapped |
| 1110 | * |
| 1111 | * If a driver supports GEM object mapping, mmap calls on the DRM file |
| 1112 | * descriptor will end up here. |
| 1113 | * |
Laurent Pinchart | 1c5aafa | 2013-04-16 14:14:52 +0200 | [diff] [blame] | 1114 | * Look up the GEM object based on the offset passed in (vma->vm_pgoff will |
Jesse Barnes | a2c0a97 | 2008-11-05 10:31:53 -0800 | [diff] [blame] | 1115 | * contain the fake offset we created when the GTT map ioctl was called on |
Laurent Pinchart | 1c5aafa | 2013-04-16 14:14:52 +0200 | [diff] [blame] | 1116 | * the object) and map it with a call to drm_gem_mmap_obj(). |
David Herrmann | ca481c9 | 2013-08-25 18:28:58 +0200 | [diff] [blame] | 1117 | * |
| 1118 | * If the caller is not granted access to the buffer object, the mmap will fail |
| 1119 | * with -EACCES. Please see the vma manager for more information. |
Jesse Barnes | a2c0a97 | 2008-11-05 10:31:53 -0800 | [diff] [blame] | 1120 | */ |
| 1121 | int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) |
| 1122 | { |
| 1123 | struct drm_file *priv = filp->private_data; |
| 1124 | struct drm_device *dev = priv->minor->dev; |
Daniel Vetter | 2225cfe | 2015-10-15 11:33:43 +0200 | [diff] [blame] | 1125 | struct drm_gem_object *obj = NULL; |
David Herrmann | 0de2397 | 2013-07-24 21:07:52 +0200 | [diff] [blame] | 1126 | struct drm_vma_offset_node *node; |
David Herrmann | a8469aa | 2014-01-20 20:15:38 +0100 | [diff] [blame] | 1127 | int ret; |
Jesse Barnes | a2c0a97 | 2008-11-05 10:31:53 -0800 | [diff] [blame] | 1128 | |
Daniel Vetter | c07dcd6 | 2017-08-02 13:56:02 +0200 | [diff] [blame] | 1129 | if (drm_dev_is_unplugged(dev)) |
Dave Airlie | 2c07a21 | 2012-02-20 14:18:07 +0000 | [diff] [blame] | 1130 | return -ENODEV; |
| 1131 | |
Daniel Vetter | 2225cfe | 2015-10-15 11:33:43 +0200 | [diff] [blame] | 1132 | drm_vma_offset_lock_lookup(dev->vma_offset_manager); |
| 1133 | node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, |
| 1134 | vma->vm_pgoff, |
| 1135 | vma_pages(vma)); |
| 1136 | if (likely(node)) { |
| 1137 | obj = container_of(node, struct drm_gem_object, vma_node); |
| 1138 | /* |
| 1139 | * When the object is being freed, after it hits 0-refcnt it |
| 1140 | * proceeds to tear down the object. In the process it will |
| 1141 | * attempt to remove the VMA offset and so acquire this |
| 1142 | * mgr->vm_lock. Therefore if we find an object with a 0-refcnt |
| 1143 | * that matches our range, we know it is in the process of being |
| 1144 | * destroyed and will be freed as soon as we release the lock - |
| 1145 | * so we have to check for the 0-refcnted object and treat it as |
| 1146 | * invalid. |
| 1147 | */ |
| 1148 | if (!kref_get_unless_zero(&obj->refcount)) |
| 1149 | obj = NULL; |
| 1150 | } |
| 1151 | drm_vma_offset_unlock_lookup(dev->vma_offset_manager); |
Jesse Barnes | a2c0a97 | 2008-11-05 10:31:53 -0800 | [diff] [blame] | 1152 | |
Daniel Vetter | 2225cfe | 2015-10-15 11:33:43 +0200 | [diff] [blame] | 1153 | if (!obj) |
Daniel Vetter | 197633b | 2014-09-23 15:46:48 +0200 | [diff] [blame] | 1154 | return -EINVAL; |
Daniel Vetter | 2225cfe | 2015-10-15 11:33:43 +0200 | [diff] [blame] | 1155 | |
David Herrmann | d9a1f0b | 2016-09-01 14:48:33 +0200 | [diff] [blame] | 1156 | if (!drm_vma_node_is_allowed(node, priv)) { |
Emil Velikov | be6ee10 | 2020-05-15 10:50:53 +0100 | [diff] [blame] | 1157 | drm_gem_object_put(obj); |
David Herrmann | ca481c9 | 2013-08-25 18:28:58 +0200 | [diff] [blame] | 1158 | return -EACCES; |
Jesse Barnes | a2c0a97 | 2008-11-05 10:31:53 -0800 | [diff] [blame] | 1159 | } |
| 1160 | |
Chris Wilson | 3e977ac | 2018-07-12 19:53:13 +0100 | [diff] [blame] | 1161 | if (node->readonly) { |
| 1162 | if (vma->vm_flags & VM_WRITE) { |
Emil Velikov | be6ee10 | 2020-05-15 10:50:53 +0100 | [diff] [blame] | 1163 | drm_gem_object_put(obj); |
Chris Wilson | 3e977ac | 2018-07-12 19:53:13 +0100 | [diff] [blame] | 1164 | return -EINVAL; |
| 1165 | } |
| 1166 | |
| 1167 | vma->vm_flags &= ~VM_MAYWRITE; |
| 1168 | } |
| 1169 | |
Daniel Vetter | 2225cfe | 2015-10-15 11:33:43 +0200 | [diff] [blame] | 1170 | ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, |
| 1171 | vma); |
Jesse Barnes | a2c0a97 | 2008-11-05 10:31:53 -0800 | [diff] [blame] | 1172 | |
Emil Velikov | be6ee10 | 2020-05-15 10:50:53 +0100 | [diff] [blame] | 1173 | drm_gem_object_put(obj); |
Jesse Barnes | a2c0a97 | 2008-11-05 10:31:53 -0800 | [diff] [blame] | 1174 | |
| 1175 | return ret; |
| 1176 | } |
| 1177 | EXPORT_SYMBOL(drm_gem_mmap); |
Noralf Trønnes | 45d58b4 | 2017-11-07 20:13:40 +0100 | [diff] [blame] | 1178 | |
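/*
 * Editorial sketch: drm_gem_mmap() is reached through the driver's
 * &file_operations.mmap hook, most simply via the DEFINE_DRM_GEM_FOPS()
 * helper from <drm/drm_gem.h> (my_driver_fops is a hypothetical name):
 */
DEFINE_DRM_GEM_FOPS(my_driver_fops);
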
| 1179 | void drm_gem_print_info(struct drm_printer *p, unsigned int indent, |
| 1180 | const struct drm_gem_object *obj) |
| 1181 | { |
| 1182 | drm_printf_indent(p, indent, "name=%d\n", obj->name); |
| 1183 | drm_printf_indent(p, indent, "refcount=%u\n", |
| 1184 | kref_read(&obj->refcount)); |
| 1185 | drm_printf_indent(p, indent, "start=%08lx\n", |
| 1186 | drm_vma_node_start(&obj->vma_node)); |
| 1187 | drm_printf_indent(p, indent, "size=%zu\n", obj->size); |
| 1188 | drm_printf_indent(p, indent, "imported=%s\n", |
| 1189 | obj->import_attach ? "yes" : "no"); |
| 1190 | |
Thomas Zimmermann | d693def | 2020-09-23 12:21:59 +0200 | [diff] [blame] | 1191 | if (obj->funcs->print_info) |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1192 | obj->funcs->print_info(p, indent, obj); |
Noralf Trønnes | 45d58b4 | 2017-11-07 20:13:40 +0100 | [diff] [blame] | 1193 | } |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1194 | |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1195 | int drm_gem_pin(struct drm_gem_object *obj) |
| 1196 | { |
Thomas Zimmermann | d693def | 2020-09-23 12:21:59 +0200 | [diff] [blame] | 1197 | if (obj->funcs->pin) |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1198 | return obj->funcs->pin(obj); |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1199 | else |
| 1200 | return 0; |
| 1201 | } |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1202 | |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1203 | void drm_gem_unpin(struct drm_gem_object *obj) |
| 1204 | { |
Thomas Zimmermann | d693def | 2020-09-23 12:21:59 +0200 | [diff] [blame] | 1205 | if (obj->funcs->unpin) |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1206 | obj->funcs->unpin(obj); |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1207 | } |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1208 | |
Thomas Zimmermann | a745fb1 | 2020-11-03 10:30:12 +0100 | [diff] [blame] | 1209 | int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1210 | { |
Thomas Zimmermann | 49a3f51 | 2020-11-03 10:30:11 +0100 | [diff] [blame] | 1211 | int ret; |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1212 | |
Thomas Zimmermann | 49a3f51 | 2020-11-03 10:30:11 +0100 | [diff] [blame] | 1213 | if (!obj->funcs->vmap) |
Thomas Zimmermann | a745fb1 | 2020-11-03 10:30:12 +0100 | [diff] [blame] | 1214 | return -EOPNOTSUPP; |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1215 | |
Thomas Zimmermann | a745fb1 | 2020-11-03 10:30:12 +0100 | [diff] [blame] | 1216 | ret = obj->funcs->vmap(obj, map); |
Thomas Zimmermann | 49a3f51 | 2020-11-03 10:30:11 +0100 | [diff] [blame] | 1217 | if (ret) |
Thomas Zimmermann | a745fb1 | 2020-11-03 10:30:12 +0100 | [diff] [blame] | 1218 | return ret; |
| 1219 | else if (dma_buf_map_is_null(map)) |
| 1220 | return -ENOMEM; |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1221 | |
Thomas Zimmermann | a745fb1 | 2020-11-03 10:30:12 +0100 | [diff] [blame] | 1222 | return 0; |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1223 | } |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1224 | |
Thomas Zimmermann | a745fb1 | 2020-11-03 10:30:12 +0100 | [diff] [blame] | 1225 | void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1226 | { |
Thomas Zimmermann | a745fb1 | 2020-11-03 10:30:12 +0100 | [diff] [blame] | 1227 | if (dma_buf_map_is_null(map)) |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1228 | return; |
| 1229 | |
Thomas Zimmermann | d693def | 2020-09-23 12:21:59 +0200 | [diff] [blame] | 1230 | if (obj->funcs->vunmap) |
Thomas Zimmermann | a745fb1 | 2020-11-03 10:30:12 +0100 | [diff] [blame] | 1231 | obj->funcs->vunmap(obj, map); |
| 1232 | |
| 1233 | /* Always set the mapping to NULL. Callers may rely on this. */ |
| 1234 | dma_buf_map_clear(map); |
Noralf Trønnes | b39b539 | 2018-11-10 15:56:45 +0100 | [diff] [blame] | 1235 | } |
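
/*
 * Editorial sketch: typical use of the vmap/vunmap pair, assuming the
 * dma_buf_map_memcpy_to() helper from <linux/dma-buf-map.h>. The mapping may
 * sit in system or I/O memory, so it is accessed through the dma_buf_map
 * helpers rather than a raw pointer (my_upload() is hypothetical):
 */
static int my_upload(struct drm_gem_object *obj, const void *data, size_t len)
{
        struct dma_buf_map map;
        int ret;

        ret = drm_gem_vmap(obj, &map);
        if (ret)
                return ret;

        dma_buf_map_memcpy_to(&map, data, len); /* handles both memory types */

        drm_gem_vunmap(obj, &map);
        return 0;
}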
Eric Anholt | 7edc3e3 | 2019-03-08 08:17:13 -0800 | [diff] [blame] | 1236 | |
| 1237 | /** |
| 1238 | * drm_gem_lock_reservations - Sets up the ww context and acquires |
| 1239 | * the lock on an array of GEM objects. |
| 1240 | * @objs: drm_gem_objects to lock |
| 1241 | * @count: Number of objects in @objs |
| 1242 | * @acquire_ctx: struct ww_acquire_ctx that will be initialized as |
| 1243 | * part of tracking this set of locked reservations. |
| 1244 | * |
| 1245 | * Once you've locked your reservations, you'll want to set up space |
| 1246 | * for your shared fences (if applicable), submit your job, then call |
| 1247 | * drm_gem_unlock_reservations(). |
| 1248 | */ |
| 1250 | int |
| 1251 | drm_gem_lock_reservations(struct drm_gem_object **objs, int count, |
| 1252 | struct ww_acquire_ctx *acquire_ctx) |
| 1253 | { |
| 1254 | int contended = -1; |
| 1255 | int i, ret; |
| 1256 | |
| 1257 | ww_acquire_init(acquire_ctx, &reservation_ww_class); |
| 1258 | |
| 1259 | retry: |
| 1260 | if (contended != -1) { |
| 1261 | struct drm_gem_object *obj = objs[contended]; |
| 1262 | |
Christian König | 52791ee | 2019-08-11 10:06:32 +0200 | [diff] [blame] | 1263 | ret = dma_resv_lock_slow_interruptible(obj->resv, |
Christian König | 0dbd555 | 2019-07-31 09:41:50 +0200 | [diff] [blame] | 1264 | acquire_ctx); |
Eric Anholt | 7edc3e3 | 2019-03-08 08:17:13 -0800 | [diff] [blame] | 1265 | if (ret) { |
| 1266 | ww_acquire_done(acquire_ctx); |
| 1267 | return ret; |
| 1268 | } |
| 1269 | } |
| 1270 | |
| 1271 | for (i = 0; i < count; i++) { |
| 1272 | if (i == contended) |
| 1273 | continue; |
| 1274 | |
Christian König | 52791ee | 2019-08-11 10:06:32 +0200 | [diff] [blame] | 1275 | ret = dma_resv_lock_interruptible(objs[i]->resv, |
Christian König | 0dbd555 | 2019-07-31 09:41:50 +0200 | [diff] [blame] | 1276 | acquire_ctx); |
Eric Anholt | 7edc3e3 | 2019-03-08 08:17:13 -0800 | [diff] [blame] | 1277 | if (ret) { |
| 1278 | int j; |
| 1279 | |
| 1280 | for (j = 0; j < i; j++) |
Christian König | 52791ee | 2019-08-11 10:06:32 +0200 | [diff] [blame] | 1281 | dma_resv_unlock(objs[j]->resv); |
Eric Anholt | 7edc3e3 | 2019-03-08 08:17:13 -0800 | [diff] [blame] | 1282 | |
| 1283 | if (contended != -1 && contended >= i) |
Christian König | 52791ee | 2019-08-11 10:06:32 +0200 | [diff] [blame] | 1284 | dma_resv_unlock(objs[contended]->resv); |
Eric Anholt | 7edc3e3 | 2019-03-08 08:17:13 -0800 | [diff] [blame] | 1285 | |
| 1286 | if (ret == -EDEADLK) { |
| 1287 | contended = i; |
| 1288 | goto retry; |
| 1289 | } |
| 1290 | |
| 1291 | ww_acquire_done(acquire_ctx); |
| 1292 | return ret; |
| 1293 | } |
| 1294 | } |
| 1295 | |
| 1296 | ww_acquire_done(acquire_ctx); |
| 1297 | |
| 1298 | return 0; |
| 1299 | } |
| 1300 | EXPORT_SYMBOL(drm_gem_lock_reservations); |
| 1301 | |
| 1302 | void |
| 1303 | drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, |
| 1304 | struct ww_acquire_ctx *acquire_ctx) |
| 1305 | { |
| 1306 | int i; |
| 1307 | |
| 1308 | for (i = 0; i < count; i++) |
Christian König | 52791ee | 2019-08-11 10:06:32 +0200 | [diff] [blame] | 1309 | dma_resv_unlock(objs[i]->resv); |
Eric Anholt | 7edc3e3 | 2019-03-08 08:17:13 -0800 | [diff] [blame] | 1310 | |
| 1311 | ww_acquire_fini(acquire_ctx); |
| 1312 | } |
| 1313 | EXPORT_SYMBOL(drm_gem_unlock_reservations); |
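
/*
 * Editorial sketch of the lock/submit/unlock pattern described above
 * ("struct my_job" and its fields are hypothetical):
 */
static int my_submit(struct my_job *job, struct ww_acquire_ctx *ctx)
{
        int ret;

        ret = drm_gem_lock_reservations(job->bos, job->bo_count, ctx);
        if (ret)
                return ret;

        /* reserve shared-fence slots and queue the job here */

        drm_gem_unlock_reservations(job->bos, job->bo_count, ctx);
        return 0;
}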
Eric Anholt | 5d5a179 | 2019-04-01 15:26:33 -0700 | [diff] [blame] | 1314 | |
| 1315 | /** |
| 1316 | * drm_gem_fence_array_add - Adds the fence to an array of fences to be |
| 1317 | * waited on, deduplicating fences from the same context. |
| 1318 | * |
Sean Paul | 761e473 | 2019-04-24 16:49:09 -0400 | [diff] [blame] | 1319 | * @fence_array: array of dma_fence * for the job to block on. |
| 1320 | * @fence: the dma_fence to add to the list of dependencies. |
Eric Anholt | 5d5a179 | 2019-04-01 15:26:33 -0700 | [diff] [blame] | 1321 | * |
| 1322 | * Returns: |
| 1323 | * 0 on success, or an error on failing to expand the array. |
| 1324 | */ |
| 1325 | int drm_gem_fence_array_add(struct xarray *fence_array, |
| 1326 | struct dma_fence *fence) |
| 1327 | { |
| 1328 | struct dma_fence *entry; |
| 1329 | unsigned long index; |
| 1330 | u32 id = 0; |
| 1331 | int ret; |
| 1332 | |
| 1333 | if (!fence) |
| 1334 | return 0; |
| 1335 | |
| 1336 | /* Deduplicate if we already depend on a fence from the same context. |
| 1337 | * This lets the size of the array of deps scale with the number of |
| 1338 | * engines involved, rather than the number of BOs. |
| 1339 | */ |
| 1340 | xa_for_each(fence_array, index, entry) { |
| 1341 | if (entry->context != fence->context) |
| 1342 | continue; |
| 1343 | |
| 1344 | if (dma_fence_is_later(fence, entry)) { |
| 1345 | dma_fence_put(entry); |
| 1346 | xa_store(fence_array, index, fence, GFP_KERNEL); |
| 1347 | } else { |
| 1348 | dma_fence_put(fence); |
| 1349 | } |
| 1350 | return 0; |
| 1351 | } |
| 1352 | |
| 1353 | ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL); |
| 1354 | if (ret != 0) |
| 1355 | dma_fence_put(fence); |
| 1356 | |
| 1357 | return ret; |
| 1358 | } |
| 1359 | EXPORT_SYMBOL(drm_gem_fence_array_add); |
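
/*
 * Editorial sketch: the fence array must be an allocating xarray, set up with
 * xa_init_flags(&job->deps, XA_FLAGS_ALLOC) at job creation. On success the
 * array takes over the caller's fence reference (my_*/job naming is
 * hypothetical):
 */
static int my_add_in_fence(struct my_job *job, struct dma_fence *in_fence)
{
        /* the array now owns this reference, whether stored or deduplicated */
        return drm_gem_fence_array_add(&job->deps, dma_fence_get(in_fence));
}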
| 1360 | |
| 1361 | /** |
| 1362 | * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked |
| 1363 | * in the GEM object's reservation object to an array of dma_fences for use in |
| 1364 | * scheduling a rendering job. |
| 1365 | * @fence_array: array of dma_fence * for the job to block on. |
| 1366 | * @obj: the gem object to add new dependencies from. |
| 1367 | * @write: whether the job might write the object (so we need to depend on |
| 1368 | * shared fences in the reservation object). |
| 1369 | * |
| 1370 | * This should be called after drm_gem_lock_reservations() on your array of |
| 1371 | * GEM objects used in the job but before updating the reservations with your |
| 1372 | * own fences. |
| 1373 | */ |
| 1375 | int drm_gem_fence_array_add_implicit(struct xarray *fence_array, |
| 1376 | struct drm_gem_object *obj, |
| 1377 | bool write) |
| 1378 | { |
| 1379 | int ret; |
| 1380 | struct dma_fence **fences; |
| 1381 | unsigned int i, fence_count; |
| 1382 | |
| 1383 | if (!write) { |
| 1384 | struct dma_fence *fence = |
Christian König | 52791ee | 2019-08-11 10:06:32 +0200 | [diff] [blame] | 1385 | dma_resv_get_excl_rcu(obj->resv); |
Eric Anholt | 5d5a179 | 2019-04-01 15:26:33 -0700 | [diff] [blame] | 1386 | |
| 1387 | return drm_gem_fence_array_add(fence_array, fence); |
| 1388 | } |
| 1389 | |
Christian König | 52791ee | 2019-08-11 10:06:32 +0200 | [diff] [blame] | 1390 | ret = dma_resv_get_fences_rcu(obj->resv, NULL, |
Eric Anholt | 5d5a179 | 2019-04-01 15:26:33 -0700 | [diff] [blame] | 1391 | &fence_count, &fences); |
| 1392 | if (ret || !fence_count) |
| 1393 | return ret; |
| 1394 | |
| 1395 | for (i = 0; i < fence_count; i++) { |
| 1396 | ret = drm_gem_fence_array_add(fence_array, fences[i]); |
| 1397 | if (ret) |
| 1398 | break; |
| 1399 | } |
| 1400 | |
| 1401 | for (; i < fence_count; i++) |
| 1402 | dma_fence_put(fences[i]); |
| 1403 | kfree(fences); |
| 1404 | return ret; |
| 1405 | } |
| 1406 | EXPORT_SYMBOL(drm_gem_fence_array_add_implicit); |
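
/*
 * Editorial sketch: collecting implicit dependencies for every BO in a job,
 * once the reservations are locked (my_* names continue the sketches above):
 */
static int my_collect_implicit_deps(struct my_job *job)
{
        int i, ret;

        for (i = 0; i < job->bo_count; i++) {
                ret = drm_gem_fence_array_add_implicit(&job->deps,
                                                       job->bos[i],
                                                       job->bo_write[i]);
                if (ret)
                        return ret;
        }

        return 0;
}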