/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */
28
/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
 * persistent objects that contain an optional fence. The fence can be updated
 * with a new fence, or be NULL.
 *
 * syncobj's can be waited upon, where it will wait for the underlying
 * fence.
 *
 * syncobj's can be exported to fd's and back, these fd's are opaque and
 * have no other use case, except passing the syncobj between processes.
 *
 * Their primary use-case is to implement Vulkan fences and semaphores.
 *
 * syncobj have a kref reference count, but also have an optional file.
 * The file is only created once the syncobj is exported.
 * The file takes a reference on the kref.
 */
48
49#include <drm/drmP.h>
50#include <linux/file.h>
51#include <linux/fs.h>
52#include <linux/anon_inodes.h>
Dave Airlie3ee45a32017-04-26 04:09:02 +010053#include <linux/sync_file.h>
Jason Ekstrande7aca5032017-08-25 10:52:24 -070054#include <linux/sched/signal.h>
Dave Airliee9083422017-04-04 13:26:24 +100055
56#include "drm_internal.h"
57#include <drm/drm_syncobj.h>
58
59/**
60 * drm_syncobj_find - lookup and reference a sync object.
61 * @file_private: drm file private pointer
62 * @handle: sync object handle to lookup.
63 *
Daniel Vetter924fe8d2017-12-14 21:30:52 +010064 * Returns a reference to the syncobj pointed to by handle or NULL. The
65 * reference must be released by calling drm_syncobj_put().
Dave Airliee9083422017-04-04 13:26:24 +100066 */
67struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
68 u32 handle)
69{
70 struct drm_syncobj *syncobj;
71
72 spin_lock(&file_private->syncobj_table_lock);
73
74 /* Check if we currently have a reference on the object */
75 syncobj = idr_find(&file_private->syncobj_idr, handle);
76 if (syncobj)
77 drm_syncobj_get(syncobj);
78
79 spin_unlock(&file_private->syncobj_table_lock);
80
81 return syncobj;
82}
83EXPORT_SYMBOL(drm_syncobj_find);
84
/* Append @cb with handler @func to the syncobj's callback list.
 * Caller must hold syncobj->lock.
 */
static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
					    struct drm_syncobj_cb *cb,
					    drm_syncobj_func_t func)
{
	cb->func = func;
	list_add_tail(&cb->node, &syncobj->cb_list);
}
92
/* Either take a reference to the syncobj's current fence (returns 1,
 * fence stored in *@fence) or, if no fence is attached yet, register
 * @cb/@func to run once a fence is installed (returns 0, *@fence NULL).
 * The lockless fast path is re-checked under syncobj->lock so a callback
 * is never added after a fence has already been set.
 */
static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						 struct dma_fence **fence,
						 struct drm_syncobj_cb *cb,
						 drm_syncobj_func_t func)
{
	int ret;

	*fence = drm_syncobj_fence_get(syncobj);
	if (*fence)
		return 1;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence) {
		*fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
								 lockdep_is_held(&syncobj->lock)));
		ret = 1;
	} else {
		*fence = NULL;
		drm_syncobj_add_callback_locked(syncobj, cb, func);
		ret = 0;
	}
	spin_unlock(&syncobj->lock);

	return ret;
}
122
/* Register @cb with handler @func on @syncobj, taking the syncobj lock. */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
			      struct drm_syncobj_cb *cb,
			      drm_syncobj_func_t func)
{
	spin_lock(&syncobj->lock);
	drm_syncobj_add_callback_locked(syncobj, cb, func);
	spin_unlock(&syncobj->lock);
}
Eric Anholt131280a2018-11-08 08:04:22 -0800131
/* Unregister @cb from @syncobj.  Safe even if the callback already fired:
 * drm_syncobj_replace_fence() uses list_del_init(), so a second
 * list_del_init() on the self-pointing node is a no-op.
 */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
				 struct drm_syncobj_cb *cb)
{
	spin_lock(&syncobj->lock);
	list_del_init(&cb->node);
	spin_unlock(&syncobj->lock);
}
139
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @point: timeline point
 * @fence: fence to install in sync file.
 *
 * This replaces the fence on a sync object, or a timeline point fence.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       u64 point,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct drm_syncobj_cb *cur, *tmp;

	/* Take our reference before publishing the new fence. */
	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	/* Run pending callbacks (registered waiters) now that a different
	 * fence is visible; callbacks run under syncobj->lock.
	 */
	if (fence != old_fence) {
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	/* Drop the reference previously held on the displaced fence. */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
176
/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign a already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, 0, fence);
	/* replace_fence took its own reference; drop ours. */
	dma_fence_put(fence);
}
190
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100191/**
192 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
193 * @file_private: drm file private pointer
194 * @handle: sync object handle to lookup.
Chunming Zhou0a6730e2018-08-30 14:48:29 +0800195 * @point: timeline point
Chunming Zhou871edc92018-10-17 15:03:18 +0800196 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100197 * @fence: out parameter for the fence
198 *
199 * This is just a convenience function that combines drm_syncobj_find() and
Eric Anholt131280a2018-11-08 08:04:22 -0800200 * drm_syncobj_fence_get().
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100201 *
202 * Returns 0 on success or a negative error value on failure. On success @fence
203 * contains a reference to the fence, which must be released by calling
204 * dma_fence_put().
205 */
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700206int drm_syncobj_find_fence(struct drm_file *file_private,
Chunming Zhou649fdce2018-10-15 16:55:47 +0800207 u32 handle, u64 point, u64 flags,
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700208 struct dma_fence **fence)
Dave Airliee9083422017-04-04 13:26:24 +1000209{
210 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
Eric Anholt131280a2018-11-08 08:04:22 -0800211 int ret = 0;
Dave Airliee9083422017-04-04 13:26:24 +1000212
Eric Anholt131280a2018-11-08 08:04:22 -0800213 if (!syncobj)
214 return -ENOENT;
215
216 *fence = drm_syncobj_fence_get(syncobj);
217 if (!*fence) {
218 ret = -EINVAL;
219 }
220 drm_syncobj_put(syncobj);
Dave Airliee9083422017-04-04 13:26:24 +1000221 return ret;
222}
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700223EXPORT_SYMBOL(drm_syncobj_find_fence);
Dave Airliee9083422017-04-04 13:26:24 +1000224
/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	/* Drop any fence still installed before the object goes away. */
	drm_syncobj_replace_fence(syncobj, 0, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
240
Marek Olšák1321fd22017-09-12 22:42:12 +0200241/**
242 * drm_syncobj_create - create a new syncobj
243 * @out_syncobj: returned syncobj
244 * @flags: DRM_SYNCOBJ_* flags
245 * @fence: if non-NULL, the syncobj will represent this fence
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100246 *
247 * This is the first function to create a sync object. After creating, drivers
248 * probably want to make it available to userspace, either through
249 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
250 *
251 * Returns 0 on success or a negative error value on failure.
Marek Olšák1321fd22017-09-12 22:42:12 +0200252 */
253int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
254 struct dma_fence *fence)
Dave Airliee9083422017-04-04 13:26:24 +1000255{
Dave Airliee9083422017-04-04 13:26:24 +1000256 struct drm_syncobj *syncobj;
257
258 syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
259 if (!syncobj)
260 return -ENOMEM;
261
262 kref_init(&syncobj->refcount);
Jason Ekstrand9c19fb12017-08-28 07:39:25 -0700263 INIT_LIST_HEAD(&syncobj->cb_list);
Eric Anholt131280a2018-11-08 08:04:22 -0800264 spin_lock_init(&syncobj->lock);
Dave Airliee9083422017-04-04 13:26:24 +1000265
Christian König86bbd892018-11-13 14:14:00 +0100266 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
267 drm_syncobj_assign_null_handle(syncobj);
Jason Ekstrand1fc08212017-08-25 10:52:25 -0700268
Marek Olšák1321fd22017-09-12 22:42:12 +0200269 if (fence)
Chunming Zhou9a09a422018-08-30 14:48:30 +0800270 drm_syncobj_replace_fence(syncobj, 0, fence);
Marek Olšák1321fd22017-09-12 22:42:12 +0200271
272 *out_syncobj = syncobj;
273 return 0;
274}
275EXPORT_SYMBOL(drm_syncobj_create);
276
277/**
278 * drm_syncobj_get_handle - get a handle from a syncobj
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100279 * @file_private: drm file private pointer
280 * @syncobj: Sync object to export
281 * @handle: out parameter with the new handle
282 *
283 * Exports a sync object created with drm_syncobj_create() as a handle on
284 * @file_private to userspace.
285 *
286 * Returns 0 on success or a negative error value on failure.
Marek Olšák1321fd22017-09-12 22:42:12 +0200287 */
288int drm_syncobj_get_handle(struct drm_file *file_private,
289 struct drm_syncobj *syncobj, u32 *handle)
290{
291 int ret;
292
293 /* take a reference to put in the idr */
294 drm_syncobj_get(syncobj);
295
Dave Airliee9083422017-04-04 13:26:24 +1000296 idr_preload(GFP_KERNEL);
297 spin_lock(&file_private->syncobj_table_lock);
298 ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
299 spin_unlock(&file_private->syncobj_table_lock);
300
301 idr_preload_end();
302
303 if (ret < 0) {
304 drm_syncobj_put(syncobj);
305 return ret;
306 }
307
308 *handle = ret;
309 return 0;
310}
Marek Olšák1321fd22017-09-12 22:42:12 +0200311EXPORT_SYMBOL(drm_syncobj_get_handle);
312
313static int drm_syncobj_create_as_handle(struct drm_file *file_private,
314 u32 *handle, uint32_t flags)
315{
316 int ret;
317 struct drm_syncobj *syncobj;
318
319 ret = drm_syncobj_create(&syncobj, flags, NULL);
320 if (ret)
321 return ret;
322
323 ret = drm_syncobj_get_handle(file_private, syncobj, handle);
324 drm_syncobj_put(syncobj);
325 return ret;
326}
Dave Airliee9083422017-04-04 13:26:24 +1000327
328static int drm_syncobj_destroy(struct drm_file *file_private,
329 u32 handle)
330{
331 struct drm_syncobj *syncobj;
332
333 spin_lock(&file_private->syncobj_table_lock);
334 syncobj = idr_remove(&file_private->syncobj_idr, handle);
335 spin_unlock(&file_private->syncobj_table_lock);
336
337 if (!syncobj)
338 return -EINVAL;
339
340 drm_syncobj_put(syncobj);
341 return 0;
342}
343
/* ->release for syncobj fds: drop the reference the file held on the
 * syncobj (taken in drm_syncobj_get_fd()).
 */
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}
351
/* File operations backing exported syncobj fds; only release is needed. */
static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
355
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100356/**
357 * drm_syncobj_get_fd - get a file descriptor from a syncobj
358 * @syncobj: Sync object to export
359 * @p_fd: out parameter with the new file descriptor
360 *
361 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
362 *
363 * Returns 0 on success or a negative error value on failure.
364 */
Marek Olšák684fd0a2017-09-12 22:42:13 +0200365int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
366{
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000367 struct file *file;
Marek Olšák684fd0a2017-09-12 22:42:13 +0200368 int fd;
369
370 fd = get_unused_fd_flags(O_CLOEXEC);
371 if (fd < 0)
372 return fd;
373
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000374 file = anon_inode_getfile("syncobj_file",
375 &drm_syncobj_file_fops,
376 syncobj, 0);
377 if (IS_ERR(file)) {
378 put_unused_fd(fd);
379 return PTR_ERR(file);
Marek Olšák684fd0a2017-09-12 22:42:13 +0200380 }
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000381
382 drm_syncobj_get(syncobj);
383 fd_install(fd, file);
384
Marek Olšák684fd0a2017-09-12 22:42:13 +0200385 *p_fd = fd;
386 return 0;
387}
388EXPORT_SYMBOL(drm_syncobj_get_fd);
389
Dave Airliee9083422017-04-04 13:26:24 +1000390static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
391 u32 handle, int *p_fd)
392{
393 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
394 int ret;
Dave Airliee9083422017-04-04 13:26:24 +1000395
396 if (!syncobj)
397 return -EINVAL;
398
Marek Olšák684fd0a2017-09-12 22:42:13 +0200399 ret = drm_syncobj_get_fd(syncobj, p_fd);
Dave Airliee9083422017-04-04 13:26:24 +1000400 drm_syncobj_put(syncobj);
401 return ret;
402}
403
/* Import a syncobj fd (created by drm_syncobj_get_fd()) and allocate a new
 * handle for it in @file_private's syncobj table.
 */
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct file *file;
	int ret;

	file = fget(fd);
	if (!file)
		return -EINVAL;

	/* Only fds backed by our anon-inode fops are syncobj fds. */
	if (file->f_op != &drm_syncobj_file_fops) {
		fput(file);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	/* idr_alloc() returns the new id (>= 1) on success, negative on error. */
	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fput(file);
	return ret;
}
439
Ville Syrjäläa32c94a2017-09-01 19:53:25 +0300440static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
441 int fd, int handle)
Dave Airlie3ee45a32017-04-26 04:09:02 +0100442{
443 struct dma_fence *fence = sync_file_get_fence(fd);
444 struct drm_syncobj *syncobj;
445
446 if (!fence)
447 return -EINVAL;
448
449 syncobj = drm_syncobj_find(file_private, handle);
450 if (!syncobj) {
451 dma_fence_put(fence);
452 return -ENOENT;
453 }
454
Chunming Zhou9a09a422018-08-30 14:48:30 +0800455 drm_syncobj_replace_fence(syncobj, 0, fence);
Dave Airlie3ee45a32017-04-26 04:09:02 +0100456 dma_fence_put(fence);
457 drm_syncobj_put(syncobj);
458 return 0;
459}
460
Ville Syrjäläa32c94a2017-09-01 19:53:25 +0300461static int drm_syncobj_export_sync_file(struct drm_file *file_private,
462 int handle, int *p_fd)
Dave Airlie3ee45a32017-04-26 04:09:02 +0100463{
464 int ret;
465 struct dma_fence *fence;
466 struct sync_file *sync_file;
467 int fd = get_unused_fd_flags(O_CLOEXEC);
468
469 if (fd < 0)
470 return fd;
471
Chunming Zhou649fdce2018-10-15 16:55:47 +0800472 ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
Dave Airlie3ee45a32017-04-26 04:09:02 +0100473 if (ret)
474 goto err_put_fd;
475
476 sync_file = sync_file_create(fence);
477
478 dma_fence_put(fence);
479
480 if (!sync_file) {
481 ret = -EINVAL;
482 goto err_put_fd;
483 }
484
485 fd_install(fd, sync_file->file);
486
487 *p_fd = fd;
488 return 0;
489err_put_fd:
490 put_unused_fd(fd);
491 return ret;
492}
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	/* base 1: handle 0 is never allocated and stays invalid */
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}
506
/* idr_for_each() callback: drop the table's reference on one syncobj. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}
515
/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
531
/* DRM_IOCTL_SYNCOBJ_CREATE: create a syncobj and return its handle. */
int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_private)
{
	struct drm_syncobj_create *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* DRM_SYNCOBJ_CREATE_SIGNALED is the only valid flag */
	if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
		return -EINVAL;

	return drm_syncobj_create_as_handle(file_private,
					    &args->handle, args->flags);
}
548
/* DRM_IOCTL_SYNCOBJ_DESTROY: drop the handle's table entry and reference. */
int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_private)
{
	struct drm_syncobj_destroy *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	/* make sure padding is empty */
	if (args->pad)
		return -EINVAL;
	return drm_syncobj_destroy(file_private, args->handle);
}
563
/* DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD: export a handle as either an opaque
 * syncobj fd (flags == 0) or a sync_file fd (EXPORT_SYNC_FILE flag).
 */
int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	/* Exactly one of the two export modes may be requested. */
	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}
587
/* DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE: import either an opaque syncobj fd
 * (flags == 0) or a sync_file fence into an existing handle
 * (IMPORT_SYNC_FILE flag).
 */
int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	/* Exactly one of the two import modes may be requested. */
	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}
Dave Airlie5e60a102017-08-25 10:52:22 -0700612
/* Per-syncobj bookkeeping for a wait: the waiting task, the fence being
 * waited on (if any yet), and the two callbacks used to wake the task —
 * one for the fence signaling, one for a fence first being attached.
 */
struct syncobj_wait_entry {
	struct task_struct *task;
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;
	struct drm_syncobj_cb syncobj_cb;
};
619
/* dma_fence callback: the fence signaled, wake the waiting task. */
static void syncobj_wait_fence_func(struct dma_fence *fence,
				    struct dma_fence_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, fence_cb);

	wake_up_process(wait->task);
}
628
/* syncobj callback: a fence was attached to a previously fence-less
 * syncobj.  Grab a reference to it and wake the waiter so it can start
 * waiting on the fence proper.
 */
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct drm_syncobj_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, syncobj_cb);

	/* This happens inside the syncobj lock */
	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
							      lockdep_is_held(&syncobj->lock)));
	wake_up_process(wait->task);
}
640
/* Wait on @count syncobjs for up to @timeout jiffies.  With WAIT_ALL, all
 * fences must signal; otherwise the first signaled one ends the wait and
 * its index is stored in *@idx.  With WAIT_FOR_SUBMIT, syncobjs without a
 * fence are waited on until a fence is attached; otherwise a missing
 * fence is -EINVAL.  Returns remaining jiffies (>= 0) on success, or
 * -EINVAL / -ETIME / -ERESTARTSYS / -ENOMEM on failure.
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Fast path: wait already satisfied without sleeping. */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called. So here if we fail to match signaled_count, we need to
	 * fallthough and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i) {
			/* Either fills in the fence or registers a callback
			 * that will fill it in once a fence is attached.
			 */
			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() failing means the fence
			 * signaled before the callback could be installed.
			 */
			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return timeout;
}
759
Dave Airlie5e60a102017-08-25 10:52:22 -0700760/**
761 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
762 *
763 * @timeout_nsec: timeout nsec component in ns, 0 for poll
764 *
765 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
766 */
767static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
768{
769 ktime_t abs_timeout, now;
770 u64 timeout_ns, timeout_jiffies64;
771
772 /* make 0 timeout means poll - absolute 0 doesn't seem valid */
773 if (timeout_nsec == 0)
774 return 0;
775
776 abs_timeout = ns_to_ktime(timeout_nsec);
777 now = ktime_get();
778
779 if (!ktime_after(abs_timeout, now))
780 return 0;
781
782 timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
783
784 timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
785 /* clamp timeout to avoid infinite timeout */
786 if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
787 return MAX_SCHEDULE_TIMEOUT - 1;
788
789 return timeout_jiffies64 + 1;
790}
791
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700792static int drm_syncobj_array_wait(struct drm_device *dev,
793 struct drm_file *file_private,
794 struct drm_syncobj_wait *wait,
795 struct drm_syncobj **syncobjs)
Dave Airlie5e60a102017-08-25 10:52:22 -0700796{
797 signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
Dave Airlie5e60a102017-08-25 10:52:22 -0700798 uint32_t first = ~0;
799
Chris Wilson12fec622018-09-20 21:05:30 +0100800 timeout = drm_syncobj_array_wait_timeout(syncobjs,
801 wait->count_handles,
802 wait->flags,
803 timeout, &first);
804 if (timeout < 0)
805 return timeout;
Dave Airlie5e60a102017-08-25 10:52:22 -0700806
807 wait->first_signaled = first;
Dave Airlie5e60a102017-08-25 10:52:22 -0700808 return 0;
809}
810
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700811static int drm_syncobj_array_find(struct drm_file *file_private,
Ville Syrjälä9e554462017-09-01 19:53:26 +0300812 void __user *user_handles,
813 uint32_t count_handles,
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700814 struct drm_syncobj ***syncobjs_out)
815{
816 uint32_t i, *handles;
817 struct drm_syncobj **syncobjs;
818 int ret;
819
820 handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
821 if (handles == NULL)
822 return -ENOMEM;
823
824 if (copy_from_user(handles, user_handles,
825 sizeof(uint32_t) * count_handles)) {
826 ret = -EFAULT;
827 goto err_free_handles;
828 }
829
830 syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
831 if (syncobjs == NULL) {
832 ret = -ENOMEM;
833 goto err_free_handles;
834 }
835
836 for (i = 0; i < count_handles; i++) {
837 syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
838 if (!syncobjs[i]) {
839 ret = -ENOENT;
840 goto err_put_syncobjs;
841 }
842 }
843
844 kfree(handles);
845 *syncobjs_out = syncobjs;
846 return 0;
847
848err_put_syncobjs:
849 while (i-- > 0)
850 drm_syncobj_put(syncobjs[i]);
851 kfree(syncobjs);
852err_free_handles:
853 kfree(handles);
854
855 return ret;
856}
857
858static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
859 uint32_t count)
860{
861 uint32_t i;
862 for (i = 0; i < count; i++)
863 drm_syncobj_put(syncobjs[i]);
864 kfree(syncobjs);
865}
866
Dave Airlie5e60a102017-08-25 10:52:22 -0700867int
868drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
869 struct drm_file *file_private)
870{
871 struct drm_syncobj_wait *args = data;
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700872 struct drm_syncobj **syncobjs;
Dave Airlie5e60a102017-08-25 10:52:22 -0700873 int ret = 0;
Dave Airlie5e60a102017-08-25 10:52:22 -0700874
875 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100876 return -EOPNOTSUPP;
Dave Airlie5e60a102017-08-25 10:52:22 -0700877
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700878 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
879 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
Dave Airlie5e60a102017-08-25 10:52:22 -0700880 return -EINVAL;
881
882 if (args->count_handles == 0)
883 return -EINVAL;
884
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700885 ret = drm_syncobj_array_find(file_private,
886 u64_to_user_ptr(args->handles),
887 args->count_handles,
888 &syncobjs);
889 if (ret < 0)
890 return ret;
Dave Airlie5e60a102017-08-25 10:52:22 -0700891
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700892 ret = drm_syncobj_array_wait(dev, file_private,
893 args, syncobjs);
Dave Airlie5e60a102017-08-25 10:52:22 -0700894
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700895 drm_syncobj_array_free(syncobjs, args->count_handles);
Dave Airlie5e60a102017-08-25 10:52:22 -0700896
897 return ret;
898}
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700899
900int
901drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
902 struct drm_file *file_private)
903{
904 struct drm_syncobj_array *args = data;
905 struct drm_syncobj **syncobjs;
906 uint32_t i;
907 int ret;
908
909 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100910 return -EOPNOTSUPP;
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700911
912 if (args->pad != 0)
913 return -EINVAL;
914
915 if (args->count_handles == 0)
916 return -EINVAL;
917
918 ret = drm_syncobj_array_find(file_private,
919 u64_to_user_ptr(args->handles),
920 args->count_handles,
921 &syncobjs);
922 if (ret < 0)
923 return ret;
924
Eric Anholt131280a2018-11-08 08:04:22 -0800925 for (i = 0; i < args->count_handles; i++)
926 drm_syncobj_replace_fence(syncobjs[i], 0, NULL);
927
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700928 drm_syncobj_array_free(syncobjs, args->count_handles);
929
Eric Anholt131280a2018-11-08 08:04:22 -0800930 return 0;
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700931}
Jason Ekstrandffa94432017-08-28 14:10:28 -0700932
933int
934drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
935 struct drm_file *file_private)
936{
937 struct drm_syncobj_array *args = data;
938 struct drm_syncobj **syncobjs;
939 uint32_t i;
940 int ret;
941
942 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100943 return -EOPNOTSUPP;
Jason Ekstrandffa94432017-08-28 14:10:28 -0700944
945 if (args->pad != 0)
946 return -EINVAL;
947
948 if (args->count_handles == 0)
949 return -EINVAL;
950
951 ret = drm_syncobj_array_find(file_private,
952 u64_to_user_ptr(args->handles),
953 args->count_handles,
954 &syncobjs);
955 if (ret < 0)
956 return ret;
957
Christian König86bbd892018-11-13 14:14:00 +0100958 for (i = 0; i < args->count_handles; i++)
959 drm_syncobj_assign_null_handle(syncobjs[i]);
Jason Ekstrandffa94432017-08-28 14:10:28 -0700960
961 drm_syncobj_array_free(syncobjs, args->count_handles);
962
963 return ret;
964}