blob: e19525af0ccee11e39277c589beaeb8811d8c0b2 [file] [log] [blame]
Dave Airliee9083422017-04-04 13:26:24 +10001/*
2 * Copyright 2017 Red Hat
Dave Airlie5e60a102017-08-25 10:52:22 -07003 * Parts ported from amdgpu (fence wait code).
4 * Copyright 2016 Advanced Micro Devices, Inc.
Dave Airliee9083422017-04-04 13:26:24 +10005 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 *
25 * Authors:
26 *
27 */
28
29/**
30 * DOC: Overview
31 *
Daniel Vetter924fe8d2017-12-14 21:30:52 +010032 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
33 * persistent objects that contain an optional fence. The fence can be updated
34 * with a new fence, or be NULL.
Dave Airliee9083422017-04-04 13:26:24 +100035 *
Dave Airlie5e60a102017-08-25 10:52:22 -070036 * syncobj's can be waited upon, where it will wait for the underlying
37 * fence.
38 *
Dave Airliee9083422017-04-04 13:26:24 +100039 * syncobj's can be export to fd's and back, these fd's are opaque and
40 * have no other use case, except passing the syncobj between processes.
41 *
42 * Their primary use-case is to implement Vulkan fences and semaphores.
43 *
44 * syncobj have a kref reference count, but also have an optional file.
45 * The file is only created once the syncobj is exported.
46 * The file takes a reference on the kref.
47 */
48
49#include <drm/drmP.h>
50#include <linux/file.h>
51#include <linux/fs.h>
52#include <linux/anon_inodes.h>
Dave Airlie3ee45a32017-04-26 04:09:02 +010053#include <linux/sync_file.h>
Jason Ekstrande7aca5032017-08-25 10:52:24 -070054#include <linux/sched/signal.h>
Dave Airliee9083422017-04-04 13:26:24 +100055
56#include "drm_internal.h"
57#include <drm/drm_syncobj.h>
58
/* Per-waiter bookkeeping for one syncobj in a wait ioctl.  One entry per
 * (task, syncobj) pair; lives in the array allocated by
 * drm_syncobj_array_wait_timeout() for the duration of the wait.
 */
struct syncobj_wait_entry {
	struct list_head node;		/* link in drm_syncobj.cb_list while queued */
	struct task_struct *task;	/* task to wake when a fence appears/signals */
	struct dma_fence *fence;	/* snapshot of the syncobj's fence, holds a ref */
	struct dma_fence_cb fence_cb;	/* callback installed on @fence */
};

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait);
68
Dave Airliee9083422017-04-04 13:26:24 +100069/**
70 * drm_syncobj_find - lookup and reference a sync object.
71 * @file_private: drm file private pointer
72 * @handle: sync object handle to lookup.
73 *
Daniel Vetter924fe8d2017-12-14 21:30:52 +010074 * Returns a reference to the syncobj pointed to by handle or NULL. The
75 * reference must be released by calling drm_syncobj_put().
Dave Airliee9083422017-04-04 13:26:24 +100076 */
77struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
78 u32 handle)
79{
80 struct drm_syncobj *syncobj;
81
82 spin_lock(&file_private->syncobj_table_lock);
83
84 /* Check if we currently have a reference on the object */
85 syncobj = idr_find(&file_private->syncobj_idr, handle);
86 if (syncobj)
87 drm_syncobj_get(syncobj);
88
89 spin_unlock(&file_private->syncobj_table_lock);
90
91 return syncobj;
92}
93EXPORT_SYMBOL(drm_syncobj_find);
94
/* Snapshot the syncobj's current fence into @wait (taking a reference), or,
 * if none is installed yet, queue @wait on the syncobj's cb_list so it gets
 * woken when a fence is attached.  No-op when @wait already holds a fence.
 */
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
				       struct syncobj_wait_entry *wait)
{
	if (wait->fence)
		return;

	spin_lock(&syncobj->lock);
	/* We've already tried once to get a fence and failed.  Now that we
	 * have the lock, try one more time just to be sure we don't add a
	 * callback when a fence has already been set.
	 */
	if (syncobj->fence)
		/* rcu_dereference_protected(.., 1): safe, syncobj->lock held */
		wait->fence = dma_fence_get(
			rcu_dereference_protected(syncobj->fence, 1));
	else
		list_add_tail(&wait->node, &syncobj->cb_list);
	spin_unlock(&syncobj->lock);
}
Eric Anholt131280a2018-11-08 08:04:22 -0800113
Christian König61a98b12018-12-11 18:34:41 +0800114static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
115 struct syncobj_wait_entry *wait)
Eric Anholt131280a2018-11-08 08:04:22 -0800116{
Christian König61a98b12018-12-11 18:34:41 +0800117 if (!wait->node.next)
118 return;
119
Eric Anholt131280a2018-11-08 08:04:22 -0800120 spin_lock(&syncobj->lock);
Christian König61a98b12018-12-11 18:34:41 +0800121 list_del_init(&wait->node);
Eric Anholt131280a2018-11-08 08:04:22 -0800122 spin_unlock(&syncobj->lock);
123}
124
/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in sync file.
 *
 * This replaces the fence on a sync object.  A NULL @fence clears the
 * syncobj.  Waiters queued on cb_list are handed the new fence and woken
 * whenever the installed fence actually changes.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       struct dma_fence *fence)
{
	struct dma_fence *old_fence;
	struct syncobj_wait_entry *cur, *tmp;

	/* Take our reference before publishing the pointer. */
	if (fence)
		dma_fence_get(fence);

	spin_lock(&syncobj->lock);

	old_fence = rcu_dereference_protected(syncobj->fence,
					      lockdep_is_held(&syncobj->lock));
	rcu_assign_pointer(syncobj->fence, fence);

	if (fence != old_fence) {
		/* Dequeue and notify every waiter parked on this syncobj. */
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			syncobj_wait_syncobj_func(syncobj, cur);
		}
	}

	spin_unlock(&syncobj->lock);

	/* Drop the syncobj's reference on the displaced fence (NULL is ok). */
	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
159
/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign a already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	/* dma_fence_get_stub() returns an already-signaled fence. */
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, fence);
	/* replace_fence took its own reference; drop ours. */
	dma_fence_put(fence);
}
173
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100174/**
175 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
176 * @file_private: drm file private pointer
177 * @handle: sync object handle to lookup.
Chunming Zhou0a6730e2018-08-30 14:48:29 +0800178 * @point: timeline point
Chunming Zhou871edc92018-10-17 15:03:18 +0800179 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100180 * @fence: out parameter for the fence
181 *
182 * This is just a convenience function that combines drm_syncobj_find() and
Eric Anholt131280a2018-11-08 08:04:22 -0800183 * drm_syncobj_fence_get().
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100184 *
185 * Returns 0 on success or a negative error value on failure. On success @fence
186 * contains a reference to the fence, which must be released by calling
187 * dma_fence_put().
188 */
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700189int drm_syncobj_find_fence(struct drm_file *file_private,
Chunming Zhou649fdce2018-10-15 16:55:47 +0800190 u32 handle, u64 point, u64 flags,
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700191 struct dma_fence **fence)
Dave Airliee9083422017-04-04 13:26:24 +1000192{
193 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
Eric Anholt131280a2018-11-08 08:04:22 -0800194 int ret = 0;
Dave Airliee9083422017-04-04 13:26:24 +1000195
Eric Anholt131280a2018-11-08 08:04:22 -0800196 if (!syncobj)
197 return -ENOENT;
198
199 *fence = drm_syncobj_fence_get(syncobj);
200 if (!*fence) {
201 ret = -EINVAL;
202 }
203 drm_syncobj_put(syncobj);
Dave Airliee9083422017-04-04 13:26:24 +1000204 return ret;
205}
Jason Ekstrandafaf5922017-08-25 10:52:19 -0700206EXPORT_SYMBOL(drm_syncobj_find_fence);
Dave Airliee9083422017-04-04 13:26:24 +1000207
/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
	struct drm_syncobj *syncobj = container_of(kref,
						   struct drm_syncobj,
						   refcount);
	/* Drop the installed fence (if any) before freeing the object. */
	drm_syncobj_replace_fence(syncobj, NULL);
	kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
223
Marek Olšák1321fd22017-09-12 22:42:12 +0200224/**
225 * drm_syncobj_create - create a new syncobj
226 * @out_syncobj: returned syncobj
227 * @flags: DRM_SYNCOBJ_* flags
228 * @fence: if non-NULL, the syncobj will represent this fence
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100229 *
230 * This is the first function to create a sync object. After creating, drivers
231 * probably want to make it available to userspace, either through
232 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
233 *
234 * Returns 0 on success or a negative error value on failure.
Marek Olšák1321fd22017-09-12 22:42:12 +0200235 */
236int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
237 struct dma_fence *fence)
Dave Airliee9083422017-04-04 13:26:24 +1000238{
Dave Airliee9083422017-04-04 13:26:24 +1000239 struct drm_syncobj *syncobj;
240
241 syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
242 if (!syncobj)
243 return -ENOMEM;
244
245 kref_init(&syncobj->refcount);
Jason Ekstrand9c19fb12017-08-28 07:39:25 -0700246 INIT_LIST_HEAD(&syncobj->cb_list);
Eric Anholt131280a2018-11-08 08:04:22 -0800247 spin_lock_init(&syncobj->lock);
Dave Airliee9083422017-04-04 13:26:24 +1000248
Christian König86bbd892018-11-13 14:14:00 +0100249 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
250 drm_syncobj_assign_null_handle(syncobj);
Jason Ekstrand1fc08212017-08-25 10:52:25 -0700251
Marek Olšák1321fd22017-09-12 22:42:12 +0200252 if (fence)
Christian König0b258ed2018-11-14 14:24:27 +0100253 drm_syncobj_replace_fence(syncobj, fence);
Marek Olšák1321fd22017-09-12 22:42:12 +0200254
255 *out_syncobj = syncobj;
256 return 0;
257}
258EXPORT_SYMBOL(drm_syncobj_create);
259
260/**
261 * drm_syncobj_get_handle - get a handle from a syncobj
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100262 * @file_private: drm file private pointer
263 * @syncobj: Sync object to export
264 * @handle: out parameter with the new handle
265 *
266 * Exports a sync object created with drm_syncobj_create() as a handle on
267 * @file_private to userspace.
268 *
269 * Returns 0 on success or a negative error value on failure.
Marek Olšák1321fd22017-09-12 22:42:12 +0200270 */
271int drm_syncobj_get_handle(struct drm_file *file_private,
272 struct drm_syncobj *syncobj, u32 *handle)
273{
274 int ret;
275
276 /* take a reference to put in the idr */
277 drm_syncobj_get(syncobj);
278
Dave Airliee9083422017-04-04 13:26:24 +1000279 idr_preload(GFP_KERNEL);
280 spin_lock(&file_private->syncobj_table_lock);
281 ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
282 spin_unlock(&file_private->syncobj_table_lock);
283
284 idr_preload_end();
285
286 if (ret < 0) {
287 drm_syncobj_put(syncobj);
288 return ret;
289 }
290
291 *handle = ret;
292 return 0;
293}
Marek Olšák1321fd22017-09-12 22:42:12 +0200294EXPORT_SYMBOL(drm_syncobj_get_handle);
295
296static int drm_syncobj_create_as_handle(struct drm_file *file_private,
297 u32 *handle, uint32_t flags)
298{
299 int ret;
300 struct drm_syncobj *syncobj;
301
302 ret = drm_syncobj_create(&syncobj, flags, NULL);
303 if (ret)
304 return ret;
305
306 ret = drm_syncobj_get_handle(file_private, syncobj, handle);
307 drm_syncobj_put(syncobj);
308 return ret;
309}
Dave Airliee9083422017-04-04 13:26:24 +1000310
311static int drm_syncobj_destroy(struct drm_file *file_private,
312 u32 handle)
313{
314 struct drm_syncobj *syncobj;
315
316 spin_lock(&file_private->syncobj_table_lock);
317 syncobj = idr_remove(&file_private->syncobj_idr, handle);
318 spin_unlock(&file_private->syncobj_table_lock);
319
320 if (!syncobj)
321 return -EINVAL;
322
323 drm_syncobj_put(syncobj);
324 return 0;
325}
326
/* ->release for an exported syncobj fd: drop the reference the fd held. */
static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
	struct drm_syncobj *syncobj = file->private_data;

	drm_syncobj_put(syncobj);
	return 0;
}

/* fops for the anon inode backing an exported syncobj fd; only ->release
 * is populated, also used to recognize such fds in drm_syncobj_fd_to_handle().
 */
static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
338
Daniel Vetter924fe8d2017-12-14 21:30:52 +0100339/**
340 * drm_syncobj_get_fd - get a file descriptor from a syncobj
341 * @syncobj: Sync object to export
342 * @p_fd: out parameter with the new file descriptor
343 *
344 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
345 *
346 * Returns 0 on success or a negative error value on failure.
347 */
Marek Olšák684fd0a2017-09-12 22:42:13 +0200348int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
349{
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000350 struct file *file;
Marek Olšák684fd0a2017-09-12 22:42:13 +0200351 int fd;
352
353 fd = get_unused_fd_flags(O_CLOEXEC);
354 if (fd < 0)
355 return fd;
356
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000357 file = anon_inode_getfile("syncobj_file",
358 &drm_syncobj_file_fops,
359 syncobj, 0);
360 if (IS_ERR(file)) {
361 put_unused_fd(fd);
362 return PTR_ERR(file);
Marek Olšák684fd0a2017-09-12 22:42:13 +0200363 }
Chris Wilsone7cdf5c2017-12-19 12:07:00 +0000364
365 drm_syncobj_get(syncobj);
366 fd_install(fd, file);
367
Marek Olšák684fd0a2017-09-12 22:42:13 +0200368 *p_fd = fd;
369 return 0;
370}
371EXPORT_SYMBOL(drm_syncobj_get_fd);
372
Dave Airliee9083422017-04-04 13:26:24 +1000373static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
374 u32 handle, int *p_fd)
375{
376 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
377 int ret;
Dave Airliee9083422017-04-04 13:26:24 +1000378
379 if (!syncobj)
380 return -EINVAL;
381
Marek Olšák684fd0a2017-09-12 22:42:13 +0200382 ret = drm_syncobj_get_fd(syncobj, p_fd);
Dave Airliee9083422017-04-04 13:26:24 +1000383 drm_syncobj_put(syncobj);
384 return ret;
385}
386
/* Import a syncobj fd previously produced by drm_syncobj_get_fd() and
 * allocate a new handle for it on @file_private.
 */
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct file *file;
	int ret;

	file = fget(fd);
	if (!file)
		return -EINVAL;

	/* Only fds created through drm_syncobj_file_fops are syncobjs. */
	if (file->f_op != &drm_syncobj_file_fops) {
		fput(file);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	/* idr_alloc() starts at 1, so success is strictly positive here. */
	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fput(file);
	return ret;
}
422
Ville Syrjäläa32c94a2017-09-01 19:53:25 +0300423static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
424 int fd, int handle)
Dave Airlie3ee45a32017-04-26 04:09:02 +0100425{
426 struct dma_fence *fence = sync_file_get_fence(fd);
427 struct drm_syncobj *syncobj;
428
429 if (!fence)
430 return -EINVAL;
431
432 syncobj = drm_syncobj_find(file_private, handle);
433 if (!syncobj) {
434 dma_fence_put(fence);
435 return -ENOENT;
436 }
437
Christian König0b258ed2018-11-14 14:24:27 +0100438 drm_syncobj_replace_fence(syncobj, fence);
Dave Airlie3ee45a32017-04-26 04:09:02 +0100439 dma_fence_put(fence);
440 drm_syncobj_put(syncobj);
441 return 0;
442}
443
/* Export the fence currently held by the syncobj behind @handle as a
 * sync_file fd.  Fails with -EINVAL if the syncobj has no fence.
 */
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
	if (ret)
		goto err_put_fd;

	/* sync_file_create() takes its own fence reference. */
	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
	/* base 1: handle 0 stays invalid so userspace can treat it as "none" */
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}
489
/* idr_for_each() callback: drop the table's reference on one syncobj. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	drm_syncobj_put(ptr);
	return 0;
}
498
/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
	/* Drop the table's reference on every handle still allocated. */
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
514
515int
516drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
517 struct drm_file *file_private)
518{
519 struct drm_syncobj_create *args = data;
520
521 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100522 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000523
524 /* no valid flags yet */
Eric Anholt131280a2018-11-08 08:04:22 -0800525 if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
Dave Airliee9083422017-04-04 13:26:24 +1000526 return -EINVAL;
527
Marek Olšák1321fd22017-09-12 22:42:12 +0200528 return drm_syncobj_create_as_handle(file_private,
529 &args->handle, args->flags);
Dave Airliee9083422017-04-04 13:26:24 +1000530}
531
532int
533drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
534 struct drm_file *file_private)
535{
536 struct drm_syncobj_destroy *args = data;
537
538 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100539 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000540
541 /* make sure padding is empty */
542 if (args->pad)
543 return -EINVAL;
544 return drm_syncobj_destroy(file_private, args->handle);
545}
546
547int
548drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
549 struct drm_file *file_private)
550{
551 struct drm_syncobj_handle *args = data;
552
553 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100554 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000555
Dave Airlie3ee45a32017-04-26 04:09:02 +0100556 if (args->pad)
Dave Airliee9083422017-04-04 13:26:24 +1000557 return -EINVAL;
558
Dave Airlie3ee45a32017-04-26 04:09:02 +0100559 if (args->flags != 0 &&
560 args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
561 return -EINVAL;
562
563 if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
564 return drm_syncobj_export_sync_file(file_private, args->handle,
565 &args->fd);
566
Dave Airliee9083422017-04-04 13:26:24 +1000567 return drm_syncobj_handle_to_fd(file_private, args->handle,
568 &args->fd);
569}
570
571int
572drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
573 struct drm_file *file_private)
574{
575 struct drm_syncobj_handle *args = data;
576
577 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100578 return -EOPNOTSUPP;
Dave Airliee9083422017-04-04 13:26:24 +1000579
Dave Airlie3ee45a32017-04-26 04:09:02 +0100580 if (args->pad)
Dave Airliee9083422017-04-04 13:26:24 +1000581 return -EINVAL;
582
Dave Airlie3ee45a32017-04-26 04:09:02 +0100583 if (args->flags != 0 &&
584 args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
585 return -EINVAL;
586
587 if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
588 return drm_syncobj_import_sync_file_fence(file_private,
589 args->fd,
590 args->handle);
591
Dave Airliee9083422017-04-04 13:26:24 +1000592 return drm_syncobj_fd_to_handle(file_private, args->fd,
593 &args->handle);
594}
Dave Airlie5e60a102017-08-25 10:52:22 -0700595
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700596static void syncobj_wait_fence_func(struct dma_fence *fence,
597 struct dma_fence_cb *cb)
598{
599 struct syncobj_wait_entry *wait =
600 container_of(cb, struct syncobj_wait_entry, fence_cb);
601
602 wake_up_process(wait->task);
603}
604
/* Called (under syncobj->lock, from drm_syncobj_replace_fence()) when a
 * fence is installed on a syncobj a waiter is parked on: snapshot the new
 * fence into the wait entry and wake the waiting task.
 */
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct syncobj_wait_entry *wait)
{
	/* This happens inside the syncobj lock */
	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
							      lockdep_is_held(&syncobj->lock)));
	wake_up_process(wait->task);
}
613
/* Core wait loop for the wait ioctl.
 *
 * Waits on @count syncobjs for up to @timeout jiffies.  With
 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL every syncobj must signal; otherwise the
 * first signaled one ends the wait and its index is stored in @idx.  With
 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT a fence-less syncobj is waited on
 * until a fence is submitted; without it such a syncobj is -EINVAL.
 *
 * Returns remaining jiffies on success, or -EINVAL/-ENOMEM/-ETIME/
 * -ERESTARTSYS encoded in the (signed) timeout.
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries. We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
		if (!entries[i].fence) {
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* Fast path: already satisfied without sleeping. */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called. So here if we fail to match signaled_count, we need to
	 * fallthough and try a 0 timeout wait!
	 */

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		/* Park on each syncobj so fence submission wakes us. */
		for (i = 0; i < count; ++i)
			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
	}

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() returning nonzero means
			 * the fence was already signaled — either way this
			 * entry counts as done.
			 */
			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return timeout;
}
726
Dave Airlie5e60a102017-08-25 10:52:22 -0700727/**
728 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
729 *
730 * @timeout_nsec: timeout nsec component in ns, 0 for poll
731 *
732 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
733 */
734static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
735{
736 ktime_t abs_timeout, now;
737 u64 timeout_ns, timeout_jiffies64;
738
739 /* make 0 timeout means poll - absolute 0 doesn't seem valid */
740 if (timeout_nsec == 0)
741 return 0;
742
743 abs_timeout = ns_to_ktime(timeout_nsec);
744 now = ktime_get();
745
746 if (!ktime_after(abs_timeout, now))
747 return 0;
748
749 timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
750
751 timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
752 /* clamp timeout to avoid infinite timeout */
753 if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
754 return MAX_SCHEDULE_TIMEOUT - 1;
755
756 return timeout_jiffies64 + 1;
757}
758
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700759static int drm_syncobj_array_wait(struct drm_device *dev,
760 struct drm_file *file_private,
761 struct drm_syncobj_wait *wait,
762 struct drm_syncobj **syncobjs)
Dave Airlie5e60a102017-08-25 10:52:22 -0700763{
764 signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
Dave Airlie5e60a102017-08-25 10:52:22 -0700765 uint32_t first = ~0;
766
Chris Wilson12fec622018-09-20 21:05:30 +0100767 timeout = drm_syncobj_array_wait_timeout(syncobjs,
768 wait->count_handles,
769 wait->flags,
770 timeout, &first);
771 if (timeout < 0)
772 return timeout;
Dave Airlie5e60a102017-08-25 10:52:22 -0700773
774 wait->first_signaled = first;
Dave Airlie5e60a102017-08-25 10:52:22 -0700775 return 0;
776}
777
/* Copy @count_handles u32 handles from userspace and resolve each one to a
 * referenced syncobj.  On success *@syncobjs_out is a kmalloc'ed array the
 * caller must release with drm_syncobj_array_free(); on failure every
 * reference taken so far is dropped.
 */
static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	/* kmalloc_array() checks the count * size multiplication for overflow. */
	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	/* Unwind only the entries that were successfully resolved. */
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}
824
/* Drop the references taken by drm_syncobj_array_find() and free the array. */
static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
				   uint32_t count)
{
	uint32_t i;
	for (i = 0; i < count; i++)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
}
833
Dave Airlie5e60a102017-08-25 10:52:22 -0700834int
835drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
836 struct drm_file *file_private)
837{
838 struct drm_syncobj_wait *args = data;
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700839 struct drm_syncobj **syncobjs;
Dave Airlie5e60a102017-08-25 10:52:22 -0700840 int ret = 0;
Dave Airlie5e60a102017-08-25 10:52:22 -0700841
842 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100843 return -EOPNOTSUPP;
Dave Airlie5e60a102017-08-25 10:52:22 -0700844
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700845 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
846 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
Dave Airlie5e60a102017-08-25 10:52:22 -0700847 return -EINVAL;
848
849 if (args->count_handles == 0)
850 return -EINVAL;
851
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700852 ret = drm_syncobj_array_find(file_private,
853 u64_to_user_ptr(args->handles),
854 args->count_handles,
855 &syncobjs);
856 if (ret < 0)
857 return ret;
Dave Airlie5e60a102017-08-25 10:52:22 -0700858
Jason Ekstrande7aca5032017-08-25 10:52:24 -0700859 ret = drm_syncobj_array_wait(dev, file_private,
860 args, syncobjs);
Dave Airlie5e60a102017-08-25 10:52:22 -0700861
Jason Ekstrand3e6fb722017-08-25 10:52:26 -0700862 drm_syncobj_array_free(syncobjs, args->count_handles);
Dave Airlie5e60a102017-08-25 10:52:22 -0700863
864 return ret;
865}
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700866
867int
868drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
869 struct drm_file *file_private)
870{
871 struct drm_syncobj_array *args = data;
872 struct drm_syncobj **syncobjs;
873 uint32_t i;
874 int ret;
875
876 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100877 return -EOPNOTSUPP;
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700878
879 if (args->pad != 0)
880 return -EINVAL;
881
882 if (args->count_handles == 0)
883 return -EINVAL;
884
885 ret = drm_syncobj_array_find(file_private,
886 u64_to_user_ptr(args->handles),
887 args->count_handles,
888 &syncobjs);
889 if (ret < 0)
890 return ret;
891
Eric Anholt131280a2018-11-08 08:04:22 -0800892 for (i = 0; i < args->count_handles; i++)
Christian König0b258ed2018-11-14 14:24:27 +0100893 drm_syncobj_replace_fence(syncobjs[i], NULL);
Eric Anholt131280a2018-11-08 08:04:22 -0800894
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700895 drm_syncobj_array_free(syncobjs, args->count_handles);
896
Eric Anholt131280a2018-11-08 08:04:22 -0800897 return 0;
Jason Ekstrandaa4035d2017-08-28 14:10:27 -0700898}
Jason Ekstrandffa94432017-08-28 14:10:28 -0700899
900int
901drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
902 struct drm_file *file_private)
903{
904 struct drm_syncobj_array *args = data;
905 struct drm_syncobj **syncobjs;
906 uint32_t i;
907 int ret;
908
909 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
Chris Wilson69fdf422018-09-13 20:20:50 +0100910 return -EOPNOTSUPP;
Jason Ekstrandffa94432017-08-28 14:10:28 -0700911
912 if (args->pad != 0)
913 return -EINVAL;
914
915 if (args->count_handles == 0)
916 return -EINVAL;
917
918 ret = drm_syncobj_array_find(file_private,
919 u64_to_user_ptr(args->handles),
920 args->count_handles,
921 &syncobjs);
922 if (ret < 0)
923 return ret;
924
Christian König86bbd892018-11-13 14:14:00 +0100925 for (i = 0; i < args->count_handles; i++)
926 drm_syncobj_assign_null_handle(syncobjs[i]);
Jason Ekstrandffa94432017-08-28 14:10:28 -0700927
928 drm_syncobj_array_free(syncobjs, args->count_handles);
929
930 return ret;
931}