/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);

static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;

/*
 * Fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);

/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(). Fences are associated with a context, allocated through
 * dma_fence_context_alloc(), and all fences on the same context are
 * fully ordered.
 *
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This is
 *   called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 *   fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points are
 *   implicitly passed around as part of shared &dma_buf instances. Such
 *   implicit fences are stored in &struct reservation_object through the
 *   &dma_buf.resv pointer.
 */

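/*
 * Example (an illustrative sketch only; my_engine_create_fence() and
 * my_engine_queue_job() are hypothetical driver helpers, not part of this
 * API): the bare fence lifecycle between a producer and a consumer.
 *
 *	// producer: create and publish a fence for a queued job
 *	fence = my_engine_create_fence(engine);
 *	my_engine_queue_job(engine, job, fence);
 *
 *	// consumer: block until the job completes (interruptible)
 *	ret = dma_fence_wait(fence, true);
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS if a signal arrived
 *	dma_fence_put(fence);		// drop the consumer's reference
 *
 *	// producer, on hardware completion (e.g. from its irq handler):
 *	dma_fence_signal(fence);
 */
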
static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};

/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled.
 */
struct dma_fence *dma_fence_get_stub(void)
{
	spin_lock(&dma_fence_stub_lock);
	if (!dma_fence_stub.ops) {
		dma_fence_init(&dma_fence_stub,
			       &dma_fence_stub_ops,
			       &dma_fence_stub_lock,
			       0, 0);
		dma_fence_signal_locked(&dma_fence_stub);
	}
	spin_unlock(&dma_fence_stub_lock);

	return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);

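/*
 * Example (an illustrative sketch; queue and its job list are
 * hypothetical): callers that must always hand back a fence can return
 * the shared stub when there is nothing left to wait for.
 *
 *	if (list_empty(&queue->jobs))
 *		return dma_fence_get_stub();	// already signaled, refcounted
 */
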
/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: number of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated. The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
 */
u64 dma_fence_context_alloc(unsigned num)
{
	WARN_ON(!num);
	return atomic64_add_return(num, &dma_fence_context_counter) - num;
}
EXPORT_SYMBOL(dma_fence_context_alloc);

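/*
 * Example (an illustrative sketch; struct my_engine and its fields are
 * hypothetical): a driver usually allocates one context per independent
 * engine at probe time, then tags each submission with an increasing
 * seqno on that context.
 *
 *	struct my_engine {
 *		spinlock_t fence_lock;
 *		u64 fence_context;
 *		u64 next_seqno;
 *	};
 *
 *	static void my_engine_init_fencing(struct my_engine *engine)
 *	{
 *		spin_lock_init(&engine->fence_lock);
 *		engine->fence_context = dma_fence_context_alloc(1);
 *		engine->next_seqno = 0;
 *	}
 */
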
/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	int ret = 0;

	if (WARN_ON(!fence))
		return -EINVAL;

	lockdep_assert_held(fence->lock);

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -EINVAL;

		/*
		 * we might have raced with the unlocked dma_fence_signal,
		 * still run through all callbacks
		 */
	} else {
		fence->timestamp = ktime_get();
		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
		trace_dma_fence_signaled(fence);
	}

	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_locked);

/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct dma_fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(dma_fence_signal);

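/*
 * Example (an illustrative sketch; my_engine_pop_retired() is a
 * hypothetical helper returning the oldest completed submission): a
 * completion interrupt signals the fence, then drops the reference the
 * submission held.
 *
 *	static irqreturn_t my_engine_irq(int irq, void *data)
 *	{
 *		struct my_engine *engine = data;
 *		struct dma_fence *fence = my_engine_pop_retired(engine);
 *
 *		if (!fence)
 *			return IRQ_NONE;
 *
 *		dma_fence_signal(fence);
 *		dma_fence_put(fence);	// drop the submission's reference
 *		return IRQ_HANDLED;
 *	}
 */
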
/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	trace_dma_fence_wait_start(fence);
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

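/*
 * Example (an illustrative sketch; the 100ms budget is arbitrary):
 * mapping the three possible outcomes to errno values.
 *
 *	signed long ret;
 *
 *	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *	if (!ret)
 *		return -ETIMEDOUT;	// still unsignaled after 100ms
 *	if (ret < 0)
 *		return ret;		// e.g. -ERESTARTSYS
 *	// signaled; ret holds the remaining timeout in jiffies
 */
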
/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	if (WARN(!list_empty(&fence->cb_list),
		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
		 fence->ops->get_driver_name(fence),
		 fence->ops->get_timeline_name(fence),
		 fence->context, fence->seqno)) {
		unsigned long flags;

		/*
		 * Failed to signal before release, likely a refcounting issue.
		 *
		 * This should never happen, but if it does make sure that we
		 * don't leave chains dangling. We set the error flag first
		 * so that the callbacks know this signal is due to an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);

/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			      &fence->flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
	    fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		spin_lock_irqsave(fence->lock, flags);

		if (!fence->ops->enable_signaling(fence))
			dma_fence_signal_locked(fence);

		spin_unlock_irqrestore(fence->lock, flags);
	}
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);

/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context. If
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Add a software callback to the fence. The same restrictions apply to
 * refcount as they do to dma_fence_wait(); however, the caller doesn't need
 * to keep a refcount to fence after dma_fence_add_callback() has returned:
 * when software access is enabled, the creator of the fence is required to keep
 * the fence alive until after it signals with dma_fence_signal(). The callback
 * itself can be called from irq context.
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);

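/*
 * Example (an illustrative sketch; struct my_waiter and my_fence_cb() are
 * hypothetical): embed the &struct dma_fence_cb in a private structure
 * and recover it with container_of(). The callback may run in irq
 * context, so it only does atomic-safe work.
 *
 *	struct my_waiter {
 *		struct dma_fence_cb cb;
 *		struct completion done;
 *	};
 *
 *	static void my_fence_cb(struct dma_fence *fence,
 *				struct dma_fence_cb *cb)
 *	{
 *		struct my_waiter *waiter =
 *			container_of(cb, struct my_waiter, cb);
 *
 *		complete(&waiter->done);
 *	}
 *
 *	// registration; a non-zero return means the fence already signaled
 *	init_completion(&waiter->done);
 *	if (dma_fence_add_callback(fence, &waiter->cb, my_fence_cb))
 *		complete(&waiter->done);
 */
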
/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence was completed with an error.
 */
int dma_fence_get_status(struct dma_fence *fence)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(fence->lock, flags);
	status = dma_fence_get_status_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return status;
}
EXPORT_SYMBOL(dma_fence_get_status);

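/*
 * Example (an illustrative sketch): telling "pending" apart from
 * "completed" and "completed with an error".
 *
 *	switch (dma_fence_get_status(fence)) {
 *	case 0:
 *		// not signaled yet
 *		break;
 *	case 1:
 *		// signaled, no error
 *		break;
 *	default:
 *		// signaled with a negative errno, e.g. -EDEADLK
 *		break;
 *	}
 */
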
/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence using
 * dma_fence_add_callback() beforehand.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);

struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero the value one is
 * returned if the fence is already signaled for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;
	bool was_set;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			goto out;
		}
	}

	if (!timeout) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}

/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 *	positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);

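/*
 * Example (an illustrative sketch; fence_a and fence_b are fences the
 * caller already holds references to, and the 500ms budget is
 * arbitrary): wait for whichever of two engines retires first.
 *
 *	struct dma_fence *fences[] = { fence_a, fence_b };
 *	uint32_t first;
 *	signed long ret;
 *
 *	ret = dma_fence_wait_any_timeout(fences, ARRAY_SIZE(fences), true,
 *					 msecs_to_jiffies(500), &first);
 *	if (ret > 0)
 *		pr_debug("fence %u signaled first\n", first);
 */
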
/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Initializes an allocated fence; the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * one to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, u64 seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
	fence->error = 0;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
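
/*
 * Example (an illustrative sketch of a minimal fence provider; every
 * my_* identifier is hypothetical): only the two name callbacks are
 * mandatory, all other &struct dma_fence_ops members may stay NULL. This
 * pairs with the my_engine sketch after dma_fence_context_alloc() above.
 *
 *	static const char *my_fence_get_driver_name(struct dma_fence *f)
 *	{
 *		return "my_driver";
 *	}
 *
 *	static const char *my_fence_get_timeline_name(struct dma_fence *f)
 *	{
 *		return "my_engine0";
 *	}
 *
 *	static const struct dma_fence_ops my_fence_ops = {
 *		.get_driver_name = my_fence_get_driver_name,
 *		.get_timeline_name = my_fence_get_timeline_name,
 *	};
 *
 *	struct dma_fence *my_engine_create_fence(struct my_engine *engine)
 *	{
 *		struct dma_fence *fence;
 *
 *		fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 *		if (!fence)
 *			return NULL;
 *
 *		dma_fence_init(fence, &my_fence_ops, &engine->fence_lock,
 *			       engine->fence_context, ++engine->next_seqno);
 *		return fence;
 *	}
 */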