/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) and N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */
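
/*
 * Illustrative sketch (not part of the code below; "fence" and "timeout"
 * are placeholders): a hypothetical driver publishes fences on the write
 * side while holding obj->lock (a ww_mutex) and queries them on the read
 * side through the lockless *_rcu helpers defined in this file.
 *
 *	int err;
 *	long timeout = msecs_to_jiffies(100);
 *
 *	// write side: obj->lock serializes all fence updates
 *	ww_mutex_lock(&obj->lock, NULL);
 *	err = dma_resv_reserve_shared(obj, 1);
 *	if (!err)
 *		dma_resv_add_shared_fence(obj, fence);
 *	ww_mutex_unlock(&obj->lock);
 *
 *	// read side: RCU protected, no lock taken
 *	if (!dma_resv_test_signaled_rcu(obj, true))
 *		timeout = dma_resv_wait_timeout_rcu(obj, true, false, timeout);
 */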

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}
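
/*
 * Worked example (illustrative only, assuming the dma_resv_list layout from
 * <linux/dma-resv.h> on a typical 64-bit build): a request for four fences
 * asks kmalloc() for offsetof(..., shared[4]) == 24 + 4 * 8 == 56 bytes,
 * which the slab allocator rounds up to a 64-byte object. ksize() reports
 * that real size, so shared_max becomes (64 - 24) / 8 == 5 and the extra
 * slot can be used without another allocation.
 */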

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);
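
/*
 * Minimal usage sketch (hypothetical driver code; struct my_bo and its
 * helpers are made up for illustration): the reservation object is normally
 * embedded in a driver's buffer object, initialized once at creation time
 * and torn down with dma_resv_fini() when the last reference goes away.
 *
 *	struct my_bo {
 *		struct kref refcount;
 *		struct dma_resv resv;
 *	};
 *
 *	static void my_bo_init(struct my_bo *bo)
 *	{
 *		kref_init(&bo->refcount);
 *		dma_resv_init(&bo->resv);
 *	}
 *
 *	static void my_bo_release(struct kref *kref)
 *	{
 *		struct my_bo *bo = container_of(kref, struct my_bo, refcount);
 *
 *		dma_resv_fini(&bo->resv);
 *		kfree(bo);
 *	}
 */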

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references to it must have
	 * been released, so there is no need for RCU protection here.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = 4;
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
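
/*
 * Usage sketch (hypothetical caller, not part of this file): space is
 * reserved up front so that dma_resv_add_shared_fence() can never fail,
 * which matters because fences are usually added after the point of no
 * return in a command submission path.
 *
 *	ww_mutex_lock(&obj->lock, NULL);
 *	ret = dma_resv_reserve_shared(obj, 1);
 *	if (ret) {
 *		ww_mutex_unlock(&obj->lock);
 *		return ret;		// fail before the job is committed
 *	}
 *	...				// submit the job, obtain job_fence
 *	dma_resv_add_shared_fence(obj, job_fence);
 *	ww_mutex_unlock(&obj->lock);
 */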

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, obj->lock must be held, and
 * dma_resv_reserve_shared() has been called.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	for (i = 0; i < count; ++i) {
		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	preempt_disable();
	rcu_assign_pointer(obj->fence_excl, fence);
	/* pointer update must be visible before we modify the shared_count */
	if (old)
		smp_store_mb(old->shared_count, 0);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
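
/*
 * Usage sketch (hypothetical writer path; write_job_fence is a placeholder):
 * a job that writes the buffer attaches its completion fence as the
 * exclusive fence, which also retires all currently published shared
 * fences as far as readers of this object are concerned.
 *
 *	ww_mutex_lock(&obj->lock, NULL);
 *	dma_resv_add_excl_fence(obj, write_job_fence);
 *	ww_mutex_unlock(&obj->lock);
 */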

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned int i, shared_count;

	dma_resv_assert_held(dst);

	rcu_read_lock();

retry:
	dma_resv_fences(src, &new, &src_list, &shared_count);
	if (shared_count) {
		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		dma_resv_fences(src, &new, &src_list, &shared_count);
		if (!src_list || shared_count > dst_list->shared_max) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	if (new && !dma_fence_get_rcu(new)) {
		dma_resv_list_free(dst_list);
		goto retry;
	}
	rcu_read_unlock();

	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	preempt_disable();
	rcu_assign_pointer(dst->fence_excl, new);
	rcu_assign_pointer(dst->fence, dst_list);
	preempt_enable();

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
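
/*
 * Usage sketch (hypothetical; dst_bo and src_bo are made-up buffer objects
 * embedding a struct dma_resv named resv): only the destination has to be
 * locked, the source is read under RCU and re-read until a consistent
 * snapshot is obtained.
 *
 *	ww_mutex_lock(&dst_bo->resv.lock, NULL);
 *	ret = dma_resv_copy_fences(&dst_bo->resv, &src_bo->resv);
 *	ww_mutex_unlock(&dst_bo->resv.lock);
 */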

/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i;
		size_t sz = 0;

		i = 0;

		rcu_read_lock();
		dma_resv_fences(obj, &fence_excl, &fobj,
				&shared_count);

		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
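
/*
 * Usage sketch (hypothetical caller): snapshot all fences without taking
 * obj->lock, for example to stuff them into a sync_file. The caller owns
 * one reference per returned fence plus the returned array.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(obj, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *
 *	...				// use excl and shared[0..count - 1]
 *
 *	dma_fence_put(excl);		// excl may be NULL, which is fine
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 */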

/**
 * dma_resv_wait_timeout_rcu - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	rcu_read_lock();
	i = -1;

	dma_resv_fences(obj, &fence, &fobj, &shared_count);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
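
/*
 * Usage sketch (hypothetical caller): wait for the buffer to become idle
 * with a bounded, interruptible wait. A negative return value is an error,
 * zero means the timeout expired and a positive value is the remaining
 * time in jiffies.
 *
 *	long lret;
 *
 *	lret = dma_resv_wait_timeout_rcu(obj, true, true,
 *					 msecs_to_jiffies(100));
 *	if (lret < 0)
 *		return lret;		// e.g. -ERESTARTSYS
 *	if (lret == 0)
 *		return -ETIMEDOUT;	// policy chosen by this caller
 */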

static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	unsigned shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;

	dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
	if (test_all) {
		unsigned i;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}
	}

	if (!shared_count && fence_excl) {
		ret = dma_resv_test_signaled_single(fence_excl);
		if (ret < 0)
			goto retry;
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
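
/*
 * Usage sketch (hypothetical caller; the two helpers are made up): a cheap,
 * non-blocking idleness check, e.g. before deciding whether a buffer can be
 * reused right away. With test_all == true all fences must have signaled,
 * with test_all == false only the exclusive (write) fence is checked.
 *
 *	if (dma_resv_test_signaled_rcu(obj, true))
 *		reuse_buffer_now(obj);
 *	else
 *		queue_buffer_for_later(obj);
 */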