// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 *
 * See struct dma_resv for more details.
 */
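
/*
 * Usage sketch (illustrative only, not part of the original file): a writer
 * typically publishes its fence while holding the reservation lock. "bo" and
 * "fence" are hypothetical driver objects, with "bo->resv" an embedded
 * struct dma_resv:
 *
 *      if (dma_resv_lock(&bo->resv, NULL) == 0) {
 *              dma_resv_add_excl_fence(&bo->resv, fence);
 *              dma_resv_unlock(&bo->resv);
 *      }
 *
 * Readers can then query the object without taking the lock, for example via
 * dma_resv_test_signaled(&bo->resv, true), relying on the RCU protection
 * described above.
 */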

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
        struct dma_resv_list *list;

        list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
        if (!list)
                return NULL;

        list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
                sizeof(*list->shared);

        return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
        unsigned int i;

        if (!list)
                return;

        for (i = 0; i < list->shared_count; ++i)
                dma_fence_put(rcu_dereference_protected(list->shared[i], true));

        kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
        ww_mutex_init(&obj->lock, &reservation_ww_class);
        seqcount_ww_mutex_init(&obj->seq, &obj->lock);

        RCU_INIT_POINTER(obj->fence, NULL);
        RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
        struct dma_resv_list *fobj;
        struct dma_fence *excl;

        /*
         * This object should be dead and all references must have
         * been released to it, so no need to be protected with rcu.
         */
        excl = rcu_dereference_protected(obj->fence_excl, 1);
        if (excl)
                dma_fence_put(excl);

        fobj = rcu_dereference_protected(obj->fence, 1);
        dma_resv_list_free(fobj);
        ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with @obj locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
        struct dma_resv_list *old, *new;
        unsigned int i, j, k, max;

        dma_resv_assert_held(obj);

        old = dma_resv_shared_list(obj);
        if (old && old->shared_max) {
                if ((old->shared_count + num_fences) <= old->shared_max)
                        return 0;
                max = max(old->shared_count + num_fences, old->shared_max * 2);
        } else {
                max = max(4ul, roundup_pow_of_two(num_fences));
        }

        new = dma_resv_list_alloc(max);
        if (!new)
                return -ENOMEM;

        /*
         * no need to bump fence refcounts, rcu_read access
         * requires the use of kref_get_unless_zero, and the
         * references from the old struct are carried over to
         * the new.
         */
        for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
                struct dma_fence *fence;

                fence = rcu_dereference_protected(old->shared[i],
                                                  dma_resv_held(obj));
                if (dma_fence_is_signaled(fence))
                        RCU_INIT_POINTER(new->shared[--k], fence);
                else
                        RCU_INIT_POINTER(new->shared[j++], fence);
        }
        new->shared_count = j;

        /*
         * We are not changing the effective set of fences here so can
         * merely update the pointer to the new array; both existing
         * readers and new readers will see exactly the same set of
         * active (unsignaled) shared fences. Individual fences and the
         * old array are protected by RCU and so will not vanish under
         * the gaze of the rcu_read_lock() readers.
         */
        rcu_assign_pointer(obj->fence, new);

        if (!old)
                return 0;

        /* Drop the references to the signaled fences */
        for (i = k; i < max; ++i) {
                struct dma_fence *fence;

                fence = rcu_dereference_protected(new->shared[i],
                                                  dma_resv_held(obj));
                dma_fence_put(fence);
        }
        kfree_rcu(old, rcu);

        return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
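
/*
 * Illustrative sketch of the reserve/add contract documented above, assuming
 * a hypothetical driver object "bo" with an embedded struct dma_resv "resv":
 *
 *      dma_resv_lock(&bo->resv, NULL);
 *      ret = dma_resv_reserve_shared(&bo->resv, 1);
 *      if (!ret)
 *              dma_resv_add_shared_fence(&bo->resv, fence);
 *      dma_resv_unlock(&bo->resv);
 *
 * If the object is unlocked between the two calls the reservation must be
 * repeated, which is what dma_resv_reset_shared_max() below helps to validate
 * when CONFIG_DEBUG_MUTEXES is enabled.
 */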

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
        struct dma_resv_list *fences = dma_resv_shared_list(obj);

        dma_resv_assert_held(obj);

        /* Test shared fence slot reservation */
        if (fences)
                fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_shared() has been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
        struct dma_resv_list *fobj;
        struct dma_fence *old;
        unsigned int i, count;

        dma_fence_get(fence);

        dma_resv_assert_held(obj);

        fobj = dma_resv_shared_list(obj);
        count = fobj->shared_count;

        write_seqcount_begin(&obj->seq);

        for (i = 0; i < count; ++i) {

                old = rcu_dereference_protected(fobj->shared[i],
                                                dma_resv_held(obj));
                if (old->context == fence->context ||
                    dma_fence_is_signaled(old))
                        goto replace;
        }

        BUG_ON(fobj->shared_count >= fobj->shared_max);
        old = NULL;
        count++;

replace:
        RCU_INIT_POINTER(fobj->shared[i], fence);
        /* pointer update must be visible before we extend the shared_count */
        smp_store_mb(fobj->shared_count, count);

        write_seqcount_end(&obj->seq);
        dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * Note that this function replaces all fences attached to @obj, see also
 * &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
        struct dma_fence *old_fence = dma_resv_excl_fence(obj);
        struct dma_resv_list *old;
        u32 i = 0;

        dma_resv_assert_held(obj);

        old = dma_resv_shared_list(obj);
        if (old)
                i = old->shared_count;

        dma_fence_get(fence);

        write_seqcount_begin(&obj->seq);
        /* write_seqcount_begin provides the necessary memory barrier */
        RCU_INIT_POINTER(obj->fence_excl, fence);
        if (old)
                old->shared_count = 0;
        write_seqcount_end(&obj->seq);

        /* inplace update, no shared fences */
        while (i--)
                dma_fence_put(rcu_dereference_protected(old->shared[i],
                                                        dma_resv_held(obj)));

        dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);

/**
 * dma_resv_iter_restart_unlocked - restart the unlocked iterator
 * @cursor: The dma_resv_iter object to restart
 *
 * Restart the unlocked iteration by initializing the cursor object.
 */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
        cursor->seq = read_seqcount_begin(&cursor->obj->seq);
        cursor->index = -1;
        cursor->shared_count = 0;
        if (cursor->all_fences) {
                cursor->fences = dma_resv_shared_list(cursor->obj);
                if (cursor->fences)
                        cursor->shared_count = cursor->fences->shared_count;
        } else {
                cursor->fences = NULL;
        }
        cursor->is_restarted = true;
}

/**
 * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
 * @cursor: cursor to record the current position
 *
 * Return all the fences in the dma_resv object which are not yet signaled.
 * The returned fence has an extra local reference so will stay alive.
 * If a concurrent modify is detected the whole iteration is started over again.
 */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
        struct dma_resv *obj = cursor->obj;

        do {
                /* Drop the reference from the previous round */
                dma_fence_put(cursor->fence);

                if (cursor->index == -1) {
                        cursor->fence = dma_resv_excl_fence(obj);
                        cursor->index++;
                        if (!cursor->fence)
                                continue;

                } else if (!cursor->fences ||
                           cursor->index >= cursor->shared_count) {
                        cursor->fence = NULL;
                        break;

                } else {
                        struct dma_resv_list *fences = cursor->fences;
                        unsigned int idx = cursor->index++;

                        cursor->fence = rcu_dereference(fences->shared[idx]);
                }
                cursor->fence = dma_fence_get_rcu(cursor->fence);
                if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
                        break;
        } while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
        rcu_read_lock();
        do {
                dma_resv_iter_restart_unlocked(cursor);
                dma_resv_iter_walk_unlocked(cursor);
        } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
        rcu_read_unlock();

        return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
        bool restart;

        rcu_read_lock();
        cursor->is_restarted = false;
        restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
        do {
                if (restart)
                        dma_resv_iter_restart_unlocked(cursor);
                dma_resv_iter_walk_unlocked(cursor);
                restart = true;
        } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
        rcu_read_unlock();

        return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
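
/*
 * Illustrative sketch of the unlocked iteration pattern (not part of the
 * original file). Any per-fence state built up so far must be discarded when
 * the iterator signals a restart, as dma_resv_copy_fences() below does:
 *
 *      struct dma_resv_iter cursor;
 *      struct dma_fence *fence;
 *      unsigned int count = 0;
 *
 *      dma_resv_iter_begin(&cursor, obj, true);
 *      dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *              if (dma_resv_iter_is_restarted(&cursor))
 *                      count = 0;
 *              ++count;
 *      }
 *      dma_resv_iter_end(&cursor);
 */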

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
        struct dma_fence *fence;

        dma_resv_assert_held(cursor->obj);

        cursor->index = 0;
        if (cursor->all_fences)
                cursor->fences = dma_resv_shared_list(cursor->obj);
        else
                cursor->fences = NULL;

        fence = dma_resv_excl_fence(cursor->obj);
        if (!fence)
                fence = dma_resv_iter_next(cursor);

        cursor->is_restarted = true;
        return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
        unsigned int idx;

        dma_resv_assert_held(cursor->obj);

        cursor->is_restarted = false;
        if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
                return NULL;

        idx = cursor->index++;
        return rcu_dereference_protected(cursor->fences->shared[idx],
                                         dma_resv_held(cursor->obj));
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
        struct dma_resv_iter cursor;
        struct dma_resv_list *list;
        struct dma_fence *f, *excl;

        dma_resv_assert_held(dst);

        list = NULL;
        excl = NULL;

        dma_resv_iter_begin(&cursor, src, true);
        dma_resv_for_each_fence_unlocked(&cursor, f) {

                if (dma_resv_iter_is_restarted(&cursor)) {
                        dma_resv_list_free(list);
                        dma_fence_put(excl);

                        if (cursor.shared_count) {
                                list = dma_resv_list_alloc(cursor.shared_count);
                                if (!list) {
                                        dma_resv_iter_end(&cursor);
                                        return -ENOMEM;
                                }

                                list->shared_count = 0;

                        } else {
                                list = NULL;
                        }
                        excl = NULL;
                }

                dma_fence_get(f);
                if (dma_resv_iter_is_exclusive(&cursor))
                        excl = f;
                else
                        RCU_INIT_POINTER(list->shared[list->shared_count++], f);
        }
        dma_resv_iter_end(&cursor);

        write_seqcount_begin(&dst->seq);
        excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
        list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
        write_seqcount_end(&dst->seq);

        dma_resv_list_free(list);
        dma_fence_put(excl);

        return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without the update side lock held
 * @obj: the reservation object
 * @fence_excl: the returned exclusive fence (or NULL)
 * @shared_count: the number of shared fences returned
 * @shared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
                        unsigned int *shared_count, struct dma_fence ***shared)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        *shared_count = 0;
        *shared = NULL;

        if (fence_excl)
                *fence_excl = NULL;

        dma_resv_iter_begin(&cursor, obj, true);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {

                if (dma_resv_iter_is_restarted(&cursor)) {
                        unsigned int count;

                        while (*shared_count)
                                dma_fence_put((*shared)[--(*shared_count)]);

                        if (fence_excl)
                                dma_fence_put(*fence_excl);

                        count = cursor.shared_count;
                        count += fence_excl ? 0 : 1;

                        /* Eventually re-allocate the array */
                        *shared = krealloc_array(*shared, count,
                                                 sizeof(void *),
                                                 GFP_KERNEL);
                        if (count && !*shared) {
                                dma_resv_iter_end(&cursor);
                                return -ENOMEM;
                        }
                }

                dma_fence_get(fence);
                if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
                        *fence_excl = fence;
                else
                        (*shared)[(*shared_count)++] = fence;
        }
        dma_resv_iter_end(&cursor);

        return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
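
/*
 * Illustrative sketch of the caller-side cleanup contract for
 * dma_resv_get_fences() (hypothetical caller, error handling trimmed): the
 * shared array and every returned fence reference belong to the caller.
 *
 *      struct dma_fence *excl, **shared;
 *      unsigned int count, i;
 *
 *      ret = dma_resv_get_fences(obj, &excl, &count, &shared);
 *      if (!ret) {
 *              for (i = 0; i < count; ++i)
 *                      dma_fence_put(shared[i]);
 *              kfree(shared);
 *              dma_fence_put(excl);
 *      }
 */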

/**
 * dma_resv_wait_timeout - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
                           unsigned long timeout)
{
        long ret = timeout ? timeout : 1;
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, obj, wait_all);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {

                ret = dma_fence_wait_timeout(fence, intr, ret);
                if (ret <= 0) {
                        dma_resv_iter_end(&cursor);
                        return ret;
                }
        }
        dma_resv_iter_end(&cursor);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
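
/*
 * Illustrative call (hypothetical caller): wait interruptibly on all fences
 * with a 100 ms timeout expressed in jiffies. A return of 0 means the wait
 * timed out, a negative value means it was interrupted:
 *
 *      long ret = dma_resv_wait_timeout(obj, true, true,
 *                                       msecs_to_jiffies(100));
 */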

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, obj, test_all);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                dma_resv_iter_end(&cursor);
                return false;
        }
        dma_resv_iter_end(&cursor);
        return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);

/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_for_each_fence(&cursor, obj, true, fence) {
                seq_printf(seq, "\t%s fence:",
                           dma_resv_iter_is_exclusive(&cursor) ?
                           "Exclusive" : "Shared");
                dma_fence_describe(fence, seq);
        }
}
EXPORT_SYMBOL_GPL(dma_resv_describe);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
        struct mm_struct *mm = mm_alloc();
        struct ww_acquire_ctx ctx;
        struct dma_resv obj;
        struct address_space mapping;
        int ret;

        if (!mm)
                return -ENOMEM;

        dma_resv_init(&obj);
        address_space_init_once(&mapping);

        mmap_read_lock(mm);
        ww_acquire_init(&ctx, &reservation_ww_class);
        ret = dma_resv_lock(&obj, &ctx);
        if (ret == -EDEADLK)
                dma_resv_lock_slow(&obj, &ctx);
        fs_reclaim_acquire(GFP_KERNEL);
        /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
        i_mmap_lock_write(&mapping);
        i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        __dma_fence_might_wait();
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
        __dma_fence_might_wait();
#endif
        fs_reclaim_release(GFP_KERNEL);
        ww_mutex_unlock(&obj.lock);
        ww_acquire_fini(&ctx);
        mmap_read_unlock(mm);

        mmput(mm);

        return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif