// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-array: aggregate fences to be waited on together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <gustavo@padovan.org>
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence-array.h>

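/*
 * Sentinel preset into the base fence's error field: a positive value, so
 * it can never collide with a real -errno reported by a member fence. It
 * is replaced by the first real error, or cleared if the array signals
 * without one.
 */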
#define PENDING_ERROR 1

static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
					      int error)
{
	/*
	 * Propagate the first error reported by any of our fences, but only
	 * before we ourselves are signaled.
	 */
	if (error)
		cmpxchg(&array->base.error, PENDING_ERROR, error);
}

static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
{
	/* Reset the error to 0 if it is still the PENDING_ERROR sentinel. */
	cmpxchg(&array->base.error, PENDING_ERROR, 0);
}

static void irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

	dma_fence_array_clear_pending_error(array);

	dma_fence_signal(&array->base);
	dma_fence_put(&array->base);
}

static void dma_fence_array_cb_func(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
		container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *array = array_cb->array;

	dma_fence_array_set_pending_error(array, f->error);

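	/*
	 * The last callback to fire passes its array reference on to the
	 * irq_work, which drops it after signaling; every other callback
	 * drops its own reference here.
	 */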
	if (atomic_dec_and_test(&array->num_pending))
		irq_work_queue(&array->work);
	else
		dma_fence_put(&array->base);
}

static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&array[1]);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;
		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		dma_fence_get(&array->base);
		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
					   dma_fence_array_cb_func)) {
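			/*
			 * dma_fence_add_callback() only fails for a fence
			 * that has already signaled, so fold its error in
			 * and drop the reference taken above.
			 */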
			int error = array->fences[i]->error;

			dma_fence_array_set_pending_error(array, error);
			dma_fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending)) {
				dma_fence_array_clear_pending_error(array);
				return false;
			}
		}
	}

	return true;
}

static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);

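	/*
	 * With signal_on_any the counter starts at 1 but every member fence
	 * still decrements it, so it can go negative; test for <= 0 rather
	 * than == 0.
	 */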
	return atomic_read(&array->num_pending) <= 0;
}

static void dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i)
		dma_fence_put(array->fences[i]);

	kfree(array->fences);
	dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);

/**
 * dma_fence_array_create - Create a custom fence array
 * @num_fences:		[in]	number of fences to add in the array
 * @fences:		[in]	array containing the fences
 * @context:		[in]	fence context to use
 * @seqno:		[in]	sequence number to use
 * @signal_on_any:	[in]	signal on any fence in the array
 *
 * Allocate a dma_fence_array object and initialize the base fence with
 * dma_fence_init(). Returns NULL on error.
 *
 * The caller should allocate the fences array with num_fences entries
 * and fill it with the fences it wants to add to the object. Ownership of
 * this array is taken and dma_fence_put() is used on each fence on release.
 *
 * If @signal_on_any is true the fence array signals if any fence in the
 * array signals, otherwise it signals when all fences in the array signal.
 */
struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any)
{
	struct dma_fence_array *array;
	size_t size = sizeof(*array);

	/* Allocate the callback structures behind the array. */
	size += num_fences * sizeof(struct dma_fence_array_cb);
	array = kzalloc(size, GFP_KERNEL);
	if (!array)
		return NULL;

	spin_lock_init(&array->lock);
	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
		       context, seqno);
	init_irq_work(&array->work, irq_dma_fence_array_work);

	array->num_fences = num_fences;
	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
	array->fences = fences;

	array->base.error = PENDING_ERROR;

	return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
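
/*
 * Usage sketch (illustrative only, not part of this file): combine two
 * fences into an array that signals once both have signaled. The function
 * name and the single-use context are assumptions made for the example;
 * dma_fence_context_alloc() and dma_fence_get() are the regular core
 * helpers.
 *
 *	static struct dma_fence *merge_two(struct dma_fence *a,
 *					   struct dma_fence *b)
 *	{
 *		struct dma_fence_array *array;
 *		struct dma_fence **fences;
 *
 *		fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
 *		if (!fences)
 *			return NULL;
 *
 *		// The array takes ownership, so take a reference per slot.
 *		fences[0] = dma_fence_get(a);
 *		fences[1] = dma_fence_get(b);
 *
 *		// signal_on_any=false: signal only when both have signaled.
 *		array = dma_fence_array_create(2, fences,
 *					       dma_fence_context_alloc(1), 1,
 *					       false);
 *		if (!array) {
 *			dma_fence_put(a);
 *			dma_fence_put(b);
 *			kfree(fences);
 *			return NULL;
 *		}
 *
 *		return &array->base;
 *	}
 */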

/**
 * dma_fence_match_context - Check if all fences are from the given context
 * @fence:		[in]	fence or fence array
 * @context:		[in]	fence context to check all fences against
 *
 * Checks the provided fence or, for a fence array, all fences in the array
 * against the given context. Returns false if any fence is from a different
 * context.
 */
bool dma_fence_match_context(struct dma_fence *fence, u64 context)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

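	/*
	 * to_dma_fence_array() returns NULL for a plain fence; the
	 * dma_fence_is_array() check below keeps that path from ever
	 * dereferencing it.
	 */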
	if (!dma_fence_is_array(fence))
		return fence->context == context;

	for (i = 0; i < array->num_fences; i++) {
		if (array->fences[i]->context != context)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(dma_fence_match_context);