/* SPDX-License-Identifier: MIT */

/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2021 Advanced Micro Devices, Inc.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-resv.h>

#include "selftest.h"

static struct spinlock fence_lock;

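/* Stub fence ops: both name callbacks just return a static "selftest" string. */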
static const char *fence_name(struct dma_fence *f)
{
	return "selftest";
}

static const struct dma_fence_ops fence_ops = {
	.get_driver_name = fence_name,
	.get_timeline_name = fence_name,
};

static struct dma_fence *alloc_fence(void)
{
	struct dma_fence *f;

	f = kmalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	dma_fence_init(f, &fence_ops, &fence_lock, 0, 0);
	return f;
}

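/* Smoke test: allocate, signal and free a fence, then lock/unlock an empty dma_resv. */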
static int sanitycheck(void *arg)
{
	struct dma_resv resv;
	struct dma_fence *f;
	int r;

	f = alloc_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_signal(f);
	dma_fence_put(f);

	dma_resv_init(&resv);
	r = dma_resv_lock(&resv, NULL);
	if (r)
		pr_err("Resv locking failed\n");
	else
		dma_resv_unlock(&resv);
	dma_resv_fini(&resv);
	return r;
}

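/*
 * Add a single fence as either the exclusive or a shared fence and check that
 * dma_resv_test_signaled() only reports the reservation as signaled once the
 * fence itself has signaled.
 */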
static int test_signaling(void *arg, bool shared)
{
	struct dma_resv resv;
	struct dma_fence *f;
	int r;

	f = alloc_fence();
	if (!f)
		return -ENOMEM;

	dma_resv_init(&resv);
	r = dma_resv_lock(&resv, NULL);
	if (r) {
		pr_err("Resv locking failed\n");
		goto err_free;
	}

	if (shared) {
		r = dma_resv_reserve_shared(&resv, 1);
		if (r) {
			pr_err("Resv shared slot allocation failed\n");
			goto err_unlock;
		}

		dma_resv_add_shared_fence(&resv, f);
	} else {
		dma_resv_add_excl_fence(&resv, f);
	}

	if (dma_resv_test_signaled(&resv, shared)) {
		pr_err("Resv unexpectedly signaled\n");
		r = -EINVAL;
		goto err_unlock;
	}
	dma_fence_signal(f);
	if (!dma_resv_test_signaled(&resv, shared)) {
		pr_err("Resv not reporting signaled\n");
		r = -EINVAL;
		goto err_unlock;
	}
err_unlock:
	dma_resv_unlock(&resv);
err_free:
	dma_resv_fini(&resv);
	dma_fence_put(f);
	return r;
}

static int test_excl_signaling(void *arg)
{
	return test_signaling(arg, false);
}

static int test_shared_signaling(void *arg)
{
	return test_signaling(arg, true);
}

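/*
 * Walk the reservation with the locked dma_resv_for_each_fence() iterator and
 * verify that exactly one fence is returned, that it is the fence we added,
 * and that it is reported with the expected exclusive/shared usage.
 */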
static int test_for_each(void *arg, bool shared)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f, *fence;
	struct dma_resv resv;
	int r;

	f = alloc_fence();
	if (!f)
		return -ENOMEM;

	dma_resv_init(&resv);
	r = dma_resv_lock(&resv, NULL);
	if (r) {
		pr_err("Resv locking failed\n");
		goto err_free;
	}

	if (shared) {
		r = dma_resv_reserve_shared(&resv, 1);
		if (r) {
			pr_err("Resv shared slot allocation failed\n");
			goto err_unlock;
		}

		dma_resv_add_shared_fence(&resv, f);
	} else {
		dma_resv_add_excl_fence(&resv, f);
	}

	r = -ENOENT;
	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
		if (!r) {
			pr_err("More than one fence found\n");
			r = -EINVAL;
			goto err_unlock;
		}
		if (f != fence) {
			pr_err("Unexpected fence\n");
			r = -EINVAL;
			goto err_unlock;
		}
		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
			pr_err("Unexpected fence usage\n");
			r = -EINVAL;
			goto err_unlock;
		}
		r = 0;
	}
	if (r) {
		pr_err("No fence found\n");
		goto err_unlock;
	}
	dma_fence_signal(f);
err_unlock:
	dma_resv_unlock(&resv);
err_free:
	dma_resv_fini(&resv);
	dma_fence_put(f);
	return r;
}

static int test_excl_for_each(void *arg)
{
	return test_for_each(arg, false);
}

static int test_shared_for_each(void *arg)
{
	return test_for_each(arg, true);
}

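/*
 * Same check as test_for_each(), but using the unlocked iterator. The first
 * pass also forces a sequence mismatch so that the iterator restart handling
 * gets exercised as well.
 */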
static int test_for_each_unlocked(void *arg, bool shared)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f, *fence;
	struct dma_resv resv;
	int r;

	f = alloc_fence();
	if (!f)
		return -ENOMEM;

	dma_resv_init(&resv);
	r = dma_resv_lock(&resv, NULL);
	if (r) {
		pr_err("Resv locking failed\n");
		goto err_free;
	}

	if (shared) {
		r = dma_resv_reserve_shared(&resv, 1);
		if (r) {
			pr_err("Resv shared slot allocation failed\n");
			dma_resv_unlock(&resv);
			goto err_free;
		}

		dma_resv_add_shared_fence(&resv, f);
	} else {
		dma_resv_add_excl_fence(&resv, f);
	}
	dma_resv_unlock(&resv);

	r = -ENOENT;
	dma_resv_iter_begin(&cursor, &resv, shared);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!r) {
			pr_err("More than one fence found\n");
			r = -EINVAL;
			goto err_iter_end;
		}
		if (!dma_resv_iter_is_restarted(&cursor)) {
			pr_err("No restart flag\n");
			goto err_iter_end;
		}
		if (f != fence) {
			pr_err("Unexpected fence\n");
			r = -EINVAL;
			goto err_iter_end;
		}
		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
			pr_err("Unexpected fence usage\n");
			r = -EINVAL;
			goto err_iter_end;
		}

		/* We use r as state: -ENOENT = first pass, -EINVAL = restarted, 0 = done */
		if (r == -ENOENT) {
			r = -EINVAL;
			/* Force a sequence mismatch, which should trigger a restart */
			cursor.seq--;
		} else if (r == -EINVAL) {
			r = 0;
		}
	}
	if (r)
		pr_err("No fence found\n");
err_iter_end:
	dma_resv_iter_end(&cursor);
	dma_fence_signal(f);
err_free:
	dma_resv_fini(&resv);
	dma_fence_put(f);
	return r;
}

static int test_excl_for_each_unlocked(void *arg)
{
	return test_for_each_unlocked(arg, false);
}

static int test_shared_for_each_unlocked(void *arg)
{
	return test_for_each_unlocked(arg, true);
}

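/*
 * Add a single fence and check that dma_resv_get_fences() hands back exactly
 * that fence, in the exclusive or shared slot matching how it was added.
 */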
static int test_get_fences(void *arg, bool shared)
{
	struct dma_fence *f, *excl = NULL, **fences = NULL;
	struct dma_resv resv;
	int r, i;

	f = alloc_fence();
	if (!f)
		return -ENOMEM;

	dma_resv_init(&resv);
	r = dma_resv_lock(&resv, NULL);
	if (r) {
		pr_err("Resv locking failed\n");
		goto err_resv;
	}

	if (shared) {
		r = dma_resv_reserve_shared(&resv, 1);
		if (r) {
			pr_err("Resv shared slot allocation failed\n");
			dma_resv_unlock(&resv);
			goto err_resv;
		}

		dma_resv_add_shared_fence(&resv, f);
	} else {
		dma_resv_add_excl_fence(&resv, f);
	}
	dma_resv_unlock(&resv);

	r = dma_resv_get_fences(&resv, &excl, &i, &fences);
	if (r) {
		pr_err("get_fences failed\n");
		goto err_free;
	}

	if (shared) {
		if (excl != NULL) {
			pr_err("get_fences returned unexpected excl fence\n");
			r = -EINVAL;
			goto err_free;
		}
		if (i != 1 || fences[0] != f) {
			pr_err("get_fences returned unexpected shared fence\n");
			r = -EINVAL;
			goto err_free;
		}
	} else {
		if (excl != f) {
			pr_err("get_fences returned unexpected excl fence\n");
			r = -EINVAL;
			goto err_free;
		}
		if (i != 0) {
			pr_err("get_fences returned unexpected shared fence\n");
			r = -EINVAL;
			goto err_free;
		}
	}

	dma_fence_signal(f);
err_free:
	dma_fence_put(excl);
	while (i--)
		dma_fence_put(fences[i]);
	kfree(fences);
err_resv:
	dma_resv_fini(&resv);
	dma_fence_put(f);
	return r;
}

static int test_excl_get_fences(void *arg)
{
	return test_get_fences(arg, false);
}

static int test_shared_get_fences(void *arg)
{
	return test_get_fences(arg, true);
}

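/* Entry point called by the selftest framework; runs all dma_resv subtests. */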
int dma_resv(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(test_excl_signaling),
		SUBTEST(test_shared_signaling),
		SUBTEST(test_excl_for_each),
		SUBTEST(test_shared_for_each),
		SUBTEST(test_excl_for_each_unlocked),
		SUBTEST(test_shared_for_each_unlocked),
		SUBTEST(test_excl_get_fences),
		SUBTEST(test_shared_get_fences),
	};

	spin_lock_init(&fence_lock);
	return subtests(tests, NULL);
}