// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/genalloc.h>

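/*
 * cb_map_mem() - map a CB to the device MMU.
 *
 * Allocates page-size blocks of device virtual addresses from the context's
 * CB VA pool, maps each block to the CB's DMA pages through the device MMU
 * and marks the CB as MMU-mapped. On failure, every mapping and VA block
 * that was already created is rolled back.
 */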
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {

		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

err_va_umap:
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

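/*
 * cb_unmap_mem() - unmap a previously MMU-mapped CB.
 *
 * Unmaps every VA block of the CB, invalidates the MMU cache and returns the
 * device virtual addresses to the context's CB VA pool.
 */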
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				(void *) (uintptr_t) cb->kernel_address,
				cb->bus_address);

	kfree(cb);
}

static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(hdev, cb);
}

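/*
 * hl_cb_alloc() - allocate the backing memory for a new CB object.
 *
 * Internal CBs are carved out of the device's internal CB pool, while regular
 * CBs are allocated as DMA-coherent memory (atomically when allocating on
 * behalf of the kernel context).
 */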
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	else
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of DMA memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = (u64) (uintptr_t) p;
	cb->size = cb_size;

	return cb;
}

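/*
 * hl_cb_create() - create a new CB and return an opaque handle for it.
 *
 * Either reuses a CB from the kernel CB pool or allocates a new one, takes a
 * reference on the owning context, optionally maps the CB to the device MMU,
 * and registers the CB in the manager's IDR. The returned handle is the IDR
 * value ORed with HL_MMAP_TYPE_CB and shifted left by PAGE_SHIFT, so it can
 * be used directly as an mmap offset.
 */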
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = ctx->asid;

	/*
	 * Can't use the generic check here because of the special case where
	 * a CB is created as part of the reset process.
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx = ctx;
	hl_ctx_get(hdev, cb->ctx);

	if (map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(ctx, cb);
		if (rc)
			goto release_cb;
	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto unmap_mem;
	}

	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * The IDR value is 32-bit, so it is safe to OR it with a mask that
	 * sits above 32 bits.
	 */
	*handle = cb->id | HL_MMAP_TYPE_CB;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

unmap_mem:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was given to the user for mmap, so shift it back to the
	 * value originally returned by the IDR.
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	u64 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be less than %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

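/*
 * hl_cb_mmap() - map a CB to user-space.
 *
 * Looks up the CB by the handle that was encoded in the mmap offset, verifies
 * the requested size and user address, marks the CB as mmapped and delegates
 * the actual mapping to the ASIC-specific cb_mmap callback. The CB reference
 * taken here is released in cb_vm_close() when the VMA is torn down.
 */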
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	u32 handle, user_cb_size;
	int rc;

	/* We use the page offset to hold the IDR handle, so clear it before
	 * doing the mmap itself
	 */
	handle = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	/* reference was taken here */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n",
			vma->vm_start);

		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB is already mmapped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	rc = hdev->asic_funcs->cb_mmap(hdev, vma, (void *) cb->kernel_address,
					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
}

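/*
 * hl_cb_kernel_create() - create a CB on behalf of the kernel driver.
 *
 * Creates a CB through the kernel CB manager and the kernel context and
 * returns a referenced hl_cb object, or NULL on failure.
 */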
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
	if (!cb)
		goto destroy_cb;

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

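/*
 * hl_cb_va_pool_init() - create the per-context VA pool used for CB mapping.
 *
 * Creates a gen_pool whose minimum allocation order matches the PMMU page
 * size and seeds it with the device VA range reserved for CB mapping. Does
 * nothing if the ASIC doesn't support CB mapping.
 */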
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}