// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

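/**
 * cb_map_mem() - map a CB into the device MMU.
 * @ctx: context that owns the CB.
 * @cb: the command buffer to map.
 *
 * Allocate device virtual addresses for the CB from the context's CB VA pool
 * and map them, page by page, to the CB's DMA (bus) addresses. On failure,
 * any partial mappings and VA blocks are released.
 *
 * Return: 0 on success, negative error code on failure.
 */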
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {

		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
				va_block->size, list_is_last(&va_block->node,
						&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

err_va_umap:
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

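/**
 * cb_unmap_mem() - reverse cb_map_mem().
 * @ctx: context that owns the CB.
 * @cb: the command buffer to unmap.
 *
 * Unmap the CB's pages from the device MMU, invalidate the MMU cache and
 * return the virtual addresses to the context's CB VA pool.
 */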
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

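/**
 * cb_fini() - release a CB's backing memory and the CB object itself.
 * @hdev: habanalabs device structure.
 * @cb: the command buffer to release.
 *
 * Internal CBs are returned to the device's internal CB pool, other CBs free
 * their DMA coherent allocation.
 */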
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t)cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				cb->kernel_address, cb->bus_address);

	kfree(cb);
}

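/**
 * cb_do_release() - recycle or destroy a CB.
 * @hdev: habanalabs device structure.
 * @cb: the command buffer.
 *
 * CBs that were taken from the device's CB pool go back to the pool, all
 * others are destroyed.
 */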
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

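/**
 * cb_release() - final release of a CB, called when its refcount drops to 0.
 * @ref: kref embedded in the CB.
 *
 * Remove the CB from debugfs, undo the MMU mapping if one exists, drop the
 * context reference and release the CB.
 */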
static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(hdev, cb);
}

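/**
 * hl_cb_alloc() - allocate a new CB object and its backing memory.
 * @hdev: habanalabs device structure.
 * @cb_size: requested size in bytes.
 * @ctx_id: ASID of the owning context.
 * @internal_cb: true to carve the CB from the device's internal CB pool.
 *
 * Return: pointer to the new CB on success, NULL on failure.
 */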
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb = NULL;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

	if (!cb)
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
		if (!p)
			p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
					cb_size, &cb->bus_address, GFP_KERNEL);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of DMA memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}

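/**
 * hl_cb_create() - create a new command buffer and register it with a handle.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager that will track the new CB.
 * @ctx: context that owns the CB.
 * @cb_size: requested size in bytes.
 * @internal_cb: true to allocate from the device's internal CB pool.
 * @map_cb: true to also map the CB in the device MMU (user contexts only).
 * @handle: returned opaque handle, suitable for mmap by the user.
 *
 * Return: 0 on success, negative error code on failure.
 */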
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = ctx->asid;

	/*
	 * Can't use generic function to check this because of special case
	 * where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx = ctx;
	hl_ctx_get(hdev, cb->ctx);

	if (map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(ctx, cb);
		if (rc)
			goto release_cb;
	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto unmap_mem;
	}

	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * idr is 32-bit so we can safely OR it with a mask that is above
	 * 32 bit
	 */
	*handle = cb->id | HL_MMAP_TYPE_CB;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

unmap_mem:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

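/**
 * hl_cb_destroy() - remove a CB handle and drop the reference it holds.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager that tracks the CB.
 * @cb_handle: the handle that was returned by hl_cb_create().
 *
 * Return: 0 on success, -EINVAL if the handle is unknown.
 */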
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was given to the user for mmap, so shift it back to
	 * the value that was originally allocated by the idr module
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

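/**
 * hl_cb_info() - return the usage count of a CB.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager that tracks the CB.
 * @cb_handle: the handle that was returned by hl_cb_create().
 * @usage_cnt: returned number of command submissions currently using the CB.
 *
 * Return: 0 on success, -EINVAL if the handle is unknown.
 */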
static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u64 cb_handle, u32 *usage_cnt)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/* The CB handle was given to user to do mmap, so need to shift it back
	 * to the value which was allocated by the IDR module.
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB info failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
		goto out;
	}

	*usage_cnt = atomic_read(&cb->cs_cnt);

out:
	spin_unlock(&mgr->cb_lock);
	return rc;
}

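/**
 * hl_cb_ioctl() - entry point for the CB IOCTL (create/destroy/info).
 * @hpriv: per-file private data of the caller.
 * @data: pointer to the IOCTL arguments (union hl_cb_args).
 *
 * Return: 0 on success, negative error code on failure.
 */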
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;
	u64 handle = 0;
	u32 usage_cnt = 0;
	int rc;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be less than %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	case HL_CB_OP_INFO:
		rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
				&usage_cnt);
		memset(args, 0, sizeof(*args));
		args->out.usage_cnt = usage_cnt;
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

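/**
 * cb_vm_close() - VMA close callback for a user-mapped CB.
 * @vma: the VMA being closed.
 *
 * Track partial unmaps; only when the entire mapping is gone, clear the CB's
 * mmap state and drop the reference that was taken at mmap time.
 */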
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

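/**
 * hl_cb_mmap() - map a CB into the user's address space.
 * @hpriv: per-file private data of the caller.
 * @vma: the VMA describing the requested user mapping.
 *
 * The CB handle is passed in the VMA's page offset. The mapping must cover
 * the whole (page-aligned) CB, and a CB can only be mmapped once at a time.
 *
 * Return: 0 on success, negative error code on failure.
 */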
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	u32 handle, user_cb_size;
	int rc;

	/* We use the page offset to hold the idr and thus we need to clear
	 * it before doing the mmap itself
	 */
	handle = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	/* reference was taken here */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n",
			vma->vm_start);

		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB already mmaped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;
	vma->vm_pgoff = handle;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

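/**
 * hl_cb_get() - look up a CB by handle and take a reference on it.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager that tracks the CB.
 * @handle: the CB's IDR handle (the user handle shifted down by PAGE_SHIFT).
 *
 * Return: pointer to the CB on success, NULL if the handle is unknown.
 */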
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

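/**
 * hl_cb_mgr_fini() - tear down a CB manager.
 * @hdev: habanalabs device structure.
 * @mgr: the CB manager to destroy.
 *
 * Drop the handle reference of every CB that is still registered, warn about
 * CBs that remain alive after that, and destroy the IDR.
 */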
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
}

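/**
 * hl_cb_kernel_create() - create a CB on behalf of the kernel driver.
 * @hdev: habanalabs device structure.
 * @cb_size: requested size in bytes.
 * @internal_cb: true to allocate from the device's internal CB pool.
 *
 * Create the CB in the kernel context's CB manager and return it with a
 * reference already taken.
 *
 * Return: pointer to the CB on success, NULL on failure.
 */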
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here */
	if (!cb) {
		dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
				(u32) cb_handle);
		goto destroy_cb;
	}

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

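/**
 * hl_cb_pool_init() - pre-allocate the device's CB pool.
 * @hdev: habanalabs device structure.
 *
 * Allocate cb_pool_cb_cnt CBs of cb_pool_cb_size bytes each and add them to
 * the device's CB pool, so kernel-context CB creation can reuse them instead
 * of allocating new DMA memory.
 *
 * Return: 0 on success, -ENOMEM if an allocation fails.
 */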
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

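/**
 * hl_cb_va_pool_init() - create the per-context VA pool used for CB mapping.
 * @ctx: the context.
 *
 * If the device supports CB mapping, create a gen_pool over the device VA
 * range reserved for CBs, so cb_map_mem() can allocate virtual addresses
 * from it.
 *
 * Return: 0 on success, negative error code on failure.
 */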
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}