// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/genalloc.h>

#define HL_MMU_DEBUG	0

/*
 * The VA ranges in the context object contain a list of the available chunks
 * of device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk covering all of its
 * available virtual range, which is half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as the result and a remainder to stay in the
 * list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. The chunk is added to the list and, if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be
 * created), the chunks are merged.
 *
 * On finish, the list is checked to contain only one chunk covering the
 * entire relevant virtual range (which is half of the device's total virtual
 * range). If not (meaning not all mappings were unmapped), a warning is
 * printed.
 */

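/*
 * For example (illustrative numbers only): suppose a range initially holds
 * the single chunk [0x1000000, 0x1ffffff]. Mapping 0x200000 bytes hands out
 * the block starting at 0x1000000 and leaves the remainder
 * [0x1200000, 0x1ffffff] in the list. When that block is later unmapped,
 * [0x1000000, 0x11fffff] is re-inserted and, since its end borders the
 * remainder's start, the two chunks are merged back into the original single
 * chunk.
 */
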
/*
 * alloc_device_memory - allocate device memory
 *
 * @ctx                 : current context
 * @args                : host parameters containing the requested size
 * @ret_handle          : result handle
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 2MB pages
 * - Return unique handle
 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
				u32 *ret_handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u64 paddr = 0, total_size, num_pgs, i;
	u32 num_curr_pgs, page_size, page_shift;
	int handle, rc;
	bool contiguous;

	num_curr_pgs = 0;
	page_size = hdev->asic_prop.dram_page_size;
	page_shift = __ffs(page_size);
	num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
	total_size = num_pgs << page_shift;

	if (!total_size) {
		dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
		return -EINVAL;
	}

	contiguous = args->flags & HL_MEM_CONTIGUOUS;

	if (contiguous) {
		paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
		if (!paddr) {
			dev_err(hdev->dev,
				"failed to allocate %llu contiguous pages with total size of %llu\n",
				num_pgs, total_size);
			return -ENOMEM;
		}
	}

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack) {
		rc = -ENOMEM;
		goto pages_pack_err;
	}

	phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
	phys_pg_pack->asid = ctx->asid;
	phys_pg_pack->npages = num_pgs;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_size;
	phys_pg_pack->flags = args->flags;
	phys_pg_pack->contiguous = contiguous;

	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		rc = -ENOMEM;
		goto pages_arr_err;
	}

	if (phys_pg_pack->contiguous) {
		for (i = 0 ; i < num_pgs ; i++)
			phys_pg_pack->pages[i] = paddr + i * page_size;
	} else {
		for (i = 0 ; i < num_pgs ; i++) {
			phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
							vm->dram_pg_pool,
							page_size);
			if (!phys_pg_pack->pages[i]) {
				dev_err(hdev->dev,
					"Failed to allocate device memory (out of memory)\n");
				rc = -ENOMEM;
				goto page_err;
			}

			num_curr_pgs++;
		}
	}

	spin_lock(&vm->idr_lock);
	handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
				GFP_ATOMIC);
	spin_unlock(&vm->idr_lock);

	if (handle < 0) {
		dev_err(hdev->dev, "Failed to get handle for page\n");
		rc = -EFAULT;
		goto idr_err;
	}

	for (i = 0 ; i < num_pgs ; i++)
		kref_get(&vm->dram_pg_pool_refcount);

	phys_pg_pack->handle = handle;

	atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
	atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

	*ret_handle = handle;

	return 0;

idr_err:
page_err:
	if (!phys_pg_pack->contiguous)
		for (i = 0 ; i < num_curr_pgs ; i++)
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
					page_size);

	kvfree(phys_pg_pack->pages);
pages_arr_err:
	kfree(phys_pg_pack);
pages_pack_err:
	if (contiguous)
		gen_pool_free(vm->dram_pg_pool, paddr, total_size);

	return rc;
}

/*
 * dma_map_host_va - DMA mapping of the given host virtual address.
 * @hdev: habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @p_userptr: pointer to result userptr structure
 *
 * This function does the following:
 * - Allocate userptr structure
 * - Pin the given host memory using the userptr structure
 * - Perform DMA mapping to have the DMA addresses of the pages
 */
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
				struct hl_userptr **p_userptr)
{
	struct hl_userptr *userptr;
	int rc;

	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
	if (!userptr) {
		rc = -ENOMEM;
		goto userptr_err;
	}

	rc = hl_pin_host_memory(hdev, addr, size, userptr);
	if (rc) {
		dev_err(hdev->dev, "Failed to pin host memory\n");
		goto pin_err;
	}

	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
					userptr->sgt->nents, DMA_BIDIRECTIONAL);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto dma_map_err;
	}

	userptr->dma_mapped = true;
	userptr->dir = DMA_BIDIRECTIONAL;
	userptr->vm_type = VM_TYPE_USERPTR;

	*p_userptr = userptr;

	return 0;

dma_map_err:
	hl_unpin_host_memory(hdev, userptr);
pin_err:
	kfree(userptr);
userptr_err:

	return rc;
}

/*
 * dma_unmap_host_va - DMA unmapping of the given host virtual address.
 * @hdev: habanalabs device structure
 * @userptr: userptr to free
 *
 * This function does the following:
 * - Unpins the physical pages
 * - Frees the userptr structure
 */
static void dma_unmap_host_va(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	hl_unpin_host_memory(hdev, userptr);
	kfree(userptr);
}

/*
 * dram_pg_pool_do_release - free DRAM pages pool
 *
 * @ref                 : pointer to reference object
 *
 * This function does the following:
 * - Frees the idr structure of physical pages handles
 * - Frees the generic pool of DRAM physical pages
 */
static void dram_pg_pool_do_release(struct kref *ref)
{
	struct hl_vm *vm = container_of(ref, struct hl_vm,
			dram_pg_pool_refcount);

	/*
	 * free the idr here as only here we know for sure that there are no
	 * allocated physical pages and hence there are no handles in use
	 */
	idr_destroy(&vm->phys_pg_pack_handles);
	gen_pool_destroy(vm->dram_pg_pool);
}

/*
 * free_phys_pg_pack - free physical page pack
 * @hdev: habanalabs device structure
 * @phys_pg_pack: physical page pack to free
 *
 * This function does the following:
 * - For DRAM memory only, iterate over the pack and free each physical block
 *   structure by returning it to the general pool
 * - Free the hl_vm_phys_pg_pack structure
 */
static void free_phys_pg_pack(struct hl_device *hdev,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_vm *vm = &hdev->vm;
	u64 i;

	if (!phys_pg_pack->created_from_userptr) {
		if (phys_pg_pack->contiguous) {
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
					phys_pg_pack->total_size);

			for (i = 0; i < phys_pg_pack->npages ; i++)
				kref_put(&vm->dram_pg_pool_refcount,
						dram_pg_pool_do_release);
		} else {
			for (i = 0 ; i < phys_pg_pack->npages ; i++) {
				gen_pool_free(vm->dram_pg_pool,
						phys_pg_pack->pages[i],
						phys_pg_pack->page_size);
				kref_put(&vm->dram_pg_pool_refcount,
						dram_pg_pool_do_release);
			}
		}
	}

	kvfree(phys_pg_pack->pages);
	kfree(phys_pg_pack);
}

/*
 * free_device_memory - free device memory
 *
 * @ctx                 : current context
 * @handle              : handle of the memory chunk to free
 *
 * This function does the following:
 * - Free the device memory related to the given handle
 */
static int free_device_memory(struct hl_ctx *ctx, u32 handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;

	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (phys_pg_pack) {
		if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
			dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
					handle);
			spin_unlock(&vm->idr_lock);
			return -EINVAL;
		}

		/*
		 * must remove from idr before the freeing of the physical
		 * pages as the refcount of the pool is also the trigger of the
		 * idr destroy
		 */
		idr_remove(&vm->phys_pg_pack_handles, handle);
		spin_unlock(&vm->idr_lock);

		atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
		atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

		free_phys_pg_pack(hdev, phys_pg_pack);
	} else {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev,
			"free device memory failed, no match for handle %u\n",
			handle);
		return -EINVAL;
	}

	return 0;
}

/*
 * clear_va_list_locked - free virtual addresses list
 *
 * @hdev                : habanalabs device structure
 * @va_list             : list of virtual addresses to free
 *
 * This function does the following:
 * - Iterate over the list and free each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void clear_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
	struct hl_vm_va_block *va_block, *tmp;

	list_for_each_entry_safe(va_block, tmp, va_list, node) {
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/*
 * print_va_list_locked - print virtual addresses list
 *
 * @hdev                : habanalabs device structure
 * @va_list             : list of virtual addresses to print
 *
 * This function does the following:
 * - Iterate over the list and print each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void print_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
#if HL_MMU_DEBUG
	struct hl_vm_va_block *va_block;

	dev_dbg(hdev->dev, "print va list:\n");

	list_for_each_entry(va_block, va_list, node)
		dev_dbg(hdev->dev,
			"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
			va_block->start, va_block->end, va_block->size);
#endif
}

/*
 * merge_va_blocks_locked - merge a virtual block if possible
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_list             : pointer to the virtual addresses block list
 * @va_block            : virtual block to merge with adjacent blocks
 *
 * This function does the following:
 * - Merge the given block with the adjacent blocks if their virtual ranges
 *   create a contiguous virtual range
 *
 * This function should be called only when va_list lock is taken
 */
static void merge_va_blocks_locked(struct hl_device *hdev,
		struct list_head *va_list, struct hl_vm_va_block *va_block)
{
	struct hl_vm_va_block *prev, *next;

	prev = list_prev_entry(va_block, node);
	if (&prev->node != va_list && prev->end + 1 == va_block->start) {
		prev->end = va_block->end;
		prev->size = prev->end - prev->start;
		list_del(&va_block->node);
		kfree(va_block);
		va_block = prev;
	}

	next = list_next_entry(va_block, node);
	if (&next->node != va_list && va_block->end + 1 == next->start) {
		next->start = va_block->start;
		next->size = next->end - next->start;
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/*
 * add_va_block_locked - add a virtual block to the virtual addresses list
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_list             : pointer to the virtual addresses block list
 * @start               : start virtual address
 * @end                 : end virtual address
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other
 *   blocks if a contiguous virtual block can be created
 *
 * This function should be called only when va_list lock is taken
 */
static int add_va_block_locked(struct hl_device *hdev,
		struct list_head *va_list, u64 start, u64 end)
{
	struct hl_vm_va_block *va_block, *res = NULL;
	u64 size = end - start;

	print_va_list_locked(hdev, va_list);

	list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove when the code matures */
		if (hl_mem_area_crosses_range(start, size, va_block->start,
				va_block->end)) {
			dev_err(hdev->dev,
				"block crossing ranges at start 0x%llx, end 0x%llx\n",
				va_block->start, va_block->end);
			return -EINVAL;
		}

		if (va_block->end < start)
			res = va_block;
	}

	va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
	if (!va_block)
		return -ENOMEM;

	va_block->start = start;
	va_block->end = end;
	va_block->size = size;

	if (!res)
		list_add(&va_block->node, va_list);
	else
		list_add(&va_block->node, &res->node);

	merge_va_blocks_locked(hdev, va_list, va_block);

	print_va_list_locked(hdev, va_list);

	return 0;
}

/*
 * add_va_block - wrapper for add_va_block_locked
 *
 * @hdev                : pointer to the habanalabs device structure
 * @va_range            : pointer to the virtual addresses range
 * @start               : start virtual address
 * @end                 : end virtual address
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked
 */
static inline int add_va_block(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
{
	int rc;

	mutex_lock(&va_range->lock);
	rc = add_va_block_locked(hdev, &va_range->list, start, end);
	mutex_unlock(&va_range->lock);

	return rc;
}

/*
 * get_va_block() - get a virtual block for the given size and alignment.
 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the virtual addresses range.
 * @size: requested block size.
 * @hint_addr: hint for requested address by the user.
 * @va_block_align: required alignment of the virtual block start address.
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   given size and alignment.
 * - Reserve the requested block and update the list.
 * - Return the start address of the virtual block.
 */
static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
			u64 size, u64 hint_addr, u32 va_block_align)
{
	struct hl_vm_va_block *va_block, *new_va_block = NULL;
	u64 valid_start, valid_size, prev_start, prev_end, align_mask,
		res_valid_start = 0, res_valid_size = 0;
	bool add_prev = false;

	align_mask = ~((u64)va_block_align - 1);

	/* check if hint_addr is aligned */
	if (hint_addr & (va_block_align - 1))
		hint_addr = 0;

	mutex_lock(&va_range->lock);

	print_va_list_locked(hdev, &va_range->list);

	list_for_each_entry(va_block, &va_range->list, node) {
		/* calc the first possible aligned addr */
		valid_start = va_block->start;

		if (valid_start & (va_block_align - 1)) {
			valid_start &= align_mask;
			valid_start += va_block_align;
			if (valid_start > va_block->end)
				continue;
		}

		valid_size = va_block->end - valid_start;

		if (valid_size >= size &&
			(!new_va_block || valid_size < res_valid_size)) {
			new_va_block = va_block;
			res_valid_start = valid_start;
			res_valid_size = valid_size;
		}

		if (hint_addr && hint_addr >= valid_start &&
				((hint_addr + size) <= va_block->end)) {
			new_va_block = va_block;
			res_valid_start = hint_addr;
			res_valid_size = valid_size;
			break;
		}
	}

	if (!new_va_block) {
		dev_err(hdev->dev, "no available va block for size %llu\n",
				size);
		goto out;
	}

	if (res_valid_start > new_va_block->start) {
		prev_start = new_va_block->start;
		prev_end = res_valid_start - 1;

		new_va_block->start = res_valid_start;
		new_va_block->size = res_valid_size;

		add_prev = true;
	}

	if (new_va_block->size > size) {
		new_va_block->start += size;
		new_va_block->size = new_va_block->end - new_va_block->start;
	} else {
		list_del(&new_va_block->node);
		kfree(new_va_block);
	}

	if (add_prev)
		add_va_block_locked(hdev, &va_range->list, prev_start,
				prev_end);

	print_va_list_locked(hdev, &va_range->list);
out:
	mutex_unlock(&va_range->lock);

	return res_valid_start;
}
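
/*
 * Example of the selection policy above (illustrative sizes): for a 2MB
 * request with free blocks of 3MB and 8MB, the 3MB block is chosen, i.e. the
 * smallest block that still fits. A properly aligned hint address that falls
 * inside one of the free blocks overrides the best-fit choice and the
 * reservation starts at the hint.
 */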

/*
 * get_sg_info - get number of pages and the DMA address from an SG entry
 *
 * @sg                  : the SG entry
 * @dma_addr            : pointer to DMA address to return
 *
 * Calculate the number of consecutive pages described by the SG entry. Take
 * the offset of the address in the first page, add the length to it and round
 * it up to the number of needed pages.
 */
static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
{
	*dma_addr = sg_dma_address(sg);

	return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
			(PAGE_SIZE - 1)) >> PAGE_SHIFT;
}
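
/*
 * For example, assuming 4KB pages: a segment with DMA address 0x1000200 and
 * length 0x2000 spans three pages, since the 0x200 in-page offset plus the
 * 0x2000 length gives 0x2200 bytes, which rounds up to 3 pages.
 */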

/*
 * init_phys_pg_pack_from_userptr - initialize physical page pack from host
 *                                  memory
 * @ctx: current context
 * @userptr: userptr to initialize from
 * @pphys_pg_pack: result pointer
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block
 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
					struct hl_userptr *userptr,
					struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct scatterlist *sg;
	dma_addr_t dma_addr;
	u64 page_mask, total_npages;
	u32 npages, page_size = PAGE_SIZE,
		huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
	bool first = true, is_huge_page_opt = true;
	int rc, i, j;
	u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack)
		return -ENOMEM;

	phys_pg_pack->vm_type = userptr->vm_type;
	phys_pg_pack->created_from_userptr = true;
	phys_pg_pack->asid = ctx->asid;
	atomic_set(&phys_pg_pack->mapping_cnt, 1);

	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes are at least 2MB can we use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
	 * consecutive block.
	 */
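	/*
	 * For instance, assuming 4KB host pages and a 2MB device huge page,
	 * pgs_in_huge_page is 512, so every SG entry must describe a multiple
	 * of 512 pages and start on a 2MB-aligned DMA address for the
	 * optimization to be kept.
	 */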
	total_npages = 0;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		total_npages += npages;

		if ((npages % pgs_in_huge_page) ||
					(dma_addr & (huge_page_size - 1)))
			is_huge_page_opt = false;
	}

	if (is_huge_page_opt) {
		page_size = huge_page_size;
		do_div(total_npages, pgs_in_huge_page);
	}

	page_mask = ~(((u64) page_size) - 1);

	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
						GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		rc = -ENOMEM;
		goto page_pack_arr_mem_err;
	}

	phys_pg_pack->npages = total_npages;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_npages * page_size;

	j = 0;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		/* align down to physical page size and save the offset */
		if (first) {
			first = false;
			phys_pg_pack->offset = dma_addr & (page_size - 1);
			dma_addr &= page_mask;
		}

		while (npages) {
			phys_pg_pack->pages[j++] = dma_addr;
			dma_addr += page_size;

			if (is_huge_page_opt)
				npages -= pgs_in_huge_page;
			else
				npages--;
		}
	}

	*pphys_pg_pack = phys_pg_pack;

	return 0;

page_pack_arr_mem_err:
	kfree(phys_pg_pack);

	return rc;
}

/*
 * map_phys_pg_pack - maps the physical page pack.
 * @ctx: current context
 * @vaddr: start address of the virtual area to map from
 * @phys_pg_pack: the pack of physical pages to map to
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to matching physical chunk
 * - Stores number of successful mappings in the given argument
 * - Returns 0 on success, error code otherwise
 */
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
	u32 page_size = phys_pg_pack->page_size;
	int rc = 0;

	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
		paddr = phys_pg_pack->pages[i];

		rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size,
				(i + 1) == phys_pg_pack->npages);
		if (rc) {
			dev_err(hdev->dev,
				"map failed for handle %u, npages: %llu, mapped: %llu",
				phys_pg_pack->handle, phys_pg_pack->npages,
				mapped_pg_cnt);
			goto err;
		}

		mapped_pg_cnt++;
		next_vaddr += page_size;
	}

	return 0;

err:
	next_vaddr = vaddr;
	for (i = 0 ; i < mapped_pg_cnt ; i++) {
		if (hl_mmu_unmap(ctx, next_vaddr, page_size,
					(i + 1) == mapped_pg_cnt))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
				phys_pg_pack->handle, next_vaddr,
				phys_pg_pack->pages[i], page_size);

		next_vaddr += page_size;
	}

	return rc;
}

/*
 * unmap_phys_pg_pack - unmaps the physical page pack
 * @ctx: current context
 * @vaddr: start address of the virtual area to unmap
 * @phys_pg_pack: the pack of physical pages to unmap
 */
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr, i;
	u32 page_size;

	page_size = phys_pg_pack->page_size;
	next_vaddr = vaddr;

	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
		if (hl_mmu_unmap(ctx, next_vaddr, page_size,
				(i + 1) == phys_pg_pack->npages))
			dev_warn_ratelimited(hdev->dev,
			"unmap failed for vaddr: 0x%llx\n", next_vaddr);

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 */
		if (hdev->pldm)
			usleep_range(500, 1000);
	}
}

static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
				u64 *paddr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u32 handle;

	handle = lower_32_bits(args->map_device.handle);
	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (!phys_pg_pack) {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev, "no match for handle %u\n", handle);
		return -EINVAL;
	}

	*paddr = phys_pg_pack->pages[0];

	spin_unlock(&vm->idr_lock);

	return 0;
}

/*
 * map_device_va - map the given memory
 *
 * @ctx                 : current context
 * @args                : host parameters with handle/host virtual address
 * @device_addr         : pointer to result device virtual address
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block
 */
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
		u64 *device_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_userptr *userptr = NULL;
	struct hl_vm_hash_node *hnode;
	struct hl_va_range *va_range;
	enum vm_type_t *vm_type;
	u64 ret_vaddr, hint_addr;
	u32 handle = 0, va_block_align;
	int rc;
	bool is_userptr = args->flags & HL_MEM_USERPTR;

	/* Assume failure */
	*device_addr = 0;

	if (is_userptr) {
		u64 addr = args->map_host.host_virt_addr,
			size = args->map_host.mem_size;
		u32 page_size = hdev->asic_prop.pmmu.page_size,
			huge_page_size = hdev->asic_prop.pmmu_huge.page_size;

		rc = dma_map_host_va(hdev, addr, size, &userptr);
		if (rc) {
			dev_err(hdev->dev, "failed to get userptr from va\n");
			return rc;
		}

		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
				&phys_pg_pack);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				addr);
			goto init_page_pack_err;
		}

		vm_type = (enum vm_type_t *) userptr;
		hint_addr = args->map_host.hint_addr;
		handle = phys_pg_pack->handle;

		/* get required alignment */
		if (phys_pg_pack->page_size == page_size) {
			va_range = ctx->host_va_range;

			/*
			 * huge page alignment may be needed in case of regular
			 * page mapping, depending on the host VA alignment
			 */
			if (addr & (huge_page_size - 1))
				va_block_align = page_size;
			else
				va_block_align = huge_page_size;
		} else {
			/*
			 * huge page alignment is needed in case of huge page
			 * mapping
			 */
			va_range = ctx->host_huge_va_range;
			va_block_align = huge_page_size;
		}
	} else {
		handle = lower_32_bits(args->map_device.handle);

		spin_lock(&vm->idr_lock);
		phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
		if (!phys_pg_pack) {
			spin_unlock(&vm->idr_lock);
			dev_err(hdev->dev,
				"no match for handle %u\n", handle);
			return -EINVAL;
		}

		/* increment now to avoid freeing device memory while mapping */
		atomic_inc(&phys_pg_pack->mapping_cnt);

		spin_unlock(&vm->idr_lock);

		vm_type = (enum vm_type_t *) phys_pg_pack;

		hint_addr = args->map_device.hint_addr;

		/* DRAM VA alignment is the same as the DRAM page size */
		va_range = ctx->dram_va_range;
		va_block_align = hdev->asic_prop.dmmu.page_size;
	}

	/*
	 * relevant for mapping device physical memory only, as host memory is
	 * implicitly shared
	 */
	if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
			phys_pg_pack->asid != ctx->asid) {
		dev_err(hdev->dev,
			"Failed to map memory, handle %u is not shared\n",
			handle);
		rc = -EPERM;
		goto shared_err;
	}

	hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
	if (!hnode) {
		rc = -ENOMEM;
		goto hnode_err;
	}

	ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
					hint_addr, va_block_align);
	if (!ret_vaddr) {
		dev_err(hdev->dev, "no available va block for handle %u\n",
				handle);
		rc = -ENOMEM;
		goto va_block_err;
	}

	mutex_lock(&ctx->mmu_lock);

	rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
	if (rc) {
		mutex_unlock(&ctx->mmu_lock);
		dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
				handle);
		goto map_err;
	}

	rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);

	mutex_unlock(&ctx->mmu_lock);

	if (rc) {
		dev_err(hdev->dev,
			"mapping handle %u failed due to MMU cache invalidation\n",
			handle);
		goto map_err;
	}

	ret_vaddr += phys_pg_pack->offset;

	hnode->ptr = vm_type;
	hnode->vaddr = ret_vaddr;

	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	*device_addr = ret_vaddr;

	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);

	return 0;

map_err:
	if (add_va_block(hdev, va_range, ret_vaddr,
				ret_vaddr + phys_pg_pack->total_size - 1))
		dev_warn(hdev->dev,
			"release va block failed for handle 0x%x, vaddr: 0x%llx\n",
			handle, ret_vaddr);

va_block_err:
	kfree(hnode);
hnode_err:
shared_err:
	atomic_dec(&phys_pg_pack->mapping_cnt);
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
	if (is_userptr)
		dma_unmap_host_va(hdev, userptr);

	return rc;
}

/*
 * unmap_device_va - unmap the given device virtual address
 *
 * @ctx                 : current context
 * @vaddr               : device virtual address to unmap
 * @ctx_free            : true if in context free flow, false otherwise.
 *
 * This function does the following:
 * - Unmap the physical pages related to the given virtual address
 * - Return the device virtual block to the virtual block list
 */
static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	struct hl_vm_hash_node *hnode = NULL;
	struct hl_userptr *userptr = NULL;
	struct hl_va_range *va_range;
	enum vm_type_t *vm_type;
	bool is_userptr;
	int rc = 0;

	/* protect from double entrance */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
		if (vaddr == hnode->vaddr)
			break;

	if (!hnode) {
		mutex_unlock(&ctx->mem_hash_lock);
		dev_err(hdev->dev,
			"unmap failed, no mem hnode for vaddr 0x%llx\n",
			vaddr);
		return -EINVAL;
	}

	hash_del(&hnode->node);
	mutex_unlock(&ctx->mem_hash_lock);

	vm_type = hnode->ptr;

	if (*vm_type == VM_TYPE_USERPTR) {
		is_userptr = true;
		userptr = hnode->ptr;
		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
							&phys_pg_pack);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				vaddr);
			goto vm_type_err;
		}

		if (phys_pg_pack->page_size ==
					hdev->asic_prop.pmmu.page_size)
			va_range = ctx->host_va_range;
		else
			va_range = ctx->host_huge_va_range;
	} else if (*vm_type == VM_TYPE_PHYS_PACK) {
		is_userptr = false;
		va_range = ctx->dram_va_range;
		phys_pg_pack = hnode->ptr;
	} else {
		dev_warn(hdev->dev,
			"unmap failed, unknown vm desc for vaddr 0x%llx\n",
			vaddr);
		rc = -EFAULT;
		goto vm_type_err;
	}

	if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
		dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
		rc = -EINVAL;
		goto mapping_cnt_err;
	}

	vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);

	mutex_lock(&ctx->mmu_lock);

	unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);

	/*
	 * During context free this function is called in a loop to clean all
	 * the context mappings. Hence the cache invalidation can be called once
	 * at the loop end rather than for each iteration
	 */
	if (!ctx_free)
		rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
								*vm_type);

	mutex_unlock(&ctx->mmu_lock);

	/*
	 * If the context is closing we don't need to check for the MMU cache
	 * invalidation return code and update the VA free list as in this flow
	 * we invalidate the MMU cache outside of this unmap function and the VA
	 * free list will be freed anyway.
	 */
	if (!ctx_free) {
		int tmp_rc;

		if (rc)
			dev_err(hdev->dev,
				"unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
				vaddr);

		tmp_rc = add_va_block(hdev, va_range, vaddr,
					vaddr + phys_pg_pack->total_size - 1);
		if (tmp_rc) {
			dev_warn(hdev->dev,
				"add va block failed for vaddr: 0x%llx\n",
				vaddr);
			if (!rc)
				rc = tmp_rc;
		}
	}

	atomic_dec(&phys_pg_pack->mapping_cnt);
	kfree(hnode);

	if (is_userptr) {
		free_phys_pg_pack(hdev, phys_pg_pack);
		dma_unmap_host_va(hdev, userptr);
	}

	return rc;

mapping_cnt_err:
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
vm_type_err:
	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	return rc;
}

static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 device_addr = 0;
	u32 handle = 0;
	int rc;

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev,
				"alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}

		/* Force contiguous as there are no real MMU
		 * translations to overcome physical memory gaps
		 */
		args->in.flags |= HL_MEM_CONTIGUOUS;
		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		rc = free_device_memory(ctx, args->in.free.handle);
		break;

	case HL_MEM_OP_MAP:
		if (args->in.flags & HL_MEM_USERPTR) {
			device_addr = args->in.map_host.host_virt_addr;
			rc = 0;
		} else {
			rc = get_paddr_from_handle(ctx, &args->in,
							&device_addr);
		}

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;
		break;

	case HL_MEM_OP_UNMAP:
		rc = 0;
		break;

	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -ENOTTY;
		break;
	}

out:
	return rc;
}

int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_mem_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 device_addr = 0;
	u32 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute MEMORY IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	if (!hdev->mmu_enable)
		return mem_ioctl_no_mmu(hpriv, args);

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev,
				"alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}

		/* If DRAM does not support virtual memory the driver won't
		 * handle the allocation/freeing of that memory. However, for
		 * system administration/monitoring purposes, the driver will
		 * keep track of the amount of DRAM memory that is allocated
		 * and freed by the user. Because this code totally relies on
		 * the user's input, the driver can't ensure the validity
		 * of this accounting.
		 */
		if (!hdev->dram_supports_virtual_memory) {
			atomic64_add(args->in.alloc.mem_size,
					&ctx->dram_phys_mem);
			atomic64_add(args->in.alloc.mem_size,
					&hdev->dram_used_mem);

			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
			rc = 0;

			memset(args, 0, sizeof(*args));
			args->out.handle = 0;
			goto out;
		}

		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		/* If DRAM does not support virtual memory the driver won't
		 * handle the allocation/freeing of that memory. However, for
		 * system administration/monitoring purposes, the driver will
		 * keep track of the amount of DRAM memory that is allocated
		 * and freed by the user. Because this code totally relies on
		 * the user's input, the driver can't ensure the validity
		 * of this accounting.
		 */
		if (!hdev->dram_supports_virtual_memory) {
			atomic64_sub(args->in.alloc.mem_size,
					&ctx->dram_phys_mem);
			atomic64_sub(args->in.alloc.mem_size,
					&hdev->dram_used_mem);

			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
			rc = 0;

			goto out;
		}

		rc = free_device_memory(ctx, args->in.free.handle);
		break;

	case HL_MEM_OP_MAP:
		rc = map_device_va(ctx, &args->in, &device_addr);

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;
		break;

	case HL_MEM_OP_UNMAP:
		rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
					false);
		break;

	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -ENOTTY;
		break;
	}

out:
	return rc;
}

static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
				u32 npages, u64 start, u32 offset,
				struct hl_userptr *userptr)
{
	int rc;

	if (!access_ok((void __user *) (uintptr_t) addr, size)) {
		dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
		return -EFAULT;
	}

	userptr->vec = frame_vector_create(npages);
	if (!userptr->vec) {
		dev_err(hdev->dev, "Failed to create frame vector\n");
		return -ENOMEM;
	}

	rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
				userptr->vec);

	if (rc != npages) {
		dev_err(hdev->dev,
			"Failed to map host memory, user ptr probably wrong\n");
		if (rc < 0)
			goto destroy_framevec;
		rc = -EFAULT;
		goto put_framevec;
	}

	if (frame_vector_to_pages(userptr->vec) < 0) {
		dev_err(hdev->dev,
			"Failed to translate frame vector to pages\n");
		rc = -EFAULT;
		goto put_framevec;
	}

	rc = sg_alloc_table_from_pages(userptr->sgt,
					frame_vector_pages(userptr->vec),
					npages, offset, size, GFP_ATOMIC);
	if (rc < 0) {
		dev_err(hdev->dev, "failed to create SG table from pages\n");
		goto put_framevec;
	}

	return 0;

put_framevec:
	put_vaddr_frames(userptr->vec);
destroy_framevec:
	frame_vector_destroy(userptr->vec);
	return rc;
}

/*
 * hl_pin_host_memory - pins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Pins the physical pages
 * - Create an SG list from those pages
 */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
			struct hl_userptr *userptr)
{
	u64 start, end;
	u32 npages, offset;
	int rc;

	if (!size) {
		dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
		return -EINVAL;
	}

	/*
	 * If the combination of the address and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
			PAGE_ALIGN(addr + size) < (addr + size)) {
		dev_err(hdev->dev,
			"user pointer 0x%llx + %llu causes integer overflow\n",
			addr, size);
		return -EINVAL;
	}

	/*
	 * This function can be called also from data path, hence use atomic
	 * always as it is not a big allocation.
	 */
	userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
	if (!userptr->sgt)
		return -ENOMEM;

	start = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	end = PAGE_ALIGN(addr + size);
	npages = (end - start) >> PAGE_SHIFT;
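	/*
	 * Example, assuming 4KB pages: addr = 0x7f0000001800 and size = 0x3000
	 * give start = 0x7f0000001000, offset = 0x800, end = 0x7f0000005000
	 * and npages = 4, since the unaligned head and tail each add a page.
	 */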

	userptr->size = size;
	userptr->addr = addr;
	userptr->dma_mapped = false;
	INIT_LIST_HEAD(&userptr->job_node);

	rc = get_user_memory(hdev, addr, size, npages, start, offset,
				userptr);
	if (rc) {
		dev_err(hdev->dev,
			"failed to get user memory for address 0x%llx\n",
			addr);
		goto free_sgt;
	}

	hl_debugfs_add_userptr(hdev, userptr);

	return 0;

free_sgt:
	kfree(userptr->sgt);
	return rc;
}

/*
 * hl_unpin_host_memory - unpins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Unpins the physical pages related to the host memory
 * - Free the SG list
 */
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct page **pages;

	hl_debugfs_remove_userptr(hdev, userptr);

	if (userptr->dma_mapped)
		hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
						userptr->sgt->nents,
						userptr->dir);

	pages = frame_vector_pages(userptr->vec);
	if (!IS_ERR(pages)) {
		int i;

		for (i = 0; i < frame_vector_count(userptr->vec); i++)
			set_page_dirty_lock(pages[i]);
	}
	put_vaddr_frames(userptr->vec);
	frame_vector_destroy(userptr->vec);

	list_del(&userptr->job_node);

	sg_free_table(userptr->sgt);
	kfree(userptr->sgt);
}

/*
 * hl_userptr_delete_list - clear userptr list
 *
 * @hdev                : pointer to the habanalabs device structure
 * @userptr_list        : pointer to the list to clear
 *
 * This function does the following:
 * - Iterates over the list and unpins the host memory and frees the userptr
 *   structure.
 */
void hl_userptr_delete_list(struct hl_device *hdev,
				struct list_head *userptr_list)
{
	struct hl_userptr *userptr, *tmp;

	list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
		hl_unpin_host_memory(hdev, userptr);
		kfree(userptr);
	}

	INIT_LIST_HEAD(userptr_list);
}

/*
 * hl_userptr_is_pinned - returns whether the given userptr is pinned
 *
 * @hdev                : pointer to the habanalabs device structure
 * @addr                : host virtual address to check
 * @size                : size of the memory area to check
 * @userptr_list        : pointer to the list to search in
 * @userptr             : pointer to userptr to check
 *
 * This function does the following:
 * - Iterates over the list and checks if the given userptr is in it, meaning
 *   it is pinned. If so, returns true, otherwise returns false.
 */
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
				u32 size, struct list_head *userptr_list,
				struct hl_userptr **userptr)
{
	list_for_each_entry((*userptr), userptr_list, job_node) {
		if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
			return true;
	}

	return false;
}

/*
 * va_range_init - initialize virtual addresses range
 * @hdev: pointer to the habanalabs device structure
 * @va_range: pointer to the range to initialize
 * @start: range start address
 * @end: range end address
 *
 * This function does the following:
 * - Initializes the virtual addresses list of the given range with the given
 *   addresses.
 */
static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
			u64 start, u64 end)
{
	int rc;

	INIT_LIST_HEAD(&va_range->list);

	/* PAGE_SIZE alignment */

	if (start & (PAGE_SIZE - 1)) {
		start &= PAGE_MASK;
		start += PAGE_SIZE;
	}

	if (end & (PAGE_SIZE - 1))
		end &= PAGE_MASK;

	if (start >= end) {
		dev_err(hdev->dev, "too small vm range for va list\n");
		return -EFAULT;
	}

	rc = add_va_block(hdev, va_range, start, end);

	if (rc) {
		dev_err(hdev->dev, "Failed to init host va list\n");
		return rc;
	}

	va_range->start_addr = start;
	va_range->end_addr = end;

	return 0;
}

/*
 * va_range_fini() - clear a virtual addresses range
 * @hdev: pointer to the habanalabs structure
 * @va_range: pointer to virtual addresses range
 *
 * This function does the following:
 * - Frees the virtual addresses block list and its lock
 */
static void va_range_fini(struct hl_device *hdev,
		struct hl_va_range *va_range)
{
	mutex_lock(&va_range->lock);
	clear_va_list_locked(hdev, &va_range->list);
	mutex_unlock(&va_range->lock);

	mutex_destroy(&va_range->lock);
	kfree(va_range);
}

/*
 * vm_ctx_init_with_ranges() - initialize virtual memory for context
 * @ctx: pointer to the habanalabs context structure
 * @host_range_start: host virtual addresses range start.
 * @host_range_end: host virtual addresses range end.
 * @host_huge_range_start: host virtual addresses range start for memory
 *                         allocated with huge pages.
 * @host_huge_range_end: host virtual addresses range end for memory allocated
 *                       with huge pages.
 * @dram_range_start: dram virtual addresses range start.
 * @dram_range_end: dram virtual addresses range end.
 *
 * This function initializes the following:
 * - MMU for context
 * - Virtual address to area descriptor hashtable
 * - Virtual block list of available virtual memory
 */
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001608static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
1609 u64 host_range_start,
1610 u64 host_range_end,
1611 u64 host_huge_range_start,
1612 u64 host_huge_range_end,
1613 u64 dram_range_start,
1614 u64 dram_range_end)
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001615{
1616 struct hl_device *hdev = ctx->hdev;
1617 int rc;
1618
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001619 ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL);
1620 if (!ctx->host_va_range)
1621 return -ENOMEM;
1622
1623 ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range),
1624 GFP_KERNEL);
1625 if (!ctx->host_huge_va_range) {
1626 rc = -ENOMEM;
1627 goto host_huge_va_range_err;
1628 }
1629
1630 ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL);
1631 if (!ctx->dram_va_range) {
1632 rc = -ENOMEM;
1633 goto dram_va_range_err;
1634 }
1635
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02001636 rc = hl_mmu_ctx_init(ctx);
1637 if (rc) {
1638 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001639 goto mmu_ctx_err;
Omer Shpigelman27ca384c2019-02-28 10:46:11 +02001640 }
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001641
1642 mutex_init(&ctx->mem_hash_lock);
1643 hash_init(ctx->mem_hash);
1644
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001645 mutex_init(&ctx->host_va_range->lock);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001646
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001647 rc = va_range_init(hdev, ctx->host_va_range, host_range_start,
1648 host_range_end);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001649 if (rc) {
1650 dev_err(hdev->dev, "failed to init host vm range\n");
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001651 goto host_page_range_err;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001652 }
1653
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001654 if (hdev->pmmu_huge_range) {
1655 mutex_init(&ctx->host_huge_va_range->lock);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001656
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001657 rc = va_range_init(hdev, ctx->host_huge_va_range,
1658 host_huge_range_start,
1659 host_huge_range_end);
1660 if (rc) {
1661 dev_err(hdev->dev,
1662 "failed to init host huge vm range\n");
1663 goto host_hpage_range_err;
1664 }
1665 } else {
1666 ctx->host_huge_va_range = ctx->host_va_range;
1667 }
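	/*
	 * Note: without a dedicated huge-page PMMU range, host_huge_va_range
	 * simply aliases host_va_range; the teardown paths therefore free the
	 * huge range only when hdev->pmmu_huge_range is set.
	 */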
1668
1669 mutex_init(&ctx->dram_va_range->lock);
1670
1671 rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start,
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001672 dram_range_end);
1673 if (rc) {
1674 dev_err(hdev->dev, "failed to init dram vm range\n");
1675 goto dram_vm_err;
1676 }
1677
Oded Gabbayc2164772019-02-16 00:39:24 +02001678 hl_debugfs_add_ctx_mem_hash(hdev, ctx);
1679
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001680 return 0;
1681
1682dram_vm_err:
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001683 mutex_destroy(&ctx->dram_va_range->lock);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001684
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001685 if (hdev->pmmu_huge_range) {
1686 mutex_lock(&ctx->host_huge_va_range->lock);
1687 clear_va_list_locked(hdev, &ctx->host_huge_va_range->list);
1688 mutex_unlock(&ctx->host_huge_va_range->lock);
1689 }
1690host_hpage_range_err:
1691 if (hdev->pmmu_huge_range)
1692 mutex_destroy(&ctx->host_huge_va_range->lock);
1693 mutex_lock(&ctx->host_va_range->lock);
1694 clear_va_list_locked(hdev, &ctx->host_va_range->list);
1695 mutex_unlock(&ctx->host_va_range->lock);
1696host_page_range_err:
1697 mutex_destroy(&ctx->host_va_range->lock);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001698 mutex_destroy(&ctx->mem_hash_lock);
1699 hl_mmu_ctx_fini(ctx);
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001700mmu_ctx_err:
1701 kfree(ctx->dram_va_range);
1702dram_va_range_err:
1703 kfree(ctx->host_huge_va_range);
1704host_huge_va_range_err:
1705 kfree(ctx->host_va_range);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001706
1707 return rc;
1708}
1709
1710int hl_vm_ctx_init(struct hl_ctx *ctx)
1711{
1712 struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001713 u64 host_range_start, host_range_end, host_huge_range_start,
1714 host_huge_range_end, dram_range_start, dram_range_end;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001715
1716 atomic64_set(&ctx->dram_phys_mem, 0);
1717
1718 /*
1719 * - If MMU is enabled, init the ranges as usual.
1720 * - If MMU is disabled, in case of host mapping, the returned address
1721 * is the given one.
1722 * In case of DRAM mapping, the returned address is the physical
1723 * address of the memory related to the given handle.
1724 */
Oded Gabbayf3a965c2020-10-04 23:00:39 +03001725 if (!ctx->hdev->mmu_enable)
1726 return 0;
1727
1728 dram_range_start = prop->dmmu.start_addr;
1729 dram_range_end = prop->dmmu.end_addr;
1730 host_range_start = prop->pmmu.start_addr;
1731 host_range_end = prop->pmmu.end_addr;
1732 host_huge_range_start = prop->pmmu_huge.start_addr;
1733 host_huge_range_end = prop->pmmu_huge.end_addr;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001734
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001735 return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
Oded Gabbayf3a965c2020-10-04 23:00:39 +03001736 host_huge_range_start, host_huge_range_end,
1737 dram_range_start, dram_range_end);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001738}
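
/*
 * Illustrative sketch only (the real callers live elsewhere in the driver):
 * hl_vm_ctx_init() is expected to be paired with hl_vm_ctx_fini() over the
 * lifetime of a context, e.g.:
 *
 *	rc = hl_vm_ctx_init(ctx);
 *	if (rc)
 *		return rc;
 *	...map and unmap user/device memory against ctx...
 *	hl_vm_ctx_fini(ctx);
 */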
1739
1740/*
1741 * hl_vm_ctx_fini - virtual memory teardown of context
1742 *
1743 * @ctx : pointer to the habanalabs context structure
1744 *
1745 * This function performs teardown of the following:
1746 * - Virtual block list of available virtual memory
1747 * - Virtual address to area descriptor hashtable
1748 * - MMU for context
1749 *
1750 * In addition this function does the following:
1751 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
1752 * hashtable should be empty as no valid mappings should exist at this
1753 * point.
1754 * - Frees any existing physical page list from the idr which relates to the
1755 * current context asid.
1756 * - This function checks the virtual block list for correctness. At this point
1757 * the list should contain one element which describes the whole virtual
1758 * memory range of the context. Otherwise, a warning is printed.
1759 */
1760void hl_vm_ctx_fini(struct hl_ctx *ctx)
1761{
1762 struct hl_device *hdev = ctx->hdev;
1763 struct hl_vm *vm = &hdev->vm;
1764 struct hl_vm_phys_pg_pack *phys_pg_list;
1765 struct hl_vm_hash_node *hnode;
1766 struct hlist_node *tmp_node;
1767 int i;
1768
Oded Gabbayf3a965c2020-10-04 23:00:39 +03001769 if (!ctx->hdev->mmu_enable)
1770 return;
1771
Oded Gabbayc2164772019-02-16 00:39:24 +02001772 hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
1773
Omer Shpigelmane604f552019-11-14 18:23:59 +00001774 /*
1775	 * Something clearly went wrong on hard reset, so there is no point in
1776	 * printing another error about this side effect
1777 */
1778 if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
1779 dev_notice(hdev->dev,
Oded Gabbay0eab4f82020-06-22 09:52:22 +03001780 "user released device without removing its memory mappings\n");
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001781
1782 hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
1783 dev_dbg(hdev->dev,
1784 "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
1785 hnode->vaddr, ctx->asid);
Omer Shpigelman71c5e552019-11-14 18:23:57 +00001786 unmap_device_va(ctx, hnode->vaddr, true);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001787 }
1788
Omer Shpigelmanbea84c42019-11-14 18:23:58 +00001789 /* invalidate the cache once after the unmapping loop */
1790 hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
1791 hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
1792
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001793 spin_lock(&vm->idr_lock);
1794 idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
1795 if (phys_pg_list->asid == ctx->asid) {
1796 dev_dbg(hdev->dev,
Omer Shpigelman7f74d4d2019-08-12 11:48:46 +03001797 "page list 0x%px of asid %d is still alive\n",
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001798 phys_pg_list, ctx->asid);
Tomer Tayarc8113752019-08-04 07:03:41 +00001799 atomic64_sub(phys_pg_list->total_size,
1800 &hdev->dram_used_mem);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001801 free_phys_pg_pack(hdev, phys_pg_list);
1802 idr_remove(&vm->phys_pg_pack_handles, i);
1803 }
1804 spin_unlock(&vm->idr_lock);
1805
Omer Shpigelman64a7e292020-01-05 09:05:45 +00001806 va_range_fini(hdev, ctx->dram_va_range);
1807 if (hdev->pmmu_huge_range)
1808 va_range_fini(hdev, ctx->host_huge_va_range);
1809 va_range_fini(hdev, ctx->host_va_range);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001810
1811 mutex_destroy(&ctx->mem_hash_lock);
1812 hl_mmu_ctx_fini(ctx);
Oded Gabbay3e622992020-10-18 15:32:23 +03001813
1814	/* In this case we need to clear the global accounting of DRAM usage
1815	 * because the user is the one that notifies us about its allocations.
1816	 * Once the user is gone, all of the DRAM is available again.
1817 */
1818 if (!ctx->hdev->dram_supports_virtual_memory)
1819 atomic64_set(&ctx->hdev->dram_used_mem, 0);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001820}
1821
1822/*
1823 * hl_vm_init - initialize virtual memory module
1824 *
1825 * @hdev : pointer to the habanalabs device structure
1826 *
1827 * This function initializes the following:
1828 * - MMU module
1829 * - DRAM physical pages pool of 2MB pages
1830 * - Idr for device memory allocation handles
1831 */
1832int hl_vm_init(struct hl_device *hdev)
1833{
1834 struct asic_fixed_properties *prop = &hdev->asic_prop;
1835 struct hl_vm *vm = &hdev->vm;
1836 int rc;
1837
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001838 vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
1839 if (!vm->dram_pg_pool) {
1840 dev_err(hdev->dev, "Failed to create dram page pool\n");
Oded Gabbay37d68ce2019-05-29 14:43:04 +03001841 return -ENOMEM;
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001842 }
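	/*
	 * Note: __ffs(dram_page_size) above sets the pool's allocation order;
	 * e.g. a 2 MB DRAM page size yields order 21, so the pool hands out
	 * DRAM in whole-page granules.
	 */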
1843
1844 kref_init(&vm->dram_pg_pool_refcount);
1845
1846 rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
1847 prop->dram_end_address - prop->dram_user_base_address,
1848 -1);
1849
1850 if (rc) {
1851 dev_err(hdev->dev,
1852 "Failed to add memory to dram page pool %d\n", rc);
1853 goto pool_add_err;
1854 }
1855
1856 spin_lock_init(&vm->idr_lock);
1857 idr_init(&vm->phys_pg_pack_handles);
1858
1859 atomic64_set(&hdev->dram_used_mem, 0);
1860
1861 vm->init_done = true;
1862
1863 return 0;
1864
1865pool_add_err:
1866 gen_pool_destroy(vm->dram_pg_pool);
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001867
1868 return rc;
1869}
1870
1871/*
1872 * hl_vm_fini - virtual memory module teardown
1873 *
1874 * @hdev : pointer to the habanalabs device structure
1875 *
1876 * This function performs teardown of the following:
1877 * - Idr for device memory allocation handles
1878 * - DRAM physical pages pool of 2MB pages
1879 * - MMU module
1880 */
1881void hl_vm_fini(struct hl_device *hdev)
1882{
1883 struct hl_vm *vm = &hdev->vm;
1884
1885 if (!vm->init_done)
1886 return;
1887
1888 /*
1889	 * At this point all the contexts should have been freed and hence no
1890	 * DRAM memory should be in use, so the DRAM pool can be freed here.
1891 */
1892 if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
1893 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
1894 __func__);
1895
Omer Shpigelman0feaf862019-02-16 00:39:22 +02001896 vm->init_done = false;
1897}
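
/*
 * Illustrative sketch only (the real callers live in the device init/fini
 * path elsewhere in the driver): hl_vm_init() is expected to run once per
 * device before any context is created, and hl_vm_fini() after the last
 * context is gone, e.g.:
 *
 *	rc = hl_vm_init(hdev);
 *	if (rc)
 *		return rc;
 *	...create contexts and map memory...
 *	hl_vm_fini(hdev);
 */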