// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_MMU_DEBUG	0

/* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
#define DRAM_POOL_PAGE_SIZE	SZ_8M

/*
 * The va ranges in context object contain a list with the available chunks of
 * device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range which is a half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as result and a remainder to stay in the list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. The chunk is added to the list and if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be
 * created), the chunks are merged.
 *
 * On finish, the list is checked to have only one chunk of all the relevant
 * virtual range (which is a half of the device total virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
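 *
 * For example, reserving 0x2000 bytes out of a single free chunk
 * [0x0 - 0xffff] returns address 0x0 and leaves the remainder
 * [0x2000 - 0xffff] in the list. Unmapping it later re-adds
 * [0x0 - 0x1fff], whose edge touches the remainder, so the two chunks
 * are merged back into [0x0 - 0xffff].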
 */

/**
 * alloc_device_memory() - allocate device memory.
 * @ctx: pointer to the context structure.
 * @args: host parameters containing the requested size.
 * @ret_handle: result handle.
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 'dram_page_size' pages.
 * - Return unique handle for later map/unmap/free.
 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
			u32 *ret_handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u64 paddr = 0, total_size, num_pgs, i;
	u32 num_curr_pgs, page_size;
	int handle, rc;
	bool contiguous;

	num_curr_pgs = 0;
	page_size = hdev->asic_prop.dram_page_size;
	num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
	total_size = num_pgs * page_size;

	if (!total_size) {
		dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
		return -EINVAL;
	}

	contiguous = args->flags & HL_MEM_CONTIGUOUS;

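	/*
	 * For a contiguous allocation the whole size is carved out of the
	 * DRAM pool as a single block; otherwise each page is allocated
	 * separately in the loop below.
	 */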
	if (contiguous) {
		paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
		if (!paddr) {
			dev_err(hdev->dev,
				"failed to allocate %llu contiguous pages with total size of %llu\n",
				num_pgs, total_size);
			return -ENOMEM;
		}
	}

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack) {
		rc = -ENOMEM;
		goto pages_pack_err;
	}

	phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
	phys_pg_pack->asid = ctx->asid;
	phys_pg_pack->npages = num_pgs;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_size;
	phys_pg_pack->flags = args->flags;
	phys_pg_pack->contiguous = contiguous;

	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		rc = -ENOMEM;
		goto pages_arr_err;
	}

	if (phys_pg_pack->contiguous) {
		for (i = 0 ; i < num_pgs ; i++)
			phys_pg_pack->pages[i] = paddr + i * page_size;
	} else {
		for (i = 0 ; i < num_pgs ; i++) {
			phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
							vm->dram_pg_pool,
							page_size);
			if (!phys_pg_pack->pages[i]) {
				dev_err(hdev->dev,
					"Failed to allocate device memory (out of memory)\n");
				rc = -ENOMEM;
				goto page_err;
			}

			num_curr_pgs++;
		}
	}

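	/*
	 * The handle is allocated while holding the idr spinlock, hence
	 * the GFP_ATOMIC allocation flag.
	 */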
	spin_lock(&vm->idr_lock);
	handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
				GFP_ATOMIC);
	spin_unlock(&vm->idr_lock);

	if (handle < 0) {
		dev_err(hdev->dev, "Failed to get handle for page\n");
		rc = -EFAULT;
		goto idr_err;
	}

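	/*
	 * Every allocated page takes a reference on the DRAM pool, so the
	 * pool (and the handles idr) is destroyed only once the last page
	 * has been returned - see dram_pg_pool_do_release().
	 */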
	for (i = 0 ; i < num_pgs ; i++)
		kref_get(&vm->dram_pg_pool_refcount);

	phys_pg_pack->handle = handle;

	atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
	atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

	*ret_handle = handle;

	return 0;

idr_err:
page_err:
	if (!phys_pg_pack->contiguous)
		for (i = 0 ; i < num_curr_pgs ; i++)
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
					page_size);

	kvfree(phys_pg_pack->pages);
pages_arr_err:
	kfree(phys_pg_pack);
pages_pack_err:
	if (contiguous)
		gen_pool_free(vm->dram_pg_pool, paddr, total_size);

	return rc;
}

/**
 * dma_map_host_va() - DMA mapping of the given host virtual address.
 * @hdev: habanalabs device structure.
 * @addr: the host virtual address of the memory area.
 * @size: the size of the memory area.
 * @p_userptr: pointer to result userptr structure.
 *
 * This function does the following:
 * - Allocate userptr structure.
 * - Pin the given host memory using the userptr structure.
 * - Perform DMA mapping to have the DMA addresses of the pages.
 */
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
				struct hl_userptr **p_userptr)
{
	struct hl_userptr *userptr;
	int rc;

	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
	if (!userptr) {
		rc = -ENOMEM;
		goto userptr_err;
	}

	rc = hl_pin_host_memory(hdev, addr, size, userptr);
	if (rc) {
		dev_err(hdev->dev, "Failed to pin host memory\n");
		goto pin_err;
	}

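	/*
	 * With the pages pinned, create the DMA mapping of the userptr's
	 * scatter-gather table via the ASIC-specific callback.
	 */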
	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
				userptr->sgt->nents, DMA_BIDIRECTIONAL);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto dma_map_err;
	}

	userptr->dma_mapped = true;
	userptr->dir = DMA_BIDIRECTIONAL;
	userptr->vm_type = VM_TYPE_USERPTR;

	*p_userptr = userptr;

	return 0;

dma_map_err:
	hl_unpin_host_memory(hdev, userptr);
pin_err:
	kfree(userptr);
userptr_err:

	return rc;
}

/**
 * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
 * @hdev: habanalabs device structure.
 * @userptr: userptr to free.
 *
 * This function does the following:
 * - Unpins the physical pages.
 * - Frees the userptr structure.
 */
static void dma_unmap_host_va(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	hl_unpin_host_memory(hdev, userptr);
	kfree(userptr);
}

/**
 * dram_pg_pool_do_release() - free DRAM pages pool
 * @ref: pointer to reference object.
 *
 * This function does the following:
 * - Frees the idr structure of physical pages handles.
 * - Frees the generic pool of DRAM physical pages.
 */
static void dram_pg_pool_do_release(struct kref *ref)
{
	struct hl_vm *vm = container_of(ref, struct hl_vm,
			dram_pg_pool_refcount);

	/*
	 * free the idr here as only here we know for sure that there are no
	 * allocated physical pages and hence there are no handles in use
	 */
	idr_destroy(&vm->phys_pg_pack_handles);
	gen_pool_destroy(vm->dram_pg_pool);
}

/**
 * free_phys_pg_pack() - free physical page pack.
 * @hdev: habanalabs device structure.
 * @phys_pg_pack: physical page pack to free.
 *
 * This function does the following:
 * - For DRAM memory only
 *   - iterate over the pack, scrub and free each physical block structure by
 *     returning it to the general pool.
 *     In case of error during scrubbing, initiate hard reset.
 *     Once hard reset is triggered, scrubbing is bypassed while freeing the
 *     memory continues.
 * - Free the hl_vm_phys_pg_pack structure.
 */
static int free_phys_pg_pack(struct hl_device *hdev,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_vm *vm = &hdev->vm;
	u64 i;
	int rc = 0;

	if (phys_pg_pack->created_from_userptr)
		goto end;

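	/*
	 * Packs created from a userptr do not own DRAM pages, hence the
	 * early jump to 'end' above. For DRAM packs, scrub (when enabled)
	 * and return the pages to the pool, either as one contiguous block
	 * or page by page.
	 */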
	if (phys_pg_pack->contiguous) {
		if (hdev->memory_scrub && !hdev->disabled) {
			rc = hdev->asic_funcs->scrub_device_mem(hdev,
					phys_pg_pack->pages[0],
					phys_pg_pack->total_size);
			if (rc)
				dev_err(hdev->dev,
					"Failed to scrub contiguous device memory\n");
		}

		gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
			phys_pg_pack->total_size);

		for (i = 0; i < phys_pg_pack->npages ; i++)
			kref_put(&vm->dram_pg_pool_refcount,
				dram_pg_pool_do_release);
	} else {
		for (i = 0 ; i < phys_pg_pack->npages ; i++) {
			if (hdev->memory_scrub && !hdev->disabled && rc == 0) {
				rc = hdev->asic_funcs->scrub_device_mem(
						hdev,
						phys_pg_pack->pages[i],
						phys_pg_pack->page_size);
				if (rc)
					dev_err(hdev->dev,
						"Failed to scrub device memory\n");
			}
			gen_pool_free(vm->dram_pg_pool,
				phys_pg_pack->pages[i],
				phys_pg_pack->page_size);
			kref_put(&vm->dram_pg_pool_refcount,
				dram_pg_pool_do_release);
		}
	}

	if (rc && !hdev->disabled)
		hl_device_reset(hdev, HL_RESET_HARD);

end:
	kvfree(phys_pg_pack->pages);
	kfree(phys_pg_pack);

	return rc;
}

/**
 * free_device_memory() - free device memory.
 * @ctx: pointer to the context structure.
 * @args: host parameters containing the handle of the memory to free.
 *
 * This function does the following:
 * - Free the device memory related to the given handle.
 */
static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u32 handle = args->free.handle;

	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (phys_pg_pack) {
		if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
			dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
				handle);
			spin_unlock(&vm->idr_lock);
			return -EINVAL;
		}

		/*
		 * must remove from idr before the freeing of the physical
		 * pages as the refcount of the pool is also the trigger of the
		 * idr destroy
		 */
		idr_remove(&vm->phys_pg_pack_handles, handle);
		spin_unlock(&vm->idr_lock);

		atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
		atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

		return free_phys_pg_pack(hdev, phys_pg_pack);
	} else {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev,
			"free device memory failed, no match for handle %u\n",
			handle);
		return -EINVAL;
	}

	return 0;
}

/**
 * clear_va_list_locked() - free virtual addresses list.
 * @hdev: habanalabs device structure.
 * @va_list: list of virtual addresses to free.
 *
 * This function does the following:
 * - Iterate over the list and free each virtual addresses block.
 *
 * This function should be called only when va_list lock is taken.
 */
static void clear_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
	struct hl_vm_va_block *va_block, *tmp;

	list_for_each_entry_safe(va_block, tmp, va_list, node) {
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/**
 * print_va_list_locked() - print virtual addresses list.
 * @hdev: habanalabs device structure.
 * @va_list: list of virtual addresses to print.
 *
 * This function does the following:
 * - Iterate over the list and print each virtual addresses block.
 *
 * This function should be called only when va_list lock is taken.
 */
static void print_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
#if HL_MMU_DEBUG
	struct hl_vm_va_block *va_block;

	dev_dbg(hdev->dev, "print va list:\n");

	list_for_each_entry(va_block, va_list, node)
		dev_dbg(hdev->dev,
			"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
			va_block->start, va_block->end, va_block->size);
#endif
}

/**
 * merge_va_blocks_locked() - merge a virtual block if possible.
 * @hdev: pointer to the habanalabs device structure.
 * @va_list: pointer to the virtual addresses block list.
 * @va_block: virtual block to merge with adjacent blocks.
 *
 * This function does the following:
 * - Merge the given blocks with the adjacent blocks if their virtual ranges
 *   create a contiguous virtual range.
 *
 * This function should be called only when va_list lock is taken.
 */
static void merge_va_blocks_locked(struct hl_device *hdev,
		struct list_head *va_list, struct hl_vm_va_block *va_block)
{
	struct hl_vm_va_block *prev, *next;

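	/*
	 * Try to fold the block into its list neighbors: first into the
	 * previous block if it ends right where this one starts, then into
	 * the next block if this one ends right where the next one starts.
	 */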
	prev = list_prev_entry(va_block, node);
	if (&prev->node != va_list && prev->end + 1 == va_block->start) {
		prev->end = va_block->end;
		prev->size = prev->end - prev->start;
		list_del(&va_block->node);
		kfree(va_block);
		va_block = prev;
	}

	next = list_next_entry(va_block, node);
	if (&next->node != va_list && va_block->end + 1 == next->start) {
		next->start = va_block->start;
		next->size = next->end - next->start;
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/**
 * add_va_block_locked() - add a virtual block to the virtual addresses list.
 * @hdev: pointer to the habanalabs device structure.
 * @va_list: pointer to the virtual addresses block list.
 * @start: start virtual address.
 * @end: end virtual address.
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other blocks
 *   if a contiguous virtual block can be created.
 *
 * This function should be called only when va_list lock is taken.
 */
static int add_va_block_locked(struct hl_device *hdev,
		struct list_head *va_list, u64 start, u64 end)
{
	struct hl_vm_va_block *va_block, *res = NULL;
	u64 size = end - start;

	print_va_list_locked(hdev, va_list);

	list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove upon matureness */
		if (hl_mem_area_crosses_range(start, size, va_block->start,
				va_block->end)) {
			dev_err(hdev->dev,
				"block crossing ranges at start 0x%llx, end 0x%llx\n",
				va_block->start, va_block->end);
			return -EINVAL;
		}

		if (va_block->end < start)
			res = va_block;
	}

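	/*
	 * At this point 'res' is the last block that ends before 'start'
	 * (or NULL), so the new block is inserted in address order.
	 */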
	va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
	if (!va_block)
		return -ENOMEM;

	va_block->start = start;
	va_block->end = end;
	va_block->size = size;

	if (!res)
		list_add(&va_block->node, va_list);
	else
		list_add(&va_block->node, &res->node);

	merge_va_blocks_locked(hdev, va_list, va_block);

	print_va_list_locked(hdev, va_list);

	return 0;
}

/**
 * add_va_block() - wrapper for add_va_block_locked.
 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the virtual addresses range.
 * @start: start virtual address.
 * @end: end virtual address.
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked.
 */
static inline int add_va_block(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
{
	int rc;

	mutex_lock(&va_range->lock);
	rc = add_va_block_locked(hdev, &va_range->list, start, end);
	mutex_unlock(&va_range->lock);

	return rc;
}

/**
 * is_hint_crossing_range() - check whether a hint address crosses the
 *                            specified reserved range.
 */
static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
		u64 start_addr, u32 size, struct asic_fixed_properties *prop)
{
	bool range_cross;

	if (range_type == HL_VA_RANGE_TYPE_DRAM)
		range_cross =
			hl_mem_area_crosses_range(start_addr, size,
			prop->hints_dram_reserved_va_range.start_addr,
			prop->hints_dram_reserved_va_range.end_addr);
	else if (range_type == HL_VA_RANGE_TYPE_HOST)
		range_cross =
			hl_mem_area_crosses_range(start_addr, size,
			prop->hints_host_reserved_va_range.start_addr,
			prop->hints_host_reserved_va_range.end_addr);
	else
		range_cross =
			hl_mem_area_crosses_range(start_addr, size,
			prop->hints_host_hpage_reserved_va_range.start_addr,
			prop->hints_host_hpage_reserved_va_range.end_addr);

	return range_cross;
}

/**
 * get_va_block() - get a virtual block for the given size and alignment.
 *
 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the virtual addresses range.
 * @size: requested block size.
 * @hint_addr: hint for requested address by the user.
 * @va_block_align: required alignment of the virtual block start address.
 * @range_type: va range type (host, dram)
 * @flags: additional memory flags, currently only uses HL_MEM_FORCE_HINT
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   given size, hint address and alignment.
 * - Reserve the requested block and update the list.
 * - Return the start address of the virtual block.
 */
static u64 get_va_block(struct hl_device *hdev,
				struct hl_va_range *va_range,
				u64 size, u64 hint_addr, u32 va_block_align,
				enum hl_va_range_type range_type,
				u32 flags)
{
	struct hl_vm_va_block *va_block, *new_va_block = NULL;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
		align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
		dram_hint_mask = prop->dram_hints_align_mask;
	bool add_prev = false;
	bool is_align_pow_2 = is_power_of_2(va_range->page_size);
	bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
	bool force_hint = flags & HL_MEM_FORCE_HINT;

	if (is_align_pow_2)
		align_mask = ~((u64)va_block_align - 1);
	else
		/*
		 * with non-power-of-2 range we work only with page granularity
		 * and the start address is page aligned,
		 * so no need for alignment checking.
		 */
		size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
							va_range->page_size;

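	/* Mask off the DRAM hint-alignment bits before the alignment check */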
	tmp_hint_addr = hint_addr & ~dram_hint_mask;

	/* Check if we need to ignore hint address */
	if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
		(!is_align_pow_2 && is_hint_dram_addr &&
			do_div(tmp_hint_addr, va_range->page_size))) {

		if (force_hint) {
			/* Hint must be respected, so here we just fail */
			dev_err(hdev->dev,
				"Hint address 0x%llx is not page aligned - cannot be respected\n",
				hint_addr);
			return 0;
		}

		dev_dbg(hdev->dev,
			"Hint address 0x%llx will be ignored because it is not aligned\n",
			hint_addr);
		hint_addr = 0;
	}

	mutex_lock(&va_range->lock);

	print_va_list_locked(hdev, &va_range->list);

	list_for_each_entry(va_block, &va_range->list, node) {
		/* Calc the first possible aligned addr */
		valid_start = va_block->start;

		if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
			valid_start &= align_mask;
			valid_start += va_block_align;
			if (valid_start > va_block->end)
				continue;
		}

		valid_size = va_block->end - valid_start;
		if (valid_size < size)
			continue;

		/*
		 * In case hint address is 0, and hints_range_reservation
		 * property enabled, then avoid allocating va blocks from the
		 * range reserved for hint addresses
		 */
		if (prop->hints_range_reservation && !hint_addr)
			if (is_hint_crossing_range(range_type, valid_start,
					size, prop))
				continue;

		/* Pick the minimal length block which has the required size */
		if (!new_va_block || (valid_size < reserved_valid_size)) {
			new_va_block = va_block;
			reserved_valid_start = valid_start;
			reserved_valid_size = valid_size;
		}

		if (hint_addr && hint_addr >= valid_start &&
				(hint_addr + size) <= va_block->end) {
			new_va_block = va_block;
			reserved_valid_start = hint_addr;
			reserved_valid_size = valid_size;
			break;
		}
	}

	if (!new_va_block) {
		dev_err(hdev->dev, "no available va block for size %llu\n",
								size);
		goto out;
	}

	if (force_hint && reserved_valid_start != hint_addr) {
		/* Hint address must be respected. If we are here - this means
		 * we could not respect it.
		 */
		dev_err(hdev->dev,
			"Hint address 0x%llx could not be respected\n",
			hint_addr);
		reserved_valid_start = 0;
		goto out;
	}

	/*
	 * Check if there is some leftover range due to reserving the new
	 * va block, then return it to the main virtual addresses list.
	 */
	if (reserved_valid_start > new_va_block->start) {
		prev_start = new_va_block->start;
		prev_end = reserved_valid_start - 1;

		new_va_block->start = reserved_valid_start;
		new_va_block->size = reserved_valid_size;

		add_prev = true;
	}

	if (new_va_block->size > size) {
		new_va_block->start += size;
		new_va_block->size = new_va_block->end - new_va_block->start;
	} else {
		list_del(&new_va_block->node);
		kfree(new_va_block);
	}

	if (add_prev)
		add_va_block_locked(hdev, &va_range->list, prev_start,
				prev_end);

	print_va_list_locked(hdev, &va_range->list);
out:
	mutex_unlock(&va_range->lock);

	return reserved_valid_start;
}

/**
 * hl_reserve_va_block() - reserve a virtual block of a given size.
 * @hdev: pointer to the habanalabs device structure.
 * @ctx: current context
 * @type: virtual addresses range type.
 * @size: requested block size.
 * @alignment: required alignment in bytes of the virtual block start address,
 *             0 means no alignment.
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   given size and alignment.
 * - Reserve the requested block and update the list.
 * - Return the start address of the virtual block.
 */
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
		enum hl_va_range_type type, u32 size, u32 alignment)
{
	return get_va_block(hdev, ctx->va_range[type], size, 0,
			max(alignment, ctx->va_range[type]->page_size),
			type, 0);
}

/**
 * hl_get_va_range_type() - get va_range type for the given address and size.
 * @ctx: pointer to the context structure.
 * @address: the start address of the area we want to validate.
 * @size: the size in bytes of the area we want to validate.
 * @type: returned va_range type.
 *
 * Return: 0 if the area is inside a valid range, -EINVAL otherwise.
 */
static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
			enum hl_va_range_type *type)
{
	int i;

	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
		if (hl_mem_area_inside_range(address, size,
				ctx->va_range[i]->start_addr,
				ctx->va_range[i]->end_addr)) {
			*type = i;
			return 0;
		}
	}

	return -EINVAL;
}

/**
 * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
 * @hdev: pointer to the habanalabs device structure
 * @ctx: pointer to the context structure.
 * @start_addr: start virtual address.
 * @size: size of the virtual block to unreserve.
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked.
 */
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
		u64 start_addr, u64 size)
{
	enum hl_va_range_type type;
	int rc;

	rc = hl_get_va_range_type(ctx, start_addr, size, &type);
	if (rc) {
		dev_err(hdev->dev,
			"cannot find va_range for va %#llx size %llu",
			start_addr, size);
		return rc;
	}

	rc = add_va_block(hdev, ctx->va_range[type], start_addr,
			start_addr + size - 1);
	if (rc)
		dev_warn(hdev->dev,
			"add va block failed for vaddr: 0x%llx\n", start_addr);

	return rc;
}

/**
 * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
 *                                    memory
 * @ctx: pointer to the context structure.
 * @userptr: userptr to initialize from.
 * @pphys_pg_pack: result pointer.
 * @force_regular_page: tell the function to ignore huge page optimization,
 *                      even if possible. Needed for cases where the device VA
 *                      is allocated before we know the composition of the
 *                      physical pages
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block.
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block.
 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
				struct hl_userptr *userptr,
				struct hl_vm_phys_pg_pack **pphys_pg_pack,
				bool force_regular_page)
{
	u32 npages, page_size = PAGE_SIZE,
		huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
	u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	bool first = true, is_huge_page_opt;
	u64 page_mask, total_npages;
	struct scatterlist *sg;
	dma_addr_t dma_addr;
	int rc, i, j;

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack)
		return -ENOMEM;

	phys_pg_pack->vm_type = userptr->vm_type;
	phys_pg_pack->created_from_userptr = true;
	phys_pg_pack->asid = ctx->asid;
	atomic_set(&phys_pg_pack->mapping_cnt, 1);

	is_huge_page_opt = (force_regular_page ? false : true);

	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes are at least 2MB, we can use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
	 * consecutive block.
	 */
	total_npages = 0;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = hl_get_sg_info(sg, &dma_addr);

		total_npages += npages;

		if ((npages % pgs_in_huge_page) ||
					(dma_addr & (huge_page_size - 1)))
			is_huge_page_opt = false;
	}

	if (is_huge_page_opt) {
		page_size = huge_page_size;
		do_div(total_npages, pgs_in_huge_page);
	}

	page_mask = ~(((u64) page_size) - 1);

	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
						GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		rc = -ENOMEM;
		goto page_pack_arr_mem_err;
	}

	phys_pg_pack->npages = total_npages;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_npages * page_size;

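	/*
	 * Second pass: chop every DMA segment into page_size chunks and
	 * record the physical address of each chunk in the pages array.
	 */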
	j = 0;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = hl_get_sg_info(sg, &dma_addr);

		/* align down to physical page size and save the offset */
		if (first) {
			first = false;
			phys_pg_pack->offset = dma_addr & (page_size - 1);
			dma_addr &= page_mask;
		}

		while (npages) {
			phys_pg_pack->pages[j++] = dma_addr;
			dma_addr += page_size;

			if (is_huge_page_opt)
				npages -= pgs_in_huge_page;
			else
				npages--;
		}
	}

	*pphys_pg_pack = phys_pg_pack;

	return 0;

page_pack_arr_mem_err:
	kfree(phys_pg_pack);

	return rc;
}

/**
 * map_phys_pg_pack() - maps the physical page pack.
 * @ctx: pointer to the context structure.
 * @vaddr: start address of the virtual area to map from.
 * @phys_pg_pack: the pack of physical pages to map to.
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to matching physical chunk.
 * - Returns 0 on success, error code otherwise.
 */
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
	u32 page_size = phys_pg_pack->page_size;
	int rc = 0;
	bool is_host_addr;

	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
		paddr = phys_pg_pack->pages[i];

		rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
				(i + 1) == phys_pg_pack->npages);
		if (rc) {
			dev_err(hdev->dev,
				"map failed for handle %u, npages: %llu, mapped: %llu",
				phys_pg_pack->handle, phys_pg_pack->npages,
				mapped_pg_cnt);
			goto err;
		}

		mapped_pg_cnt++;
		next_vaddr += page_size;
	}

	return 0;

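	/* On failure, unmap everything that was mapped so far */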
err:
	is_host_addr = !hl_is_dram_va(hdev, vaddr);

	next_vaddr = vaddr;
	for (i = 0 ; i < mapped_pg_cnt ; i++) {
		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
					(i + 1) == mapped_pg_cnt))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
				phys_pg_pack->handle, next_vaddr,
				phys_pg_pack->pages[i], page_size);

		next_vaddr += page_size;

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 *
		 * In addition, on host num of pages could be huge,
		 * because page size could be 4KB, so when unmapping host
		 * pages sleep every 32K pages to avoid soft lockup
		 */
		if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
			usleep_range(50, 200);
	}

	return rc;
}

/**
 * unmap_phys_pg_pack() - unmaps the physical page pack.
 * @ctx: pointer to the context structure.
 * @vaddr: start address of the virtual area to unmap.
 * @phys_pg_pack: the pack of physical pages to unmap.
 */
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr, i;
	bool is_host_addr;
	u32 page_size;

	is_host_addr = !hl_is_dram_va(hdev, vaddr);
	page_size = phys_pg_pack->page_size;
	next_vaddr = vaddr;

	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
				(i + 1) == phys_pg_pack->npages))
			dev_warn_ratelimited(hdev->dev,
				"unmap failed for vaddr: 0x%llx\n", next_vaddr);

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 *
		 * In addition, on host num of pages could be huge,
		 * because page size could be 4KB, so when unmapping host
		 * pages sleep every 32K pages to avoid soft lockup
		 */
		if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
			usleep_range(50, 200);
	}
}

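/*
 * get_paddr_from_handle() - return the physical address of the first page of
 * the pack that matches the given handle. The lookup is done under the idr
 * spinlock.
 */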
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1013 | static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args, |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1014 | u64 *paddr) |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1015 | { |
| 1016 | struct hl_device *hdev = ctx->hdev; |
| 1017 | struct hl_vm *vm = &hdev->vm; |
| 1018 | struct hl_vm_phys_pg_pack *phys_pg_pack; |
| 1019 | u32 handle; |
| 1020 | |
| 1021 | handle = lower_32_bits(args->map_device.handle); |
| 1022 | spin_lock(&vm->idr_lock); |
| 1023 | phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); |
| 1024 | if (!phys_pg_pack) { |
| 1025 | spin_unlock(&vm->idr_lock); |
| 1026 | dev_err(hdev->dev, "no match for handle %u\n", handle); |
| 1027 | return -EINVAL; |
| 1028 | } |
| 1029 | |
| 1030 | *paddr = phys_pg_pack->pages[0]; |
| 1031 | |
| 1032 | spin_unlock(&vm->idr_lock); |
| 1033 | |
| 1034 | return 0; |
| 1035 | } |
| 1036 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1037 | /** |
| 1038 | * map_device_va() - map the given memory. |
| 1039 | * @ctx: pointer to the context structure. |
| 1040 | * @args: host parameters with handle/host virtual address. |
| 1041 | * @device_addr: pointer to result device virtual address. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1042 | * |
| 1043 | * This function does the following: |
| 1044 | * - If given a physical device memory handle, map to a device virtual block |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1045 | * and return the start address of this block. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1046 | * - If given a host virtual address and size, find the related physical pages, |
| 1047 |  *   map a device virtual block to these pages and return the start address of
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1048 | * this block. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1049 | */ |
| 1050 | static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, |
| 1051 | u64 *device_addr) |
| 1052 | { |
| 1053 | struct hl_device *hdev = ctx->hdev; |
| 1054 | struct hl_vm *vm = &hdev->vm; |
| 1055 | struct hl_vm_phys_pg_pack *phys_pg_pack; |
| 1056 | struct hl_userptr *userptr = NULL; |
| 1057 | struct hl_vm_hash_node *hnode; |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1058 | struct hl_va_range *va_range; |
Oded Gabbay | 82629c7 | 2021-06-29 18:08:05 +0300 | [diff] [blame] | 1059 | enum vm_type *vm_type; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1060 | u64 ret_vaddr, hint_addr; |
Omer Shpigelman | 7c52fb0 | 2020-06-28 21:15:53 +0300 | [diff] [blame] | 1061 | u32 handle = 0, va_block_align; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1062 | int rc; |
| 1063 | bool is_userptr = args->flags & HL_MEM_USERPTR; |
farah kassabri | 1ae32b9 | 2021-01-31 18:56:03 +0200 | [diff] [blame] | 1064 | enum hl_va_range_type va_range_type = 0; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1065 | |
| 1066 | /* Assume failure */ |
| 1067 | *device_addr = 0; |
| 1068 | |
| 1069 | if (is_userptr) { |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1070 | u64 addr = args->map_host.host_virt_addr, |
| 1071 | size = args->map_host.mem_size; |
Omer Shpigelman | 7c52fb0 | 2020-06-28 21:15:53 +0300 | [diff] [blame] | 1072 | u32 page_size = hdev->asic_prop.pmmu.page_size, |
| 1073 | huge_page_size = hdev->asic_prop.pmmu_huge.page_size; |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1074 | |
| 1075 | rc = dma_map_host_va(hdev, addr, size, &userptr); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1076 | if (rc) { |
| 1077 | dev_err(hdev->dev, "failed to get userptr from va\n"); |
| 1078 | return rc; |
| 1079 | } |
| 1080 | |
Omer Shpigelman | 54bb674 | 2019-11-14 18:23:55 +0000 | [diff] [blame] | 1081 | rc = init_phys_pg_pack_from_userptr(ctx, userptr, |
Oded Gabbay | fbcd0ef | 2021-06-29 18:23:41 +0300 | [diff] [blame] | 1082 | &phys_pg_pack, false); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1083 | if (rc) { |
| 1084 | dev_err(hdev->dev, |
| 1085 | "unable to init page pack for vaddr 0x%llx\n", |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1086 | addr); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1087 | goto init_page_pack_err; |
| 1088 | } |
| 1089 | |
Oded Gabbay | 82629c7 | 2021-06-29 18:08:05 +0300 | [diff] [blame] | 1090 | vm_type = (enum vm_type *) userptr; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1091 | hint_addr = args->map_host.hint_addr; |
Omer Shpigelman | 8ff5f4f | 2020-05-24 23:06:59 +0300 | [diff] [blame] | 1092 | handle = phys_pg_pack->handle; |
Omer Shpigelman | 7c52fb0 | 2020-06-28 21:15:53 +0300 | [diff] [blame] | 1093 | |
| 1094 | /* get required alignment */ |
| 1095 | if (phys_pg_pack->page_size == page_size) { |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1096 | va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST]; |
farah kassabri | 1ae32b9 | 2021-01-31 18:56:03 +0200 | [diff] [blame] | 1097 | va_range_type = HL_VA_RANGE_TYPE_HOST; |
Omer Shpigelman | 7c52fb0 | 2020-06-28 21:15:53 +0300 | [diff] [blame] | 1098 | /* |
| 1099 | * huge page alignment may be needed in case of regular |
| 1100 | * page mapping, depending on the host VA alignment |
| 1101 | */ |
| 1102 | if (addr & (huge_page_size - 1)) |
| 1103 | va_block_align = page_size; |
| 1104 | else |
| 1105 | va_block_align = huge_page_size; |
| 1106 | } else { |
| 1107 | /* |
| 1108 | * huge page alignment is needed in case of huge page |
| 1109 | * mapping |
| 1110 | */ |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1111 | va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]; |
farah kassabri | 1ae32b9 | 2021-01-31 18:56:03 +0200 | [diff] [blame] | 1112 | va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE; |
Omer Shpigelman | 7c52fb0 | 2020-06-28 21:15:53 +0300 | [diff] [blame] | 1113 | va_block_align = huge_page_size; |
| 1114 | } |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1115 | } else { |
| 1116 | handle = lower_32_bits(args->map_device.handle); |
| 1117 | |
| 1118 | spin_lock(&vm->idr_lock); |
| 1119 | phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); |
| 1120 | if (!phys_pg_pack) { |
| 1121 | spin_unlock(&vm->idr_lock); |
| 1122 | dev_err(hdev->dev, |
| 1123 | "no match for handle %u\n", handle); |
| 1124 | return -EINVAL; |
| 1125 | } |
| 1126 | |
| 1127 | /* increment now to avoid freeing device memory while mapping */ |
| 1128 | atomic_inc(&phys_pg_pack->mapping_cnt); |
| 1129 | |
| 1130 | spin_unlock(&vm->idr_lock); |
| 1131 | |
Oded Gabbay | 82629c7 | 2021-06-29 18:08:05 +0300 | [diff] [blame] | 1132 | vm_type = (enum vm_type *) phys_pg_pack; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1133 | |
| 1134 | hint_addr = args->map_device.hint_addr; |
Omer Shpigelman | 7c52fb0 | 2020-06-28 21:15:53 +0300 | [diff] [blame] | 1135 | |
Moti Haimovski | b19dc67 | 2020-11-18 20:15:29 +0200 | [diff] [blame] | 1136 | /* DRAM VA alignment is the same as the MMU page size */ |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1137 | va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM]; |
farah kassabri | 1ae32b9 | 2021-01-31 18:56:03 +0200 | [diff] [blame] | 1138 | va_range_type = HL_VA_RANGE_TYPE_DRAM; |
Omer Shpigelman | 7c52fb0 | 2020-06-28 21:15:53 +0300 | [diff] [blame] | 1139 | va_block_align = hdev->asic_prop.dmmu.page_size; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1140 | } |
| 1141 | |
| 1142 | /* |
| 1143 | * relevant for mapping device physical memory only, as host memory is |
| 1144 | * implicitly shared |
| 1145 | */ |
| 1146 | if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) && |
| 1147 | phys_pg_pack->asid != ctx->asid) { |
| 1148 | dev_err(hdev->dev, |
| 1149 | "Failed to map memory, handle %u is not shared\n", |
| 1150 | handle); |
| 1151 | rc = -EPERM; |
| 1152 | goto shared_err; |
| 1153 | } |
| 1154 | |
| 1155 | hnode = kzalloc(sizeof(*hnode), GFP_KERNEL); |
| 1156 | if (!hnode) { |
| 1157 | rc = -ENOMEM; |
| 1158 | goto hnode_err; |
| 1159 | } |
| 1160 | |
Yuri Nudelman | 486e197 | 2021-06-03 17:51:58 +0300 | [diff] [blame] | 1161 | if (hint_addr && phys_pg_pack->offset) { |
| 1162 | if (args->flags & HL_MEM_FORCE_HINT) { |
Oded Gabbay | 82629c7 | 2021-06-29 18:08:05 +0300 | [diff] [blame] | 1163 | /* Fail if hint must be respected but it can't be */ |
Yuri Nudelman | 486e197 | 2021-06-03 17:51:58 +0300 | [diff] [blame] | 1164 | dev_err(hdev->dev, |
| 1165 | "Hint address 0x%llx cannot be respected because source memory is misaligned by 0x%x\n",
| 1166 | hint_addr, phys_pg_pack->offset); |
| 1167 | rc = -EINVAL; |
| 1168 | goto va_block_err; |
| 1169 | } |
| 1170 | dev_dbg(hdev->dev, |
| 1171 | "Hint address 0x%llx will be ignored because source memory is misaligned by 0x%x\n",
| 1172 | hint_addr, phys_pg_pack->offset); |
| 1173 | } |
| 1174 | |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1175 | ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size, |
farah kassabri | 1ae32b9 | 2021-01-31 18:56:03 +0200 | [diff] [blame] | 1176 | hint_addr, va_block_align, |
Yuri Nudelman | 486e197 | 2021-06-03 17:51:58 +0300 | [diff] [blame] | 1177 | va_range_type, args->flags); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1178 | if (!ret_vaddr) { |
| 1179 | dev_err(hdev->dev, "no available va block for handle %u\n", |
| 1180 | handle); |
| 1181 | rc = -ENOMEM; |
| 1182 | goto va_block_err; |
| 1183 | } |
| 1184 | |
| 1185 | mutex_lock(&ctx->mmu_lock); |
| 1186 | |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1187 | rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1188 | if (rc) { |
| 1189 | mutex_unlock(&ctx->mmu_lock); |
| 1190 | dev_err(hdev->dev, "mapping page pack failed for handle %u\n", |
| 1191 | handle); |
| 1192 | goto map_err; |
| 1193 | } |
| 1194 | |
Alon Mizrahi | 08c03a1 | 2021-04-08 15:30:59 +0300 | [diff] [blame] | 1195 | rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, false, |
| 1196 | *vm_type, ctx->asid, ret_vaddr, phys_pg_pack->total_size); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1197 | |
| 1198 | mutex_unlock(&ctx->mmu_lock); |
| 1199 | |
Omer Shpigelman | 8ff5f4f | 2020-05-24 23:06:59 +0300 | [diff] [blame] | 1200 | if (rc) { |
| 1201 | dev_err(hdev->dev, |
| 1202 | "mapping handle %u failed due to MMU cache invalidation\n", |
| 1203 | handle); |
| 1204 | goto map_err; |
| 1205 | } |
| 1206 | |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1207 | ret_vaddr += phys_pg_pack->offset; |
| 1208 | |
| 1209 | hnode->ptr = vm_type; |
| 1210 | hnode->vaddr = ret_vaddr; |
| 1211 | |
| 1212 | mutex_lock(&ctx->mem_hash_lock); |
| 1213 | hash_add(ctx->mem_hash, &hnode->node, ret_vaddr); |
| 1214 | mutex_unlock(&ctx->mem_hash_lock); |
| 1215 | |
| 1216 | *device_addr = ret_vaddr; |
| 1217 | |
| 1218 | if (is_userptr) |
Bharat Jauhari | d4b1e5d | 2021-03-18 12:11:19 +0200 | [diff] [blame] | 1219 | rc = free_phys_pg_pack(hdev, phys_pg_pack); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1220 | |
Bharat Jauhari | d4b1e5d | 2021-03-18 12:11:19 +0200 | [diff] [blame] | 1221 | return rc; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1222 | |
| 1223 | map_err: |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1224 | if (add_va_block(hdev, va_range, ret_vaddr, |
| 1225 | ret_vaddr + phys_pg_pack->total_size - 1)) |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1226 | dev_warn(hdev->dev, |
| 1227 | "release va block failed for handle 0x%x, vaddr: 0x%llx\n", |
| 1228 | handle, ret_vaddr); |
| 1229 | |
| 1230 | va_block_err: |
| 1231 | kfree(hnode); |
| 1232 | hnode_err: |
| 1233 | shared_err: |
| 1234 | atomic_dec(&phys_pg_pack->mapping_cnt); |
| 1235 | if (is_userptr) |
| 1236 | free_phys_pg_pack(hdev, phys_pg_pack); |
| 1237 | init_page_pack_err: |
| 1238 | if (is_userptr) |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1239 | dma_unmap_host_va(hdev, userptr); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1240 | |
| 1241 | return rc; |
| 1242 | } |
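| | 
| | /*
| |  * The va_block_align choice above, condensed into a standalone sketch
| |  * (the helper name and the is_huge_mapping flag are illustrative, not
| |  * driver code): a regular host mapping can still get huge-page
| |  * alignment for free whenever the host VA itself is huge-page aligned.
| |  */
| | #if 0
| | static u32 pick_va_block_align(u64 addr, u32 page_size, u32 huge_page_size,
| | 				bool is_huge_mapping)
| | {
| | 	if (is_huge_mapping)
| | 		return huge_page_size;
| | 
| | 	/* regular mapping: prefer huge alignment when the host VA allows */
| | 	return (addr & (huge_page_size - 1)) ? page_size : huge_page_size;
| | }
| | #endif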
| 1243 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1244 | /** |
| 1245 | * unmap_device_va() - unmap the given device virtual address. |
| 1246 | * @ctx: pointer to the context structure. |
Omer Shpigelman | f19040c | 2020-12-09 13:34:11 +0200 | [diff] [blame] | 1247 | * @args: host parameters with device virtual address to unmap. |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1248 | * @ctx_free: true if in context free flow, false otherwise. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1249 | * |
| 1250 | * This function does the following: |
| 1251 |  * - Unmap the physical pages related to the given virtual address.
| 1252 |  * - Return the device virtual block to the virtual block list.
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1253 | */ |
Omer Shpigelman | f19040c | 2020-12-09 13:34:11 +0200 | [diff] [blame] | 1254 | static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, |
| 1255 | bool ctx_free) |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1256 | { |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1257 | struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; |
Oded Gabbay | fbcd0ef | 2021-06-29 18:23:41 +0300 | [diff] [blame] | 1258 | u64 vaddr = args->unmap.device_virt_addr; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1259 | struct hl_vm_hash_node *hnode = NULL; |
Oded Gabbay | fbcd0ef | 2021-06-29 18:23:41 +0300 | [diff] [blame] | 1260 | struct asic_fixed_properties *prop; |
| 1261 | struct hl_device *hdev = ctx->hdev; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1262 | struct hl_userptr *userptr = NULL; |
Omer Shpigelman | 71c5e55 | 2019-11-14 18:23:57 +0000 | [diff] [blame] | 1263 | struct hl_va_range *va_range; |
Oded Gabbay | 82629c7 | 2021-06-29 18:08:05 +0300 | [diff] [blame] | 1264 | enum vm_type *vm_type; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1265 | bool is_userptr; |
Tomer Tayar | c68f1ba | 2020-06-01 09:56:47 +0300 | [diff] [blame] | 1266 | int rc = 0; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1267 | |
Oded Gabbay | fbcd0ef | 2021-06-29 18:23:41 +0300 | [diff] [blame] | 1268 | prop = &hdev->asic_prop; |
| 1269 | |
| 1270 | /* protect against a concurrent unmap of the same address */
| 1271 | mutex_lock(&ctx->mem_hash_lock); |
| 1272 | hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr) |
| 1273 | if (vaddr == hnode->vaddr) |
| 1274 | break; |
| 1275 | |
| 1276 | if (!hnode) { |
| 1277 | mutex_unlock(&ctx->mem_hash_lock); |
| 1278 | dev_err(hdev->dev, |
| 1279 | "unmap failed, no mem hnode for vaddr 0x%llx\n", |
| 1280 | vaddr); |
| 1281 | return -EINVAL; |
| 1282 | } |
| 1283 | |
| 1284 | hash_del(&hnode->node); |
| 1285 | mutex_unlock(&ctx->mem_hash_lock); |
| 1286 | |
| 1287 | vm_type = hnode->ptr; |
| 1288 | |
| 1289 | if (*vm_type == VM_TYPE_USERPTR) { |
| 1290 | is_userptr = true; |
| 1291 | userptr = hnode->ptr; |
Oded Gabbay | fbcd0ef | 2021-06-29 18:23:41 +0300 | [diff] [blame] | 1292 | |
| 1293 | rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack, |
| 1294 | false); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1295 | if (rc) { |
| 1296 | dev_err(hdev->dev, |
| 1297 | "unable to init page pack for vaddr 0x%llx\n", |
| 1298 | vaddr); |
| 1299 | goto vm_type_err; |
| 1300 | } |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1301 | |
| 1302 | if (phys_pg_pack->page_size == |
| 1303 | hdev->asic_prop.pmmu.page_size) |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1304 | va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST]; |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1305 | else |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1306 | va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1307 | } else if (*vm_type == VM_TYPE_PHYS_PACK) { |
| 1308 | is_userptr = false; |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1309 | va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM]; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1310 | phys_pg_pack = hnode->ptr; |
| 1311 | } else { |
| 1312 | dev_warn(hdev->dev, |
| 1313 | "unmap failed, unknown vm desc for vaddr 0x%llx\n", |
| 1314 | vaddr); |
| 1315 | rc = -EFAULT; |
| 1316 | goto vm_type_err; |
| 1317 | } |
| 1318 | |
| 1319 | if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) { |
| 1320 | dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr); |
| 1321 | rc = -EINVAL; |
| 1322 | goto mapping_cnt_err; |
| 1323 | } |
| 1324 | |
Moti Haimovski | b19dc67 | 2020-11-18 20:15:29 +0200 | [diff] [blame] | 1325 | if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size)) |
| 1326 | vaddr = prop->dram_base_address + |
| 1327 | DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address, |
| 1328 | phys_pg_pack->page_size) * |
| 1329 | phys_pg_pack->page_size; |
| 1330 | else |
| 1331 | vaddr &= ~(((u64) phys_pg_pack->page_size) - 1); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1332 | |
| 1333 | mutex_lock(&ctx->mmu_lock); |
| 1334 | |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1335 | unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1336 | |
Omer Shpigelman | bea84c4 | 2019-11-14 18:23:58 +0000 | [diff] [blame] | 1337 | /* |
| 1338 | * During context free this function is called in a loop to clean all |
| 1339 | * the context mappings. Hence the cache invalidation can be called once |
| 1340 | * at the loop end rather than for each iteration |
| 1341 | */ |
| 1342 | if (!ctx_free) |
Alon Mizrahi | 08c03a1 | 2021-04-08 15:30:59 +0300 | [diff] [blame] | 1343 | rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, true, |
| 1344 | *vm_type, ctx->asid, vaddr, |
| 1345 | phys_pg_pack->total_size); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1346 | |
| 1347 | mutex_unlock(&ctx->mmu_lock); |
| 1348 | |
Omer Shpigelman | 71c5e55 | 2019-11-14 18:23:57 +0000 | [diff] [blame] | 1349 | /* |
| 1350 |  * If the context is closing, we don't need to check the MMU cache
| 1351 |  * invalidation return code or update the VA free list, as in this flow
| 1352 | * we invalidate the MMU cache outside of this unmap function and the VA |
| 1353 | * free list will be freed anyway. |
Omer Shpigelman | 71c5e55 | 2019-11-14 18:23:57 +0000 | [diff] [blame] | 1354 | */ |
| 1355 | if (!ctx_free) { |
Omer Shpigelman | 8ff5f4f | 2020-05-24 23:06:59 +0300 | [diff] [blame] | 1356 | int tmp_rc; |
| 1357 | |
Omer Shpigelman | 71c5e55 | 2019-11-14 18:23:57 +0000 | [diff] [blame] | 1358 | if (rc) |
Omer Shpigelman | 8ff5f4f | 2020-05-24 23:06:59 +0300 | [diff] [blame] | 1359 | dev_err(hdev->dev, |
| 1360 | "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n", |
| 1361 | vaddr); |
| 1362 | |
| 1363 | tmp_rc = add_va_block(hdev, va_range, vaddr, |
| 1364 | vaddr + phys_pg_pack->total_size - 1); |
| 1365 | if (tmp_rc) { |
Omer Shpigelman | 71c5e55 | 2019-11-14 18:23:57 +0000 | [diff] [blame] | 1366 | dev_warn(hdev->dev, |
| 1367 | "add va block failed for vaddr: 0x%llx\n", |
| 1368 | vaddr); |
Omer Shpigelman | 8ff5f4f | 2020-05-24 23:06:59 +0300 | [diff] [blame] | 1369 | if (!rc) |
| 1370 | rc = tmp_rc; |
| 1371 | } |
Omer Shpigelman | 71c5e55 | 2019-11-14 18:23:57 +0000 | [diff] [blame] | 1372 | } |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1373 | |
| 1374 | atomic_dec(&phys_pg_pack->mapping_cnt); |
| 1375 | kfree(hnode); |
| 1376 | |
| 1377 | if (is_userptr) { |
Oded Gabbay | fbcd0ef | 2021-06-29 18:23:41 +0300 | [diff] [blame] | 1378 | free_phys_pg_pack(hdev, phys_pg_pack); |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1379 | dma_unmap_host_va(hdev, userptr); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1380 | } |
| 1381 | |
Omer Shpigelman | 8ff5f4f | 2020-05-24 23:06:59 +0300 | [diff] [blame] | 1382 | return rc; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1383 | |
| 1384 | mapping_cnt_err: |
| 1385 | if (is_userptr) |
| 1386 | free_phys_pg_pack(hdev, phys_pg_pack); |
| 1387 | vm_type_err: |
| 1388 | mutex_lock(&ctx->mem_hash_lock); |
| 1389 | hash_add(ctx->mem_hash, &hnode->node, vaddr); |
| 1390 | mutex_unlock(&ctx->mem_hash_lock); |
| 1391 | |
| 1392 | return rc; |
| 1393 | } |
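| | 
| | /*
| |  * The vaddr rounding above, sketched: for a power-of-2 page size a mask
| |  * is enough, but non-power-of-2 DRAM pages (e.g. 48MB) need an explicit
| |  * division relative to the DRAM base. dram_base stands in for
| |  * prop->dram_base_address; values are illustrative.
| |  */
| | #if 0
| | 	u64 page_start;
| | 
| | 	if (is_power_of_2(page_size))
| | 		page_start = vaddr & ~((u64) page_size - 1);
| | 	else
| | 		page_start = dram_base +
| | 			div64_u64(vaddr - dram_base, page_size) * page_size;
| | #endif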
| 1394 | |
Oded Gabbay | 6df50d2 | 2021-02-05 16:04:34 +0200 | [diff] [blame] | 1395 | static int map_block(struct hl_device *hdev, u64 address, u64 *handle, |
| 1396 | u32 *size) |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1397 | { |
| 1398 | u32 block_id = 0; |
| 1399 | int rc; |
| 1400 | |
Oded Gabbay | 6df50d2 | 2021-02-05 16:04:34 +0200 | [diff] [blame] | 1401 | rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id); |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1402 | |
| 1403 | *handle = block_id | HL_MMAP_TYPE_BLOCK; |
| 1404 | *handle <<= PAGE_SHIFT; |
| 1405 | |
| 1406 | return rc; |
| 1407 | } |
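| | 
| | /*
| |  * The handle layout produced above: the block id occupies the low bits,
| |  * the HL_MMAP_TYPE_BLOCK flag sits above it, and the whole value is
| |  * shifted left by PAGE_SHIFT so it can be passed back to the driver as
| |  * a page-aligned mmap() offset. A sketch of the round trip, assuming
| |  * the id bits do not overlap the type flag:
| |  */
| | #if 0
| | 	u64 handle = ((u64) block_id | HL_MMAP_TYPE_BLOCK) << PAGE_SHIFT;
| | 	u32 id = (u32) ((handle >> PAGE_SHIFT) & ~HL_MMAP_TYPE_BLOCK);
| | #endif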
| 1408 | |
| 1409 | static void hw_block_vm_close(struct vm_area_struct *vma) |
| 1410 | { |
Sagiv Ozeri | a4371c1 | 2021-02-23 11:01:08 +0200 | [diff] [blame] | 1411 | struct hl_vm_hw_block_list_node *lnode = |
| 1412 | (struct hl_vm_hw_block_list_node *) vma->vm_private_data; |
| 1413 | struct hl_ctx *ctx = lnode->ctx; |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1414 | |
Sagiv Ozeri | a4371c1 | 2021-02-23 11:01:08 +0200 | [diff] [blame] | 1415 | mutex_lock(&ctx->hw_block_list_lock); |
| 1416 | list_del(&lnode->node); |
| 1417 | mutex_unlock(&ctx->hw_block_list_lock); |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1418 | hl_ctx_put(ctx); |
Sagiv Ozeri | a4371c1 | 2021-02-23 11:01:08 +0200 | [diff] [blame] | 1419 | kfree(lnode); |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1420 | vma->vm_private_data = NULL; |
| 1421 | } |
| 1422 | |
| 1423 | static const struct vm_operations_struct hw_block_vm_ops = { |
| 1424 | .close = hw_block_vm_close |
| 1425 | }; |
| 1426 | |
| 1427 | /** |
| 1428 | * hl_hw_block_mmap() - mmap a hw block to user. |
| 1429 |  * @hpriv: pointer to the private data of the fd.
| 1430 |  * @vma: pointer to vm_area_struct of the process.
| 1431 |  *
| 1432 |  * The driver increments the context reference count for every mapped HW
| 1433 |  * block, to prevent the user from closing the FD without unmapping first.
| 1434 | */ |
| 1435 | int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) |
| 1436 | { |
Sagiv Ozeri | a4371c1 | 2021-02-23 11:01:08 +0200 | [diff] [blame] | 1437 | struct hl_vm_hw_block_list_node *lnode; |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1438 | struct hl_device *hdev = hpriv->hdev; |
Sagiv Ozeri | a4371c1 | 2021-02-23 11:01:08 +0200 | [diff] [blame] | 1439 | struct hl_ctx *ctx = hpriv->ctx; |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1440 | u32 block_id, block_size; |
| 1441 | int rc; |
| 1442 | |
| 1443 | /* We use the page offset to hold the block id and thus we need to clear |
| 1444 | * it before doing the mmap itself |
| 1445 | */ |
| 1446 | block_id = vma->vm_pgoff; |
| 1447 | vma->vm_pgoff = 0; |
| 1448 | |
| 1449 | /* Driver only allows mapping of a complete HW block */ |
| 1450 | block_size = vma->vm_end - vma->vm_start; |
| 1451 | |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1452 | if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) { |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1453 | dev_err(hdev->dev, |
| 1454 | "user pointer is invalid - 0x%lx\n", |
| 1455 | vma->vm_start); |
| 1456 | |
| 1457 | return -EINVAL; |
| 1458 | } |
| 1459 | |
Sagiv Ozeri | a4371c1 | 2021-02-23 11:01:08 +0200 | [diff] [blame] | 1460 | lnode = kzalloc(sizeof(*lnode), GFP_KERNEL); |
| 1461 | if (!lnode) |
| 1462 | return -ENOMEM; |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1463 | |
Sagiv Ozeri | a4371c1 | 2021-02-23 11:01:08 +0200 | [diff] [blame] | 1464 | vma->vm_ops = &hw_block_vm_ops; |
| 1465 | vma->vm_private_data = lnode; |
| 1466 | |
| 1467 | hl_ctx_get(hdev, ctx); |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1468 | |
| 1469 | rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size); |
| 1470 | if (rc) { |
Sagiv Ozeri | a4371c1 | 2021-02-23 11:01:08 +0200 | [diff] [blame] | 1471 | hl_ctx_put(ctx); |
| 1472 | kfree(lnode); |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1473 | return rc; |
| 1474 | } |
| 1475 | |
Sagiv Ozeri | a4371c1 | 2021-02-23 11:01:08 +0200 | [diff] [blame] | 1476 | lnode->ctx = ctx; |
| 1477 | lnode->vaddr = vma->vm_start; |
| 1478 | lnode->size = block_size; |
| 1479 | lnode->id = block_id; |
| 1480 | |
| 1481 | mutex_lock(&ctx->hw_block_list_lock); |
| 1482 | list_add_tail(&lnode->node, &ctx->hw_block_mem_list); |
| 1483 | mutex_unlock(&ctx->hw_block_list_lock); |
| 1484 | |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1485 | vma->vm_pgoff = block_id; |
| 1486 | |
| 1487 | return 0; |
| 1488 | } |
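| | 
| | /*
| |  * A user-space sketch of consuming this interface: block_handle is the
| |  * value returned by the HL_MEM_OP_MAP_BLOCK ioctl and fd is the device
| |  * file descriptor (names and protection flags are illustrative):
| |  */
| | #if 0
| | 	void *p = mmap(NULL, block_size, PROT_READ | PROT_WRITE, MAP_SHARED,
| | 			fd, block_handle);
| | 	if (p == MAP_FAILED)
| | 		perror("mmap hw block");
| | 
| | 	/* ... access the HW block ... */
| | 
| | 	munmap(p, block_size);	/* releases the context ref via vm close */
| | #endif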
| 1489 | |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1490 | static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args) |
| 1491 | { |
| 1492 | struct hl_device *hdev = hpriv->hdev; |
| 1493 | struct hl_ctx *ctx = hpriv->ctx; |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1494 | u64 block_handle, device_addr = 0; |
Oded Gabbay | 6df50d2 | 2021-02-05 16:04:34 +0200 | [diff] [blame] | 1495 | u32 handle = 0, block_size; |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1496 | int rc; |
| 1497 | |
| 1498 | switch (args->in.op) { |
| 1499 | case HL_MEM_OP_ALLOC: |
| 1500 | if (args->in.alloc.mem_size == 0) { |
| 1501 | dev_err(hdev->dev, |
| 1502 | "alloc size must be larger than 0\n"); |
| 1503 | rc = -EINVAL; |
| 1504 | goto out; |
| 1505 | } |
| 1506 | |
| 1507 | /* Force contiguous as there are no real MMU |
| 1508 | * translations to overcome physical memory gaps |
| 1509 | */ |
| 1510 | args->in.flags |= HL_MEM_CONTIGUOUS; |
| 1511 | rc = alloc_device_memory(ctx, &args->in, &handle); |
| 1512 | |
| 1513 | memset(args, 0, sizeof(*args)); |
| 1514 | args->out.handle = (__u64) handle; |
| 1515 | break; |
| 1516 | |
| 1517 | case HL_MEM_OP_FREE: |
Omer Shpigelman | f19040c | 2020-12-09 13:34:11 +0200 | [diff] [blame] | 1518 | rc = free_device_memory(ctx, &args->in); |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1519 | break; |
| 1520 | |
| 1521 | case HL_MEM_OP_MAP: |
| 1522 | if (args->in.flags & HL_MEM_USERPTR) { |
| 1523 | device_addr = args->in.map_host.host_virt_addr; |
| 1524 | rc = 0; |
| 1525 | } else { |
| 1526 | rc = get_paddr_from_handle(ctx, &args->in, |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1527 | &device_addr); |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1528 | } |
| 1529 | |
| 1530 | memset(args, 0, sizeof(*args)); |
| 1531 | args->out.device_virt_addr = device_addr; |
| 1532 | break; |
| 1533 | |
| 1534 | case HL_MEM_OP_UNMAP: |
| 1535 | rc = 0; |
| 1536 | break; |
| 1537 | |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1538 | case HL_MEM_OP_MAP_BLOCK: |
| 1539 | rc = map_block(hdev, args->in.map_block.block_addr, |
Oded Gabbay | 6df50d2 | 2021-02-05 16:04:34 +0200 | [diff] [blame] | 1540 | &block_handle, &block_size); |
| 1541 | args->out.block_handle = block_handle; |
| 1542 | args->out.block_size = block_size; |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1543 | break; |
| 1544 | |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1545 | default: |
| 1546 | dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); |
| 1547 | rc = -ENOTTY; |
| 1548 | break; |
| 1549 | } |
| 1550 | |
| 1551 | out: |
| 1552 | return rc; |
| 1553 | } |
| 1554 | |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1555 | int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) |
| 1556 | { |
Ofir Bitton | 66a7640 | 2020-10-05 14:40:10 +0300 | [diff] [blame] | 1557 | enum hl_device_status status; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1558 | union hl_mem_args *args = data; |
| 1559 | struct hl_device *hdev = hpriv->hdev; |
| 1560 | struct hl_ctx *ctx = hpriv->ctx; |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1561 | u64 block_handle, device_addr = 0; |
Oded Gabbay | 6df50d2 | 2021-02-05 16:04:34 +0200 | [diff] [blame] | 1562 | u32 handle = 0, block_size; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1563 | int rc; |
| 1564 | |
Ofir Bitton | 66a7640 | 2020-10-05 14:40:10 +0300 | [diff] [blame] | 1565 | if (!hl_device_operational(hdev, &status)) { |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1566 | dev_warn_ratelimited(hdev->dev, |
Oded Gabbay | 3f5398c | 2019-04-06 15:41:35 +0300 | [diff] [blame] | 1567 | "Device is %s. Can't execute MEMORY IOCTL\n", |
Ofir Bitton | 66a7640 | 2020-10-05 14:40:10 +0300 | [diff] [blame] | 1568 | hdev->status[status]); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1569 | return -EBUSY; |
| 1570 | } |
| 1571 | |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1572 | if (!hdev->mmu_enable) |
| 1573 | return mem_ioctl_no_mmu(hpriv, args); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1574 | |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1575 | switch (args->in.op) { |
| 1576 | case HL_MEM_OP_ALLOC: |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1577 | if (args->in.alloc.mem_size == 0) { |
| 1578 | dev_err(hdev->dev, |
| 1579 | "alloc size must be larger than 0\n"); |
| 1580 | rc = -EINVAL; |
| 1581 | goto out; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1582 | } |
Oded Gabbay | 3e62299 | 2020-10-18 15:32:23 +0300 | [diff] [blame] | 1583 | |
| 1584 | /* If DRAM does not support virtual memory the driver won't |
| 1585 | * handle the allocation/freeing of that memory. However, for |
| 1586 | * system administration/monitoring purposes, the driver will |
| 1587 | * keep track of the amount of DRAM memory that is allocated |
| 1588 | * and freed by the user. Because this code totally relies on |
| 1589 | * the user's input, the driver can't ensure the validity |
| 1590 | * of this accounting. |
| 1591 | */ |
Oded Gabbay | 7f070c9 | 2020-11-09 09:48:31 +0200 | [diff] [blame] | 1592 | if (!hdev->asic_prop.dram_supports_virtual_memory) { |
Oded Gabbay | 3e62299 | 2020-10-18 15:32:23 +0300 | [diff] [blame] | 1593 | atomic64_add(args->in.alloc.mem_size, |
| 1594 | &ctx->dram_phys_mem); |
| 1595 | atomic64_add(args->in.alloc.mem_size, |
| 1596 | &hdev->dram_used_mem); |
| 1597 | |
| 1598 | dev_dbg(hdev->dev, "DRAM alloc is not supported\n"); |
| 1599 | rc = 0; |
| 1600 | |
| 1601 | memset(args, 0, sizeof(*args)); |
| 1602 | args->out.handle = 0; |
| 1603 | goto out; |
| 1604 | } |
| 1605 | |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1606 | rc = alloc_device_memory(ctx, &args->in, &handle); |
| 1607 | |
| 1608 | memset(args, 0, sizeof(*args)); |
| 1609 | args->out.handle = (__u64) handle; |
| 1610 | break; |
| 1611 | |
| 1612 | case HL_MEM_OP_FREE: |
Oded Gabbay | 3e62299 | 2020-10-18 15:32:23 +0300 | [diff] [blame] | 1613 | /* If DRAM does not support virtual memory the driver won't |
| 1614 | * handle the allocation/freeing of that memory. However, for |
| 1615 | * system administration/monitoring purposes, the driver will |
| 1616 | * keep track of the amount of DRAM memory that is allocated |
| 1617 | * and freed by the user. Because this code totally relies on |
| 1618 | * the user's input, the driver can't ensure the validity |
| 1619 | * of this accounting. |
| 1620 | */ |
Oded Gabbay | 7f070c9 | 2020-11-09 09:48:31 +0200 | [diff] [blame] | 1621 | if (!hdev->asic_prop.dram_supports_virtual_memory) { |
Oded Gabbay | 3e62299 | 2020-10-18 15:32:23 +0300 | [diff] [blame] | 1622 | atomic64_sub(args->in.alloc.mem_size, |
| 1623 | &ctx->dram_phys_mem); |
| 1624 | atomic64_sub(args->in.alloc.mem_size, |
| 1625 | &hdev->dram_used_mem); |
| 1626 | |
| 1627 | dev_dbg(hdev->dev, "DRAM free is not supported\n");
| 1628 | rc = 0; |
| 1629 | |
| 1630 | goto out; |
| 1631 | } |
| 1632 | |
Omer Shpigelman | f19040c | 2020-12-09 13:34:11 +0200 | [diff] [blame] | 1633 | rc = free_device_memory(ctx, &args->in); |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1634 | break; |
| 1635 | |
| 1636 | case HL_MEM_OP_MAP: |
| 1637 | rc = map_device_va(ctx, &args->in, &device_addr); |
| 1638 | |
| 1639 | memset(args, 0, sizeof(*args)); |
| 1640 | args->out.device_virt_addr = device_addr; |
| 1641 | break; |
| 1642 | |
| 1643 | case HL_MEM_OP_UNMAP: |
Omer Shpigelman | f19040c | 2020-12-09 13:34:11 +0200 | [diff] [blame] | 1644 | rc = unmap_device_va(ctx, &args->in, false); |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1645 | break; |
| 1646 | |
Ofir Bitton | d00697f | 2021-01-05 12:55:06 +0200 | [diff] [blame] | 1647 | case HL_MEM_OP_MAP_BLOCK: |
| 1648 | rc = map_block(hdev, args->in.map_block.block_addr, |
Oded Gabbay | 6df50d2 | 2021-02-05 16:04:34 +0200 | [diff] [blame] | 1649 | &block_handle, &block_size); |
| 1650 | args->out.block_handle = block_handle; |
| 1651 | args->out.block_size = block_size; |
Oded Gabbay | 54303a1 | 2019-04-04 14:42:26 +0300 | [diff] [blame] | 1652 | break; |
| 1653 | |
| 1654 | default: |
| 1655 | dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); |
| 1656 | rc = -ENOTTY; |
| 1657 | break; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1658 | } |
| 1659 | |
| 1660 | out: |
| 1661 | return rc; |
| 1662 | } |
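| | 
| | /*
| |  * A user-space sketch of the HL_MEM_OP_MAP flow dispatched above, using
| |  * the memory ioctl from uapi/misc/habanalabs.h (error handling omitted;
| |  * buf, buf_size, fd and device_va are illustrative):
| |  */
| | #if 0
| | 	union hl_mem_args args = {0};
| | 
| | 	args.in.op = HL_MEM_OP_MAP;
| | 	args.in.flags = HL_MEM_USERPTR;
| | 	args.in.map_host.host_virt_addr = (__u64) (uintptr_t) buf;
| | 	args.in.map_host.mem_size = buf_size;
| | 
| | 	if (!ioctl(fd, HL_IOCTL_MEMORY, &args))
| | 		device_va = args.out.device_virt_addr;
| | #endif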
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1663 | |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1664 | static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size, |
| 1665 | u32 npages, u64 start, u32 offset, |
| 1666 | struct hl_userptr *userptr) |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1667 | { |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1668 | int rc; |
| 1669 | |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1670 | if (!access_ok((void __user *) (uintptr_t) addr, size)) { |
Oded Gabbay | 230afe7 | 2019-02-27 00:19:18 +0200 | [diff] [blame] | 1671 | dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr); |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1672 | return -EFAULT; |
| 1673 | } |
| 1674 | |
Daniel Vetter | d4cb192 | 2020-11-27 17:41:17 +0100 | [diff] [blame] | 1675 | userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages), |
| 1676 | GFP_KERNEL); |
| 1677 | if (!userptr->pages) |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1678 | return -ENOMEM; |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1679 | |
Daniel Vetter | d88a0c1 | 2020-11-27 17:41:18 +0100 | [diff] [blame] | 1680 | rc = pin_user_pages_fast(start, npages, |
| 1681 | FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM, |
Daniel Vetter | d4cb192 | 2020-11-27 17:41:17 +0100 | [diff] [blame] | 1682 | userptr->pages); |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1683 | |
| 1684 | if (rc != npages) { |
| 1685 | dev_err(hdev->dev, |
Tomer Tayar | f5d6e39 | 2021-06-10 20:48:39 +0300 | [diff] [blame] | 1686 | "Failed (%d) to pin host memory with user ptr 0x%llx, size 0x%llx, npages %d\n", |
| 1687 | rc, addr, size, npages); |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1688 | if (rc < 0) |
Daniel Vetter | d4cb192 | 2020-11-27 17:41:17 +0100 | [diff] [blame] | 1689 | goto destroy_pages; |
| 1690 | npages = rc; |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1691 | rc = -EFAULT; |
Daniel Vetter | d4cb192 | 2020-11-27 17:41:17 +0100 | [diff] [blame] | 1692 | goto put_pages; |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1693 | } |
Daniel Vetter | d4cb192 | 2020-11-27 17:41:17 +0100 | [diff] [blame] | 1694 | userptr->npages = npages; |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1695 | |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1696 | rc = sg_alloc_table_from_pages(userptr->sgt, |
Daniel Vetter | d4cb192 | 2020-11-27 17:41:17 +0100 | [diff] [blame] | 1697 | userptr->pages, |
Ofir Bitton | d5eb837 | 2021-02-14 15:35:56 +0200 | [diff] [blame] | 1698 | npages, offset, size, GFP_KERNEL); |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1699 | if (rc < 0) { |
| 1700 | dev_err(hdev->dev, "failed to create SG table from pages\n"); |
Daniel Vetter | d4cb192 | 2020-11-27 17:41:17 +0100 | [diff] [blame] | 1701 | goto put_pages; |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1702 | } |
| 1703 | |
| 1704 | return 0; |
| 1705 | |
Daniel Vetter | d4cb192 | 2020-11-27 17:41:17 +0100 | [diff] [blame] | 1706 | put_pages: |
| 1707 | unpin_user_pages(userptr->pages, npages); |
| 1708 | destroy_pages: |
| 1709 | kvfree(userptr->pages); |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1710 | return rc; |
| 1711 | } |
| 1712 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1713 | /** |
| 1714 | * hl_pin_host_memory() - pins a chunk of host memory. |
| 1715 | * @hdev: pointer to the habanalabs device structure. |
| 1716 | * @addr: the host virtual address of the memory area. |
| 1717 | * @size: the size of the memory area. |
| 1718 | * @userptr: pointer to hl_userptr structure. |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1719 | * |
| 1720 | * This function does the following: |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1721 | * - Pins the physical pages. |
| 1722 |  * - Creates an SG list from those pages.
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1723 | */ |
| 1724 | int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, |
| 1725 | struct hl_userptr *userptr) |
| 1726 | { |
| 1727 | u64 start, end; |
| 1728 | u32 npages, offset; |
| 1729 | int rc; |
| 1730 | |
| 1731 | if (!size) { |
| 1732 | dev_err(hdev->dev, "size to pin is invalid - %llu\n", size); |
| 1733 | return -EINVAL; |
| 1734 | } |
| 1735 | |
| 1736 | /* |
| 1737 | * If the combination of the address and size requested for this memory |
| 1738 |  * region causes an integer overflow, return an error.
| 1739 | */ |
| 1740 | if (((addr + size) < addr) || |
| 1741 | PAGE_ALIGN(addr + size) < (addr + size)) { |
| 1742 | dev_err(hdev->dev, |
| 1743 | "user pointer 0x%llx + %llu causes integer overflow\n", |
| 1744 | addr, size); |
| 1745 | return -EINVAL; |
| 1746 | } |
| 1747 | |
Yuri Nudelman | 714fccb | 2021-07-27 17:39:42 +0300 | [diff] [blame] | 1748 | userptr->pid = current->pid; |
Ofir Bitton | d5eb837 | 2021-02-14 15:35:56 +0200 | [diff] [blame] | 1749 | userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL); |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1750 | if (!userptr->sgt) |
| 1751 | return -ENOMEM; |
| 1752 | |
| 1753 | start = addr & PAGE_MASK; |
| 1754 | offset = addr & ~PAGE_MASK; |
| 1755 | end = PAGE_ALIGN(addr + size); |
| 1756 | npages = (end - start) >> PAGE_SHIFT; |
| 1757 | |
| 1758 | userptr->size = size; |
| 1759 | userptr->addr = addr; |
| 1760 | userptr->dma_mapped = false; |
| 1761 | INIT_LIST_HEAD(&userptr->job_node); |
| 1762 | |
| 1763 | rc = get_user_memory(hdev, addr, size, npages, start, offset, |
| 1764 | userptr); |
| 1765 | if (rc) { |
| 1766 | dev_err(hdev->dev, |
| 1767 | "failed to get user memory for address 0x%llx\n", |
| 1768 | addr); |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1769 | goto free_sgt; |
| 1770 | } |
| 1771 | |
Oded Gabbay | c216477 | 2019-02-16 00:39:24 +0200 | [diff] [blame] | 1772 | hl_debugfs_add_userptr(hdev, userptr); |
| 1773 | |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1774 | return 0; |
| 1775 | |
| 1776 | free_sgt: |
| 1777 | kfree(userptr->sgt); |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1778 | return rc; |
| 1779 | } |
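| | 
| | /*
| |  * The page math above, worked for a 4KB page size: addr = 0x1234 and
| |  * size = 0x2000 give start = 0x1000, offset = 0x234 and end = 0x4000,
| |  * so npages = 3 - one page more than the size alone suggests, because
| |  * the area straddles page boundaries.
| |  */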
| 1780 | |
| 1781 | /**
| 1782 |  * hl_unpin_host_memory() - unpins a chunk of host memory.
| 1783 |  * @hdev: pointer to the habanalabs device structure.
| 1784 |  * @userptr: pointer to hl_userptr structure.
| 1785 |  *
| 1786 |  * This function does the following:
| 1787 |  * - Unpins the physical pages related to the host memory.
| 1788 |  * - Frees the SG list.
| 1789 |  */
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1790 | void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr) |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1791 | { |
Oded Gabbay | c216477 | 2019-02-16 00:39:24 +0200 | [diff] [blame] | 1792 | hl_debugfs_remove_userptr(hdev, userptr); |
| 1793 | |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1794 | if (userptr->dma_mapped) |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 1795 | hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl, |
| 1796 | userptr->sgt->nents, |
| 1797 | userptr->dir); |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1798 | |
Daniel Vetter | d4cb192 | 2020-11-27 17:41:17 +0100 | [diff] [blame] | 1799 | unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true); |
| 1800 | kvfree(userptr->pages); |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1801 | |
| 1802 | list_del(&userptr->job_node); |
| 1803 | |
| 1804 | sg_free_table(userptr->sgt); |
| 1805 | kfree(userptr->sgt); |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1806 | } |
| 1807 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1808 | /** |
| 1809 | * hl_userptr_delete_list() - clear userptr list. |
| 1810 | * @hdev: pointer to the habanalabs device structure. |
| 1811 | * @userptr_list: pointer to the list to clear. |
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1812 | * |
| 1813 | * This function does the following: |
| 1814 | * - Iterates over the list and unpins the host memory and frees the userptr |
| 1815 | * structure. |
| 1816 | */ |
| 1817 | void hl_userptr_delete_list(struct hl_device *hdev, |
| 1818 | struct list_head *userptr_list) |
| 1819 | { |
| 1820 | struct hl_userptr *userptr, *tmp; |
| 1821 | |
| 1822 | list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) { |
| 1823 | hl_unpin_host_memory(hdev, userptr); |
| 1824 | kfree(userptr); |
| 1825 | } |
| 1826 | |
| 1827 | INIT_LIST_HEAD(userptr_list); |
| 1828 | } |
| 1829 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1830 | /** |
| 1831 | * hl_userptr_is_pinned() - returns whether the given userptr is pinned. |
| 1832 | * @hdev: pointer to the habanalabs device structure. |
| 1833 |  * @addr: user address to check.
| 1834 |  * @size: size of the user memory to check.
| |  * @userptr_list: pointer to the list to search in.
| |  * @userptr: pointer to userptr to check.
Oded Gabbay | eff6f4a | 2019-02-16 00:39:21 +0200 | [diff] [blame] | 1835 | * |
| 1836 | * This function does the following: |
| 1837 |  * - Iterates over the list and checks if the given userptr is in it, which
| 1838 |  *   means it is pinned. If so, returns true, otherwise returns false.
| 1839 | */ |
| 1840 | bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, |
| 1841 | u32 size, struct list_head *userptr_list, |
| 1842 | struct hl_userptr **userptr) |
| 1843 | { |
| 1844 | list_for_each_entry((*userptr), userptr_list, job_node) { |
| 1845 | if ((addr == (*userptr)->addr) && (size == (*userptr)->size)) |
| 1846 | return true; |
| 1847 | } |
| 1848 | |
| 1849 | return false; |
| 1850 | } |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1851 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1852 | /** |
| 1853 | * va_range_init() - initialize virtual addresses range. |
| 1854 | * @hdev: pointer to the habanalabs device structure. |
| 1855 | * @va_range: pointer to the range to initialize. |
| 1856 | * @start: range start address. |
| 1857 |  * @end: range end address.
| |  * @page_size: page size for this va_range.
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1858 | * |
| 1859 | * This function does the following: |
| 1860 | * - Initializes the virtual addresses list of the given range with the given |
| 1861 | * addresses. |
| 1862 | */ |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1863 | static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range, |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1864 | u64 start, u64 end, u32 page_size) |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1865 | { |
| 1866 | int rc; |
| 1867 | |
| 1868 | INIT_LIST_HEAD(&va_range->list); |
| 1869 | |
Moti Haimovski | b19dc67 | 2020-11-18 20:15:29 +0200 | [diff] [blame] | 1870 | /* |
| 1871 | * PAGE_SIZE alignment |
| 1872 |  * it is the caller's responsibility to align the addresses if the
| 1873 | * page size is not a power of 2 |
| 1874 | */ |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1875 | |
Moti Haimovski | b19dc67 | 2020-11-18 20:15:29 +0200 | [diff] [blame] | 1876 | if (is_power_of_2(page_size)) { |
| 1877 | if (start & (PAGE_SIZE - 1)) { |
| 1878 | start &= PAGE_MASK; |
| 1879 | start += PAGE_SIZE; |
| 1880 | } |
| 1881 | |
| 1882 | if (end & (PAGE_SIZE - 1)) |
| 1883 | end &= PAGE_MASK; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1884 | } |
| 1885 | |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1886 | if (start >= end) { |
| 1887 | dev_err(hdev->dev, "too small vm range for va list\n"); |
| 1888 | return -EFAULT; |
| 1889 | } |
| 1890 | |
| 1891 | rc = add_va_block(hdev, va_range, start, end); |
| 1892 | |
| 1893 | if (rc) { |
| 1894 | dev_err(hdev->dev, "Failed to init host va list\n"); |
| 1895 | return rc; |
| 1896 | } |
| 1897 | |
| 1898 | va_range->start_addr = start; |
| 1899 | va_range->end_addr = end; |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1900 | va_range->page_size = page_size; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1901 | |
| 1902 | return 0; |
| 1903 | } |
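| | 
| | /*
| |  * The boundary trimming above is equivalent to the kernel round helpers
| |  * below (shown for clarity only): the range is shrunk inward so both
| |  * ends are PAGE_SIZE aligned, e.g. [0x1234, 0x5678) becomes
| |  * [0x2000, 0x5000) with 4KB pages.
| |  */
| | #if 0
| | 	start = round_up(start, PAGE_SIZE);
| | 	end = round_down(end, PAGE_SIZE);
| | #endif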
| 1904 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1905 | /** |
| 1906 | * va_range_fini() - clear a virtual addresses range. |
| 1907 |  * @hdev: pointer to the habanalabs device structure.
| 1908 |  * @va_range: pointer to virtual addresses range.
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1909 | * |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1910 | * This function does the following: |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1911 | * - Frees the virtual addresses block list and its lock. |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1912 | */ |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1913 | static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range) |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1914 | { |
| 1915 | mutex_lock(&va_range->lock); |
| 1916 | clear_va_list_locked(hdev, &va_range->list); |
| 1917 | mutex_unlock(&va_range->lock); |
| 1918 | |
| 1919 | mutex_destroy(&va_range->lock); |
| 1920 | kfree(va_range); |
| 1921 | } |
| 1922 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1923 | /** |
| 1924 | * vm_ctx_init_with_ranges() - initialize virtual memory for context. |
| 1925 | * @ctx: pointer to the habanalabs context structure. |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1926 | * @host_range_start: host virtual addresses range start. |
| 1927 | * @host_range_end: host virtual addresses range end. |
| 1928 | * @host_huge_range_start: host virtual addresses range start for memory |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1929 | * allocated with huge pages. |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1930 | * @host_huge_range_end: host virtual addresses range end for memory allocated |
| 1931 | * with huge pages. |
| 1932 | * @dram_range_start: dram virtual addresses range start. |
| 1933 | * @dram_range_end: dram virtual addresses range end. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1934 | * |
| 1935 | * This function initializes the following: |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 1936 | * - MMU for context. |
| 1937 | * - Virtual address to area descriptor hashtable. |
| 1938 | * - Virtual block list of available virtual memory. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1939 | */ |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1940 | static int vm_ctx_init_with_ranges(struct hl_ctx *ctx, |
| 1941 | u64 host_range_start, |
| 1942 | u64 host_range_end, |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1943 | u32 host_page_size, |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1944 | u64 host_huge_range_start, |
| 1945 | u64 host_huge_range_end, |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1946 | u32 host_huge_page_size, |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1947 | u64 dram_range_start, |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1948 | u64 dram_range_end, |
| 1949 | u32 dram_page_size) |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1950 | { |
| 1951 | struct hl_device *hdev = ctx->hdev; |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1952 | int i, rc; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1953 | |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1954 | for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) { |
| 1955 | ctx->va_range[i] = |
| 1956 | kzalloc(sizeof(struct hl_va_range), GFP_KERNEL); |
| 1957 | if (!ctx->va_range[i]) { |
| 1958 | rc = -ENOMEM; |
| 1959 | goto free_va_range; |
| 1960 | } |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1961 | } |
| 1962 | |
Omer Shpigelman | 27ca384c | 2019-02-28 10:46:11 +0200 | [diff] [blame] | 1963 | rc = hl_mmu_ctx_init(ctx); |
| 1964 | if (rc) { |
| 1965 | dev_err(hdev->dev, "failed to init context %d\n", ctx->asid); |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1966 | goto free_va_range; |
Omer Shpigelman | 27ca384c | 2019-02-28 10:46:11 +0200 | [diff] [blame] | 1967 | } |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1968 | |
| 1969 | mutex_init(&ctx->mem_hash_lock); |
| 1970 | hash_init(ctx->mem_hash); |
| 1971 | |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1972 | mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1973 | |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1974 | rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST], |
| 1975 | host_range_start, host_range_end, host_page_size); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1976 | if (rc) { |
| 1977 | dev_err(hdev->dev, "failed to init host vm range\n"); |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1978 | goto mmu_ctx_fini; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1979 | } |
| 1980 | |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1981 | if (hdev->pmmu_huge_range) { |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1982 | mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 1983 | |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1984 | rc = va_range_init(hdev, |
| 1985 | ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE], |
| 1986 | host_huge_range_start, host_huge_range_end, |
| 1987 | host_huge_page_size); |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1988 | if (rc) { |
| 1989 | dev_err(hdev->dev, |
| 1990 | "failed to init host huge vm range\n"); |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1991 | goto clear_host_va_range; |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1992 | } |
| 1993 | } else { |
Ofir Bitton | 8e718f2 | 2020-11-26 13:01:11 +0200 | [diff] [blame] | 1994 | kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]); |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1995 | ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] = |
| 1996 | ctx->va_range[HL_VA_RANGE_TYPE_HOST]; |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 1997 | } |
| 1998 | |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 1999 | mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock); |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 2000 | |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2001 | rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM], |
| 2002 | dram_range_start, dram_range_end, dram_page_size); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2003 | if (rc) { |
| 2004 | dev_err(hdev->dev, "failed to init dram vm range\n"); |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2005 | goto clear_host_huge_va_range; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2006 | } |
| 2007 | |
Oded Gabbay | c216477 | 2019-02-16 00:39:24 +0200 | [diff] [blame] | 2008 | hl_debugfs_add_ctx_mem_hash(hdev, ctx); |
| 2009 | |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2010 | return 0; |
| 2011 | |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2012 | clear_host_huge_va_range: |
| 2013 | mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2014 | |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 2015 | if (hdev->pmmu_huge_range) { |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2016 | mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); |
| 2017 | clear_va_list_locked(hdev, |
| 2018 | &ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list); |
| 2019 | mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 2020 | } |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2021 | clear_host_va_range: |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 2022 | if (hdev->pmmu_huge_range) |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2023 | mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); |
| 2024 | mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); |
| 2025 | clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list); |
| 2026 | mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); |
| 2027 | mmu_ctx_fini: |
| 2028 | mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2029 | mutex_destroy(&ctx->mem_hash_lock); |
| 2030 | hl_mmu_ctx_fini(ctx); |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2031 | free_va_range: |
| 2032 | for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) |
| 2033 | kfree(ctx->va_range[i]); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2034 | |
| 2035 | return rc; |
| 2036 | } |
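/*
 * Example (illustrative sketch, not part of this file): the goto-unwind
 * convention used by the function above. Resources are released in strict
 * reverse order of acquisition, and each label undoes only the steps that
 * had already succeeded before the failure. struct example and the
 * acquire/release helpers below are hypothetical.
 */
#if 0
struct example {
	void *res_a, *res_b, *res_c;
};

static int example_init(struct example *e)
{
	int rc;

	rc = acquire_a(e);
	if (rc)
		return rc;

	rc = acquire_b(e);
	if (rc)
		goto err_release_a;

	rc = acquire_c(e);
	if (rc)
		goto err_release_b;

	return 0;

err_release_b:
	release_b(e);
err_release_a:
	release_a(e);

	return rc;
}
#endif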
| 2037 | |
| 2038 | int hl_vm_ctx_init(struct hl_ctx *ctx) |
| 2039 | { |
| 2040 | struct asic_fixed_properties *prop = &ctx->hdev->asic_prop; |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 2041 | u64 host_range_start, host_range_end, host_huge_range_start, |
| 2042 | host_huge_range_end, dram_range_start, dram_range_end; |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2043 | u32 host_page_size, host_huge_page_size, dram_page_size; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2044 | |
| 2045 | atomic64_set(&ctx->dram_phys_mem, 0); |
| 2046 | |
| 2047 | /* |
| 2048 | * - If MMU is enabled, init the ranges as usual. |
| 2049 |  * - If MMU is disabled: for a host mapping, the returned address |
| 2050 |  *   is the given one; |
| 2051 |  *   for a DRAM mapping, the returned address is the physical |
| 2052 |  *   address of the memory behind the given handle. |
| 2053 | */ |
Oded Gabbay | f3a965c | 2020-10-04 23:00:39 +0300 | [diff] [blame] | 2054 | if (!ctx->hdev->mmu_enable) |
| 2055 | return 0; |
| 2056 | |
| 2057 | dram_range_start = prop->dmmu.start_addr; |
| 2058 | dram_range_end = prop->dmmu.end_addr; |
Moti Haimovski | b19dc67 | 2020-11-18 20:15:29 +0200 | [diff] [blame] | 2059 | dram_page_size = prop->dram_page_size ? |
| 2060 | prop->dram_page_size : prop->dmmu.page_size; |
Oded Gabbay | f3a965c | 2020-10-04 23:00:39 +0300 | [diff] [blame] | 2061 | host_range_start = prop->pmmu.start_addr; |
| 2062 | host_range_end = prop->pmmu.end_addr; |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2063 | host_page_size = prop->pmmu.page_size; |
Oded Gabbay | f3a965c | 2020-10-04 23:00:39 +0300 | [diff] [blame] | 2064 | host_huge_range_start = prop->pmmu_huge.start_addr; |
| 2065 | host_huge_range_end = prop->pmmu_huge.end_addr; |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2066 | host_huge_page_size = prop->pmmu_huge.page_size; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2067 | |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 2068 | return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end, |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2069 | host_page_size, host_huge_range_start, |
| 2070 | host_huge_range_end, host_huge_page_size, |
| 2071 | dram_range_start, dram_range_end, dram_page_size); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2072 | } |
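/*
 * Example (illustrative sketch, not part of this file): how a context
 * creation path would typically consume hl_vm_ctx_init(), pairing it with
 * hl_vm_ctx_fini() on the teardown side. The caller name and the
 * other_ctx_setup()/other_ctx_teardown() steps are hypothetical; only
 * hl_vm_ctx_init() itself is defined above.
 */
#if 0
static int example_ctx_setup(struct hl_ctx *ctx)
{
	int rc;

	rc = other_ctx_setup(ctx);	/* hypothetical earlier init step */
	if (rc)
		return rc;

	rc = hl_vm_ctx_init(ctx);
	if (rc) {
		other_ctx_teardown(ctx);	/* undo the earlier step */
		return rc;
	}

	return 0;
}
#endif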
| 2073 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 2074 | /** |
| 2075 | * hl_vm_ctx_fini() - virtual memory teardown of context. |
| 2076 | * @ctx: pointer to the habanalabs context structure. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2077 | * |
| 2078 |  * This function tears down the following: |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 2079 | * - Virtual block list of available virtual memory. |
| 2080 | * - Virtual address to area descriptor hashtable. |
| 2081 | * - MMU for context. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2082 | * |
| 2083 | * In addition this function does the following: |
| 2084 | * - Unmaps the existing hashtable nodes if the hashtable is not empty. The |
| 2085 | * hashtable should be empty as no valid mappings should exist at this |
| 2086 | * point. |
| 2087 | * - Frees any existing physical page list from the idr which relates to the |
| 2088 | * current context asid. |
| 2089 | * - This function checks the virtual block list for correctness. At this point |
| 2090 | * the list should contain one element which describes the whole virtual |
| 2091 | * memory range of the context. Otherwise, a warning is printed. |
| 2092 | */ |
| 2093 | void hl_vm_ctx_fini(struct hl_ctx *ctx) |
| 2094 | { |
| 2095 | struct hl_device *hdev = ctx->hdev; |
| 2096 | struct hl_vm *vm = &hdev->vm; |
| 2097 | struct hl_vm_phys_pg_pack *phys_pg_list; |
| 2098 | struct hl_vm_hash_node *hnode; |
| 2099 | struct hlist_node *tmp_node; |
Omer Shpigelman | f19040c | 2020-12-09 13:34:11 +0200 | [diff] [blame] | 2100 | struct hl_mem_in args; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2101 | int i; |
| 2102 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 2103 | if (!hdev->mmu_enable) |
Oded Gabbay | f3a965c | 2020-10-04 23:00:39 +0300 | [diff] [blame] | 2104 | return; |
| 2105 | |
Oded Gabbay | c216477 | 2019-02-16 00:39:24 +0200 | [diff] [blame] | 2106 | hl_debugfs_remove_ctx_mem_hash(hdev, ctx); |
| 2107 | |
Omer Shpigelman | e604f55 | 2019-11-14 18:23:59 +0000 | [diff] [blame] | 2108 | /* |
| 2109 |  * Something clearly went wrong during hard reset, so there is no point |
| 2110 |  * in printing yet another side-effect error |
| 2111 | */ |
| 2112 | if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash)) |
Oded Gabbay | a6cd255 | 2021-07-13 08:11:54 +0300 | [diff] [blame] | 2113 | dev_dbg(hdev->dev, |
Oded Gabbay | 0eab4f8 | 2020-06-22 09:52:22 +0300 | [diff] [blame] | 2114 | "user released device without removing its memory mappings\n"); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2115 | |
| 2116 | hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) { |
| 2117 | dev_dbg(hdev->dev, |
| 2118 | "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n", |
| 2119 | hnode->vaddr, ctx->asid); |
Omer Shpigelman | f19040c | 2020-12-09 13:34:11 +0200 | [diff] [blame] | 2120 | args.unmap.device_virt_addr = hnode->vaddr; |
| 2121 | unmap_device_va(ctx, &args, true); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2122 | } |
| 2123 | |
Ohad Sharabi | cb6ef0e | 2020-11-26 09:39:26 +0200 | [diff] [blame] | 2124 | mutex_lock(&ctx->mmu_lock); |
| 2125 | |
Omer Shpigelman | bea84c4 | 2019-11-14 18:23:58 +0000 | [diff] [blame] | 2126 | /* invalidate the cache once after the unmapping loop */ |
| 2127 | hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); |
| 2128 | hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK); |
| 2129 | |
Ohad Sharabi | cb6ef0e | 2020-11-26 09:39:26 +0200 | [diff] [blame] | 2130 | mutex_unlock(&ctx->mmu_lock); |
| 2131 | |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2132 | spin_lock(&vm->idr_lock); |
| 2133 | idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i) |
| 2134 | if (phys_pg_list->asid == ctx->asid) { |
| 2135 | dev_dbg(hdev->dev, |
Omer Shpigelman | 7f74d4d | 2019-08-12 11:48:46 +0300 | [diff] [blame] | 2136 | "page list 0x%px of asid %d is still alive\n", |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2137 | phys_pg_list, ctx->asid); |
Tomer Tayar | c811375 | 2019-08-04 07:03:41 +0000 | [diff] [blame] | 2138 | atomic64_sub(phys_pg_list->total_size, |
| 2139 | &hdev->dram_used_mem); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2140 | free_phys_pg_pack(hdev, phys_pg_list); |
| 2141 | idr_remove(&vm->phys_pg_pack_handles, i); |
| 2142 | } |
| 2143 | spin_unlock(&vm->idr_lock); |
| 2144 | |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2145 | va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]); |
Ofir Bitton | 8e718f2 | 2020-11-26 13:01:11 +0200 | [diff] [blame] | 2146 | va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]); |
| 2147 | |
Omer Shpigelman | 64a7e29 | 2020-01-05 09:05:45 +0000 | [diff] [blame] | 2148 | if (hdev->pmmu_huge_range) |
Ofir Bitton | 784b916 | 2020-10-22 11:05:55 +0300 | [diff] [blame] | 2149 | va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2150 | |
| 2151 | mutex_destroy(&ctx->mem_hash_lock); |
| 2152 | hl_mmu_ctx_fini(ctx); |
Oded Gabbay | 3e62299 | 2020-10-18 15:32:23 +0300 | [diff] [blame] | 2153 | |
| 2154 | /* In this case we need to clear the global accounting of DRAM usage |
| 2155 |  * because the user notifies us of allocations. If the user is gone, |
| 2156 |  * all of its DRAM is available again |
| 2157 | */ |
Ofir Bitton | 8e39e75 | 2020-11-12 11:03:32 +0200 | [diff] [blame] | 2158 | if (ctx->asid != HL_KERNEL_ASID_ID && |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 2159 | !hdev->asic_prop.dram_supports_virtual_memory) |
| 2160 | atomic64_set(&hdev->dram_used_mem, 0); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2161 | } |
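/*
 * Example (illustrative sketch, not part of this file): the IDR sweep
 * pattern used in hl_vm_ctx_fini() above. idr_for_each_entry() tolerates
 * idr_remove() of the entry currently being visited, which is what makes
 * the in-loop removal safe. struct example_obj and its asid field are
 * hypothetical stand-ins for the driver's physical page pack.
 */
#if 0
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_obj {
	int asid;
};

/* free every entry owned by @asid, under the idr's protecting lock */
static void drop_entries_of_asid(struct idr *idr, spinlock_t *lock, int asid)
{
	struct example_obj *obj;
	int id;

	spin_lock(lock);
	idr_for_each_entry(idr, obj, id)
		if (obj->asid == asid) {
			kfree(obj);
			idr_remove(idr, id);
		}
	spin_unlock(lock);
}
#endif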
| 2162 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 2163 | /** |
| 2164 | * hl_vm_init() - initialize virtual memory module. |
| 2165 | * @hdev: pointer to the habanalabs device structure. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2166 | * |
| 2167 | * This function initializes the following: |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 2168 | * - MMU module. |
| 2169 |  * - DRAM physical pages pool of DRAM-page-size chunks (or a power-of-2 fallback). |
| 2170 | * - Idr for device memory allocation handles. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2171 | */ |
| 2172 | int hl_vm_init(struct hl_device *hdev) |
| 2173 | { |
| 2174 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 2175 | struct hl_vm *vm = &hdev->vm; |
| 2176 | int rc; |
| 2177 | |
Moti Haimovski | b19dc67 | 2020-11-18 20:15:29 +0200 | [diff] [blame] | 2178 | if (is_power_of_2(prop->dram_page_size)) |
| 2179 | vm->dram_pg_pool = |
| 2180 | gen_pool_create(__ffs(prop->dram_page_size), -1); |
| 2181 | else |
| 2182 | vm->dram_pg_pool = |
| 2183 | gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1); |
| 2184 | |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2185 | if (!vm->dram_pg_pool) { |
| 2186 | dev_err(hdev->dev, "Failed to create dram page pool\n"); |
Oded Gabbay | 37d68ce | 2019-05-29 14:43:04 +0300 | [diff] [blame] | 2187 | return -ENOMEM; |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2188 | } |
| 2189 | |
| 2190 | kref_init(&vm->dram_pg_pool_refcount); |
| 2191 | |
| 2192 | rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address, |
| 2193 | prop->dram_end_address - prop->dram_user_base_address, |
| 2194 | -1); |
| 2195 | |
| 2196 | if (rc) { |
| 2197 | dev_err(hdev->dev, |
| 2198 | "Failed to add memory to dram page pool %d\n", rc); |
| 2199 | goto pool_add_err; |
| 2200 | } |
| 2201 | |
| 2202 | spin_lock_init(&vm->idr_lock); |
| 2203 | idr_init(&vm->phys_pg_pack_handles); |
| 2204 | |
| 2205 | atomic64_set(&hdev->dram_used_mem, 0); |
| 2206 | |
| 2207 | vm->init_done = true; |
| 2208 | |
| 2209 | return 0; |
| 2210 | |
| 2211 | pool_add_err: |
| 2212 | gen_pool_destroy(vm->dram_pg_pool); |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2213 | |
| 2214 | return rc; |
| 2215 | } |
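/*
 * Example (illustrative sketch, not part of this file): a gen_pool
 * round-trip matching the pool created above. gen_pool_create() takes an
 * allocation order, so __ffs(page_size) makes the page size the smallest
 * allocatable unit; a non-power-of-2 DRAM page size therefore needs the
 * smaller power-of-2 fallback chunk. The base address and sizes here are
 * made-up values.
 */
#if 0
#include <linux/genalloc.h>
#include <linux/sizes.h>

static int example_dram_pool(void)
{
	const unsigned long page = SZ_2M;
	struct gen_pool *pool;
	unsigned long addr;

	pool = gen_pool_create(__ffs(page), -1 /* any NUMA node */);
	if (!pool)
		return -ENOMEM;

	/* hand the pool a made-up 128MB physical region to carve up */
	if (gen_pool_add(pool, 0x80000000UL, 64 * page, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	addr = gen_pool_alloc(pool, 4 * page);	/* returns 0 on failure */
	if (addr)
		gen_pool_free(pool, addr, 4 * page);

	gen_pool_destroy(pool);

	return 0;
}
#endif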
| 2216 | |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 2217 | /** |
| 2218 | * hl_vm_fini() - virtual memory module teardown. |
| 2219 | * @hdev: pointer to the habanalabs device structure. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2220 | * |
| 2221 |  * This function tears down the following: |
Omer Shpigelman | 3b762f5 | 2020-12-09 13:28:46 +0200 | [diff] [blame] | 2222 | * - Idr for device memory allocation handles. |
| 2223 |  * - DRAM physical pages pool. |
| 2224 | * - MMU module. |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2225 | */ |
| 2226 | void hl_vm_fini(struct hl_device *hdev) |
| 2227 | { |
| 2228 | struct hl_vm *vm = &hdev->vm; |
| 2229 | |
| 2230 | if (!vm->init_done) |
| 2231 | return; |
| 2232 | |
| 2233 | /* |
| 2234 |  * At this point all contexts should be freed and hence no DRAM |
| 2235 |  * memory should be in use, so the DRAM pool can be freed here. |
| 2236 | */ |
| 2237 | if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1) |
| 2238 | dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n", |
| 2239 | __func__); |
| 2240 | |
Omer Shpigelman | 0feaf86 | 2019-02-16 00:39:22 +0200 | [diff] [blame] | 2241 | vm->init_done = false; |
| 2242 | } |
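/*
 * Example (illustrative sketch, not part of this file): the kref pattern
 * behind dram_pg_pool_refcount. The pool is destroyed from the release
 * callback, so the teardown path only drops what should be the last
 * reference and can warn when kref_put() reports the object survived.
 * struct example_pool and its fields are hypothetical.
 */
#if 0
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/kref.h>

struct example_pool {
	struct kref refcount;
	struct gen_pool *pool;
};

static void example_pool_release(struct kref *ref)
{
	struct example_pool *p = container_of(ref, struct example_pool,
						refcount);

	gen_pool_destroy(p->pool);
}

static void example_pool_fini(struct example_pool *p)
{
	/* kref_put() returns 1 only if the release callback actually ran */
	if (kref_put(&p->refcount, example_pool_release) != 1)
		pr_warn("pool still referenced at teardown\n");
}
#endif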
Sagiv Ozeri | a4371c1 | 2021-02-23 11:01:08 +0200 | [diff] [blame] | 2243 | |
| 2244 | /** |
| 2245 | * hl_hw_block_mem_init() - HW block memory initialization. |
| 2246 | * @ctx: pointer to the habanalabs context structure. |
| 2247 | * |
| 2248 |  * This function initializes the HW block virtually mapped addresses list |
| 2249 |  * and its lock. |
| 2250 | */ |
| 2251 | void hl_hw_block_mem_init(struct hl_ctx *ctx) |
| 2252 | { |
| 2253 | mutex_init(&ctx->hw_block_list_lock); |
| 2254 | INIT_LIST_HEAD(&ctx->hw_block_mem_list); |
| 2255 | } |
| 2256 | |
| 2257 | /** |
| 2258 | * hl_hw_block_mem_fini() - HW block memory teardown. |
| 2259 | * @ctx: pointer to the habanalabs context structure. |
| 2260 | * |
| 2261 |  * This function clears the HW block virtually mapped addresses list and |
| 2262 |  * destroys its lock. |
| 2263 | */ |
| 2264 | void hl_hw_block_mem_fini(struct hl_ctx *ctx) |
| 2265 | { |
| 2266 | struct hl_vm_hw_block_list_node *lnode, *tmp; |
| 2267 | |
| 2268 | if (!list_empty(&ctx->hw_block_mem_list)) |
| 2269 | dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n"); |
| 2270 | |
| 2271 | list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) { |
| 2272 | list_del(&lnode->node); |
| 2273 | kfree(lnode); |
| 2274 | } |
| 2275 | |
| 2276 | mutex_destroy(&ctx->hw_block_list_lock); |
| 2277 | } |
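/*
 * Example (illustrative sketch, not part of this file): how an entry
 * would join the list that hl_hw_block_mem_fini() drains above. Nodes are
 * allocated per mapping and only touched under hw_block_list_lock. The
 * function name is hypothetical; the real insertion path lives in the HW
 * block mapping code elsewhere in this file, and struct hl_ctx /
 * struct hl_vm_hw_block_list_node come from "habanalabs.h".
 */
#if 0
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static int example_track_hw_block(struct hl_ctx *ctx)
{
	struct hl_vm_hw_block_list_node *lnode;

	lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
	if (!lnode)
		return -ENOMEM;

	/* fill in the mapping details here before publishing the node */

	mutex_lock(&ctx->hw_block_list_lock);
	list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
	mutex_unlock(&ctx->hw_block_list_lock);

	return 0;
}
#endif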