// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/genalloc.h>

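/*
 * cb_map_mem() - map a CB into the device's virtual address space.
 * @ctx: context in which the CB is mapped.
 * @cb: the CB to map.
 *
 * Allocate device virtual addresses for the CB from the context's VA pool,
 * one MMU page at a time, and map each page to the CB's bus address through
 * the device MMU. On any failure, the partial mapping and the allocated VA
 * blocks are released.
 *
 * Return: 0 on success, negative errno otherwise.
 */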
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
			"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
			"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {

		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

err_va_umap:
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

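/*
 * cb_unmap_mem() - unmap a CB from the device's virtual address space.
 * @ctx: context in which the CB was mapped.
 * @cb: the CB to unmap.
 *
 * Unmap every VA block of the CB from the device MMU and return the device
 * virtual addresses to the context's VA pool.
 */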
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

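/*
 * cb_fini() - free the memory backing a CB.
 * @hdev: habanalabs device structure.
 * @cb: the CB to free.
 *
 * Internal CBs are returned to the device's internal CB pool; other CBs
 * release their DMA coherent allocation. The CB object itself is freed.
 */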
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				(void *) (uintptr_t) cb->kernel_address,
				cb->bus_address);

	kfree(cb);
}

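/*
 * cb_do_release() - release a CB once its refcount drops to zero.
 * @hdev: habanalabs device structure.
 * @cb: the CB to release.
 *
 * Pool CBs are recycled back to the device CB pool instead of being freed.
 */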
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

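/*
 * cb_release() - kref release callback for a CB.
 * @ref: kref embedded in the CB.
 *
 * Remove the CB from debugfs, tear down its MMU mapping if one exists, drop
 * the reference it holds on its context and release the CB.
 */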
static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(hdev, cb);
}

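/*
 * hl_cb_alloc() - allocate a new CB object and its backing memory.
 * @hdev: habanalabs device structure.
 * @cb_size: size of the CB's backing memory, in bytes.
 * @ctx_id: ASID of the owning context.
 * @internal_cb: true to allocate the CB from the internal CB pool.
 *
 * Return: pointer to the new CB on success, NULL otherwise.
 */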
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	else
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of DMA memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = (u64) (uintptr_t) p;
	cb->size = cb_size;

	return cb;
}

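/*
 * hl_cb_create() - create a new CB and register it with a CB manager.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager that tracks the CB's handle.
 * @ctx: owning context.
 * @cb_size: requested CB size, in bytes.
 * @internal_cb: true to allocate the CB from the internal CB pool.
 * @map_cb: true to also map the CB to the device MMU (user contexts only).
 * @handle: resulting opaque handle, shifted so it can serve as an mmap
 *          offset.
 *
 * Return: 0 on success, negative errno otherwise.
 */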
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = ctx->asid;

	/*
	 * Can't use the generic function to check this because of a special
	 * case where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must not exceed %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx = ctx;
	hl_ctx_get(hdev, cb->ctx);

	if (map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(ctx, cb);
		if (rc)
			goto release_cb;
	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto unmap_mem;
	}

	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * The IDR handle is 32-bit so we can safely OR it with a mask that
	 * is above 32 bits
	 */
	*handle = cb->id | HL_MMAP_TYPE_CB;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

unmap_mem:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

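/*
 * hl_cb_destroy() - drop the handle's reference on a CB.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager that owns the handle.
 * @cb_handle: the opaque handle returned by hl_cb_create().
 *
 * Return: 0 on success, -EINVAL if the handle is not found.
 */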
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was given to the user for doing mmap, so we need to
	 * shift it back to the value the idr module gave us
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

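/*
 * hl_cb_ioctl() - handle the CB IOCTL from user-space.
 * @hpriv: per-process private data of the file descriptor.
 * @data: the hl_cb_args union with the IOCTL input/output.
 *
 * Dispatch the HL_CB_OP_CREATE and HL_CB_OP_DESTROY operations.
 *
 * Return: 0 on success, negative errno otherwise.
 */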
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	u64 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User-requested CB size %d must not exceed %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

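/*
 * cb_vm_close() - VMA close callback for a user-mapped CB.
 * @vma: the VMA being closed.
 *
 * Track partial unmaps; only when the entire mapping is gone is the CB's
 * mmap state cleared and its mmap reference dropped.
 */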
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

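/*
 * hl_cb_mmap() - map a CB to a user's virtual address space.
 * @hpriv: per-process private data of the file descriptor.
 * @vma: the VMA describing the requested user mapping.
 *
 * Validate the requested size against the CB, take a reference on the CB for
 * the lifetime of the mapping and perform the ASIC-specific mmap.
 *
 * Return: 0 on success, negative errno otherwise.
 */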
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	u32 handle, user_cb_size;
	int rc;

	/* We use the page offset to hold the idr handle and thus we need to
	 * clear it before doing the mmap itself
	 */
	handle = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	/* reference was taken here */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%x != 0x%x cb size\n",
			user_cb_size, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n",
			vma->vm_start);

		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB is already mmapped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	rc = hdev->asic_funcs->cb_mmap(hdev, vma,
					(void *) (uintptr_t) cb->kernel_address,
					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

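/*
 * hl_cb_get() - look up a CB by handle and take a reference on it.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager that owns the handle.
 * @handle: the idr handle of the CB (already shifted down).
 *
 * Return: pointer to the CB on success, NULL if the handle is not found.
 */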
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
}

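/*
 * hl_cb_kernel_create() - create a CB on behalf of the kernel driver.
 * @hdev: habanalabs device structure.
 * @cb_size: requested CB size, in bytes.
 * @internal_cb: true to allocate the CB from the internal CB pool.
 *
 * Return: pointer to the new CB on success, NULL otherwise.
 */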
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
	if (!cb)
		goto destroy_cb;

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

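/*
 * hl_cb_va_pool_init() - create a device VA pool for CB mapping.
 * @ctx: context that owns the pool.
 *
 * Create a gen_pool over the device VA range reserved for CB mapping, with
 * the MMU page size as the allocation granularity. A no-op if the device
 * does not support CB mapping.
 *
 * Return: 0 on success, negative errno otherwise.
 */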
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}