// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

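/*
 * cb_map_mem() - map a CB to the device's MMU.
 *
 * Allocate device virtual addresses from the context's CB VA pool, one
 * page at a time, and map each page of the CB's DMA buffer through the
 * device MMU. On any failure, roll back the mappings and VA allocations
 * done so far.
 */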
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {

		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
				va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

err_va_umap:
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

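/*
 * cb_unmap_mem() - unmap a previously mapped CB from the device's MMU and
 * return its device virtual addresses to the context's CB VA pool.
 */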
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

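/*
 * cb_fini() - free the CB's backing memory, either back to the internal
 * pool or via the coherent DMA API, and then the CB structure itself.
 */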
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t)cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				cb->kernel_address, cb->bus_address);

	kfree(cb);
}

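/*
 * cb_do_release() - return a pool CB to the device's CB pool for reuse;
 * otherwise free it.
 */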
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

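/*
 * cb_release() - final kref release callback for a CB.
 *
 * Remove the CB from debugfs, undo its MMU mapping if one exists, drop
 * the context reference and release the CB itself.
 */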
static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(hdev, cb);
}

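/*
 * hl_cb_alloc() - allocate a CB structure and its backing memory.
 *
 * Internal CBs are carved out of the device's internal pool, while kernel
 * and user CBs are allocated as DMA-coherent memory.
 */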
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb = NULL;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

	if (!cb)
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
		if (!p)
			p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
					cb_size, &cb->bus_address, GFP_KERNEL);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of DMA memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}

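/*
 * hl_cb_create() - create a CB and register it with a handle.
 *
 * Reuse a CB from the device pool when possible, otherwise allocate a new
 * one. Optionally map the CB to the device's MMU (user contexts only),
 * then publish it through the manager's IDR. The returned handle encodes
 * the IDR value and the CB mmap type, shifted by PAGE_SHIFT so it can be
 * passed directly as an mmap offset.
 */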
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = ctx->asid;

	/*
	 * Can't use the generic function to check this because of a special
	 * case where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must not exceed %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE_SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx = ctx;
	hl_ctx_get(hdev, cb->ctx);

	if (map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(ctx, cb);
		if (rc)
			goto release_cb;
	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto unmap_mem;
	}

	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * The IDR handle is 32-bit, so we can safely OR it with a mask that
	 * is above 32 bits
	 */
	*handle = cb->id | HL_MMAP_TYPE_CB;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

unmap_mem:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

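/*
 * hl_cb_destroy() - remove the CB's handle from the IDR and drop the
 * refcount that was taken at creation time.
 */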
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was given to the user for mmap, so shift it back to
	 * the value which was allocated by the IDR module
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

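/*
 * hl_cb_info() - return the number of command submissions that are
 * currently using the CB.
 */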
static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u64 cb_handle, u32 *usage_cnt)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/* The CB handle was given to user to do mmap, so need to shift it back
	 * to the value which was allocated by the IDR module.
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB info failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
		goto out;
	}

	*usage_cnt = atomic_read(&cb->cs_cnt);

out:
	spin_unlock(&mgr->cb_lock);
	return rc;
}

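/*
 * hl_cb_ioctl() - dispatch the CB IOCTL operations: create, destroy and
 * query usage information.
 */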
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;
	u64 handle = 0;
	u32 usage_cnt = 0;
	int rc;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must not exceed %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	case HL_CB_OP_INFO:
		rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
				&usage_cnt);
		memset(args, 0, sizeof(*args));
		args->out.usage_cnt = usage_cnt;
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

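/*
 * cb_vm_close() - VMA close callback.
 *
 * Called for each partial or full unmap of the VMA. Only when the entire
 * mapping is gone is the CB marked as unmapped and its mmap reference
 * dropped.
 */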
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

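/*
 * hl_cb_mmap() - map a CB to user-space.
 *
 * The mmap offset carries the CB handle. The CB reference taken here is
 * transferred to vma->vm_private_data and is dropped in cb_vm_close().
 */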
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	u32 handle, user_cb_size;
	int rc;

	/* We use the page offset to hold the IDR handle and thus we need to
	 * clear it before doing the mmap itself
	 */
	handle = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	/* reference was taken here */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n",
			vma->vm_start);

		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB already mmapped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	rc = hdev->asic_funcs->mmap(hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;
	vma->vm_pgoff = handle;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

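/*
 * hl_cb_get() - look up a CB by handle and take a reference on it. The
 * caller must release the reference with hl_cb_put().
 */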
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

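/*
 * hl_cb_mgr_fini() - release all CBs that are still registered in the
 * manager's IDR. A CB with more than one remaining reference indicates a
 * leak and is reported.
 */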
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
}

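/*
 * hl_cb_kernel_create() - create a CB for the kernel driver's own use and
 * return it with a reference already taken, or NULL on failure.
 */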
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
			internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here */
	if (!cb) {
		dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
				(u32) cb_handle);
		goto destroy_cb;
	}

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

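/*
 * hl_cb_pool_init() - pre-allocate a pool of CBs that hl_cb_create() can
 * hand out without going through a fresh DMA allocation.
 */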
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

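/*
 * hl_cb_va_pool_init() - create the per-context pool of device virtual
 * addresses used for CB mapping, covering the ASIC's dedicated CB VA
 * range. A no-op on ASICs that don't support CB mapping.
 */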
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}