/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and the system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
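
/*
 * Editorial sketch (not part of the driver): how the code below splits a
 * GPU virtual address into a page directory index and a page table index.
 * The shift comes from the radeon_vm_block_size parameter and one page
 * table holds RADEON_VM_PTE_COUNT entries, so, roughly:
 *
 *	pfn     = va / RADEON_GPU_PAGE_SIZE;
 *	pde_idx = pfn >> radeon_vm_block_size;      selects the page table BO
 *	pte_idx = pfn & (RADEON_VM_PTE_COUNT - 1);  entry within that table
 *
 * radeon_vm_bo_set_addr() and radeon_vm_update_ptes() perform exactly this
 * split when allocating page tables and walking an address range.
 */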

/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
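
/*
 * Worked example (editorial, assuming a 1 GiB VM address space and
 * radeon_vm_block_size = 9 purely for illustration): max_pfn would be
 * 1 GiB / 4 KiB = 262144 GPU pages, giving 262144 >> 9 = 512 PDEs, so
 * the directory needs 512 * 8 = 4096 bytes, which RADEON_GPU_PAGE_ALIGN()
 * rounds up to a whole GPU page.
 */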

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->vm_manager.enabled) {
		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;
	}
	return 0;
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	for (i = 0; i < RADEON_NUM_VM; ++i)
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	radeon_asic_vm_fini(rdev);
	rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
					  struct radeon_vm *vm,
					  struct list_head *head)
{
	struct radeon_cs_reloc *list;
	unsigned i, idx;

	list = kmalloc_array(vm->max_pde_used + 2,
			     sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	list[0].gobj = NULL;
	list[0].robj = vm->page_directory;
	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tiling_flags = 0;
	list[0].handle = 0;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].gobj = NULL;
		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tiling_flags = 0;
		list[idx].handle = 0;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
		return NULL;

	/* we definitely need to flush */
	radeon_fence_unref(&vm->last_flush);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm->id = i;
			trace_radeon_vm_grab_id(vm->id, ring);
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm->id = choices[i];
			trace_radeon_vm_grab_id(vm->id, ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}

/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     int ring)
{
	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);

	/* if we can't remember our last VM flush then flush now! */
	/* XXX figure out why we have to flush all the time */
	if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
		vm->pd_gpu_addr = pd_addr;
		radeon_ring_vm_flush(rdev, ring, vm);
	}
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(fence);

	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->last_id_use);
	vm->last_id_use = radeon_fence_ref(fence);

	/* we just flushed the VM, remember that */
	if (!vm->last_flush)
		vm->last_flush = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->soffset = 0;
	bo_va->eoffset = 0;
	bo_va->flags = 0;
	bo_va->valid = false;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);
	INIT_LIST_HEAD(&bo_va->vm_status);

	mutex_lock(&vm->mutex);
	list_add(&bo_va->vm_list, &vm->va);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
			      struct radeon_bo *bo)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct radeon_ib ib;
	unsigned entries;
	uint64_t addr;
	int r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = radeon_bo_gpu_offset(bo);
	entries = radeon_bo_size(bo) / 8;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
			  NULL, entries * 2 + 64);
	if (r)
		goto error;

	ib.length_dw = 0;

	radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		goto error;

	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
	radeon_ib_free(rdev, &ib);

	return 0;

error:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/**
 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	uint64_t eoffset, last_offset = 0;
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_bo_va *tmp;
	struct list_head *head;
	unsigned last_pfn, pt_idx;
	int r;

	if (soffset) {
		/* make sure the object fits at this offset */
		eoffset = soffset + size;
		if (soffset >= eoffset) {
			return -EINVAL;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn > rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			return -EINVAL;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	head = &vm->va;
	last_offset = 0;
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va == tmp) {
			/* skip over currently modified bo */
			continue;
		}

		if (soffset >= last_offset && eoffset <= tmp->soffset) {
			/* bo can be added before this one */
			break;
		}
		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	}

	if (bo_va->soffset) {
		/* add a clone of the bo_va to clear the old address */
		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
		tmp->soffset = bo_va->soffset;
		tmp->eoffset = bo_va->eoffset;
		tmp->vm = vm;
		list_add(&tmp->vm_status, &vm->freed);
	}

	bo_va->soffset = soffset;
	bo_va->eoffset = eoffset;
	bo_va->flags = flags;
	bo_va->valid = false;
	list_move(&bo_va->vm_list, head);

	soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
	eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;

	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));

	if (eoffset > vm->max_pde_used)
		vm->max_pde_used = eoffset;

	radeon_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
		struct radeon_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
		if (r)
			return r;

		r = radeon_vm_clear_bo(rdev, pt);
		if (r) {
			radeon_bo_unref(&pt);
			radeon_bo_reserve(bo_va->bo, false);
			return r;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			radeon_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return radeon_bo_reserve(bo_va->bo, false);
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}
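
/*
 * Editorial example of the lookup above, assuming a 4 KiB CPU page size:
 * for addr = 0x12345678 the entry is pages_addr[0x12345678 >> 12] =
 * pages_addr[0x12345], and the low 12 bits (0x678) are OR'ed back in,
 * so a GPU page offset inside a larger CPU page still resolves to the
 * right byte within the backing page.
 */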

/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;
	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

/**
 * radeon_vm_update_page_directory - make sure that the page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Updates the page directory entries so they point to the
 * currently allocated page tables (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	struct radeon_bo *pd = vm->page_directory;
	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct radeon_ib ib;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 16;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = radeon_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_asic_vm_set_page(rdev, &ib, last_pde,
							last_pt, count, incr,
							R600_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
					incr, R600_PTE_VALID);

	if (ib.length_dw != 0) {
		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
		r = radeon_ib_schedule(rdev, &ib, NULL);
		if (r) {
			radeon_ib_free(rdev, &ib);
			return r;
		}
		radeon_fence_unref(&vm->fence);
		vm->fence = radeon_fence_ref(ib.fence);
		radeon_fence_unref(&vm->last_flush);
	}
	radeon_ib_free(rdev, &ib);

	return 0;
}
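
/*
 * Editorial note on the loop above: directory updates are coalesced.
 * If the page tables for, say, pt_idx 3, 4 and 5 happen to sit at
 * consecutive GPU addresses (RADEON_VM_PTE_COUNT * 8 bytes apart, the
 * size of one table), their three PDEs are written with a single
 * radeon_asic_vm_set_page() call (count = 3, incr = that spacing)
 * instead of three separate ones, keeping the IB small.
 */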

/**
 * radeon_vm_frag_ptes - add fragment information to PTEs
 *
 * @rdev: radeon_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_frag_ptes(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
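
	/*
	 * Editorial example, assuming an SI part (64KB fragments,
	 * frag_align = 0x80): for pe_start = 0x1030 and pe_end = 0x1210
	 * the range is split into a 4KB-page head (0x1030 - 0x1080), a
	 * middle aligned to 0x80 bytes of PTEs (0x1080 - 0x1200) written
	 * with R600_PTE_FRAG_64KB set, and a 4KB-page tail
	 * (0x1200 - 0x1210), matching the three cases handled below.
	 */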

	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
	uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
	uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are not contiguous */
	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
					RADEON_GPU_PAGE_SIZE, flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
					RADEON_GPU_PAGE_SIZE, flags);
		addr += RADEON_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
				RADEON_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += RADEON_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
					RADEON_GPU_PAGE_SIZE, flags);
	}
}

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: IB for the update
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
				  struct radeon_vm *vm,
				  struct radeon_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	start = start / RADEON_GPU_PAGE_SIZE;
	end = end / RADEON_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> radeon_vm_block_size;
		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;

		radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_vm_frag_ptes(rdev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_vm_frag_ptes(rdev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags);
	}
}

/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and the mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_ib ib;
	unsigned nptes, ndw;
	uint64_t addr;
	int r;

	if (!bo_va->soffset) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo_va->bo, vm);
		return -EINVAL;
	}

	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
		return 0;

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
			bo_va->valid = true;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
		bo_va->valid = false;
	}

	trace_radeon_vm_bo_update(bo_va);

	nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;

	/* padding, etc. */
	ndw = 64;

	if (radeon_vm_block_size > 11)
		/* reserve space for one header for every 2k dwords */
		ndw += (nptes >> 11) * 4;
	else
		/* reserve space for one header for
		   every (1 << BLOCK_SIZE) entries */
		ndw += (nptes >> radeon_vm_block_size) * 4;

	/* reserve space for pte addresses */
	ndw += nptes * 2;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
			      addr, radeon_vm_page_flags(bo_va->flags));

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	return 0;
}

/**
 * radeon_vm_clear_freed - clear freed BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_freed(struct radeon_device *rdev,
			  struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int r;

	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
		list_del(&bo_va->vm_status);
		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		kfree(bo_va);
		if (r)
			return r;
	}
	return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void radeon_vm_bo_rmv(struct radeon_device *rdev,
		      struct radeon_bo_va *bo_va)
{
	struct radeon_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	mutex_lock(&vm->mutex);
	list_del(&bo_va->vm_list);

	if (bo_va->soffset) {
		bo_va->bo = NULL;
		list_add(&bo_va->vm_status, &vm->freed);
	} else {
		kfree(bo_va);
	}

	mutex_unlock(&vm->mutex);
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
				   RADEON_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int r;

	vm->id = 0;
	vm->ib_bo_va = NULL;
	vm->fence = NULL;
	vm->last_flush = NULL;
	vm->last_id_use = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->va);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = radeon_bo_create(rdev, pd_size, align, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL,
			     &vm->page_directory);
	if (r)
		return r;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r) {
		radeon_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int i, r;

	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
		kfree(bo_va);

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	radeon_bo_unref(&vm->page_directory);

	radeon_fence_unref(&vm->fence);
	radeon_fence_unref(&vm->last_flush);
	radeon_fence_unref(&vm->last_id_use);

	mutex_destroy(&vm->mutex);
}