/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

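/* TTM destroy callback: drop any kernel mapping and tiling region still
 * attached to the BO, unlink it from the per-device BO list and free it.
 */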
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        ttm_bo_kunmap(&nvbo->kmap);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        if (nvbo->tile)
                nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_del(&nvbo->head);
        spin_unlock(&dev_priv->ttm.bo_list_lock);
        kfree(nvbo);
}

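/* Adjust the requested size and alignment so the buffer satisfies the
 * tiling constraints of the target chipset, then round the size up to
 * the page size (and to 64KiB on NV50).
 */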
static void
nouveau_bo_fixup_align(struct drm_device *dev,
                       uint32_t tile_mode, uint32_t tile_flags,
                       int *align, int *size)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*
         * Some of the tile_flags have a periodic structure of N*4096 bytes,
         * align to that as well as the page size. Overallocate memory to
         * avoid corruption of other buffer objects.
         */
        if (dev_priv->card_type == NV_50) {
                switch (tile_flags) {
                case 0x1800:
                case 0x2800:
                case 0x4800:
                case 0x7a00:
                        if (dev_priv->chipset >= 0xA0) {
                                *size = roundup(*size, 28672);
                                /* This is based on high end cards with 448-bit
                                 * memory bus, could be different elsewhere. */
                                *size += 6 * 28672;
                                /* 8 * 28672 is the actual alignment requirement
                                 * but we must also align to page size. */
                                *align = 2 * 8 * 28672;
                        } else if (dev_priv->chipset >= 0x90) {
                                *size = roundup(*size, 16384);
                                *size += 3 * 16384;
                                *align = 12 * 16384;
                        } else {
                                *size = roundup(*size, 8192);
                                *size += 3 * 8192;
                                /* 12 * 8192 is the actual alignment requirement
                                 * but we must also align to page size. */
                                *align = 2 * 12 * 8192;
                        }
                        break;
                default:
                        break;
                }

        } else {
                if (tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * tile_mode);

                        } else if (dev_priv->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * tile_mode);

                        } else if (dev_priv->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * tile_mode);

                        } else if (dev_priv->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * tile_mode);
                        }
                }
        }

        /* ALIGN works only on powers of two. */
        *size = roundup(*size, PAGE_SIZE);

        if (dev_priv->card_type == NV_50) {
                *size = roundup(*size, 65536);
                *align = max(65536, *align);
        }
}

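/* Allocate and initialise a nouveau_bo backed by a TTM buffer object.
 * nvbo->channel is only set for the duration of ttm_bo_init() so the
 * initial validation can use that channel for an accelerated move.
 */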
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->mappable = mappable;
        nvbo->no_vm = no_vm;
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;

        nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
        align >>= PAGE_SHIFT;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
        nouveau_bo_placement_set(nvbo, flags);

        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        nvbo->channel = NULL;
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }

        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
        spin_unlock(&dev_priv->ttm.bo_list_lock);
        *pnvbo = nvbo;
        return 0;
}

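/* Rebuild the placement list from a mask of TTM_PL_FLAG_* domains,
 * keeping pinned buffers marked as non-evictable.
 */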
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
{
        int n = 0;

        if (memtype & TTM_PL_FLAG_VRAM)
                nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
        if (memtype & TTM_PL_FLAG_TT)
                nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        if (memtype & TTM_PL_FLAG_SYSTEM)
                nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
        nvbo->placement.placement = nvbo->placements;
        nvbo->placement.busy_placement = nvbo->placements;
        nvbo->placement.num_placement = n;
        nvbo->placement.num_busy_placement = n;

        if (nvbo->pin_refcnt) {
                while (n--)
                        nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
        }
}

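/* Pin a buffer into the given memory type.  Pinning is refcounted; only
 * the first pin validates the buffer with TTM_PL_FLAG_NO_EVICT set.
 */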
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, i;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype);
        for (i = 0; i < nvbo->placement.num_placement; i++)
                nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

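/* Drop one pin reference.  When the last reference is gone the
 * no-evict flag is cleared and the buffer is revalidated.
 */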
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, i;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        for (i = 0; i < nvbo->placement.num_placement; i++)
                nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

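/* Map the entire buffer into kernel address space; the mapping stays in
 * nvbo->kmap until nouveau_bo_unmap() is called.
 */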
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        ttm_bo_kunmap(&nvbo->kmap);
}

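/* 16/32-bit accessors for a kernel-mapped buffer.  They work for both
 * iomem and ordinary mappings, so callers need not care where the
 * buffer currently lives.
 */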
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

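/* Describe the system, VRAM and GART memory domains to TTM: caching
 * modes, aperture offset/size and the GPU virtual base address.
 */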
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE |
                             TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;

                man->io_addr = NULL;
                man->io_offset = drm_get_resource_start(dev, 1);
                man->io_size = drm_get_resource_len(dev, 1);
                if (man->io_size > nouveau_mem_fb_amount(dev))
                        man->io_size = nouveau_mem_fb_amount(dev);

                man->gpu_offset = dev_priv->vm_vram_base;
                break;
        case TTM_PL_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }

                man->io_offset = dev_priv->gart_info.aper_base;
                man->io_size = dev_priv->gart_info.aper_size;
                man->io_addr = NULL;
                man->gpu_offset = dev_priv->vm_gart_base;
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

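/* Eviction placement: VRAM buffers may go to GART or system memory,
 * everything else goes straight to system memory.
 */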
static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict, bool no_wait,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
                                        evict, no_wait, new_mem);
        nouveau_fence_unref((void *)&fence);
        return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
                      struct ttm_mem_reg *mem)
{
        if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
                if (mem->mem_type == TTM_PL_TT)
                        return NvDmaGART;
                return NvDmaVRAM;
        }

        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

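/* Copy a buffer with the M2MF engine, page by page, in chunks of up to
 * 2047 pages.  Offsets are rebased into the shared VM when the copy is
 * done on a user channel rather than the kernel channel.
 */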
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     int no_wait, struct ttm_mem_reg *new_mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_channel *chan;
        uint64_t src_offset, dst_offset;
        uint32_t page_count;
        int ret;

        chan = nvbo->channel;
        if (!chan || nvbo->tile_flags || nvbo->no_vm)
                chan = dev_priv->channel;

        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        if (chan != dev_priv->channel) {
                if (old_mem->mem_type == TTM_PL_TT)
                        src_offset += dev_priv->vm_gart_base;
                else
                        src_offset += dev_priv->vm_vram_base;

                if (new_mem->mem_type == TTM_PL_TT)
                        dst_offset += dev_priv->vm_gart_base;
                else
                        dst_offset += dev_priv->vm_vram_base;
        }

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

        if (dev_priv->card_type >= NV_50) {
                ret = RING_SPACE(chan, 4);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                OUT_RING(chan, 1);
                BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                OUT_RING(chan, 1);
        }

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                if (dev_priv->card_type >= NV_50) {
                        ret = RING_SPACE(chan, 3);
                        if (ret)
                                return ret;
                        BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                        OUT_RING(chan, upper_32_bits(src_offset));
                        OUT_RING(chan, upper_32_bits(dst_offset));
                }
                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING(chan, lower_32_bits(src_offset));
                OUT_RING(chan, lower_32_bits(dst_offset));
                OUT_RING(chan, PAGE_SIZE); /* src_pitch */
                OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING(chan, PAGE_SIZE); /* line_length */
                OUT_RING(chan, line_count);
                OUT_RING(chan, (1<<8)|(1<<0));
                OUT_RING(chan, 0);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING(chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}

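/* VRAM -> system move: copy into a GART-backed temporary with the M2MF
 * engine first, then let TTM move the temporary into system memory.
 */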
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait, struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

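/* System -> VRAM move: let TTM bind the buffer into a GART temporary
 * first, then copy it to its final location with the M2MF engine.
 */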
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait, struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
        if (ret)
                goto out;

out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

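/* Set up VRAM tiling state for a buffer's new location: a linear VM
 * binding on NV50, or a tiling region on NV10 and later cards.
 */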
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        uint64_t offset;
        int ret;

        if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
                /* Nothing to do. */
                *new_tile = NULL;
                return 0;
        }

        offset = new_mem->mm_node->start << PAGE_SHIFT;

        if (dev_priv->card_type == NV_50) {
                ret = nv50_mem_vm_bind_linear(dev,
                                              offset + dev_priv->vm_vram_base,
                                              new_mem->size, nvbo->tile_flags,
                                              offset);
                if (ret)
                        return ret;

        } else if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode);
        }

        return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        if (dev_priv->card_type >= NV_10 &&
            dev_priv->card_type < NV_50) {
                if (*old_tile)
                        nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

                *old_tile = new_tile;
        }
}

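/* Main TTM move callback: set up tiling for the destination, then try a
 * GPU copy (M2MF, possibly via a GART temporary) and fall back to a CPU
 * memcpy if the card isn't initialised or the GPU copy fails.
 */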
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
        if (ret)
                return ret;

        /* Software copy if the card isn't up and running yet. */
        if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
            !dev_priv->channel) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
                goto out;
        }

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

out:
        if (ret)
                nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
        else
                nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

        return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = nouveau_fence_signalled,
        .sync_obj_wait = nouveau_fence_wait,
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
};