/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>

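/*
 * Wait for any GPU work pending on the buffer to finish.  If the
 * buffer's fence was emitted on the given channel, ordering is already
 * guaranteed by the channel itself and no wait is necessary.
 */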
int
nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
{
	struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
	int ret;

	if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
		return 0;

	spin_lock(&nvbo->bo.lock);
	ret = ttm_bo_wait(&nvbo->bo, false, false, false);
	spin_unlock(&nvbo->bo.lock);
	return ret;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	kfree(nvbo);
}

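/*
 * Worked example for the NV_50 path below: on a board with 512 MiB of
 * VRAM, block_size is 512 MiB >> 15 = 16 KiB, which is a power of two,
 * so the first loop tries 12 * 1 * 16 KiB = 192 KiB; that is already a
 * multiple of 64 KiB, so the loop stops at i = 1.
 */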
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size.  Align the size to the
	 * appropriate boundaries.  This does imply that sizes are rounded up
	 * by 3-7 pages, so be aware of this and do not waste memory by
	 * allocating many small buffers.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = dev_priv->vram_size >> 15;
		int i;

		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			if (is_power_of_2(block_size)) {
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			*size = roundup(*size, *align);
			break;
		default:
			break;
		}

	} else {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

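/*
 * Allocate and initialise a buffer object.  A typical call (a sketch
 * only; the sizes and flags here are hypothetical) looks like:
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0x0000, false, true, &nvbo);
 *
 * which returns a 64 KiB, VRAM-backed, CPU-mappable bo in *pnvbo.
 */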
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;

	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails. */
		return ret;
	}
	nvbo->channel = NULL;

	*pnvbo = nvbo;
	return 0;
}

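/*
 * Build the TTM placement arrays for a bo.  "type" selects the memory
 * domains (VRAM/TT/SYSTEM) the buffer may live in; "busy" adds extra
 * domains used only as a fallback when the preferred ones are full.
 */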
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);
}

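/*
 * Pin a bo into the requested memory domain and mark it non-evictable.
 * Pins are refcounted: only the first pin (and, in nouveau_bo_unpin(),
 * the last unpin) actually revalidates the buffer.
 */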
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

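/*
 * Map the entire buffer into kernel virtual address space.  The
 * mapping is cached in nvbo->kmap and released by nouveau_bo_unmap().
 */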
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

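/*
 * 16- and 32-bit accessors for a kmapped bo.  They use the io*
 * variants whenever the mapping points at I/O memory (e.g. VRAM seen
 * through the BAR) rather than ordinary system pages.
 */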
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

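/*
 * Tell TTM how each memory type (system RAM, VRAM, GART) can be
 * mapped and cached, and where it sits in the GPU's address space.
 */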
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		if (dev_priv->card_type == NV_50)
			man->gpu_offset = 0x40000000;
		else
			man->gpu_offset = 0;
		break;
	case TTM_PL_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

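/*
 * Fence a GPU copy and hand the fence to TTM so the old backing store
 * is not released before the copy has actually completed.
 */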
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict || (nvbo->channel &&
						  nvbo->channel != chan),
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref((void *)&fence);
	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (nvbo->no_vm) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

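/*
 * NV50+ M2MF copy.  The transfer is sliced into chunks of at most
 * 4 MiB; each chunk programs linear or tiled layout for source and
 * destination depending on the tile_flags of the memory involved.
 */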
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (!nvbo->no_vm) {
		if (old_mem->mem_type == TTM_PL_VRAM)
			src_offset += dev_priv->vm_vram_base;
		else
			src_offset += dev_priv->vm_gart_base;

		if (new_mem->mem_type == TTM_PL_VRAM)
			dst_offset += dev_priv->vm_vram_base;
		else
			dst_offset += dev_priv->vm_gart_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

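/*
 * Pre-NV50 M2MF copy: a plain linear blit, one page per line and at
 * most 2047 lines per ring submission.
 */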
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

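/*
 * Select a channel and the M2MF implementation matching the chipset,
 * then fence the copy so TTM can safely retire the old memory.
 */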
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->no_vm)
		chan = dev_priv->channel;

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret)
		return ret;

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}

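/*
 * VRAM<->SYSTEM moves cannot be done in one hop: the data is bounced
 * through a GART-backed (TT) buffer, with the GPU copying on the VRAM
 * side and ttm_bo_move_ttm() handling the system side.
 */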
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		if (*old_tile)
			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

		*old_tile = new_tile;
	}
}

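/*
 * Top-level TTM move hook: binds tiling state for the new location,
 * then picks the cheapest copy available (no-op, hardware M2MF, or
 * memcpy fallback) for the transition at hand.
 */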
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

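/*
 * Called when a CPU fault is about to touch the buffer: make sure a
 * VRAM bo lies within the BAR-mappable region, migrating it if needed.
 */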
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 || !nvbo->tile_flags)
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.mm_node->start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};