/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in the drm_mm_node struct so we know which bo
 * owns a specific node. There is no protection on the pointer, thus to
 * make sure things don't go berserk you have to access this pointer
 * while holding the global lru lock, and make sure that anytime you
 * free a node you reset the pointer to NULL.
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

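/*
 * Note: the TTM_PL_FLAG_* placement flags are one bit per memory type,
 * so the lowest set bit in @flags selects the memory type index. The
 * scan stops at TTM_PL_PRIV5, the last (driver-private) type.
 */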
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
		man->available_caching);
	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
		man->default_caching);
	if (mem_type != TTM_PL_SYSTEM) {
		spin_lock(&bdev->glob->lru_lock);
		drm_mm_debug_table(&man->manager, TTM_PFX);
		spin_unlock(&bdev->glob->lru_lock);
	}
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
		bo, bo->mem.num_pages, bo->mem.size >> 10,
		bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
			i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

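/*
 * Final release of the list_kref: at this point the bo must already be
 * off all lists and hold no remaining resources, which is what the
 * BUG_ON()s below assert before the memory is handed back.
 */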
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
		kfree(bo);
	}
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		int ret;

		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return ret;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

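/*
 * Reservation protocol: bo->reserved is claimed with an atomic cmpxchg.
 * When multiple buffers are reserved as a group (use_sequence), a bo
 * already reserved under a sequence that is not older than ours makes us
 * back off with -EAGAIN so the caller can release everything and retry,
 * avoiding reservation deadlocks; the (sequence - bo->val_seq < (1 << 31))
 * test is a wrap-safe ordering check.
 */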
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (use_sequence && bo->seq_valid &&
			(sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through: device buffers use the same kernel-side ttm */
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

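/*
 * Carry out the actual move: tear down CPU mappings when the caching or
 * PCI aperture status changes, create and bind a ttm when moving into a
 * non-fixed memory type, then move the data with ttm_bo_move_ttm(), the
 * driver's move() hook, or the ttm_bo_move_memcpy() fallback.
 */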
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve,
				      no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
					 no_wait_gpu, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call with bo::reserved held and with the lru lock held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks.
 * Will release the bo::reserved lock and the
 * lru lock on exit.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	if (bo->ttm) {

		/**
		 * Release the lru_lock, since we don't want to have
		 * an atomic requirement on ttm_tt[unbind|destroy].
		 */

		spin_unlock(&glob->lru_lock);
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
		spin_lock(&glob->lru_lock);
	}

	if (bo->mem.mm_node) {
		drm_mm_put_block(bo->mem.mm_node);
		bo->mem.mm_node = NULL;
	}

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&glob->lru_lock);
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
retry:
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);

		/**
		 * Someone else has the object reserved. Bail and retry.
		 */

		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			spin_lock(&bo->lock);
			goto requeue;
		}

		/**
		 * We can re-check for sync object without taking
		 * the bo::lock since setting the sync object requires
		 * also bo::reserved. A busy object at this point may
		 * be caused by another thread starting an accelerated
		 * eviction.
		 */

		if (unlikely(bo->sync_obj)) {
			atomic_set(&bo->reserved, 0);
			wake_up_all(&bo->event_queue);
			spin_unlock(&glob->lru_lock);
			spin_lock(&bo->lock);
			if (remove_all)
				goto retry;
			else
				goto requeue;
		}

		put_count = ttm_bo_del_from_lru(bo);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			++put_count;
		}

		ttm_bo_cleanup_memtype_use(bo);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_ref_bug);

		return 0;
	}
requeue:
	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

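/*
 * Evict a single buffer: wait for GPU idle, ask the driver where evicted
 * buffers of this type should go via evict_flags(), find space there with
 * ttm_bo_mem_space() and move the buffer there. The caller must hold the
 * reservation.
 */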
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved = false;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		spin_lock(&glob->lru_lock);
		if (evict_mem.mm_node) {
			drm_mm_put_block(evict_mem.mm_node);
			evict_mem.mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible, bool no_wait_reserve,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret, put_count = 0;

retry:
	spin_lock(&glob->lru_lock);
	if (list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);

	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

	if (unlikely(ret == -EBUSY)) {
		spin_unlock(&glob->lru_lock);
		if (likely(!no_wait_gpu))
			ret = ttm_bo_wait_unreserved(bo, interruptible);

		kref_put(&bo->list_kref, ttm_bo_release_list);

		/**
		 * We *need* to retry after releasing the lru lock.
		 */

		if (unlikely(ret != 0))
			return ret;
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

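/*
 * Allocate a range from the memory type's drm_mm. drm_mm_pre_get() runs
 * outside the lru spinlock to pre-allocate node memory, then the search
 * and the atomic get run under the lock; the loop restarts if another
 * thread raced us to the block.
 */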
static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
					struct ttm_mem_type_manager *man,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					struct drm_mm_node **node)
{
	struct ttm_bo_global *glob = bo->glob;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;
	*node = NULL;
	do {
		ret = drm_mm_pre_get(&man->manager);
		if (unlikely(ret))
			return ret;

		spin_lock(&glob->lru_lock);
		*node = drm_mm_search_free_in_range(&man->manager,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(*node == NULL)) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
							mem->page_alignment,
							placement->fpfn,
							lpfn);
		spin_unlock(&glob->lru_lock);
	} while (*node == NULL);
	return 0;
}
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_reserve,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_mm_node *node;
	int ret;

	do {
		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
		if (unlikely(ret != 0))
			return ret;
		if (node)
			break;
		spin_lock(&glob->lru_lock);
		if (list_empty(&man->lru)) {
			spin_unlock(&glob->lru_lock);
			break;
		}
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
					  no_wait_reserve, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (node == NULL)
		return -ENOMEM;
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	struct drm_mm_node *node = NULL;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						bo->type == ttm_bo_type_user,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Apply the access and other non-mapping-related flag bits
		 * from the memory placement flags to the current flags.
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = ttm_bo_man_get_node(bo, man, placement, mem,
							&node);
			if (unlikely(ret))
				return ret;
		}
		if (node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						bo->type == ttm_bo_type_user,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Apply the access and other non-mapping-related flag bits
		 * from the memory placement flags to the current flags.
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
					     interruptible, no_wait_reserve,
					     no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	return wait_event_interruptible(bo->event_queue,
					atomic_read(&bo->cpu_writers) == 0);
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bo->lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved = false;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_reserve, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible,
				     no_wait_reserve, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&glob->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return ret;
}

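/*
 * Check whether the current placement of @mem already satisfies
 * @placement. Returns the index of the first matching placement entry
 * (so the caller can apply its non-memtype flag bits), or -1 when a
 * move is required.
 */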
static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;
	struct drm_mm_node *node = mem->mm_node;

	if (node && placement->lpfn != 0 &&
	    (node->start < placement->fpfn ||
	     node->start + node->size > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	/* Check that the range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move the buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_reserve, no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Apply the access and other non-mapping-related flag bits
		 * from the compatible memory placement to the active flags.
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	int i;

	if (placement->fpfn || placement->lpfn) {
		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
			printk(KERN_ERR TTM_PFX "Page number range too small: "
				"Need %lu pages, range is [%u, %u]\n",
				bo->mem.num_pages, placement->fpfn,
				placement->lpfn);
			return -EINVAL;
		}
	}
	for (i = 0; i < placement->num_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
					"modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	for (i = 0; i < placement->num_busy_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
					"modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}

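/*
 * Initialize an embedded buffer object. The object starts out reserved
 * and is unreserved once setup and the initial ttm_bo_validate() have
 * succeeded. On failure the object is unreserved and unreffed, so the
 * caller must not use or free @bo after an error return.
 */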
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		unsigned long buffer_start,
		bool interruptible,
		struct file *persistant_swap_storage,
		size_t acc_size,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved = false;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);

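/*
 * A minimal usage sketch (not from this file; "my_bo", "my_placement"
 * and "my_destroy" are hypothetical driver-side names): a driver
 * typically embeds struct ttm_buffer_object in its own bo type and
 * calls, e.g.:
 *
 *	ret = ttm_bo_init(bdev, &my_bo->base, size, ttm_bo_type_device,
 *			  &my_placement, 0, 0, false, NULL, acc_size,
 *			  my_destroy);
 *
 * where acc_size would be obtained from ttm_bo_size() below.
 */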
static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return glob->ttm_bo_size + 2 * page_array_size;
}

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			unsigned long buffer_start,
			bool interruptible,
			struct file *persistant_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	int ret;

	size_t acc_size =
	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
				buffer_start, interruptible,
				persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}

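/*
 * Drain a memory type's LRU by evicting one buffer at a time with
 * ttm_mem_evict_first() until the list is empty. With @allow_errors the
 * first eviction failure is returned to the caller; otherwise it is only
 * logged and draining continues.
 */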
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001313static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
Jerome Glisseca262a9992009-12-08 15:33:32 +01001314 unsigned mem_type, bool allow_errors)
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001315{
Jerome Glisseca262a9992009-12-08 15:33:32 +01001316 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001317 struct ttm_bo_global *glob = bdev->glob;
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001318 int ret;
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001319
1320 /*
1321 * Can't use standard list traversal since we're unlocking.
1322 */
1323
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001324 spin_lock(&glob->lru_lock);
Jerome Glisseca262a9992009-12-08 15:33:32 +01001325 while (!list_empty(&man->lru)) {
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001326 spin_unlock(&glob->lru_lock);
Jerome Glisse9d87fa22010-04-07 10:21:19 +00001327 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
Jerome Glisseca262a9992009-12-08 15:33:32 +01001328 if (ret) {
1329 if (allow_errors) {
1330 return ret;
1331 } else {
1332 printk(KERN_ERR TTM_PFX
1333 "Cleanup eviction failed\n");
1334 }
1335 }
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001336 spin_lock(&glob->lru_lock);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001337 }
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001338 spin_unlock(&glob->lru_lock);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001339 return 0;
1340}
1341
1342int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1343{
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001344 struct ttm_bo_global *glob = bdev->glob;
Roel Kluinc96e7c72009-08-03 14:22:53 +02001345 struct ttm_mem_type_manager *man;
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001346 int ret = -EINVAL;
1347
1348 if (mem_type >= TTM_NUM_MEM_TYPES) {
1349 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1350 return ret;
1351 }
Roel Kluinc96e7c72009-08-03 14:22:53 +02001352 man = &bdev->man[mem_type];
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001353
1354 if (!man->has_type) {
1355 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1356 "memory manager type %u\n", mem_type);
1357 return ret;
1358 }
1359
1360 man->use_type = false;
1361 man->has_type = false;
1362
1363 ret = 0;
1364 if (mem_type > 0) {
Jerome Glisseca262a9992009-12-08 15:33:32 +01001365 ttm_bo_force_list_clean(bdev, mem_type, false);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001366
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001367 spin_lock(&glob->lru_lock);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001368 if (drm_mm_clean(&man->manager))
1369 drm_mm_takedown(&man->manager);
1370 else
1371 ret = -EBUSY;
1372
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001373 spin_unlock(&glob->lru_lock);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001374 }
1375
1376 return ret;
1377}
1378EXPORT_SYMBOL(ttm_bo_clean_mm);
1379
1380int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1381{
1382 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1383
1384 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1385 printk(KERN_ERR TTM_PFX
1386 "Illegal memory manager memory type %u.\n",
1387 mem_type);
1388 return -EINVAL;
1389 }
1390
1391 if (!man->has_type) {
1392 printk(KERN_ERR TTM_PFX
1393 "Memory type %u has not been initialized.\n",
1394 mem_type);
1395 return 0;
1396 }
1397
Jerome Glisseca262a9992009-12-08 15:33:32 +01001398 return ttm_bo_force_list_clean(bdev, mem_type, true);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001399}
1400EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory manager already initialized for type %d\n",
		       type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
			       "Zero size memory manager type %d\n",
			       type);
			return -EINVAL;
		}
		ret = drm_mm_init(&man->manager, 0, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
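
/*
 * Example: a hedged sketch of registering a fixed memory type right
 * after ttm_bo_device_init(). "my_dev" and "vram_size" are
 * hypothetical; note that p_size is given in pages, not bytes:
 *
 *	ret = ttm_bo_init_mm(&my_dev->bdev, TTM_PL_VRAM,
 *			     my_dev->vram_size >> PAGE_SHIFT);
 *	if (ret)
 *		DRM_ERROR("Failed initializing VRAM heap\n");
 */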

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_no_shrink;
	}

	glob->ttm_bo_extra_size =
		ttm_round_pot(sizeof(struct ttm_tt)) +
		ttm_round_pot(sizeof(struct ttm_backend));

	glob->ttm_bo_size = glob->ttm_bo_extra_size +
		ttm_round_pot(sizeof(struct ttm_buffer_object));

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(),
		"buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
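
/*
 * Example: a hedged sketch of how a driver typically wires up the bo
 * global through the drm global-item mechanism ("my_dev" is a
 * hypothetical device structure holding a struct ttm_bo_global_ref):
 *
 *	struct drm_global_reference *ref = &my_dev->bo_global_ref.ref;
 *
 *	my_dev->bo_global_ref.mem_glob = my_dev->mem_global_ref.object;
 *	ref->global_type = DRM_GLOBAL_TTM_BO;
 *	ref->size = sizeof(struct ttm_bo_global);
 *	ref->init = &ttm_bo_global_init;
 *	ref->release = &ttm_bo_global_release;
 *	ret = drm_global_item_ref(ref);
 */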

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;

	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
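
/*
 * Example: a hedged sketch of device setup. "my_dev", "my_bo_driver"
 * and DRM_FILE_PAGE_OFFSET are assumptions here; the offset is a
 * driver-chosen constant marking where buffer-object mmap offsets
 * start in the drm file address space:
 *
 *	ret = ttm_bo_device_init(&my_dev->bdev,
 *				 my_dev->bo_global_ref.ref.object,
 *				 &my_bo_driver, DRM_FILE_PAGE_OFFSET,
 *				 my_dev->need_dma32);
 */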

/*
 * Buffer object vm functions.
 */

/*
 * Return true if CPU access to @mem has to go through a PCI aperture
 * (io memory) rather than through normal page-based mappings.
 */
bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;
	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
	ttm_mem_io_free(bdev, &bo->mem);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

/*
 * Insert @bo into the device's address-space rb-tree, keyed by the
 * start offset of its vm_node. Called with bdev->vm_lock held for
 * writing.
 */
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}
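
/*
 * Example: a hedged sketch of the user-space side. The offset set up
 * above is what a driver reports to applications as the buffer's fake
 * mmap offset; mapping then uses plain mmap(2) on the drm file
 * descriptor ("drm_fd" and "size" are hypothetical):
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, bo->addr_space_offset);
 */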

/*
 * Wait for a buffer object to become idle. Called with bo->lock held;
 * the lock may be dropped and reacquired while waiting on the driver's
 * sync object.
 */
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		} else {
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
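
/*
 * Example: a hedged sketch of a non-blocking idle check. ttm_bo_wait()
 * expects bo->lock to be held on entry:
 *
 *	spin_lock(&bo->lock);
 *	ret = ttm_bo_wait(bo, false, true, true);
 *	spin_unlock(&bo->lock);
 *	if (ret == -EBUSY)
 *		... the buffer is still busy on the GPU ...
 */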

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
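
/*
 * Example: a hedged sketch of bracketing CPU writes. While cpu_writers
 * is elevated the buffer is intended to stay put, so a driver wraps
 * direct CPU access like this:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, true);
 *	if (ret == 0) {
 *		... write to the buffer through a kmap or user mapping ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */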

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
		container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/**
	 * Wait for the GPU, then move the buffer to cached system memory.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. The buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped-out buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
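
/*
 * Example: a hedged sketch. A driver that needs all buffer objects
 * pushed out to swap space, e.g. before hibernation, can drain the
 * swap LRU in one call ("my_dev" is hypothetical):
 *
 *	ttm_bo_swapout_all(&my_dev->bdev);
 */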