/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
        if (interruptible) {
                int ret = 0;

                ret = wait_event_interruptible(bo->event_queue,
                                               atomic_read(&bo->reserved) == 0);
                if (unlikely(ret != 0))
                        return -ERESTART;
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
        }
        return 0;
}

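/**
 * Put the bo back on its memory-type LRU list and, if it is backed by a
 * ttm, on the global swap LRU. A list_kref reference is taken for each
 * list the bo joins. Called with the reservation held and, like
 * ttm_bo_del_from_lru(), with the lru_lock held.
 */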
static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!atomic_read(&bo->reserved));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRU's here.
         */

        return put_count;
}

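/**
 * Reserve a buffer for validation, with the lru_lock already held.
 * When @use_sequence is set, a buffer already reserved under the same
 * or an older sequence number makes this attempt return -EAGAIN, so
 * that concurrent multi-buffer reservation sequences can back off and
 * retry instead of deadlocking.
 */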
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (use_sequence && bo->seq_valid &&
                    (sequence - bo->val_seq < (1 << 31))) {
                        return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
                spin_lock(&glob->lru_lock);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                bo->val_seq = sequence;
                bo->seq_valid = true;
        } else {
                bo->seq_valid = false;
        }

        return 0;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        spin_lock(&glob->lru_lock);
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call bo->mutex locked.
 */

static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                /* fall through */
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
                                        glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }

                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
                if (unlikely(ret != 0))
                        ttm_tt_destroy(bo->ttm);
                break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

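/**
 * Move the buffer into the memory region described by @mem: tear down
 * CPU mappings when the caching mode or PCI aperture visibility
 * changes, create and bind a ttm when entering a non-fixed memory
 * type, and hand the actual copy to ttm_bo_move_ttm(), the driver's
 * move callback, or the memcpy fallback.
 */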
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
                ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
                ret = ttm_bo_add_ttm(bo, false);
                if (ret)
                        goto out_err;

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {

                        struct ttm_mem_reg *old_mem = &bo->mem;
                        uint32_t save_flags = old_mem->placement;

                        *old_mem = *mem;
                        mem->mm_node = NULL;
                        ttm_flag_masked(&save_flags, mem->placement,
                                        TTM_PL_MASK_MEMTYPE);
                        goto moved;
                }

        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

        if (ret)
                goto out_err;

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                spin_lock(&bo->lock);
                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
                spin_unlock(&bo->lock);
        }

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        spin_lock(&bo->lock);
        (void) ttm_bo_wait(bo, false, false, !remove_all);

        if (!bo->sync_obj) {
                int put_count;

                spin_unlock(&bo->lock);

                spin_lock(&glob->lru_lock);
                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
                BUG_ON(ret);
                if (bo->ttm)
                        ttm_tt_unbind(bo->ttm);

                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
                }
                if (bo->mem.mm_node) {
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                put_count = ttm_bo_del_from_lru(bo);
                spin_unlock(&glob->lru_lock);

                atomic_set(&bo->reserved, 0);

                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_release_list);

                return 0;
        }

        spin_lock(&glob->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
                void *sync_obj_arg = bo->sync_obj_arg;

                kref_get(&bo->list_kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);

                if (sync_obj)
                        driver->sync_obj_flush(sync_obj, sync_obj_arg);
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                ret = 0;

        } else {
                spin_unlock(&glob->lru_lock);
                spin_unlock(&bo->lock);
                ret = -EBUSY;
        }

        return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry, *nentry;
        struct list_head *list, *next;
        int ret;

        spin_lock(&glob->lru_lock);
        list_for_each_safe(list, next, &bdev->ddestroy) {
                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
                nentry = NULL;

                /*
                 * Protect the next list entry from destruction while we
                 * unlock the lru_lock.
                 */

                if (next != &bdev->ddestroy) {
                        nentry = list_entry(next, struct ttm_buffer_object,
                                            ddestroy);
                        kref_get(&nentry->list_kref);
                }
                kref_get(&entry->list_kref);

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);

                spin_lock(&glob->lru_lock);
                if (nentry) {
                        bool next_onlist = !list_empty(next);
                        spin_unlock(&glob->lru_lock);
                        kref_put(&nentry->list_kref, ttm_bo_release_list);
                        spin_lock(&glob->lru_lock);
                        /*
                         * Someone might have raced us and removed the
                         * next entry from the list. We don't bother restarting
                         * list traversal.
                         */

                        if (!next_onlist)
                                break;
                }
                if (ret)
                        break;
        }
        ret = !list_empty(&bdev->ddestroy);
        spin_unlock(&glob->lru_lock);

        return ret;
}

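/**
 * Delayed-destroy work item: retry destruction of buffers that were
 * still busy on the GPU, rescheduling itself while entries remain.
 */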
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;

        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
        ttm_bo_cleanup_refs(bo, false);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;
        struct ttm_bo_device *bdev = bo->bdev;

        *p_bo = NULL;
        write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
        write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

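/**
 * Evict a reserved buffer from @mem_type: wait for the GPU to finish
 * with it, then move it to the placement returned by the driver's
 * evict_flags() callback, falling back to system memory if no space
 * can be found there.
 */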
static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
                        bool interruptible, bool no_wait)
{
        int ret = 0;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_reg evict_mem;
        uint32_t proposed_placement;

        if (bo->mem.mem_type != mem_type)
                goto out;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTART) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
                }
                goto out;
        }

        BUG_ON(!atomic_read(&bo->reserved));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;

        proposed_placement = bdev->driver->evict_flags(bo);

        ret = ttm_bo_mem_space(bo, proposed_placement,
                               &evict_mem, interruptible, no_wait);
        if (unlikely(ret != 0 && ret != -ERESTART))
                ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
                                       &evict_mem, interruptible, no_wait);

        if (ret) {
                if (ret != -ERESTART)
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait);
        if (ret) {
                if (ret != -ERESTART)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
                goto out;
        }

        spin_lock(&glob->lru_lock);
        if (evict_mem.mm_node) {
                drm_mm_put_block(evict_mem.mm_node);
                evict_mem.mm_node = NULL;
        }
        spin_unlock(&glob->lru_lock);
        bo->evicted = true;
out:
        return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
                                  struct ttm_mem_reg *mem,
                                  uint32_t mem_type,
                                  bool interruptible, bool no_wait)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct drm_mm_node *node;
        struct ttm_buffer_object *entry;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct list_head *lru;
        unsigned long num_pages = mem->num_pages;
        int put_count = 0;
        int ret;

retry_pre_get:
        ret = drm_mm_pre_get(&man->manager);
        if (unlikely(ret != 0))
                return ret;

        spin_lock(&glob->lru_lock);
        do {
                node = drm_mm_search_free(&man->manager, num_pages,
                                          mem->page_alignment, 1);
                if (node)
                        break;

                lru = &man->lru;
                if (list_empty(lru))
                        break;

                entry = list_first_entry(lru, struct ttm_buffer_object, lru);
                kref_get(&entry->list_kref);

                ret = ttm_bo_reserve_locked(entry, interruptible, no_wait,
                                            false, 0);

                if (likely(ret == 0))
                        put_count = ttm_bo_del_from_lru(entry);

                spin_unlock(&glob->lru_lock);

                if (unlikely(ret != 0))
                        return ret;

                while (put_count--)
                        kref_put(&entry->list_kref, ttm_bo_ref_bug);

                ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

                ttm_bo_unreserve(entry);

                kref_put(&entry->list_kref, ttm_bo_release_list);
                if (ret)
                        return ret;

                spin_lock(&glob->lru_lock);
        } while (1);

        if (!node) {
                spin_unlock(&glob->lru_lock);
                return -ENOMEM;
        }

        node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
        if (unlikely(!node)) {
                spin_unlock(&glob->lru_lock);
                goto retry_pre_get;
        }

        spin_unlock(&glob->lru_lock);
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /**
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}

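/**
 * Check whether the memory type managed by @man can satisfy
 * @proposed_placement. On success, the placement flags masked down to
 * what the memory type actually supports are returned in
 * @masked_placement.
 */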
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((proposed_placement & man->available_caching) == 0)
                return false;

        cur_flags |= (proposed_placement & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     uint32_t proposed_placement,
                     struct ttm_mem_reg *mem,
                     bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_mem_type_manager *man;

        uint32_t num_prios = bdev->driver->num_mem_type_prio;
        const uint32_t *prios = bdev->driver->mem_type_prio;
        uint32_t i;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_eagain = false;
        struct drm_mm_node *node = NULL;
        int ret;

        mem->mm_node = NULL;
        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                               bo->type == ttm_bo_type_user,
                                               mem_type, proposed_placement,
                                               &cur_flags);

                if (!type_ok)
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        do {
                                ret = drm_mm_pre_get(&man->manager);
                                if (unlikely(ret))
                                        return ret;

                                spin_lock(&glob->lru_lock);
                                node = drm_mm_search_free(&man->manager,
                                                          mem->num_pages,
                                                          mem->page_alignment,
                                                          1);
                                if (unlikely(!node)) {
                                        spin_unlock(&glob->lru_lock);
                                        break;
                                }
                                node = drm_mm_get_block_atomic(node,
                                                               mem->num_pages,
                                                               mem->page_alignment);
                                spin_unlock(&glob->lru_lock);
                        } while (!node);
                }
                if (node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        num_prios = bdev->driver->num_mem_busy_prio;
        prios = bdev->driver->mem_busy_prio;

        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                man = &bdev->man[mem_type];

                if (!man->has_type)
                        continue;

                if (!ttm_bo_mt_compatible(man,
                                          bo->type == ttm_bo_type_user,
                                          mem_type,
                                          proposed_placement, &cur_flags))
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);

                ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
                                             interruptible, no_wait);

                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }

                if (ret == -ERESTART)
                        has_eagain = true;
        }

        ret = (has_eagain) ? -ERESTART : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

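/**
 * Wait until all CPU writers (see ttm_bo_synccpu_write_grab()) have
 * released the buffer, or return -EBUSY immediately if @no_wait is set.
 */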
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = 0;

        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;

        ret = wait_event_interruptible(bo->event_queue,
                                       atomic_read(&bo->cpu_writers) == 0);

        if (ret == -ERESTARTSYS)
                ret = -ERESTART;

        return ret;
}

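/**
 * Find space matching @proposed_placement and move the buffer there.
 * If the move fails, the freshly allocated memory node is handed back
 * to its manager under the lru_lock.
 */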
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                       uint32_t proposed_placement,
                       bool interruptible, bool no_wait)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        struct ttm_mem_reg mem;

        BUG_ON(!atomic_read(&bo->reserved));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);

        if (ret)
                return ret;

        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;

        /*
         * Determine where to move the buffer.
         */

        ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
                               interruptible, no_wait);
        if (ret)
                goto out_unlock;

        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&glob->lru_lock);
                drm_mm_put_block(mem.mm_node);
                spin_unlock(&glob->lru_lock);
        }
        return ret;
}

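/**
 * Returns nonzero if the buffer's current placement already satisfies
 * both the proposed memory type and caching flags, in which case
 * validation needs no move.
 */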
static int ttm_bo_mem_compat(uint32_t proposed_placement,
                             struct ttm_mem_reg *mem)
{
        if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
                return 0;
        if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
                return 0;

        return 1;
}

int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
                               uint32_t proposed_placement,
                               bool interruptible, bool no_wait)
{
        int ret;

        BUG_ON(!atomic_read(&bo->reserved));
        bo->proposed_placement = proposed_placement;

        TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
                  (unsigned long)proposed_placement,
                  (unsigned long)bo->mem.placement);

        /*
         * Check whether we need to move buffer.
         */

        if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
                ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
                                         interruptible, no_wait);
                if (ret) {
                        if (ret != -ERESTART)
                                printk(KERN_ERR TTM_PFX
                                       "Failed moving buffer. "
                                       "Proposed placement 0x%08x\n",
                                       bo->proposed_placement);
                        if (ret == -ENOMEM)
                                printk(KERN_ERR TTM_PFX
                                       "Out of aperture space or "
                                       "DRM memory quota.\n");
                        return ret;
                }
        }

        /*
         * We might need to add a TTM.
         */

        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        /*
         * Validation has succeeded, move the access and other
         * non-mapping-related flag bits from the proposed flags to
         * the active flags
         */

        ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
                        ~TTM_PL_MASK_MEMTYPE);

        return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);

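/**
 * Sanity-check a placement change: user buffers must stay cached
 * (cache-coherent), and only CAP_SYS_ADMIN may modify NO_EVICT status
 * or clear the memory type of a NO_EVICT buffer.
 */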
int
ttm_bo_check_placement(struct ttm_buffer_object *bo,
                       uint32_t set_flags, uint32_t clr_flags)
{
        uint32_t new_mask = set_flags | clr_flags;

        if ((bo->type == ttm_bo_type_user) &&
            (clr_flags & TTM_PL_FLAG_CACHED)) {
                printk(KERN_ERR TTM_PFX
                       "User buffers require cache-coherent memory.\n");
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN)) {
                if (new_mask & TTM_PL_FLAG_NO_EVICT) {
                        printk(KERN_ERR TTM_PFX "Need to be root to modify"
                               " NO_EVICT status.\n");
                        return -EINVAL;
                }

                if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
                    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
                        printk(KERN_ERR TTM_PFX
                               "Incompatible memory specification"
                               " for NO_EVICT buffer.\n");
                        return -EINVAL;
                }
        }
        return 0;
}

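/**
 * Initialize a new buffer object in system memory. The bo starts out
 * reserved; on success it is validated against @flags and unreserved,
 * while on failure it is unreserved and unreferenced, which also frees
 * a bo allocated by ttm_buffer_object_create().
 */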
int ttm_buffer_object_init(struct ttm_bo_device *bdev,
                           struct ttm_buffer_object *bo,
                           unsigned long size,
                           enum ttm_bo_type type,
                           uint32_t flags,
                           uint32_t page_alignment,
                           unsigned long buffer_start,
                           bool interruptible,
                           struct file *persistant_swap_storage,
                           size_t acc_size,
                           void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;

        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
                return -EINVAL;
        }
        bo->destroy = destroy;

        spin_lock_init(&bo->lock);
        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistant_swap_storage = persistant_swap_storage;
        bo->acc_size = acc_size;
        atomic_inc(&bo->glob->bo_count);

        ret = ttm_bo_check_placement(bo, flags, 0ULL);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * If no caching attributes are set, accept any form of caching.
         */

        if ((flags & TTM_PL_MASK_CACHING) == 0)
                flags |= TTM_PL_MASK_CACHING;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */

        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
                                 unsigned long num_pages)
{
        size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
            PAGE_MASK;

        return glob->ttm_bo_size + 2 * page_array_size;
}

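/**
 * Allocate and initialize a buffer object, charging the estimated
 * kernel allocation size (ttm_bo_size()) against the global memory
 * accounting before the object itself is allocated.
 */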
int ttm_buffer_object_create(struct ttm_bo_device *bdev,
                             unsigned long size,
                             enum ttm_bo_type type,
                             uint32_t flags,
                             uint32_t page_alignment,
                             unsigned long buffer_start,
                             bool interruptible,
                             struct file *persistant_swap_storage,
                             struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        int ret;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

        size_t acc_size =
            ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);

        if (unlikely(bo == NULL)) {
                ttm_mem_global_free(mem_glob, acc_size);
                return -ENOMEM;
        }

        ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
                                     page_alignment, buffer_start,
                                     interruptible,
                                     persistant_swap_storage, acc_size, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}

static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
                             uint32_t mem_type, bool allow_errors)
{
        int ret;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bo->lock);

        if (ret && allow_errors)
                goto out;

        if (bo->mem.mem_type == mem_type)
                ret = ttm_bo_evict(bo, mem_type, false, false);

        if (ret) {
                if (allow_errors) {
                        goto out;
                } else {
                        ret = 0;
                        printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
                }
        }

out:
        return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                   struct list_head *head,
                                   unsigned mem_type, bool allow_errors)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry;
        int ret;
        int put_count;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&glob->lru_lock);

        while (!list_empty(head)) {
                entry = list_first_entry(head, struct ttm_buffer_object, lru);
                kref_get(&entry->list_kref);
                ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
                put_count = ttm_bo_del_from_lru(entry);
                spin_unlock(&glob->lru_lock);
                while (put_count--)
                        kref_put(&entry->list_kref, ttm_bo_ref_bug);
                BUG_ON(ret);
                ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
                ttm_bo_unreserve(entry);
                kref_put(&entry->list_kref, ttm_bo_release_list);
                spin_lock(&glob->lru_lock);
        }

        spin_unlock(&glob->lru_lock);

        return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man;
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }
        man = &bdev->man[mem_type];

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
                       "memory manager type %u\n", mem_type);
                return ret;
        }

        man->use_type = false;
        man->has_type = false;

        ret = 0;
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

                spin_lock(&glob->lru_lock);
                if (drm_mm_clean(&man->manager))
                        drm_mm_takedown(&man->manager);
                else
                        ret = -EBUSY;

                spin_unlock(&glob->lru_lock);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX
                       "Illegal memory manager memory type %u.\n",
                       mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory type %u has not been initialized.\n",
                       mem_type);
                return 0;
        }

        return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                   unsigned long p_offset, unsigned long p_size)
{
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;

        if (type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
                return ret;
        }

        man = &bdev->man[type];
        if (man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory manager already initialized for type %d\n",
                       type);
                return ret;
        }

        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;

        ret = 0;
        if (type != TTM_PL_SYSTEM) {
                if (!p_size) {
                        printk(KERN_ERR TTM_PFX
                               "Zero size memory manager type %d\n",
                               type);
                        return ret;
                }
                ret = drm_mm_init(&man->manager, p_offset, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = true;
        man->use_type = true;
        man->size = p_size;

        INIT_LIST_HEAD(&man->lru);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        printk(KERN_INFO TTM_PFX "Freeing bo global.\n");
        ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
        __free_page(glob->dummy_read_page);
        kfree(glob);
}

void ttm_bo_global_release(struct ttm_global_reference *ref)
{
        struct ttm_bo_global *glob = ref->object;

        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct ttm_global_reference *ref)
{
        struct ttm_bo_global_ref *bo_ref =
                container_of(ref, struct ttm_bo_global_ref, ref);
        struct ttm_bo_global *glob = ref->object;
        int ret;

        mutex_init(&glob->device_list_mutex);
        spin_lock_init(&glob->lru_lock);
        glob->mem_glob = bo_ref->mem_glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out_no_drp;
        }

        INIT_LIST_HEAD(&glob->swap_lru);
        INIT_LIST_HEAD(&glob->device_list);

        ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
        ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
        if (unlikely(ret != 0)) {
                printk(KERN_ERR TTM_PFX
                       "Could not register buffer object swapout.\n");
                goto out_no_shrink;
        }

        glob->ttm_bo_extra_size =
                ttm_round_pot(sizeof(struct ttm_tt)) +
                ttm_round_pot(sizeof(struct ttm_backend));

        glob->ttm_bo_size = glob->ttm_bo_extra_size +
                ttm_round_pot(sizeof(struct ttm_buffer_object));

        atomic_set(&glob->bo_count, 0);

        kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
        ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
        if (unlikely(ret != 0))
                kobject_put(&glob->kobj);
        return ret;
out_no_shrink:
        __free_page(glob->dummy_read_page);
out_no_drp:
        kfree(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
        int ret = 0;
        unsigned i = TTM_NUM_MEM_TYPES;
        struct ttm_mem_type_manager *man;
        struct ttm_bo_global *glob = bdev->glob;

        while (i--) {
                man = &bdev->man[i];
                if (man->has_type) {
                        man->use_type = false;
                        if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
                                ret = -EBUSY;
                                printk(KERN_ERR TTM_PFX
                                       "DRM memory manager type %d "
                                       "is not clean.\n", i);
                        }
                        man->has_type = false;
                }
        }

        mutex_lock(&glob->device_list_mutex);
        list_del(&bdev->device_list);
        mutex_unlock(&glob->device_list_mutex);

        if (!cancel_delayed_work(&bdev->wq))
                flush_scheduled_work();

        while (ttm_bo_delayed_delete(bdev, true))
                ;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                TTM_DEBUG("Delayed destroy list was clean\n");

        if (list_empty(&bdev->man[0].lru))
                TTM_DEBUG("Swap list was clean\n");
        spin_unlock(&glob->lru_lock);

        BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
        write_lock(&bdev->vm_lock);
        drm_mm_takedown(&bdev->addr_space_mm);
        write_unlock(&bdev->vm_lock);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
                       uint64_t file_page_offset,
                       bool need_dma32)
{
        int ret = -EINVAL;

        rwlock_init(&bdev->vm_lock);
        spin_lock_init(&glob->lru_lock);

        bdev->driver = driver;

        memset(bdev->man, 0, sizeof(bdev->man));

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
        if (unlikely(ret != 0))
                goto out_no_sys;

        bdev->addr_space_rb = RB_ROOT;
        ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
        if (unlikely(ret != 0))
                goto out_no_addr_mm;

        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        bdev->nice_mode = true;
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = NULL;
        bdev->glob = glob;
        bdev->need_dma32 = need_dma32;

        mutex_lock(&glob->device_list_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
        mutex_unlock(&glob->device_list_mutex);

        return 0;
out_no_addr_mm:
        ttm_bo_clean_mm(bdev, 0);
out_no_sys:
        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (mem->mem_type == TTM_PL_SYSTEM)
                        return false;

                if (man->flags & TTM_MEMTYPE_FLAG_CMA)
                        return false;

                if (mem->placement & TTM_PL_FLAG_CACHED)
                        return false;
        }
        return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                      struct ttm_mem_reg *mem,
                      unsigned long *bus_base,
                      unsigned long *bus_offset, unsigned long *bus_size)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        *bus_size = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;

        if (ttm_mem_reg_is_pci(bdev, mem)) {
                *bus_offset = mem->mm_node->start << PAGE_SHIFT;
                *bus_size = mem->num_pages << PAGE_SHIFT;
                *bus_base = man->io_offset;
        }

        return 0;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        loff_t offset = (loff_t) bo->addr_space_offset;
        loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

        if (!bdev->dev_mapping)
                return;

        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct rb_node **cur = &bdev->addr_space_rb.rb_node;
        struct rb_node *parent = NULL;
        struct ttm_buffer_object *cur_bo;
        unsigned long offset = bo->vm_node->start;
        unsigned long cur_offset;

        while (*cur) {
                parent = *cur;
                cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
                cur_offset = cur_bo->vm_node->start;
                if (offset < cur_offset)
                        cur = &parent->rb_left;
                else if (offset > cur_offset)
                        cur = &parent->rb_right;
                else
                        BUG();
        }

        rb_link_node(&bo->vm_rb, parent, cur);
        rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret;

retry_pre_get:
        ret = drm_mm_pre_get(&bdev->addr_space_mm);
        if (unlikely(ret != 0))
                return ret;

        write_lock(&bdev->vm_lock);
        bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
                                         bo->mem.num_pages, 0, 0);

        if (unlikely(bo->vm_node == NULL)) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
                                              bo->mem.num_pages, 0);

        if (unlikely(bo->vm_node == NULL)) {
                write_unlock(&bdev->vm_lock);
                goto retry_pre_get;
        }

        ttm_bo_vm_insert_rb(bo);
        write_unlock(&bdev->vm_lock);
        bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

        return 0;
out_unlock:
        write_unlock(&bdev->vm_lock);
        return ret;
}

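/**
 * Wait for the buffer's sync object to signal, optionally lazily or
 * interruptibly. Called with bo->lock held; the lock is dropped and
 * retaken around the actual wait.
 */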
int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait)
{
        struct ttm_bo_driver *driver = bo->bdev->driver;
        void *sync_obj;
        void *sync_obj_arg;
        int ret = 0;

        if (likely(bo->sync_obj == NULL))
                return 0;

        while (bo->sync_obj) {

                if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
                        continue;
                }

                if (no_wait)
                        return -EBUSY;

                sync_obj = driver->sync_obj_ref(bo->sync_obj);
                sync_obj_arg = bo->sync_obj_arg;
                spin_unlock(&bo->lock);
                ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
                                            lazy, interruptible);
                if (unlikely(ret != 0)) {
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bo->lock);
                        return ret;
                }
                spin_lock(&bo->lock);
                if (likely(bo->sync_obj == sync_obj &&
                           bo->sync_obj_arg == sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING,
                                  &bo->priv_flags);
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&sync_obj);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
                } else {
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bo->lock);
                }
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
                             bool no_wait)
{
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (no_wait)
                        return -EBUSY;
                else if (interruptible) {
                        ret = wait_event_interruptible
                            (bo->event_queue, atomic_read(&bo->reserved) == 0);
                        if (unlikely(ret != 0))
                                return -ERESTART;
                } else {
                        wait_event(bo->event_queue,
                                   atomic_read(&bo->reserved) == 0);
                }
        }
        return 0;
}

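/**
 * Grab the buffer for CPU write access: reserve it, wait for the GPU
 * to finish, then bump cpu_writers so that ttm_bo_wait_cpu() callers
 * block until the matching ttm_bo_synccpu_write_release().
 */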
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = 0;

        /*
         * Using ttm_bo_reserve instead of ttm_bo_block_reservation
         * makes sure the lru lists are updated.
         */

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        if (unlikely(ret != 0))
                return ret;
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, true, no_wait);
        spin_unlock(&bo->lock);
        if (likely(ret == 0))
                atomic_inc(&bo->cpu_writers);
        ttm_bo_unreserve(bo);
        return ret;
}

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
        if (atomic_dec_and_test(&bo->cpu_writers))
                wake_up_all(&bo->event_queue);
}

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
        struct ttm_bo_global *glob =
            container_of(shrink, struct ttm_bo_global, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        int put_count;
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

        spin_lock(&glob->lru_lock);
        while (ret == -EBUSY) {
                if (unlikely(list_empty(&glob->swap_lru))) {
                        spin_unlock(&glob->lru_lock);
                        return -EBUSY;
                }

                bo = list_first_entry(&glob->swap_lru,
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);

                /**
                 * Reserve buffer. Since we unlock while sleeping, we need
                 * to re-check that nobody removed us from the swap-list while
                 * we slept.
                 */

                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                if (unlikely(ret == -EBUSY)) {
                        spin_unlock(&glob->lru_lock);
                        ttm_bo_wait_unreserved(bo, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
                        spin_lock(&glob->lru_lock);
                }
        }

        BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        /**
         * Wait for GPU, then move to system cached.
         */

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0))
                goto out;

        if ((bo->mem.placement & swap_placement) != swap_placement) {
                struct ttm_mem_reg evict_mem;

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
                evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
                evict_mem.mem_type = TTM_PL_SYSTEM;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
                                             false, false);
                if (unlikely(ret != 0))
                        goto out;
        }

        ttm_bo_unmap_virtual(bo);

        /**
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */

        ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

        /**
         *
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */

        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
        while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
                ;
}