/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};

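/*
 * Store in @mem_type the index of the lowest TTM_PL_* placement flag set
 * in @flags. Returns -EINVAL if no placement flag is set.
 */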
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

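/*
 * Final destructor for the list reference count, called once the last
 * list reference is dropped. The bo must by now be idle and off all
 * LRU and delayed-destroy lists; destroy the backing TTM, drop the
 * memory accounting and free (or hand back to the driver) the object.
 */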
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else
		kfree(bo);
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

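/*
 * Block on bo->event_queue until the bo is no longer reserved,
 * optionally allowing the wait to be interrupted by a signal.
 */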
static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
				  bool interruptible)
{
	if (interruptible) {
		return wait_event_interruptible(bo->event_queue,
						!ttm_bo_is_reserved(bo));
	} else {
		wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
		return 0;
	}
}

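/*
 * Put the reserved bo back on the LRU list(s) it belongs on, taking one
 * list reference per list. Pinned (TTM_PL_FLAG_NO_EVICT) buffers stay
 * off the LRU; buffers with a TTM also go on the global swap LRU.
 */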
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!ttm_bo_is_reserved(bo));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

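/*
 * Take the bo off the LRU and swap lists. Returns the number of list
 * references the caller must drop, e.g. via ttm_bo_list_ref_sub().
 */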
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

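/*
 * Try to take the bo reservation without touching the LRU lists.
 * When a ticket is supplied, its stamp doubles as the sequence number
 * used for deadlock avoidance in multi-bo reservations: -EDEADLK means
 * this thread already holds the bo, -EAGAIN means it is held by a
 * thread that will not back off, so the caller must back off instead.
 */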
int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
			 bool interruptible,
			 bool no_wait, bool use_ticket,
			 struct ww_acquire_ctx *ticket)
{
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		/*
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_ticket && bo->seq_valid) {
			/*
			 * We've already reserved this one.
			 */
			if (unlikely(ticket->stamp == bo->val_seq))
				return -EDEADLK;
			/*
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
			if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		ret = ttm_bo_wait_unreserved(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	if (use_ticket) {
		bool wake_up = false;

		/*
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
			     || !bo->seq_valid))
			wake_up = true;

		/*
		 * In the worst case with memory ordering these values can be
		 * seen in the wrong order. However since we call wake_up_all
		 * in that case, this will hopefully not pose a problem,
		 * and the worst case would only cause someone to accidentally
		 * hit -EAGAIN in ttm_bo_reserve when they see an old value of
		 * val_seq. However, this would only happen if seq_valid was
		 * written before val_seq was, and just means some slightly
		 * increased cpu usage.
		 */
		bo->val_seq = ticket->stamp;
		bo->seq_valid = true;
		if (wake_up)
			wake_up_all(&bo->event_queue);
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

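/*
 * Drop @count list references in one go. With @never_free set, reaching
 * zero triggers a BUG(): the caller asserts it is not dropping the final
 * list reference here.
 */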
void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_ticket,
		   struct ww_acquire_ctx *ticket)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
				   ticket);
	if (likely(ret == 0)) {
		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}

	return ret;
}

int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
				  bool interruptible,
				  struct ww_acquire_ctx *ticket)
{
	bool wake_up = false;
	int ret;

	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
		WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);

		ret = ttm_bo_wait_unreserved(bo, interruptible);

		if (unlikely(ret))
			return ret;
	}

	if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
		wake_up = true;

	/*
	 * Wake up waiters that may need to recheck for deadlock,
	 * if we decreased the sequence number.
	 */
	bo->val_seq = ticket->stamp;
	bo->seq_valid = true;
	if (wake_up)
		wake_up_all(&bo->event_queue);

	return 0;
}

int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
			    bool interruptible, struct ww_acquire_ctx *ticket)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count, ret;

	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
	if (likely(!ret)) {
		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve_slowpath);

void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo,
				    struct ww_acquire_ctx *ticket)
{
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_unreserve_ticket_locked(bo, NULL);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
			     struct ww_acquire_ctx *ticket)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_unreserve_ticket_locked(bo, ticket);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve_ticket);

/*
 * Call with bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
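		/* fall through */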
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			pr_err("Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Call with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);

	/*
	 * Since the final reference to this bo may not be dropped by
	 * the current task we have to put a memory barrier here to make
	 * sure the changes done in this function are always visible.
	 *
	 * This function only needs protection against the final kref_put.
	 */
	smp_mb__before_atomic_dec();
}

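/*
 * If the bo is idle and can be reserved without blocking, clean it up
 * immediately; otherwise take a reference to its fence and put it on
 * the delayed-destroy list, to be retired later by the delayed-delete
 * workqueue once the fence has signaled.
 */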
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_nolru(bo, false, true, false, NULL);

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!ret && !bo->sync_obj) {
		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	}
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	spin_unlock(&bdev->fence_lock);

	if (!ret) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held; this function
 * will drop both before returning.
 *
 * @interruptible	Any sleeps should occur interruptibly.
 * @no_wait_gpu		Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, true);

	if (ret && !no_wait_gpu) {
		void *sync_obj;

		/*
		 * Take a reference to the fence and unreserve,
		 * at this point the buffer should be dead, so
		 * no new sync objects can be attached.
		 */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);

		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);

		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
		driver->sync_obj_unref(&sync_obj);
		if (ret)
			return ret;

		/*
		 * remove sync_obj with ttm_bo_wait, the wait should be
		 * finished, and no new wait object should have been added.
		 */
		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, false, true);
		WARN_ON(ret);
		spin_unlock(&bdev->fence_lock);
		if (ret)
			return ret;

		spin_lock(&glob->lru_lock);
		ret = ttm_bo_reserve_nolru(bo, false, true, false, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
	} else
		spin_unlock(&bdev->fence_lock);

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs_and_unlock on
 * all encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = ttm_bo_reserve_nolru(entry, false, true, false, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_bo_reserve_nolru(entry, false, false,
						   false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

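/*
 * Final kref release: tear down the bo's device address-space node and
 * io-reserved VM area, then clean up the remaining state immediately or
 * queue it for delayed destruction.
 */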
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	write_lock(&bdev->vm_lock);
	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

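/*
 * Evict a reserved, idle bo: ask the driver for eviction placements,
 * find space there and move the buffer out of its current memory type.
 */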
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	BUG_ON(!ttm_bo_is_reserved(bo));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

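/*
 * Evict the first buffer on @mem_type's LRU list that can be reserved
 * without blocking. Buffers already on the delayed-destroy list are
 * retired through ttm_bo_cleanup_refs_and_unlock() instead.
 */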
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY, put_count;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(bo, &man->lru, lru) {
		ret = ttm_bo_reserve_nolru(bo, false, true, false, NULL);
		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  struct ttm_placement *placement,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/*
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

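/*
 * Check whether memory type @mem_type can back @proposed_placement, and
 * if so return through @masked_placement the type flag combined with
 * the caching bits the manager actually supports.
 */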
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
					      &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       mem_type,
					       placement->placement[i],
					       &cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
					      &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
					  mem_type,
					  placement->busy_placement[i],
					  &cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       struct ttm_placement *placement,
		       bool interruptible,
		       bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(!ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

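/*
 * Return the index of the first placement entry compatible with the
 * bo's current memory type, caching and page range, or -1 if the
 * buffer needs to be moved.
 */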
static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

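/*
 * Validate the reserved bo against @placement: move the buffer if its
 * current placement is incompatible, and make sure system-memory
 * buffers have a TTM to back them.
 */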
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	BUG_ON(!ttm_bo_is_reserved(bo));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
		    (placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement)
{
	BUG_ON((placement->fpfn || placement->lpfn) &&
	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

	return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
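
/*
 * Example (editorial sketch): drivers normally embed the ttm_buffer_object
 * in a larger driver structure and pass a destroy callback that frees the
 * enclosing object once TTM drops the last reference. "struct my_bo" and
 * my_bo_destroy() are hypothetical names used for illustration only.
 *
 *	struct my_bo {
 *		struct ttm_buffer_object tbo;
 *	};
 *
 *	static void my_bo_destroy(struct ttm_buffer_object *tbo)
 *	{
 *		kfree(container_of(tbo, struct my_bo, tbo));
 *	}
 *
 *	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
 *	ret = ttm_bo_init(bdev, &mybo->tbo, size, ttm_bo_type_device,
 *			  &placement, 0, true, NULL, acc_size, NULL,
 *			  my_bo_destroy);
 */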

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);
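
/*
 * Worked example (editorial, assuming 4 KiB pages and 8-byte pointers):
 * for a 1 MiB buffer, npages = 256, so the accounted size is
 * ttm_round_pot(struct_size) + PAGE_ALIGN(256 * 8), i.e. struct_size
 * rounded up to a power of two, plus 4096 bytes for the page pointer
 * array, plus ttm_round_pot(sizeof(struct ttm_tt)) for the backend.
 */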

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct file *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
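
/*
 * Example (editorial sketch): creating a standalone, CPU-cached
 * system-memory object of 1 MiB. The placement below is an illustrative
 * assumption.
 *
 *	uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &flags,
 *		.num_busy_placement = 1,
 *		.busy_placement = &flags,
 *	};
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = ttm_bo_create(bdev, 1024 * 1024, ttm_bo_type_kernel,
 *			    &placement, 0, true, NULL, &bo);
 */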

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %u\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
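
/*
 * Editorial usage note: drivers commonly call ttm_bo_evict_mm() at
 * suspend time to move everything out of device memory before it loses
 * power, e.g. (illustrative error handling):
 *
 *	ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
 *	if (ret)
 *		DRM_ERROR("Failed evicting VRAM\n");
 */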

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
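
/*
 * Example (editorial sketch): after ttm_bo_device_init() has created the
 * mandatory TTM_PL_SYSTEM manager, a driver sizes its device-memory heap
 * in pages. "vram_size" is a hypothetical driver variable.
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 *	if (ret) {
 *		DRM_ERROR("Failed initializing VRAM heap\n");
 *		return ret;
 *	}
 */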

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %u is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	spin_lock_init(&bdev->fence_lock);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
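
/*
 * Example (editorial sketch): typical bring-up order in a driver. The
 * bo_global reference must already exist, and DRM_FILE_PAGE_OFFSET is a
 * driver-chosen constant marking where buffer-object mmap offsets start;
 * both "drv" and that constant are assumptions for illustration.
 *
 *	ret = ttm_bo_device_init(&drv->bdev, drv->bo_global_ref.ref.object,
 *				 &my_bo_driver, DRM_FILE_PAGE_OFFSET,
 *				 drv->need_dma32);
 *	if (ret)
 *		DRM_ERROR("failed initializing buffer object driver (%d)\n",
 *			  ret);
 */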

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;
	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device and ttm_bo_type_sg objects, as
 * others are not placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}
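
/*
 * Editorial note: the addr_space_offset assigned above is what user space
 * passes as the mmap() offset for this object, so a hypothetical mapping
 * from a process would look like:
 *
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, addr_space_offset);
 */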

int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {
		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
			return ret;
		}
		spin_lock(&bdev->fence_lock);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
		} else {
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
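
/*
 * Example (editorial sketch): ttm_bo_wait() expects bdev->fence_lock to
 * be held on entry, as every caller in this file demonstrates. A typical
 * interruptible wait-for-idle therefore looks like:
 *
 *	spin_lock(&bdev->fence_lock);
 *	ret = ttm_bo_wait(bo, false, true, false);
 *	spin_unlock(&bdev->fence_lock);
 */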

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bdev->fence_lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
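
/*
 * Example (editorial sketch): bracketing a CPU write with the synccpu
 * calls above. ttm_bo_kmap()/ttm_bo_kunmap() live in ttm_bo_util.c; their
 * use here, and "data"/"len", are illustrative assumptions.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret;
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *	if (likely(ret == 0)) {
 *		memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), data, len);
 *		ttm_bo_kunmap(&map);
 *	}
 *	ttm_bo_synccpu_write_release(bo);
 */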
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001860
1861/**
1862 * A buffer object shrink method that tries to swap out the first
1863 * buffer object on the bo_global::swap_lru list.
1864 */
1865
1866static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1867{
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001868 struct ttm_bo_global *glob =
1869 container_of(shrink, struct ttm_bo_global, shrink);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001870 struct ttm_buffer_object *bo;
1871 int ret = -EBUSY;
1872 int put_count;
1873 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1874
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001875 spin_lock(&glob->lru_lock);
Maarten Lankhorst2b7b3ad2012-11-28 11:25:42 +00001876 list_for_each_entry(bo, &glob->swap_lru, swap) {
Maarten Lankhorst63d0a412013-01-15 14:56:37 +01001877 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
Maarten Lankhorst2b7b3ad2012-11-28 11:25:42 +00001878 if (!ret)
1879 break;
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001880 }
1881
Maarten Lankhorst2b7b3ad2012-11-28 11:25:42 +00001882 if (ret) {
1883 spin_unlock(&glob->lru_lock);
1884 return ret;
1885 }
1886
1887 kref_get(&bo->list_kref);
1888
1889 if (!list_empty(&bo->ddestroy)) {
1890 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
1891 kref_put(&bo->list_kref, ttm_bo_release_list);
1892 return ret;
1893 }
1894
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001895 put_count = ttm_bo_del_from_lru(bo);
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001896 spin_unlock(&glob->lru_lock);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001897
Dave Airlied6ea8882010-11-22 13:24:40 +10001898 ttm_bo_list_ref_sub(bo, put_count, true);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001899
1900 /**
1901 * Wait for GPU, then move to system cached.
1902 */
1903
Thomas Hellstrom702adba2010-11-17 12:28:29 +00001904 spin_lock(&bo->bdev->fence_lock);
Dave Airlie1717c0e2011-10-27 18:28:37 +02001905 ret = ttm_bo_wait(bo, false, false, false);
Thomas Hellstrom702adba2010-11-17 12:28:29 +00001906 spin_unlock(&bo->bdev->fence_lock);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001907
1908 if (unlikely(ret != 0))
1909 goto out;
1910
1911 if ((bo->mem.placement & swap_placement) != swap_placement) {
1912 struct ttm_mem_reg evict_mem;
1913
1914 evict_mem = bo->mem;
1915 evict_mem.mm_node = NULL;
1916 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1917 evict_mem.mem_type = TTM_PL_SYSTEM;
1918
1919 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
Maarten Lankhorst97a875c2012-11-28 11:25:44 +00001920 false, false);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001921 if (unlikely(ret != 0))
1922 goto out;
1923 }
1924
1925 ttm_bo_unmap_virtual(bo);
1926
1927 /**
1928 * Swap out. Buffer will be swapped in again as soon as
1929 * anyone tries to access a ttm page.
1930 */
1931
Thomas Hellstrom3f09ea42010-01-13 22:28:40 +01001932 if (bo->bdev->driver->swap_notify)
1933 bo->bdev->driver->swap_notify(bo);
1934
Jan Engelhardt5df23972011-04-04 01:25:18 +02001935 ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001936out:
1937
1938 /**
1939 *
1940 * Unreserve without putting on LRU to avoid swapping out an
1941 * already swapped buffer.
1942 */
1943
1944 atomic_set(&bo->reserved, 0);
1945 wake_up_all(&bo->event_queue);
1946 kref_put(&bo->list_kref, ttm_bo_release_list);
1947 return ret;
1948}
1949
1950void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1951{
Thomas Hellstroma987fca2009-08-18 16:51:56 +02001952 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
Thomas Hellstromba4e7d92009-06-10 15:20:19 +02001953 ;
1954}
Thomas Hellstrome99e1e72010-01-13 22:28:42 +01001955EXPORT_SYMBOL(ttm_bo_swapout_all);