/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>

45#define TTM_ASSERT_LOCKED(param)
46#define TTM_DEBUG(fmt, arg...)
47#define TTM_BO_HASH_ORDER 13
48
Thomas Hellstromba4e7d92009-06-10 15:20:19 +020049static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
Thomas Hellstroma987fca2009-08-18 16:51:56 +020050static void ttm_bo_global_kobj_release(struct kobject *kobj);
51
52static struct attribute ttm_bo_count = {
53 .name = "bo_count",
54 .mode = S_IRUGO
55};
56
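/*
 * Translate a ttm_place's memory-type flags into a memory type index:
 * the position of the lowest set bit within TTM_PL_MASK_MEM.
 */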
static inline int ttm_mem_type_from_place(const struct ttm_place *place,
					  uint32_t *mem_type)
{
	int pos;

	pos = ffs(place->flags & TTM_PL_MASK_MEM);
	if (unlikely(!pos))
		return -EINVAL;

	*mem_type = pos - 1;
	return 0;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct drm_printer p = drm_debug_printer(TTM_PFX);

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08llX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, &p);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_place(&placement->placement[i],
					      &mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i].flags, mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

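/*
 * Final release of a buffer object, called when the last list_kref is
 * dropped: destroys the ttm, drops the embedded reservation object if
 * used, frees the object and returns its accounting size.
 */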
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(kref_read(&bo->list_kref));
	BUG_ON(kref_read(&bo->kref));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));
	ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	dma_fence_put(bo->moving);
	if (bo->resv == &bo->ttm_resv)
		reservation_object_fini(&bo->ttm_resv);
	mutex_destroy(&bo->wu_mutex);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	lockdep_assert_held(&bo->resv->lock.base);

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru[bo->priority]);
		kref_get(&bo->list_kref);

		if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
			list_add_tail(&bo->swap,
				      &bo->glob->swap_lru[bo->priority]);
			kref_get(&bo->list_kref);
		}
	}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

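/*
 * Remove the buffer object from the LRU and swap lists, dropping the
 * references those lists held.
 */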
void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		kref_put(&bo->list_kref, ttm_bo_ref_bug);
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRUs here.
	 */
}

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->glob->lru_lock);
	ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	lockdep_assert_held(&bo->resv->lock.base);

	ttm_bo_del_from_lru(bo);
	ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);

/*
 * Call with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
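		/* fall through */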
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

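/*
 * Move a buffer to the memory region described by @mem: unmap any CPU
 * mappings that would become stale, create/bind a ttm when the new
 * memory type needs one, then dispatch to ttm_bo_move_ttm(), the
 * driver's move callback, or ttm_bo_move_memcpy().
 */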
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, evict, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, evict, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, false, mem);
			bo->mem = *mem;
			*mem = tmp_mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		if (bdev->driver->invalidate_caches) {
			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
			if (ret)
				pr_err("Can not flush read caches\n");
		}
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Called with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, false, NULL);

	ttm_tt_destroy(bo->ttm);
	bo->ttm = NULL;
	ttm_bo_mem_put(bo, &bo->mem);

	ww_mutex_unlock(&bo->resv->lock);
}

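/*
 * Give the buffer object its own copy of the current fences in its
 * embedded reservation object, so that delayed destruction can wait on
 * them even when bo->resv is shared with other objects.
 */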
static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->resv == &bo->ttm_resv)
		return 0;

	reservation_object_init(&bo->ttm_resv);
	BUG_ON(!reservation_object_trylock(&bo->ttm_resv));

	r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
	if (r) {
		reservation_object_unlock(&bo->ttm_resv);
		reservation_object_fini(&bo->ttm_resv);
	}

	return r;
}

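/*
 * Kick off software signaling on the exclusive fence and on all shared
 * fences of the individualized reservation object, so that idle
 * detection in the cleanup paths makes progress.
 */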
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i;

	fobj = reservation_object_get_list(&bo->ttm_resv);
	fence = reservation_object_get_excl(&bo->ttm_resv);
	if (fence && !fence->ops->signaled)
		dma_fence_enable_sw_signaling(fence);

	for (i = 0; fobj && i < fobj->shared_count; ++i) {
		fence = rcu_dereference_protected(fobj->shared[i],
					reservation_object_held(bo->resv));

		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
}

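/*
 * Either destroy the buffer object immediately if it is idle and can
 * be reserved without blocking, or queue it on the delayed-destroy
 * list and schedule the cleanup workqueue.
 */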
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = __ttm_bo_reserve(bo, false, true, NULL);

	if (!ret) {
		if (!ttm_bo_wait(bo, false, true)) {
			ttm_bo_del_from_lru(bo);
			spin_unlock(&glob->lru_lock);
			ttm_bo_cleanup_memtype_use(bo);

			return;
		}

		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort, if we fail to allocate memory for the
			 * fences, block for the BO to become idle and free it.
			 */
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait(bo, true, true);
			ttm_bo_cleanup_memtype_use(bo);
			return;
		}
		ttm_bo_flush_all_fences(bo);

		/*
		 * Make NO_EVICT bos immediately available to
		 * shrinkers, now that they are queued for
		 * destruction.
		 */
		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
			ttm_bo_add_to_lru(bo);
		}

		if (bo->resv != &bo->ttm_resv)
			reservation_object_unlock(&bo->ttm_resv);
		__ttm_bo_unreserve(bo);
	}

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);

	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs_and_unlock
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * Must be called with lru_lock and reservation held, this function
 * will drop both before returning.
 *
 * @interruptible	Any sleeps should occur interruptibly.
 * @no_wait_gpu		Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
					  bool interruptible,
					  bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	struct reservation_object *resv;
	int ret;

	if (unlikely(list_empty(&bo->ddestroy)))
		resv = bo->resv;
	else
		resv = &bo->ttm_resv;

	if (reservation_object_test_signaled_rcu(resv, true))
		ret = 0;
	else
		ret = -EBUSY;

	if (ret && !no_wait_gpu) {
		long lret;
		ww_mutex_unlock(&bo->resv->lock);
		spin_unlock(&glob->lru_lock);

		lret = reservation_object_wait_timeout_rcu(resv, true,
							   interruptible,
							   30 * HZ);

		if (lret < 0)
			return lret;
		else if (lret == 0)
			return -EBUSY;

		spin_lock(&glob->lru_lock);
		ret = __ttm_bo_reserve(bo, false, true, NULL);

		/*
		 * We raced, and lost, someone else holds the reservation now,
		 * and is probably busy in ttm_bo_cleanup_memtype_use.
		 *
		 * Even if it's not the case, because we finished waiting any
		 * delayed destruction would succeed, so just return success
		 * here.
		 */
		if (ret) {
			spin_unlock(&glob->lru_lock);
			return 0;
		}
	}

	if (ret || unlikely(list_empty(&bo->ddestroy))) {
		__ttm_bo_unreserve(bo);
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	kref_put(&bo->list_kref, ttm_bo_ref_bug);

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		ret = __ttm_bo_reserve(entry, false, true, NULL);
		if (remove_all && ret) {
			spin_unlock(&glob->lru_lock);
			ret = __ttm_bo_reserve(entry, false, false, NULL);
			spin_lock(&glob->lru_lock);
		}

		if (!ret)
			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
							     !remove_all);
		else
			spin_unlock(&glob->lru_lock);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

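/*
 * Work item that retries ttm_bo_delayed_delete() periodically until
 * the delayed-destroy list has been fully drained.
 */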
static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

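/*
 * Evict a reserved buffer object: ask the driver for acceptable
 * eviction placements and move the buffer there.
 */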
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	lockdep_assert_held(&bo->resv->lock.base);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
			       no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_gpu);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
	    (place->lpfn && place->lpfn <= bo->mem.start))
		return false;

	return true;
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);

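/*
 * Walk the LRU lists of @mem_type in priority order and evict the
 * first buffer object that can be reserved without blocking and that
 * the driver considers worth evicting for @place.
 */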
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
			       uint32_t mem_type,
			       const struct ttm_place *place,
			       bool interruptible,
			       bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	unsigned i;

	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		list_for_each_entry(bo, &man->lru[i], lru) {
			ret = __ttm_bo_reserve(bo, false, true, NULL);
			if (ret)
				continue;

			if (place && !bdev->driver->eviction_valuable(bo,
								      place)) {
				__ttm_bo_unreserve(bo);
				ret = -EBUSY;
				continue;
			}

			break;
		}

		if (!ret)
			break;
	}

	if (ret) {
		spin_unlock(&glob->lru_lock);
		return ret;
	}

	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
						     no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);
		return ret;
	}

	ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Add the last move fence to the BO and reserve a new shared slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_mem_type_manager *man,
				 struct ttm_mem_reg *mem)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (fence) {
		reservation_object_add_shared_fence(bo->resv, fence);

		ret = reservation_object_reserve_shared(bo->resv);
		if (unlikely(ret))
			return ret;

		dma_fence_put(bo->moving);
		bo->moving = fence;
	}

	return 0;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
				  uint32_t mem_type,
				  const struct ttm_place *place,
				  struct ttm_mem_reg *mem,
				  bool interruptible,
				  bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, place,
					  interruptible, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	mem->mem_type = mem_type;
	return ttm_bo_add_move_fence(bo, man, mem);
}

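/*
 * Choose caching flags for a new placement: prefer the buffer's
 * current caching, then the manager's default, then cached, WC and
 * uncached in that order, among what the proposed placement allows.
 */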
static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/*
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

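/*
 * Check whether a memory type manager can satisfy @place, and compute
 * the placement flags that would result.
 */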
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 const struct ttm_place *place,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((place->flags & man->available_caching) == 0)
		return false;

	cur_flags |= (place->flags & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible,
		     bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	ret = reservation_object_reserve_shared(bo->resv);
	if (unlikely(ret))
		return ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;

		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
					       &cur_flags);

		if (!type_ok)
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		ret = (*man->func->get_node)(man, bo, place, mem);
		if (unlikely(ret))
			return ret;

		if (mem->mm_node) {
			ret = ttm_bo_add_move_fence(bo, man, mem);
			if (unlikely(ret)) {
				(*man->func->put_node)(man, mem);
				return ret;
			}
			break;
		}
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	for (i = 0; i < placement->num_busy_placement; ++i) {
		const struct ttm_place *place = &placement->busy_placement[i];

		ret = ttm_mem_type_from_place(place, &mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type || !man->use_type)
			continue;
		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
			continue;

		type_found = true;
		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, place->flags,
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
					     interruptible, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}

	if (!type_found) {
		pr_err(TTM_PFX "No compatible memory type found\n");
		return -EINVAL;
	}

	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

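/*
 * Find space for a buffer according to @placement and move it there.
 */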
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement,
			      bool interruptible,
			      bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;

	lockdep_assert_held(&bo->resv->lock.base);

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem,
			       interruptible, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false,
				     interruptible, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

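/*
 * Check whether the buffer's current memory region already satisfies
 * one of the given places, both in page range and in placement flags.
 */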
static bool ttm_bo_places_compat(const struct ttm_place *places,
				 unsigned num_placement,
				 struct ttm_mem_reg *mem,
				 uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < num_placement; i++) {
		const struct ttm_place *heap = &places[i];

		if (mem->mm_node && (mem->start < heap->fpfn ||
		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
			continue;

		*new_flags = heap->flags;
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
		    (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
		     (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
			return true;
	}
	return false;
}

bool ttm_bo_mem_compat(struct ttm_placement *placement,
		       struct ttm_mem_reg *mem,
		       uint32_t *new_flags)
{
	if (ttm_bo_places_compat(placement->placement, placement->num_placement,
				 mem, new_flags))
		return true;

	if ((placement->busy_placement != placement->placement ||
	     placement->num_busy_placement > placement->num_placement) &&
	    ttm_bo_places_compat(placement->busy_placement,
				 placement->num_busy_placement,
				 mem, new_flags))
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);

int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    bool interruptible,
		    bool no_wait_gpu)
{
	int ret;
	uint32_t new_flags;

	lockdep_assert_held(&bo->resv->lock.base);
	/*
	 * Check whether we need to move buffer.
	 */
	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible,
					 no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

Nicolai Hähnleca9cf68d2017-02-16 10:56:40 +01001137int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
1138 struct ttm_buffer_object *bo,
1139 unsigned long size,
1140 enum ttm_bo_type type,
1141 struct ttm_placement *placement,
1142 uint32_t page_alignment,
1143 bool interruptible,
1144 struct file *persistent_swap_storage,
1145 size_t acc_size,
                         struct sg_table *sg,
                         struct reservation_object *resv,
                         void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        bool locked;

        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (ret) {
                pr_err("Out of kernel memory\n");
                if (destroy)
                        (*destroy)(bo);
                else
                        kfree(bo);
                return -ENOMEM;
        }

        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                pr_err("Illegal buffer object size\n");
                if (destroy)
                        (*destroy)(bo);
                else
                        kfree(bo);
                ttm_mem_global_free(mem_glob, acc_size);
                return -EINVAL;
        }
        bo->destroy = destroy;

        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        INIT_LIST_HEAD(&bo->io_reserve_lru);
        mutex_init(&bo->wu_mutex);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.size = num_pages << PAGE_SHIFT;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved_vm = false;
        bo->mem.bus.io_reserved_count = 0;
        bo->moving = NULL;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->persistent_swap_storage = persistent_swap_storage;
        bo->acc_size = acc_size;
        bo->sg = sg;
        if (resv) {
                bo->resv = resv;
                lockdep_assert_held(&bo->resv->lock.base);
        } else {
                bo->resv = &bo->ttm_resv;
                reservation_object_init(&bo->ttm_resv);
        }
        atomic_inc(&bo->glob->bo_count);
        drm_vma_node_reset(&bo->vma_node);
        bo->priority = 0;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device ||
            bo->type == ttm_bo_type_sg)
                ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
                                         bo->mem.num_pages);

        /* passed reservation objects should already be locked,
         * since otherwise lockdep will be angered in radeon.
         */
        if (!resv) {
                locked = ww_mutex_trylock(&bo->resv->lock);
                WARN_ON(!locked);
        }

        if (likely(!ret))
                ret = ttm_bo_validate(bo, placement, interruptible, false);

        if (unlikely(ret)) {
                if (!resv)
                        ttm_bo_unreserve(bo);

                ttm_bo_unref(&bo);
                return ret;
        }

        if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
                spin_lock(&bo->glob->lru_lock);
                ttm_bo_add_to_lru(bo);
                spin_unlock(&bo->glob->lru_lock);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
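
/*
 * Example (illustrative sketch, not part of TTM): a driver that needs
 * the BO still reserved after creation -- e.g. to fill or pin it before
 * publishing it -- can call ttm_bo_init_reserved() directly. The
 * placement and the "mydrv_"/"mbo" names below are hypothetical.
 *
 *      static const struct ttm_place sys_place = {
 *              .fpfn = 0, .lpfn = 0,
 *              .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 *      };
 *      static const struct ttm_placement sys_placement = {
 *              .num_placement = 1, .placement = &sys_place,
 *              .num_busy_placement = 1, .busy_placement = &sys_place,
 *      };
 *
 *      acc_size = ttm_bo_acc_size(bdev, size, sizeof(*mbo));
 *      ret = ttm_bo_init_reserved(bdev, &mbo->tbo, size,
 *                                 ttm_bo_type_device, &sys_placement, 0,
 *                                 true, NULL, acc_size, NULL, NULL,
 *                                 mydrv_bo_destroy);
 *      if (!ret) {
 *              mydrv_setup_while_reserved(mbo);
 *              ttm_bo_unreserve(&mbo->tbo);
 *      }
 */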

int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                bool interruptible,
                struct file *persistent_swap_storage,
                size_t acc_size,
                struct sg_table *sg,
                struct reservation_object *resv,
                void (*destroy) (struct ttm_buffer_object *))
{
        int ret;

        ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
                                   page_alignment, interruptible,
                                   persistent_swap_storage, acc_size,
                                   sg, resv, destroy);
        if (ret)
                return ret;

        if (!resv)
                ttm_bo_unreserve(bo);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init);
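
/*
 * Most drivers embed struct ttm_buffer_object in their own BO type and
 * go through ttm_bo_init(), which is ttm_bo_init_reserved() plus the
 * final unreserve for the common case. A minimal, hypothetical sketch:
 *
 *      struct mydrv_bo {
 *              struct ttm_buffer_object tbo;
 *              ... driver-private state ...
 *      };
 *
 *      mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *      ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_kernel,
 *                        &sys_placement, 0, false, NULL,
 *                        ttm_bo_acc_size(bdev, size, sizeof(*mbo)),
 *                        NULL, NULL, mydrv_bo_destroy);
 *
 * Note that on failure the object has already been released through the
 * destroy callback (or kfree), so the caller must not free it again.
 */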

size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
                       unsigned long bo_size,
                       unsigned struct_size)
{
        unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
        size_t size = 0;

        size += ttm_round_pot(struct_size);
        size += ttm_round_pot(npages * sizeof(void *));
        size += ttm_round_pot(sizeof(struct ttm_tt));
        return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);
1292
1293size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1294 unsigned long bo_size,
1295 unsigned struct_size)
1296{
1297 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1298 size_t size = 0;
1299
1300 size += ttm_round_pot(struct_size);
Felix Kuehling85621632016-04-07 21:42:17 -04001301 size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
Jerome Glisse57de4ba2011-11-11 15:42:57 -05001302 size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1303 return size;
1304}
1305EXPORT_SYMBOL(ttm_bo_dma_acc_size);
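
/*
 * Worked example (assuming PAGE_SIZE == 4096): for a 1 MiB BO embedded
 * in a 512-byte driver structure, ttm_bo_acc_size() charges
 *
 *      ttm_round_pot(512)                      driver structure
 *      + ttm_round_pot(256 * sizeof(void *))   one pointer per page
 *      + ttm_round_pot(sizeof(struct ttm_tt))  TT bookkeeping
 *
 * against the memory accounting before any backing page is allocated
 * (1 MiB / 4 KiB = 256 pages). ttm_bo_dma_acc_size() additionally
 * reserves room for the per-page DMA addresses used by struct
 * ttm_dma_tt.
 */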

int ttm_bo_create(struct ttm_bo_device *bdev,
                  unsigned long size,
                  enum ttm_bo_type type,
                  struct ttm_placement *placement,
                  uint32_t page_alignment,
                  bool interruptible,
                  struct file *persistent_swap_storage,
                  struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        size_t acc_size;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (unlikely(bo == NULL))
                return -ENOMEM;

        acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                          interruptible, persistent_swap_storage, acc_size,
                          NULL, NULL, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
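
/*
 * Example (hypothetical): ttm_bo_create() is the convenience path for
 * callers that want a bare, kernel-internal BO with no custom destroy
 * callback; sys_placement is assumed to be defined as above.
 *
 *      struct ttm_buffer_object *bo;
 *
 *      ret = ttm_bo_create(bdev, 64 * 1024, ttm_bo_type_kernel,
 *                          &sys_placement, 0, false, NULL, &bo);
 *
 * The caller then owns exactly one reference and drops it with
 * ttm_bo_unref(&bo) when done.
 */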

/*
 * Empty one memory type: evict every BO on its per-priority LRU lists,
 * then wait out the manager's pending move fence so no copy engine is
 * still using the space when the caller tears the type down.
 */
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                   unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
        struct dma_fence *fence;
        int ret;
        unsigned i;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&glob->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                while (!list_empty(&man->lru[i])) {
                        spin_unlock(&glob->lru_lock);
                        ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
                        if (ret)
                                return ret;
                        spin_lock(&glob->lru_lock);
                }
        }
        spin_unlock(&glob->lru_lock);

        spin_lock(&man->move_lock);
        fence = dma_fence_get(man->move);
        spin_unlock(&man->move_lock);

        if (fence) {
                ret = dma_fence_wait(fence, false);
                dma_fence_put(fence);
                if (ret)
                        return ret;
        }

        return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man;
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                pr_err("Illegal memory type %d\n", mem_type);
                return ret;
        }
        man = &bdev->man[mem_type];

        if (!man->has_type) {
                pr_err("Trying to take down uninitialized memory manager type %u\n",
                       mem_type);
                return ret;
        }

        man->use_type = false;
        man->has_type = false;

        ret = 0;
        if (mem_type > 0) {
                ret = ttm_bo_force_list_clean(bdev, mem_type);
                if (ret) {
                        pr_err("Cleanup eviction failed\n");
                        return ret;
                }

                ret = (*man->func->takedown)(man);
        }

        dma_fence_put(man->move);
        man->move = NULL;

        return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
                pr_err("Illegal memory manager memory type %u\n", mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                pr_err("Memory type %u has not been initialized\n", mem_type);
                return 0;
        }

        return ttm_bo_force_list_clean(bdev, mem_type);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
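
/*
 * Example (hypothetical suspend path): drivers typically evict all
 * buffers from VRAM before powering the device down, keeping the
 * memory type itself initialized so it can be refilled on resume:
 *
 *      ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
 *      if (ret)
 *              DRM_ERROR("failed to evict VRAM before suspend\n");
 *
 * ttm_bo_clean_mm(), by contrast, also marks the type unused and calls
 * the manager's takedown hook, which is the driver-unload path.
 */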

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                   unsigned long p_size)
{
        int ret;
        struct ttm_mem_type_manager *man;
        unsigned i;

        BUG_ON(type >= TTM_NUM_MEM_TYPES);
        man = &bdev->man[type];
        BUG_ON(man->has_type);
        man->io_reserve_fastpath = true;
        man->use_io_reserve_lru = false;
        mutex_init(&man->io_reserve_mutex);
        spin_lock_init(&man->move_lock);
        INIT_LIST_HEAD(&man->io_reserve_lru);

        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;
        man->bdev = bdev;

        if (type != TTM_PL_SYSTEM) {
                ret = (*man->func->init)(man, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = true;
        man->use_type = true;
        man->size = p_size;

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                INIT_LIST_HEAD(&man->lru[i]);
        man->move = NULL;

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
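
/*
 * Example (hypothetical driver load path): TTM sets up TTM_PL_SYSTEM by
 * itself in ttm_bo_device_init(); every other domain is registered by
 * the driver, with sizes given in pages:
 *
 *      ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 *      if (ret)
 *              return ret;
 *      ret = ttm_bo_init_mm(bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
 *
 * The driver's init_mem_type() callback is expected to fill in the
 * manager's func table, e.g. &ttm_bo_manager_func for a simple range
 * allocator; vram_size and gtt_size above are assumed driver values.
 */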

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
        __free_page(glob->dummy_read_page);
        kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
        struct ttm_bo_global *glob = ref->object;

        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
        struct ttm_bo_global_ref *bo_ref =
                container_of(ref, struct ttm_bo_global_ref, ref);
        struct ttm_bo_global *glob = ref->object;
        int ret;
        unsigned i;

        mutex_init(&glob->device_list_mutex);
        spin_lock_init(&glob->lru_lock);
        glob->mem_glob = bo_ref->mem_glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out_no_drp;
        }

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                INIT_LIST_HEAD(&glob->swap_lru[i]);
        INIT_LIST_HEAD(&glob->device_list);

        ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
        ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
        if (unlikely(ret != 0)) {
                pr_err("Could not register buffer object swapout\n");
                goto out_no_shrink;
        }

        atomic_set(&glob->bo_count, 0);

        ret = kobject_init_and_add(
                &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
        if (unlikely(ret != 0))
                kobject_put(&glob->kobj);
        return ret;
out_no_shrink:
        __free_page(glob->dummy_read_page);
out_no_drp:
        kfree(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
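
/*
 * Drivers do not call ttm_bo_global_init/_release directly; they are
 * hooked up through the drm_global machinery so that one ttm_bo_global
 * is shared by all devices. A sketch of the usual pattern:
 *
 *      struct ttm_bo_global_ref *ref = &mydrv->bo_global_ref;
 *
 *      ref->mem_glob = mem_global_ref.object;
 *      ref->ref.global_type = DRM_GLOBAL_TTM_BO;
 *      ref->ref.size = sizeof(struct ttm_bo_global_ref);
 *      ref->ref.init = &ttm_bo_global_init;
 *      ref->ref.release = &ttm_bo_global_release;
 *      ret = drm_global_item_ref(&ref->ref);
 *
 * where mem_global_ref is the previously acquired DRM_GLOBAL_TTM_MEM
 * item and "mydrv" is a hypothetical driver device structure.
 */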

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
        int ret = 0;
        unsigned i = TTM_NUM_MEM_TYPES;
        struct ttm_mem_type_manager *man;
        struct ttm_bo_global *glob = bdev->glob;

        while (i--) {
                man = &bdev->man[i];
                if (man->has_type) {
                        man->use_type = false;
                        if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
                                ret = -EBUSY;
                                pr_err("DRM memory manager type %d is not clean\n",
                                       i);
                        }
                        man->has_type = false;
                }
        }

        mutex_lock(&glob->device_list_mutex);
        list_del(&bdev->device_list);
        mutex_unlock(&glob->device_list_mutex);

        cancel_delayed_work_sync(&bdev->wq);

        while (ttm_bo_delayed_delete(bdev, true))
                ;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                TTM_DEBUG("Delayed destroy list was clean\n");

        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                if (list_empty(&bdev->man[0].lru[i]))
                        TTM_DEBUG("Swap list %d was clean\n", i);
        spin_unlock(&glob->lru_lock);

        drm_vma_offset_manager_destroy(&bdev->vma_manager);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
                       struct address_space *mapping,
                       uint64_t file_page_offset,
                       bool need_dma32)
{
        int ret = -EINVAL;

        bdev->driver = driver;

        memset(bdev->man, 0, sizeof(bdev->man));

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
        if (unlikely(ret != 0))
                goto out_no_sys;

        drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
                                    0x10000000);
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = mapping;
        bdev->glob = glob;
        bdev->need_dma32 = need_dma32;
        mutex_lock(&glob->device_list_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
        mutex_unlock(&glob->device_list_mutex);

        return 0;
out_no_sys:
        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
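
/*
 * Example (hypothetical): bringing up the BO device at driver load. The
 * file_page_offset argument is conventionally a per-driver
 * DRM_FILE_PAGE_OFFSET constant marking the start of the BO mmap range:
 *
 *      ret = ttm_bo_device_init(&mydrv->bdev,
 *                               mydrv->bo_global_ref.ref.object,
 *                               &mydrv_bo_driver,
 *                               drm_dev->anon_inode->i_mapping,
 *                               DRM_FILE_PAGE_OFFSET,
 *                               mydrv->need_dma32);
 *
 * mydrv_bo_driver is the driver's assumed struct ttm_bo_driver with
 * init_mem_type(), evict_flags() and friends filled in.
 */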

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (mem->mem_type == TTM_PL_SYSTEM)
                        return false;

                if (man->flags & TTM_MEMTYPE_FLAG_CMA)
                        return false;

                if (mem->placement & TTM_PL_FLAG_CACHED)
                        return false;
        }
        return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;

        drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
        ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

        ttm_mem_io_lock(man, false);
        ttm_bo_unmap_virtual_locked(bo);
        ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool interruptible, bool no_wait)
{
        long timeout = 15 * HZ;

        if (no_wait) {
                if (reservation_object_test_signaled_rcu(bo->resv, true))
                        return 0;
                else
                        return -EBUSY;
        }

        timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
                                                      interruptible, timeout);
        if (timeout < 0)
                return timeout;

        if (timeout == 0)
                return -EBUSY;

        reservation_object_add_excl_fence(bo->resv, NULL);
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
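
/*
 * Example (hypothetical): waiting for a BO to go idle before touching
 * it from the CPU. The BO is normally reserved by the caller; with
 * no_wait the call degrades to a poll that returns -EBUSY while fences
 * are still outstanding:
 *
 *      ret = ttm_bo_reserve(bo, true, false, NULL);
 *      if (ret)
 *              return ret;
 *      ret = ttm_bo_wait(bo, true, false);
 *      ttm_bo_unreserve(bo);
 */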

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = 0;

        /*
         * Using ttm_bo_reserve makes sure the lru lists are updated.
         */

        ret = ttm_bo_reserve(bo, true, no_wait, NULL);
        if (unlikely(ret != 0))
                return ret;
        ret = ttm_bo_wait(bo, true, no_wait);
        if (likely(ret == 0))
                atomic_inc(&bo->cpu_writers);
        ttm_bo_unreserve(bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
        atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
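
/*
 * The grab waits for the BO to become idle (or, with no_wait, polls and
 * returns -EBUSY) and then raises the cpu_writers count; release drops
 * it again. The calls may nest since it is a simple counter. A
 * hypothetical user bracketing a CPU write:
 *
 *      ret = ttm_bo_synccpu_write_grab(bo, false);
 *      if (ret)
 *              return ret;
 *      memcpy(vaddr, data, len);
 *      ttm_bo_synccpu_write_release(bo);
 *
 * vaddr is assumed to be an existing kernel mapping of the BO.
 */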

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
        struct ttm_bo_global *glob =
                container_of(shrink, struct ttm_bo_global, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        unsigned i;

        spin_lock(&glob->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
                list_for_each_entry(bo, &glob->swap_lru[i], swap) {
                        ret = __ttm_bo_reserve(bo, false, true, NULL);
                        if (!ret)
                                break;
                }
                if (!ret)
                        break;
        }

        if (ret) {
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        kref_get(&bo->list_kref);

        if (!list_empty(&bo->ddestroy)) {
                ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
                kref_put(&bo->list_kref, ttm_bo_release_list);
                return ret;
        }

        ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        /*
         * Move to system cached
         */
        if (bo->mem.mem_type != TTM_PL_SYSTEM ||
            bo->ttm->caching_state != tt_cached) {
                struct ttm_mem_reg evict_mem;

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
                evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
                evict_mem.mem_type = TTM_PL_SYSTEM;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
                                             false, false);
                if (unlikely(ret != 0))
                        goto out;
        }

        /*
         * Make sure BO is idle.
         */
        ret = ttm_bo_wait(bo, false, false);
        if (unlikely(ret != 0))
                goto out;

        ttm_bo_unmap_virtual(bo);

        /*
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */
        if (bo->bdev->driver->swap_notify)
                bo->bdev->driver->swap_notify(bo);

        ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

        /*
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */
        __ttm_bo_unreserve(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
        while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
                ;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);

/**
 * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
 * unreserved
 *
 * @bo: Pointer to buffer
 */
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
{
        int ret;

        /*
         * In the absence of a wait_unlocked API, use the bo::wu_mutex
         * to avoid triggering livelocks due to concurrent use of this
         * function. Note that this use of bo::wu_mutex can go away if
         * we change the locking order to mmap_sem -> bo::reserve.
         */
        ret = mutex_lock_interruptible(&bo->wu_mutex);
        if (unlikely(ret != 0))
                return -ERESTARTSYS;
        if (!ww_mutex_is_locked(&bo->resv->lock))
                goto out_unlock;
        ret = __ttm_bo_reserve(bo, true, false, NULL);
        if (unlikely(ret != 0))
                goto out_unlock;
        __ttm_bo_unreserve(bo);

out_unlock:
        mutex_unlock(&bo->wu_mutex);
        return ret;
}
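
/*
 * A sketch of the intended caller (modelled on the TTM fault path): on
 * reservation contention the fault handler can drop mmap_sem, sleep
 * here until the reservation holder is done, and ask for the fault to
 * be retried:
 *
 *      if (ttm_bo_reserve(bo, false, true, NULL) == -EBUSY) {
 *              up_read(&vma->vm_mm->mmap_sem);
 *              (void) ttm_bo_wait_unreserved(bo);
 *              return VM_FAULT_RETRY;
 *      }
 */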