/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gt/intel_engine.h"

#include "i915_vma.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long entries[12];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
	};
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	depot_fetch_stack(vma->node.stack, &trace);
	snprint_stack_trace(buf, sizeof(buf), &trace, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

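/*
 * Move the backing object to the tail of the bound list so that it is
 * treated as most-recently-used and becomes the last candidate for eviction.
 */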
static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	spin_lock(&i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &i915->mm.bound_list);
	spin_unlock(&i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid */
}

static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), active);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/*
	 * Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	obj_bump_mru(obj);

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

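/*
 * Allocate and initialise a new vma for (obj, vm, view), inserting it into
 * the object's vma rb-tree and the vm's unbound list. If another thread
 * races us and creates a matching vma first, the older instance is returned
 * and our copy is freed.
 */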
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
	INIT_ACTIVE_REQUEST(&vma->last_fence);

	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	mutex_lock(&vm->mutex);
	list_add(&vma->vm_link, &vm->unbound_list);
	mutex_unlock(&vm->mutex);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

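/*
 * Search the object's vma rb-tree (the caller holds obj->vma.lock) for a
 * vma matching the (vm, view) pair; returns NULL if none exists yet.
 */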
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
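
/*
 * A minimal usage sketch (illustrative only; error handling trimmed and the
 * object/address-space names are placeholders): look up the singleton vma
 * and pin it before use, then drop the pin when finished.
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_unpin(vma);
 */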

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	i915_gem_flush_ggtt_writes(vma->vm->i915);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
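
/*
 * Illustrative pairing of the iomap helpers above (assumes the vma is
 * already pinned into the mappable GGTT and struct_mutex is held):
 *
 *	void __iomem *map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	writel(value, map + offset);
 *
 *	i915_vma_unpin_iomap(vma);
 */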

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	__i915_gem_object_release_unless_active(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated/remapped VMAs since the display does
	 * not need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED ||
	    vma->ggtt_view.type == I915_GGTT_VIEW_REMAPPED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned int cache_level;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	if (vma->obj) {
		ret = i915_gem_object_pin_pages(vma->obj);
		if (ret)
			return ret;

		cache_level = vma->obj->cache_level;
	} else {
		cache_level = 0;
	}

	GEM_BUG_ON(vma->pages);

	ret = vma->ops->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&dev_priv->mm.obj_lock);
		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
		obj->bind_count++;
		spin_unlock(&dev_priv->mm.obj_lock);

		assert_bind_count(obj);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

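/*
 * Release the vma's drm_mm node, return it to the vm's unbound list and
 * drop the page pin taken for it in i915_vma_insert(); the object itself
 * only moves to the global unbound list once its last vma is removed.
 */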
static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	mutex_lock(&vma->vm->mutex);
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);

	/*
	 * Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&i915->mm.obj_lock);
		if (--obj->bind_count == 0)
			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
		spin_unlock(&i915->mm.obj_lock);

		/*
		 * And finally now the object is completely decoupled from this
		 * vma, we can drop its hold on the backing storage and allow
		 * it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}
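
/*
 * __i915_vma_do_pin() is typically reached as the slow path of
 * i915_vma_pin() (see i915_vma.h), taken when the vma is not already bound
 * with the requested flags; every successful pin must eventually be paired
 * with an i915_vma_unpin().
 */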

void i915_vma_close(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (vma->flags & I915_VMA_CLOSED) {
		vma->flags &= ~I915_VMA_CLOSED;
		list_del(&vma->closed_link);
	}
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));

	mutex_lock(&vma->vm->mutex);
	list_del(&vma->vm_link);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	i915_active_fini(&vma->active);

	i915_vma_free(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_closed(vma))
		list_del(&vma->closed_link);

	WARN_ON(i915_vma_unbind(vma));
	__i915_vma_destroy(vma);
}

void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		GEM_BUG_ON(!i915_vma_is_closed(vma));
		i915_vma_destroy(vma);
	}

	GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static void export_fence(struct i915_vma *vma,
			 struct i915_request *rq,
			 unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv, 1) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
	reservation_object_unlock(resv);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!vma->active.count)
		obj->active_count++;

	if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
		if (!vma->active.count)
			obj->active_count--;
		return -ENOMEM;
	}

	GEM_BUG_ON(!i915_vma_is_active(vma));
	GEM_BUG_ON(!obj->active_count);

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			__i915_active_request_set(&obj->frontbuffer_write, rq);

		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		__i915_active_request_set(&vma->last_fence, rq);

	export_fence(vma, rq, flags);
	return 0;
}
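
/*
 * Sketch of how a caller building a request might use the helper above
 * (illustrative only; error handling trimmed):
 *
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	if (err)
 *		return err;
 */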

int i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	if (i915_vma_is_active(vma)) {
		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		ret = i915_active_wait(&vma->active);
		if (ret)
			goto unpin;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->vm->i915->drm.struct_mutex);
unpin:
		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}