/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	spin_lock(&rq->i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
	spin_unlock(&rq->i915->mm.obj_lock);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
	list_add(&vma->vm_link, &vm->unbound_list);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = vma_lookup(obj, vm, view);
	if (!vma)
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
	return vma;
}
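
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller pairs i915_vma_instance() with i915_vma_pin() while holding
 * struct_mutex. The names "obj" and "i915" below are assumed to be a GEM
 * object and the device private; the NULL view requests the normal GGTT
 * mapping.
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 * Repeated lookups with the same (obj, vm, view) return the same VMA,
 * which is exactly what the GEM_BUG_ON self-checks above assert.
 */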

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and the PTE entries are set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (GEM_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}
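
/*
 * Worked example of the flag filtering above (illustrative only): suppose
 * the VMA already carries I915_VMA_GLOBAL_BIND and a caller passes
 * flags = PIN_GLOBAL | PIN_USER without PIN_UPDATE. Then:
 *
 *	bind_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 *	vma_flags  = I915_VMA_GLOBAL_BIND;
 *	bind_flags &= ~vma_flags;	(leaving only I915_VMA_LOCAL_BIND)
 *
 * so only the missing per-process binding is written; the existing global
 * binding is left untouched. Passing PIN_UPDATE instead keeps both bits
 * set and rewrites the PTEs for every requested binding.
 */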

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->obj->base.dev->struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
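
/*
 * Worked example for the placement checks above (illustrative only): with
 * flags = PIN_OFFSET_BIAS | 0x100000, a node currently placed at 0x80000
 * is reported as misplaced because it starts below the requested bias,
 * while with flags = PIN_OFFSET_FIXED | 0x100000 only a node starting at
 * exactly 0x100000 is acceptable. Callers (e.g. the GGTT pinning path)
 * use this to decide whether an already bound VMA has to be unbound and
 * rebound to satisfy new placement constraints.
 */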

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}
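
/*
 * Worked example for the colouring rule above (illustrative only): if the
 * node immediately preceding or following this VMA has a different colour
 * (cache level) and sits flush against it with no hole in between, the
 * placement is rejected; the same pair of neighbours separated by an
 * unallocated gap is fine. On machines without this constraint,
 * color_adjust is NULL and every placement is accepted.
 */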

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, we evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	GEM_BUG_ON(vma->pages);

	ret = vma->vm->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, obj->cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, obj->cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	spin_lock(&dev_priv->mm.obj_lock);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	spin_unlock(&dev_priv->mm.obj_lock);

	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_clear:
	vma->vm->clear_pages(vma);
err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}
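
/*
 * Worked example for the flag handling above (illustrative only): pinning
 * with flags = PIN_MAPPABLE restricts the search window to the CPU-visible
 * aperture (end = ggtt.mappable_end) and grows size/alignment to the fence
 * requirements, while flags = PIN_OFFSET_FIXED | 0x200000 skips the search
 * entirely and reserves the node at exactly 0x200000 via
 * i915_gem_gtt_reserve(), failing with -EINVAL if that offset is not
 * suitably aligned or would overflow the address space.
 */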

static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->vm->clear_pages(vma);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	spin_lock(&i915->mm.obj_lock);
	if (--obj->bind_count == 0)
		list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
	spin_unlock(&i915->mm.obj_lock);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

static void i915_vma_destroy(struct i915_vma *vma)
{
	int i;

	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		if (!ret) {
			ret = i915_gem_active_retire(&vma->last_fence,
						     &vma->vm->i915->drm.struct_mutex);
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif