/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

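/*
 * Editor's note: i915_vma_retire() is the retirement callback installed on
 * each per-engine last_read tracker. Once the last engine reading through
 * this VMA has retired its request, the VMA moves to the inactive list and,
 * if it was closed while still busy, it is unbound. When the object's final
 * active VMA retires, the object drops its active reference as well.
 */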
static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

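/*
 * Editor's note: vma_create() allocates and initialises a new VMA for the
 * object in the given address space. For non-normal GGTT views (partial,
 * rotated) the size is derived from the view rather than the object, and
 * for GGTT VMAs the fence size and alignment are precomputed. The new VMA
 * is linked into the object's VMA list and rbtree and starts life on the
 * address space's unbound list.
 */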
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
	list_add(&vma->vm_link, &vm->unbound_list);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}

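/*
 * Editor's note: vma_lookup() walks the object's VMA rbtree for an existing
 * VMA matching the (vm, view) pair, using i915_vma_compare() as the search
 * key, and returns it or NULL if none has been created yet.
 */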
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = vma_lookup(obj, vm, view);
	if (!vma)
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
	return vma;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (GEM_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

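/*
 * Editor's note: i915_vma_pin_iomap() maps the VMA's range of the mappable
 * aperture with write-combining, caches the pointer in vma->iomap for reuse,
 * and pins the VMA so it cannot be unbound while the mapping is in use.
 * A sketch of typical usage, assuming the i915_vma_unpin_iomap() helper
 * declared alongside this function in i915_vma.h:
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	// ...write through ptr...
 *	i915_vma_unpin_iomap(vma);
 */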
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}

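/*
 * Editor's note: i915_vma_unpin_and_release() fetches and clears the VMA
 * pointer referenced by p_vma, drops its pin, closes the VMA and then hands
 * the object reference to __i915_gem_object_release_unless_active().
 */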
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

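/*
 * Editor's note: i915_vma_misplaced() reports whether the VMA's current
 * binding violates the requested size, alignment or PIN_* placement
 * constraints and would therefore need to be rebound elsewhere.
 */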
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

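/*
 * Editor's note: __i915_vma_set_map_and_fenceable() recomputes the
 * I915_VMA_CAN_FENCE flag for a GGTT VMA: the binding must be large enough
 * and suitably aligned for a fence register, and must lie entirely within
 * the mappable aperture.
 */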
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

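/*
 * Editor's note: i915_gem_valid_gtt_space() enforces the cache-colouring
 * constraint on platforms where drm_mm.color_adjust is set: a neighbouring
 * node of a different cache level must be separated from this one by a hole.
 */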
static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	GEM_BUG_ON(vma->pages);

	ret = vma->vm->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, obj->cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, obj->cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_clear:
	vma->vm->clear_pages(vma);
err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

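/*
 * Editor's note: i915_vma_remove() is the inverse of i915_vma_insert(): it
 * releases the drm_mm node, returns the VMA to the address space's unbound
 * list, and drops the VMA's hold on the object's backing pages, moving the
 * object to the global unbound list once no bound VMAs remain.
 */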
static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->vm->clear_pages(vma);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

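/*
 * Editor's note: __i915_vma_do_pin() is the slow path of VMA pinning. It is
 * entered with the pin count already raised by the caller (hence the error
 * paths drop it via __i915_vma_unpin()); it inserts the VMA into its address
 * space if not yet bound, binds the requested PTEs and refreshes the
 * map-and-fenceable state, unwinding the insertion on failure.
 */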
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err_remove;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

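/*
 * Editor's note: i915_vma_destroy() frees a closed, idle and unbound VMA,
 * dropping the ppgtt reference taken at creation for non-GGTT VMAs.
 */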
static void i915_vma_destroy(struct i915_vma *vma)
{
	int i;

	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

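/*
 * Editor's note: i915_vma_close() marks the VMA as closed and unlinks it
 * from its object; if the VMA is already idle and unpinned it is unbound
 * (and so destroyed) immediately, otherwise that is deferred until it
 * retires (see i915_vma_retire() above).
 */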
void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

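/*
 * Editor's note: __i915_vma_iounmap() tears down any cached aperture
 * mapping (vma->iomap) before the VMA is unbound from the GGTT.
 */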
static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

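/*
 * Editor's note: i915_vma_unbind() waits for any outstanding GPU activity
 * on the VMA, then, provided the VMA is not pinned, releases its fence and
 * aperture mapping, tears down the PTEs and removes the binding. A closed
 * VMA is destroyed once it has been unbound.
 */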
int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		if (!ret) {
			ret = i915_gem_active_retire(&vma->last_fence,
						     &vma->vm->i915->drm.struct_mutex);
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif