/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->exec_list);
	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
	list_add(&vma->vm_link, &vm->unbound_list);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = vma_lookup(obj, vm, view);
	if (!vma)
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
	return vma;
}
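
/*
 * Illustrative sketch (not part of the original file, kept out of the build
 * with #if 0): a typical caller pattern for i915_vma_instance() under
 * struct_mutex, assuming the i915_vma_pin()/i915_vma_unpin() helpers
 * declared in i915_vma.h. The function name below is hypothetical.
 */
#if 0
static int example_pin_to_ggtt(struct drm_i915_gem_object *obj,
			       struct i915_address_space *ggtt_vm)
{
	struct i915_vma *vma;
	int err;

	/* Look up (or create) the singleton VMA for the default GGTT view */
	vma = i915_vma_instance(obj, ggtt_vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* Reserve space in the GTT and bind the global PTEs */
	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		return err;

	/* ... use vma->node.start as the GGTT offset ... */

	i915_vma_unpin(vma);
	return 0;
}
#endif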

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (vma_flags == 0 && vma->vm->allocate_va_range) {
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		if (ret)
			return ret;
	}

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}
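
/*
 * Illustrative sketch (not part of the original file, kept out of the build
 * with #if 0): the expected pin_iomap/unpin_iomap pairing, assuming the
 * caller holds struct_mutex and a runtime-pm wakeref, and that the vma is
 * already bound map-and-fenceable in the GGTT. i915_vma_unpin_iomap() is
 * assumed to be the helper declared in i915_vma.h; the function name below
 * is hypothetical.
 */
#if 0
static void example_write_through_gtt(struct i915_vma *vma, u32 value)
{
	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);
	if (IS_ERR(ptr))
		return;

	/* Write-combined store through the GGTT aperture */
	writel(value, ptr);

	/* Drops the pin taken by i915_vma_pin_iomap() */
	i915_vma_unpin_iomap(vma);
}
#endif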

void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, we evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -E2BIG;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_unpin;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, obj->cache_level,
					   flags);
		if (ret)
			goto err_unpin;
	} else {
		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, obj->cache_level,
					  start, end, flags);
		if (ret)
			goto err_unpin;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err:
	__i915_vma_unpin(vma);
	return ret;
}
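
/*
 * Illustrative sketch (not part of the original file, kept out of the build
 * with #if 0): requesting a fixed placement, which exercises the
 * PIN_OFFSET_FIXED branch of i915_vma_insert() above through the
 * i915_vma_pin() wrapper assumed to be declared in i915_vma.h. The chosen
 * offset and the function name are arbitrary examples.
 */
#if 0
static int example_pin_fixed(struct i915_vma *vma)
{
	/* Place the node at exactly 64KiB, bound into the global GTT */
	return i915_vma_pin(vma, 0, 0,
			    PIN_GLOBAL | PIN_OFFSET_FIXED | (64 << 10));
}
#endif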

void i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}