/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size >= obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
	list_add(&vma->vm_link, &vm->unbound_list);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = vma_lookup(obj, vm, view);
	if (!vma)
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
	return vma;
}

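/*
 * Illustrative sketch only (not compiled into the driver): the typical
 * lookup-or-create flow as seen from a caller. The example_* name is
 * hypothetical; i915_vma_pin()/i915_vma_unpin() are the wrappers
 * declared in i915_vma.h.
 */
#if 0
static int example_use_vma(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm)
{
	struct i915_vma *vma;
	int err;

	/* Returns the singleton vma for (obj, vm, default view),
	 * creating it on first use.
	 */
	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	/* ... use vma->node.start while the pin is held ... */

	i915_vma_unpin(vma);
	return 0;
}
#endif
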
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_WARN_ON(range_overflows(vma->node.start,
					vma->node.size,
					vma->vm->total)))
		return -ENODEV;

	if (GEM_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

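/*
 * Illustrative sketch only: how a (hypothetical) caller would ask for
 * the PTEs of an already-inserted vma to be rewritten. PIN_UPDATE keeps
 * whichever global/local bindings already exist and rebinds them with
 * the new cache level.
 */
#if 0
static int example_rebind(struct i915_vma *vma)
{
	/* Rewrite the existing PTEs, e.g. after a cache-level change. */
	return i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
}
#endif
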
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}

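/*
 * Illustrative sketch only: the expected pairing for the mapping above.
 * The iomap is valid only while the vma is pinned; i915_vma_unpin_iomap()
 * (declared in i915_vma.h) drops the pin taken by i915_vma_pin_iomap().
 * The example_* name is hypothetical.
 */
#if 0
static void example_poke_through_ggtt(struct i915_vma *vma, u32 value)
{
	void __iomem *vaddr;

	vaddr = i915_vma_pin_iomap(vma);
	if (IS_ERR(vaddr))
		return;

	writel(value, vaddr);
	i915_vma_unpin_iomap(vma);
}
#endif
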
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = (vma->node.start + vma->fence_size <=
		    i915_vm_to_ggtt(vma->vm)->mappable_end);

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

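/*
 * Illustrative sketch only: the shape of a color_adjust callback such a
 * vm might install (the real one lives in i915_gem_gtt.c). It shrinks
 * the candidate range so that a guard page separates nodes of differing
 * cache colour; the example_* name is hypothetical.
 */
#if 0
static void example_color_adjust(const struct drm_mm_node *node,
				 unsigned long color,
				 u64 *start, u64 *end)
{
	if (node->allocated && node->color != color)
		*start += I915_GTT_PAGE_SIZE;

	node = list_next_entry(node, node_list);
	if (node->allocated && node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}
#endif
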
/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_unpin;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, obj->cache_level,
					   flags);
		if (ret)
			goto err_unpin;
	} else {
		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, obj->cache_level,
					  start, end, flags);
		if (ret)
			goto err_unpin;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));

	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	obj->bind_count++;
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);

	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}

	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
	if (ret)
		goto err_remove;

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		GEM_BUG_ON(vma->pages);
		i915_vma_remove(vma);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

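/*
 * Illustrative sketch only: callers normally reach the function above
 * through the i915_vma_pin() wrapper in i915_vma.h, which fast-paths
 * the already-bound, correctly-placed case. The example_* name is
 * hypothetical.
 */
#if 0
static int example_pin_fixed(struct i915_vma *vma, u64 offset)
{
	/* Pin into the GGTT at an exact, caller-chosen offset. */
	return i915_vma_pin(vma, 0, 0,
			    PIN_GLOBAL | PIN_OFFSET_FIXED |
			    (offset & PIN_OFFSET_MASK));
}
#endif
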
static void i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(!i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->fence);

	list_del(&vma->vm_link);
	if (!i915_vma_is_ggtt(vma))
		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));

	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}

void i915_vma_unlink_ctx(struct i915_vma *vma)
{
	struct i915_gem_context *ctx = vma->ctx;

	if (ctx->vma_lut.ht_size & I915_CTX_RESIZE_IN_PROGRESS) {
		cancel_work_sync(&ctx->vma_lut.resize);
		ctx->vma_lut.ht_size &= ~I915_CTX_RESIZE_IN_PROGRESS;
	}

	__hlist_del(&vma->ctx_node);
	ctx->vma_lut.ht_count--;

	if (i915_vma_is_ggtt(vma))
		vma->obj->vma_hashed = NULL;
	vma->ctx = NULL;
}

void i915_vma_close(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	if (vma->ctx)
		i915_vma_unlink_ctx(vma);

	list_del(&vma->obj_link);
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	i915_vma_remove(vma);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}

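/*
 * Illustrative sketch only: tearing down a vma by hand. Unbinding may
 * have to wait for outstanding GPU activity, so it can fail and the
 * (hypothetical) caller must be prepared to propagate the error.
 */
#if 0
static int example_teardown(struct i915_vma *vma)
{
	int err;

	err = i915_vma_unbind(vma);
	if (err)
		return err;

	/* Closing an idle, unbound vma frees it immediately. */
	i915_vma_close(vma);
	return 0;
}
#endif
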
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif