/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	i915_vma_get(active_to_vma(ref));
	return 0;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(vm->i915, &vma->active,
			 __i915_vma_active, __i915_vma_retire);
	INIT_ACTIVE_REQUEST(&vma->last_fence);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	mutex_lock(&vm->mutex);
	list_add(&vma->vm_link, &vm->unbound_list);
	mutex_unlock(&vm->mutex);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

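/*
 * A minimal usage sketch (illustrative only; using the GGTT address space
 * here is an assumption for the example): look up, or create on first use,
 * the default GGTT vma for an object.
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */
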
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

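/*
 * A hedged sketch of the caller side (illustrative only): binding is
 * normally reached via i915_vma_pin(), whose PIN_USER/PIN_GLOBAL request
 * flags are translated above into I915_VMA_LOCAL_BIND/I915_VMA_GLOBAL_BIND.
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *	if (err)
 *		return err;
 *	... submit work using the ppGTT mapping ...
 *	i915_vma_unpin(vma);
 */
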
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

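/*
 * A minimal sketch of the iomap contract (illustrative only): the mapping
 * only works for map-and-fenceable GGTT vmas, and each successful
 * i915_vma_pin_iomap() must be balanced by i915_vma_unpin_iomap().
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma);
 */
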
void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	intel_gt_flush_ggtt_writes(vma->vm->gt);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

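/*
 * Sketch of the typical caller pattern (illustrative only): when a pin
 * request carries stricter placement constraints, a misplaced vma is
 * unbound first so that it can be re-inserted at a valid location.
 *
 *	if (i915_vma_misplaced(vma, size, alignment, flags)) {
 *		ret = i915_vma_unbind(vma);
 *		if (ret)
 *			return ret;
 *	}
 *	ret = i915_vma_pin(vma, size, alignment, flags);
 */
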
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

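/*
 * Illustrative picture of the check above (not code): with color_adjust
 * set, nodes of differing cache colors may not directly abut; a guard hole
 * must separate them.
 *
 *	[ LLC node ][ hole ][ snooped node ]	-> valid
 *	[ LLC node ][ snooped node ]		-> invalid
 */
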
static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned int cache_level;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	if (vma->obj) {
		ret = i915_gem_object_pin_pages(vma->obj);
		if (ret)
			return ret;

		cache_level = vma->obj->cache_level;
	} else {
		cache_level = 0;
	}

	GEM_BUG_ON(vma->pages);

	ret = vma->ops->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);
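			/*
			 * Worked example (values assumed for illustration):
			 * for a 64K-page object, page_sizes.sg == SZ_64K, so
			 * rounddown_pow_of_two(SZ_64K | SZ_2M) == SZ_2M and
			 * the node ends up 2M-aligned.
			 */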

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		atomic_inc(&vma->obj->bind_count);
		assert_bind_count(vma->obj);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

static void
i915_vma_remove(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	mutex_lock(&vma->vm->mutex);
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);

	/*
	 * Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		atomic_dec(&obj->bind_count);

		/*
		 * And finally now the object is completely decoupled from this
		 * vma, we can drop its hold on the backing storage and allow
		 * it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

void i915_vma_close(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	spin_lock_irqsave(&i915->gt.closed_lock, flags);
	list_add(&vma->closed_link, &i915->gt.closed_vma);
	spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	if (!i915_vma_is_closed(vma))
		return;

	spin_lock_irq(&i915->gt.closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&i915->gt.closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	__i915_vma_remove_closed(vma);
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));

	mutex_lock(&vma->vm->mutex);
	list_del(&vma->vm_link);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	i915_active_fini(&vma->active);

	i915_vma_free(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_pinned(vma));

	__i915_vma_remove_closed(vma);

	WARN_ON(i915_vma_unbind(vma));
	GEM_BUG_ON(i915_vma_is_active(vma));

	__i915_vma_destroy(vma);
}

void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	spin_lock_irq(&i915->gt.closed_lock);
	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		list_del_init(&vma->closed_link);
		spin_unlock_irq(&i915->gt.closed_lock);

		i915_vma_destroy(vma);

		spin_lock_irq(&i915->gt.closed_lock);
	}
	spin_unlock_irq(&i915->gt.closed_lock);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_vma_held(vma);
	assert_object_held(obj);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	err = i915_active_ref(&vma->active, rq->fence.context, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			__i915_active_request_set(&obj->frontbuffer_write, rq);

		reservation_object_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		err = reservation_object_reserve_shared(vma->resv, 1);
		if (unlikely(err))
			return err;

		reservation_object_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		__i915_active_request_set(&vma->last_fence, rq);

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

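/*
 * A minimal sketch (illustrative only) of the expected call pattern when
 * constructing a request: the vma is pinned and its reservation object is
 * locked around the call, here via the i915_vma_lock()/unlock() helpers.
 *
 *	i915_vma_lock(vma);
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	i915_vma_unlock(vma);
 *	if (err)
 *		return err;
 */
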
int i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	if (i915_vma_is_active(vma)) {
		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		ret = i915_active_wait(&vma->active);
		if (ret)
			goto unpin;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->vm->i915->drm.struct_mutex);
unpin:
		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}