/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}

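/*
 * Debug-only helper: when both CONFIG_DRM_I915_ERRLOG_GEM and
 * CONFIG_DRM_DEBUG_MM are enabled, drm_mm records the call stack of each
 * node allocation in the stack depot, and vma_print_allocator() can then
 * report who inserted a vma that is unexpectedly still in use. Otherwise
 * the helper compiles down to an empty stub.
 */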
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

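/*
 * Activity tracking for vma->active: the first request to use the vma takes
 * an extra reference on it in __i915_vma_active(), and that reference is
 * dropped again in __i915_vma_retire() once the last request using the vma
 * has been retired, keeping the vma alive while it is busy on the GPU.
 */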
static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(vm->i915, &vma->active,
			 __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	mutex_lock(&vm->mutex);
	list_add(&vma->vm_link, &vm->unbound_list);
	mutex_unlock(&vm->mutex);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
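/*
 * Typical usage (an illustrative sketch, not code taken from this file):
 * look up the vma for an object in a given address space and check for an
 * error before pinning and using it, e.g.
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */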

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries are set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}
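/*
 * On success i915_vma_pin_iomap() returns with the vma pinned and a fence
 * reserved; callers must balance it with i915_vma_unpin_iomap() below, which
 * flushes any outstanding GGTT writes before releasing the fence and pin.
 */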

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	intel_gt_flush_ggtt_writes(vma->vm->gt);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

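/*
 * Recompute whether this GGTT vma can be used through the mappable aperture
 * with a fence register: it is marked I915_VMA_CAN_FENCE only if its node is
 * large enough and suitably aligned for a fence (fenceable) and ends below
 * the mappable boundary of the GGTT (mappable).
 */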
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj) {
		ret = i915_gem_object_pin_pages(vma->obj);
		if (ret)
			return ret;

		if (i915_vm_has_cache_coloring(vma->vm))
			color = vma->obj->cache_level;
	}

	GEM_BUG_ON(vma->pages);

	ret = vma->ops->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that the GGTT is limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		atomic_inc(&vma->obj->bind_count);
		assert_bind_count(vma->obj);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

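/*
 * Undo i915_vma_insert(): release the backing pages, return the drm_mm node
 * to the address space and, for an object-backed vma, drop the bind count
 * and the pin on the object's pages so the shrinker may reap it again.
 */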
static void
i915_vma_remove(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	mutex_lock(&vma->vm->mutex);
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);

	/*
	 * Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		atomic_dec(&obj->bind_count);

		/*
		 * And finally now the object is completely decoupled from this
		 * vma, we can drop its hold on the backing storage and allow
		 * it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}

int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

void i915_vma_close(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	spin_lock_irqsave(&i915->gt.closed_lock, flags);
	list_add(&vma->closed_link, &i915->gt.closed_vma);
	spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	if (!i915_vma_is_closed(vma))
		return;

	spin_lock_irq(&i915->gt.closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&i915->gt.closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	__i915_vma_remove_closed(vma);
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	mutex_lock(&vma->vm->mutex);
	list_del(&vma->vm_link);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	i915_active_fini(&vma->active);

	i915_vma_free(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_pinned(vma));

	__i915_vma_remove_closed(vma);

	WARN_ON(i915_vma_unbind(vma));
	GEM_BUG_ON(i915_vma_is_active(vma));

	__i915_vma_destroy(vma);
}

void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	spin_lock_irq(&i915->gt.closed_lock);
	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		list_del_init(&vma->closed_link);
		spin_unlock_irq(&i915->gt.closed_lock);

		i915_vma_destroy(vma);

		spin_lock_irq(&i915->gt.closed_lock);
	}
	spin_unlock_irq(&i915->gt.closed_lock);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

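/*
 * i915_vma_move_to_active() ties the vma and its object to the lifetime of
 * the request: an active reference is taken via vma->active, the request's
 * fence is added to the object's reservation (exclusive for writes, shared
 * for reads), and writes additionally invalidate the frontbuffer.
 */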
int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_vma_held(vma);
	assert_object_held(obj);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	err = i915_active_ref(&vma->active, rq->timeline, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
			i915_active_ref(&obj->frontbuffer->write,
					rq->timeline,
					rq);

		dma_resv_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		err = dma_resv_reserve_shared(vma->resv, 1);
		if (unlikely(err))
			return err;

		dma_resv_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

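/*
 * Unbinding proceeds in stages: wait for the vma to become idle (bailing out
 * with -EBUSY if it is still pinned); for a map-and-fenceable GGTT vma,
 * flush GGTT writes, release the fence register and revoke any userspace
 * mmaps; and finally tear down the PTEs and remove the node from the
 * address space.
 */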
int i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	if (i915_vma_is_active(vma)) {
		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);
		ret = i915_active_wait(&vma->active);
		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		mutex_lock(&vma->vm->mutex);
		ret = i915_vma_revoke_fence(vma);
		mutex_unlock(&vma->vm->mutex);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		mutex_lock(&vma->vm->mutex);
		i915_vma_revoke_mmap(vma);
		mutex_unlock(&vma->vm->mutex);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

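/*
 * The vma slab is hooked into the i915_globals machinery below: the shrink
 * callback lets the driver trim the kmem_cache when it is not needed, and
 * the exit callback destroys it when the global state is torn down.
 */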
static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}